mirror of https://github.com/koush/scrypted.git
synced 2026-02-05 23:22:13 +00:00

Compare commits

95 Commits:
43e5822c93 bc579514e7 825100f94e 803bfc1560 b2013a54ed f252407935 516f2a2a7b
c1677ce691 5028fb812d 2db4e2579f b339ca6cd2 f100999cb1 2863756bd6 cc408850a0
ed1ceeda51 df09d8e92a 298ac960d1 62d4d55aae a2121c0dc5 9b5ea27c0b 0b0e90fc04
d8aff609bf d8283c261a e3aca964be a96025c45f 6afd4b4579 f97669949d 0a0a31574f
90fb751a22 b8d06fada5 2cecb1686f db03775530 cccbc33f1a 5f23873366 e43accae67
b3a0cda6f9 58c3348282 a9e6d76e99 3b58936387 3a14ab81c8 291178a7b5 b65faf1a79
9d8a1353c0 b29d793178 d8e406d415 4529872fd6 fa86c31340 94ded75d40 887b61cd7a
48e3d30987 02dba3cd71 195769034d 39c08aa378 fa8056d38e 145f116c68 15b6f336e4
8b46f0a466 a20cc5cd89 3d068929fd 928f9b7579 c1c5a42645 12643cdde2 0bff96a6e6
4e7e67de54 65c4a30004 309a1dc11f b7904b73b2 9e9ddbc5f3 ceda54f91b 1d4052b839
6a5d6e6617 f55cc6066f 527714e434 8a1633ffa3 56b2ab9c4f d330e2eb9d b55e7cacb3
c70375db06 2c23021d40 84a4ef4539 7f3db0549b de0e1784a3 5a8798638e 14da49728c
55423b2d09 596106247b 5472d90368 fcf58413fc 0d03b91753 2fd088e4d6 c6933198b2
210e684a22 53cc4b6ef3 d58d138a68 c0199a2b76
@@ -361,8 +361,7 @@ export interface RebroadcasterOptions {
     },
 }
 
-export async function handleRebroadcasterClient(duplex: Promise<Duplex> | Duplex, options?: RebroadcasterOptions) {
-    const socket = await duplex;
+export function handleRebroadcasterClient(socket: Duplex, options?: RebroadcasterOptions) {
     const firstWriteData = (data: StreamChunk) => {
         if (data.startStream) {
            socket.write(data.startStream)
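Note: the rebroadcaster entry point is now synchronous and takes an already-resolved Duplex; callers that previously passed a Promise<Duplex> must await the socket themselves before calling handleRebroadcasterClient.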
@@ -62,4 +62,4 @@ export async function bind(server: dgram.Socket, port: number) {
     }
 }
 
-export { listenZero, listenZeroSingleClient } from "@scrypted/server/src/listen-zero";
+export { listenZero, listenZeroSingleClient, ListenZeroSingleClientTimeoutError } from "@scrypted/server/src/listen-zero";
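Note: ListenZeroSingleClientTimeoutError is now re-exported alongside the helpers, presumably so consumers of this module can catch the single-client connect timeout without importing from @scrypted/server directly.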
@@ -250,7 +250,8 @@ export class BrowserSignalingSession implements RTCSignalingSession {
 function logSendCandidate(console: Console, type: string, session: RTCSignalingSession): RTCSignalingSendIceCandidate {
     return async (candidate) => {
         try {
-            console.log(`${type} trickled candidate:`, candidate.sdpMLineIndex, candidate.candidate);
+            if (localStorage.getItem('debugLog') === 'true')
+                console.log(`${type} trickled candidate:`, candidate.sdpMLineIndex, candidate.candidate);
             await session.addIceCandidate(candidate);
         }
         catch (e) {
@@ -308,11 +309,13 @@ export async function connectRTCSignalingClients(
 
     const offer = await offerClient.createLocalDescription('offer', offerSetup as RTCAVSignalingSetup,
         disableTrickle ? undefined : answerQueue.queueSendCandidate);
-    console.log('offer sdp', offer.sdp);
+    if (localStorage.getItem('debugLog') === 'true')
+        console.log('offer sdp', offer.sdp);
     await answerClient.setRemoteDescription(offer, answerSetup as RTCAVSignalingSetup);
     const answer = await answerClient.createLocalDescription('answer', answerSetup as RTCAVSignalingSetup,
         disableTrickle ? undefined : offerQueue.queueSendCandidate);
-    console.log('answer sdp', answer.sdp);
+    if (localStorage.getItem('debugLog') === 'true')
+        console.log('answer sdp', answer.sdp);
     await offerClient.setRemoteDescription(answer, offerSetup as RTCAVSignalingSetup);
     offerQueue.flush();
     answerQueue.flush();
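Note: the SDP and trickled-candidate logs are now opt-in; setting the browser localStorage key debugLog to the string 'true' restores them.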
@@ -129,6 +129,16 @@ export function getNaluTypes(streamChunk: StreamChunk) {
     return getNaluTypesInNalu(streamChunk.chunks[streamChunk.chunks.length - 1].subarray(12))
 }
 
+export function getNaluFragmentInformation(nalu: Buffer) {
+    const naluType = nalu[0] & 0x1f;
+    const fua = naluType === H264_NAL_TYPE_FU_A;
+    return {
+        fua,
+        fuaStart: fua && !!(nalu[1] & 0x80),
+        fuaEnd: fua && !!(nalu[1] & 0x40),
+    }
+}
+
 export function getNaluTypesInNalu(nalu: Buffer, fuaRequireStart = false, fuaRequireEnd = false) {
     const ret = new Set<number>();
     const naluType = nalu[0] & 0x1f;
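Note: a worked example of the FU-A header bits checked above (per RFC 6184, NALU type 28 in the low five bits of the first byte marks an H.264 fragmentation unit, and the S (0x80) and E (0x40) bits of the second byte mark the first and last fragments). The bit math is illustrated in Python; H264_NAL_TYPE_FU_A is assumed to be the constant 28 from the RTP payload spec.

# FU indicator byte: low five bits carry the NALU type (28 = FU-A).
# FU header byte: S bit (0x80) = first fragment, E bit (0x40) = last fragment.
H264_NAL_TYPE_FU_A = 28
nalu = bytes([0x7C, 0x85])  # 0x7C & 0x1F == 28; 0x85 has the S bit set
assert nalu[0] & 0x1F == H264_NAL_TYPE_FU_A  # fua would be True
assert nalu[1] & 0x80                        # fuaStart would be True
assert not nalu[1] & 0x40                    # fuaEnd would be False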
@@ -63,7 +63,7 @@ RUN apt-get -y install \
 # which causes weird behavior in python which looks at the arch version
 # which still reports 64bit, even if running in 32bit docker.
 # this scenario is not supported and will be reported at runtime.
-RUN if [ "$(uname -m)" = "armv7l" ]; \
+RUN if [ "$(uname -m)" != "x86_64" ]; \
     then \
         apt-get -y install \
             python3-matplotlib \
@@ -95,7 +95,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=full
 
 ################################################################
 # End section generated from template/Dockerfile.full.footer
@@ -42,4 +42,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=lite
@@ -21,4 +21,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=thin
@@ -90,4 +90,4 @@ services:
         # Must match the port in the auto update url above.
         - 10444:8080
     # check for updates once an hour (interval is in seconds)
-    command: --interval 3600 --cleanup
+    command: --interval 3600 --cleanup --scope scrypted
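Note: watchtower's --scope flag restricts update checks to containers carrying the matching scope label, so this watchtower instance should only manage the scrypted containers rather than everything on the host.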
@@ -1,7 +1,7 @@
 [server]
 #host-name=
 use-ipv4=yes
-use-ipv6=no
+use-ipv6=yes
 enable-dbus=yes
 ratelimit-interval-usec=1000000
 ratelimit-burst=1000
@@ -14,4 +14,4 @@ rlimit-core=0
 rlimit-data=4194304
 rlimit-fsize=0
 rlimit-nofile=768
-rlimit-stack=4194304
+rlimit-stack=4194304
@@ -44,51 +44,25 @@ RUN_IGNORE brew install node@18
 RUN brew install libvips
 # dlib
 RUN brew install cmake
-# gstreamer plugins
-RUN_IGNORE brew install gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly
-# gst python bindings
-RUN_IGNORE brew install gst-python
 # python image library
 # todo: consider removing this
 RUN_IGNORE brew install pillow
 
-### HACK WORKAROUND
-### https://github.com/koush/scrypted/issues/544
-
-brew unpin gstreamer
-brew unpin gst-python
-brew unpin gst-plugins-ugly
-brew unpin gst-plugins-good
-brew unpin gst-plugins-base
-brew unpin gst-plugins-good
-brew unpin gst-plugins-bad
-brew unpin gst-plugins-ugly
-brew unpin gst-libav
-
-brew unlink gstreamer
-brew unlink gst-python
-brew unlink gst-plugins-ugly
-brew unlink gst-plugins-good
-brew unlink gst-plugins-base
-brew unlink gst-plugins-bad
-brew unlink gst-libav
-
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gstreamer.rb && brew install ./gstreamer.rb
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-python.rb && brew install ./gst-python.rb
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-ugly.rb && brew install ./gst-plugins-ugly.rb
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-good.rb && brew install ./gst-plugins-good.rb
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-base.rb && brew install ./gst-plugins-base.rb
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-bad.rb && brew install ./gst-plugins-bad.rb
-curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-libav.rb && brew install ./gst-libav.rb
-
-brew pin gstreamer
-brew pin gst-python
-brew pin gst-plugins-ugly
-brew pin gst-plugins-good
-brew pin gst-plugins-base
-brew pin gst-plugins-bad
-brew pin gst-libav
-brew unpin gst-python
-
-### END HACK WORKAROUND
-
+# gstreamer plugins
+RUN_IGNORE brew install gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-libav
+# gst python bindings
+RUN_IGNORE brew install gst-python
 
 ARCH=$(arch)
 if [ "$ARCH" = "arm64" ]
 then
@@ -42,7 +42,7 @@ fi
 WATCHTOWER_HTTP_API_TOKEN=$(echo $RANDOM | md5sum)
 DOCKER_COMPOSE_YML=$SCRYPTED_HOME/docker-compose.yml
 echo "Created $DOCKER_COMPOSE_YML"
-curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum)"/g > $DOCKER_COMPOSE_YML
+curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum | head -c 32)"/g > $DOCKER_COMPOSE_YML
 
 echo "Setting permissions on $SCRYPTED_HOME"
 chown -R $SERVICE_USER $SCRYPTED_HOME
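Note: md5sum prints the digest followed by a trailing field marker (`  -` when reading stdin); piping through head -c 32 keeps only the 32 hex digits, so the marker is no longer substituted into the compose file.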
@@ -10,7 +10,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=full
 
 ################################################################
 # End section generated from template/Dockerfile.full.footer
@@ -60,7 +60,7 @@ RUN apt-get -y install \
 # which causes weird behavior in python which looks at the arch version
 # which still reports 64bit, even if running in 32bit docker.
 # this scenario is not supported and will be reported at runtime.
-RUN if [ "$(uname -m)" = "armv7l" ]; \
+RUN if [ "$(uname -m)" != "x86_64" ]; \
     then \
         apt-get -y install \
             python3-matplotlib \
packages/client/package-lock.json (generated, 8 lines changed)
@@ -9,7 +9,7 @@
     "version": "1.1.43",
     "license": "ISC",
     "dependencies": {
-        "@scrypted/types": "^0.2.76",
+        "@scrypted/types": "^0.2.78",
         "axios": "^0.25.0",
         "engine.io-client": "^6.4.0",
         "rimraf": "^3.0.2"
@@ -21,9 +21,9 @@
         }
     },
     "node_modules/@scrypted/types": {
-        "version": "0.2.76",
-        "resolved": "https://registry.npmjs.org/@scrypted/types/-/types-0.2.76.tgz",
-        "integrity": "sha512-/7n8ICkXj8TGba4cHvckLCgSNsOmOGQ8I+Jd8fX9sxkthgsZhF5At8PHhHdkCDS+yfSmfXHkcqluZZOfYPkpAg=="
+        "version": "0.2.78",
+        "resolved": "https://registry.npmjs.org/@scrypted/types/-/types-0.2.78.tgz",
+        "integrity": "sha512-SiIUh9ph96aZPjt/oO+W/mlJobrP02ADwFDI9jnvw8/UegUti2x/7JE8Pi3kGXOIkN+cX74Qg4xJEMIpdpO1zw=="
     },
     "node_modules/@socket.io/component-emitter": {
         "version": "3.1.0",
@@ -17,7 +17,7 @@
         "typescript": "^4.9.5"
     },
     "dependencies": {
-        "@scrypted/types": "^0.2.76",
+        "@scrypted/types": "^0.2.78",
         "axios": "^0.25.0",
         "engine.io-client": "^6.4.0",
         "rimraf": "^3.0.2"
@@ -1,6 +1,6 @@
 {
     "name": "@scrypted/alexa",
-    "version": "0.2.3",
+    "version": "0.2.4",
     "scripts": {
         "scrypted-setup-project": "scrypted-setup-project",
         "prescrypted-setup-project": "scrypted-package-json",
@@ -15,6 +15,11 @@ const includeToken = 4;
 
 export let DEBUG = false;
 
+function debug(...args: any[]) {
+    if (DEBUG)
+        console.debug(...args);
+}
+
 class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, MixinProvider, Settings {
     storageSettings = new StorageSettings(this, {
         tokenInfo: {
@@ -34,6 +39,14 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
             description: 'This is the endpoint Alexa will use to send events to. This is set after you login.',
             type: 'string',
             readonly: true
         },
+        debug: {
+            title: 'Debug Events',
+            description: 'Log all events to the console. This will be very noisy and should not be left enabled.',
+            type: 'boolean',
+            onPut(oldValue: boolean, newValue: boolean) {
+                DEBUG = newValue;
+            }
+        }
     });
 
@@ -44,6 +57,8 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
     constructor(nativeId?: string) {
         super(nativeId);
 
+        DEBUG = this.storageSettings.values.debug ?? false;
+
         alexaHandlers.set('Alexa.Authorization/AcceptGrant', this.onAlexaAuthorization);
         alexaHandlers.set('Alexa.Discovery/Discover', this.onDiscoverEndpoints);
 
@@ -141,12 +156,23 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
         if (!supportedType)
             return;
 
-        const report = await supportedType.sendEvent(eventSource, eventDetails, eventData);
+        let report = await supportedType.sendEvent(eventSource, eventDetails, eventData);
+
+        if (!report && eventDetails.eventInterface === ScryptedInterface.Online) {
+            report = {};
+        }
+
+        if (!report && eventDetails.eventInterface === ScryptedInterface.Battery) {
+            report = {};
+        }
 
         if (!report) {
             this.console.warn(`${eventDetails.eventInterface}.${eventDetails.property} not supported for device ${eventSource.type}`);
             return;
         }
 
+        debug("event", eventDetails.eventInterface, eventDetails.property, eventSource.type);
+
         let data = {
             "event": {
                 "header": {
@@ -234,7 +260,7 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
         const endpoint = await this.getAlexaEndpoint();
         const self = this;
 
-        this.console.assert(!DEBUG, `event:`, data);
+        debug("send event to alexa", data);
 
         return axios.post(`https://${endpoint}/v3/events`, data, {
             headers: {
@@ -570,6 +596,8 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
         const { authorization } = request.headers;
         if (!this.validAuths.has(authorization)) {
             try {
+                debug("making authorization request to Scrypted");
+
                 await axios.get('https://home.scrypted.app/_punch/getcookie', {
                     headers: {
                         'Authorization': authorization,
@@ -590,11 +618,11 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
         const { directive } = body;
         const { namespace, name } = directive.header;
 
-        this.console.assert(!DEBUG, `request: ${namespace}/${name}`);
-
         const mapName = `${namespace}/${name}`;
-        const handler = alexaHandlers.get(mapName);
 
+        debug("received directive from alexa", mapName, body);
+
+        const handler = alexaHandlers.get(mapName);
         if (handler)
             return handler.apply(this, [request, response, directive]);
 
@@ -641,7 +669,7 @@ class HttpResponseLoggingImpl implements AlexaHttpResponse {
         if (options.code !== 200)
            this.console.error(`response error ${options.code}:`, body);
         else
-            this.console.assert(!DEBUG, `response ${options.code}:`, body);
+            debug("response to alexa directive", options.code, body);
 
         if (typeof body === 'object')
             body = JSON.stringify(body);
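Note: the removed this.console.assert(!DEBUG, ...) calls relied on console.assert logging its message only when the asserted condition is falsy, i.e. only while DEBUG was true; the new debug() helper expresses the same gating directly.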
plugins/amcrest/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
     "name": "@scrypted/amcrest",
-    "version": "0.0.119",
+    "version": "0.0.120",
     "lockfileVersion": 2,
     "requires": true,
     "packages": {
         "": {
             "name": "@scrypted/amcrest",
-            "version": "0.0.119",
+            "version": "0.0.120",
             "license": "Apache",
             "dependencies": {
                 "@koush/axios-digest-auth": "^0.8.5",
@@ -1,6 +1,6 @@
 {
     "name": "@scrypted/amcrest",
-    "version": "0.0.119",
+    "version": "0.0.120",
     "description": "Amcrest Plugin for Scrypted",
     "author": "Scrypted",
     "license": "Apache",
@@ -616,7 +616,7 @@ class AmcrestProvider extends RtspProvider {
                 this.console.warn('Error probing two way audio', e);
             }
         }
-        settings.newCamera ||= 'Hikvision Camera';
+        settings.newCamera ||= 'Amcrest Camera';
 
         nativeId = await super.createDevice(settings, nativeId);
 
plugins/arlo/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
     "name": "@scrypted/arlo",
-    "version": "0.7.4",
+    "version": "0.7.12",
     "lockfileVersion": 2,
     "requires": true,
     "packages": {
         "": {
             "name": "@scrypted/arlo",
-            "version": "0.7.4",
+            "version": "0.7.12",
             "devDependencies": {
                 "@scrypted/sdk": "file:../../sdk"
             }
@@ -1,6 +1,6 @@
 {
     "name": "@scrypted/arlo",
-    "version": "0.7.4",
+    "version": "0.7.12",
     "description": "Arlo Plugin for Scrypted",
     "keywords": [
         "scrypted",
@@ -383,6 +383,33 @@ class Arlo(object):
             self.HandleEvents(basestation, resource, [('is', 'motionDetected')], callbackwrapper)
         )
 
+    def SubscribeToAudioEvents(self, basestation, camera, callback):
+        """
+        Use this method to subscribe to audio events. You must provide a callback function which will get called once per audio event.
+
+        The callback function should have the following signature:
+        def callback(self, event)
+
+        This is an example of handling a specific event, in reality, you'd probably want to write a callback for HandleEvents()
+        that has a big switch statement in it to handle all the various events Arlo produces.
+
+        Returns the Task object that contains the subscription loop.
+        """
+        resource = f"cameras/{camera.get('deviceId')}"
+
+        def callbackwrapper(self, event):
+            properties = event.get('properties', {})
+            stop = None
+            if 'audioDetected' in properties:
+                stop = callback(properties['audioDetected'])
+            if not stop:
+                return None
+            return stop
+
+        return asyncio.get_event_loop().create_task(
+            self.HandleEvents(basestation, resource, [('is', 'audioDetected')], callbackwrapper)
+        )
+
     def SubscribeToBatteryEvents(self, basestation, camera, callback):
         """
         Use this method to subscribe to battery events. You must provide a callback function which will get called once per battery event.
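Note: a hypothetical usage sketch for the new subscription, mirroring how the plugin's camera class wires it up later in this diff; arlo, basestation, and camera are assumed to come from an authenticated client session.

def on_audio(audio_detected):
    # Invoked once per audioDetected property change pushed on the event stream.
    print("audioDetected:", audio_detected)
    return False  # a falsy return keeps the subscription loop running

task = arlo.SubscribeToAudioEvents(basestation, camera, on_audio)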
@@ -711,7 +738,20 @@ class Arlo(object):
             callback,
         )
 
-    def SirenOn(self, basestation):
+    def SirenOn(self, basestation, camera=None):
+        if camera is not None:
+            resource = f"siren/{camera.get('deviceId')}"
+            return self.Notify(basestation, {
+                "action": "set",
+                "resource": resource,
+                "publishResponse": True,
+                "properties": {
+                    "sirenState": "on",
+                    "duration": 300,
+                    "volume": 8,
+                    "pattern": "alarm"
+                }
+            })
         return self.Notify(basestation, {
             "action": "set",
             "resource": "siren",
@@ -724,7 +764,20 @@ class Arlo(object):
             }
         })
 
-    def SirenOff(self, basestation):
+    def SirenOff(self, basestation, camera=None):
+        if camera is not None:
+            resource = f"siren/{camera.get('deviceId')}"
+            return self.Notify(basestation, {
+                "action": "set",
+                "resource": resource,
+                "publishResponse": True,
+                "properties": {
+                    "sirenState": "off",
+                    "duration": 300,
+                    "volume": 8,
+                    "pattern": "alarm"
+                }
+            })
         return self.Notify(basestation, {
             "action": "set",
             "resource": "siren",
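Note: sketch of the extended siren API; the new optional camera argument targets the siren built into a specific camera, while omitting it preserves the original basestation-wide behavior.

arlo.SirenOn(basestation)                 # basestation siren, as before
arlo.SirenOn(basestation, camera=camera)  # new: a specific camera's siren
arlo.SirenOff(basestation, camera=camera)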
@@ -737,6 +790,58 @@ class Arlo(object):
             }
         })
 
+    def SpotlightOn(self, basestation, camera):
+        resource = f"cameras/{camera.get('deviceId')}"
+        return self.Notify(basestation, {
+            "action": "set",
+            "resource": resource,
+            "publishResponse": True,
+            "properties": {
+                "spotlight": {
+                    "enabled": True,
+                },
+            },
+        })
+
+    def SpotlightOff(self, basestation, camera):
+        resource = f"cameras/{camera.get('deviceId')}"
+        return self.Notify(basestation, {
+            "action": "set",
+            "resource": resource,
+            "publishResponse": True,
+            "properties": {
+                "spotlight": {
+                    "enabled": False,
+                },
+            },
+        })
+
+    def FloodlightOn(self, basestation, camera):
+        resource = f"cameras/{camera.get('deviceId')}"
+        return self.Notify(basestation, {
+            "action": "set",
+            "resource": resource,
+            "publishResponse": True,
+            "properties": {
+                "floodlight": {
+                    "on": True,
+                },
+            },
+        })
+
+    def FloodlightOff(self, basestation, camera):
+        resource = f"cameras/{camera.get('deviceId')}"
+        return self.Notify(basestation, {
+            "action": "set",
+            "resource": resource,
+            "publishResponse": True,
+            "properties": {
+                "floodlight": {
+                    "on": False,
+                },
+            },
+        })
+
     def GetLibrary(self, device, from_date: datetime, to_date: datetime):
         """
         This call returns the following:
@@ -784,4 +889,13 @@ class Arlo(object):
                     'dateFrom': from_date,
                     'dateTo': to_date
                 }
             )
         )
 
+    def GetSmartFeatures(self, device) -> dict:
+        smart_features = self._getSmartFeaturesCached()
+        key = f"{device['owner']['ownerId']}_{device['deviceId']}"
+        return smart_features["features"].get(key, {})
+
+    @cached(cache=TTLCache(maxsize=1, ttl=60))
+    def _getSmartFeaturesCached(self) -> dict:
+        return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/subscription/smart/features')
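Note: a minimal self-contained demonstration of the cachetools pattern used by _getSmartFeaturesCached above: the decorated function body executes at most once per 60-second window, and callers inside the window receive the cached result.

from cachetools import cached, TTLCache

@cached(cache=TTLCache(maxsize=1, ttl=60))
def fetch_features():
    print("hitting the backend")  # printed at most once per minute
    return {"features": {}}

fetch_features()  # performs the request
fetch_features()  # served from the cache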
@@ -66,15 +66,4 @@ class ArloDeviceBase(ScryptedDeviceBase, ScryptedDeviceLoggerMixin, BackgroundTa
 
     def get_builtin_child_device_manifests(self) -> List[Device]:
         """Returns the list of child device manifests representing hardware features built into this device."""
         return []
-
-    @classmethod
-    def async_print_exception_guard(self, fn):
-        """Decorator to print an exception's stack trace before re-raising the exception."""
-        async def wrapped(*args, **kwargs):
-            try:
-                return await fn(*args, **kwargs)
-            except Exception:
-                traceback.print_exc()
-                raise
-        return wrapped
@@ -14,11 +14,20 @@ if TYPE_CHECKING:
 
 
 class ArloBasestation(ArloDeviceBase, DeviceProvider):
+    MODELS_WITH_SIRENS = [
+        "vmb4000",
+        "vmb4500"
+    ]
+
     vss: ArloSirenVirtualSecuritySystem = None
 
     def __init__(self, nativeId: str, arlo_basestation: dict, provider: ArloProvider) -> None:
         super().__init__(nativeId=nativeId, arlo_device=arlo_basestation, arlo_basestation=arlo_basestation, provider=provider)
 
+    @property
+    def has_siren(self) -> bool:
+        return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloBasestation.MODELS_WITH_SIRENS])
+
     def get_applicable_interfaces(self) -> List[str]:
         return [ScryptedInterface.DeviceProvider.value]
@@ -26,8 +35,11 @@ class ArloBasestation(ArloDeviceBase, DeviceProvider):
         return ScryptedDeviceType.DeviceProvider.value
 
     def get_builtin_child_device_manifests(self) -> List[Device]:
-        vss_id = f'{self.arlo_device["deviceId"]}.vss'
-        vss = self.get_or_create_vss(vss_id)
+        if not self.has_siren:
+            # this basestation has no builtin siren, so no manifests to return
+            return []
+
+        vss = self.get_or_create_vss()
         return [
             {
                 "info": {
@@ -36,7 +48,7 @@ class ArloBasestation(ArloDeviceBase, DeviceProvider):
                     "firmware": self.arlo_device.get("firmwareVersion"),
                     "serialNumber": self.arlo_device["deviceId"],
                 },
-                "nativeId": vss_id,
+                "nativeId": vss.nativeId,
                 "name": f'{self.arlo_device["deviceName"]} Siren Virtual Security System',
                 "interfaces": vss.get_applicable_interfaces(),
                 "type": vss.get_device_type(),
@@ -48,11 +60,12 @@ class ArloBasestation(ArloDeviceBase, DeviceProvider):
         if not nativeId.startswith(self.nativeId):
             # must be a camera, so get it from the provider
             return await self.provider.getDevice(nativeId)
-        return self.get_or_create_vss(nativeId)
-
-    def get_or_create_vss(self, nativeId: str) -> ArloSirenVirtualSecuritySystem:
         if not nativeId.endswith("vss"):
             return None
+        return self.get_or_create_vss()
+
+    def get_or_create_vss(self) -> ArloSirenVirtualSecuritySystem:
+        vss_id = f'{self.arlo_device["deviceId"]}.vss'
         if not self.vss:
-            self.vss = ArloSirenVirtualSecuritySystem(nativeId, self.arlo_device, self.arlo_basestation, self.provider)
+            self.vss = ArloSirenVirtualSecuritySystem(vss_id, self.arlo_device, self.arlo_basestation, self.provider, self)
         return self.vss
@@ -10,24 +10,76 @@ from typing import List, TYPE_CHECKING
 import scrypted_arlo_go
 
 import scrypted_sdk
-from scrypted_sdk.types import Setting, Settings, Camera, VideoCamera, VideoClips, VideoClip, VideoClipOptions, MotionSensor, Battery, MediaObject, ResponsePictureOptions, ResponseMediaStreamOptions, ScryptedMimeTypes, ScryptedInterface, ScryptedDeviceType
+from scrypted_sdk.types import Setting, Settings, Device, Camera, VideoCamera, VideoClips, VideoClip, VideoClipOptions, MotionSensor, AudioSensor, Battery, DeviceProvider, MediaObject, ResponsePictureOptions, ResponseMediaStreamOptions, ScryptedMimeTypes, ScryptedInterface, ScryptedDeviceType
 
 from .base import ArloDeviceBase
+from .spotlight import ArloSpotlight, ArloFloodlight
+from .vss import ArloSirenVirtualSecuritySystem
 from .child_process import HeartbeatChildProcess
-from .util import BackgroundTaskMixin
+from .util import BackgroundTaskMixin, async_print_exception_guard
 
 if TYPE_CHECKING:
     # https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
     from .provider import ArloProvider
 
 
-class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, MotionSensor, Battery):
+class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, DeviceProvider, VideoClips, MotionSensor, AudioSensor, Battery):
+    MODELS_WITH_SPOTLIGHTS = [
+        "vmc4040p",
+        "vmc2030",
+        "vmc2032",
+        "vmc4041p",
+        "vmc4050p",
+        "vmc5040",
+        "vml2030",
+        "vml4030",
+    ]
+
+    MODELS_WITH_FLOODLIGHTS = ["fb1001"]
+
+    MODELS_WITH_SIRENS = [
+        "vmc4040p",
+        "fb1001",
+        "vmc2030",
+        "vmc2020",
+        "vmc2032",
+        "vmc4041p",
+        "vmc4050p",
+        "vmc5040",
+        "vml2030",
+        "vmc4030",
+        "vml4030",
+        "vmc4030p",
+    ]
+
+    MODELS_WITH_AUDIO_SENSORS = [
+        "vmc4040p",
+        "fb1001",
+        "vmc4041p",
+        "vmc4050p",
+        "vmc5040",
+        "vmc3040",
+        "vmc3040s",
+        "vmc4030",
+        "vml4030",
+        "vmc4030p",
+    ]
+
+    MODELS_WITHOUT_BATTERY = [
+        "avd1001",
+        "vmc3040",
+        "vmc3040s",
+    ]
+
     timeout: int = 30
     intercom_session = None
+    light: ArloSpotlight = None
+    vss: ArloSirenVirtualSecuritySystem = None
 
     def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider) -> None:
         super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
         self.start_motion_subscription()
+        self.start_audio_subscription()
         self.start_battery_subscription()
 
     def start_motion_subscription(self) -> None:
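Note: the MODELS_WITH_* tables above are matched by case-insensitive prefix in the has_* properties later in this diff, so a longer model string still matches its table entry. A small illustration (the model string is hypothetical):

MODELS_WITH_SIRENS = ["vmc4040p", "fb1001"]  # abbreviated from the table above
model_id = "VMC4040P-100NAS"
assert any(model_id.lower().startswith(m) for m in MODELS_WITH_SIRENS)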
@@ -39,7 +91,22 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
             self.provider.arlo.SubscribeToMotionEvents(self.arlo_basestation, self.arlo_device, callback)
         )
 
+    def start_audio_subscription(self) -> None:
+        if not self.has_audio_sensor:
+            return
+
+        def callback(audioDetected):
+            self.audioDetected = audioDetected
+            return self.stop_subscriptions
+
+        self.register_task(
+            self.provider.arlo.SubscribeToAudioEvents(self.arlo_basestation, self.arlo_device, callback)
+        )
+
     def start_battery_subscription(self) -> None:
+        if self.wired_to_power:
+            return
+
         def callback(batteryLevel):
             self.batteryLevel = batteryLevel
             return self.stop_subscriptions
@@ -53,9 +120,7 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
             ScryptedInterface.VideoCamera.value,
             ScryptedInterface.Camera.value,
             ScryptedInterface.MotionSensor.value,
-            ScryptedInterface.Battery.value,
             ScryptedInterface.Settings.value,
-            ScryptedInterface.VideoClips.value,
         ])
 
         if self.two_way_audio:
@@ -66,6 +131,21 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
             results.add(ScryptedInterface.RTCSignalingChannel.value)
             results.discard(ScryptedInterface.Intercom.value)
 
+        if self.has_battery:
+            results.add(ScryptedInterface.Battery.value)
+
+        if self.wired_to_power:
+            results.discard(ScryptedInterface.Battery.value)
+
+        if self.has_siren or self.has_spotlight or self.has_floodlight:
+            results.add(ScryptedInterface.DeviceProvider.value)
+
+        if self.has_audio_sensor:
+            results.add(ScryptedInterface.AudioSensor.value)
+
+        if self.has_cloud_recording:
+            results.add(ScryptedInterface.VideoClips.value)
+
         if not self._can_push_to_talk():
             results.discard(ScryptedInterface.RTCSignalingChannel.value)
             results.discard(ScryptedInterface.Intercom.value)
@@ -75,6 +155,42 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
     def get_device_type(self) -> str:
         return ScryptedDeviceType.Camera.value
 
+    def get_builtin_child_device_manifests(self) -> List[Device]:
+        results = []
+        if self.has_spotlight or self.has_floodlight:
+            light = self.get_or_create_spotlight_or_floodlight()
+            results.append({
+                "info": {
+                    "model": f"{self.arlo_device['modelId']} {self.arlo_device['properties'].get('hwVersion', '')}".strip(),
+                    "manufacturer": "Arlo",
+                    "firmware": self.arlo_device.get("firmwareVersion"),
+                    "serialNumber": self.arlo_device["deviceId"],
+                },
+                "nativeId": light.nativeId,
+                "name": f'{self.arlo_device["deviceName"]} {"Spotlight" if self.has_spotlight else "Floodlight"}',
+                "interfaces": light.get_applicable_interfaces(),
+                "type": light.get_device_type(),
+                "providerNativeId": self.nativeId,
+            })
+        if self.has_siren:
+            vss = self.get_or_create_vss()
+            results.extend([
+                {
+                    "info": {
+                        "model": f"{self.arlo_device['modelId']} {self.arlo_device['properties'].get('hwVersion', '')}".strip(),
+                        "manufacturer": "Arlo",
+                        "firmware": self.arlo_device.get("firmwareVersion"),
+                        "serialNumber": self.arlo_device["deviceId"],
+                    },
+                    "nativeId": vss.nativeId,
+                    "name": f'{self.arlo_device["deviceName"]} Siren Virtual Security System',
+                    "interfaces": vss.get_applicable_interfaces(),
+                    "type": vss.get_device_type(),
+                    "providerNativeId": self.nativeId,
+                },
+            ] + vss.get_builtin_child_device_manifests())
+        return results
+
     @property
     def webrtc_emulation(self) -> bool:
         if self.storage:
@@ -92,9 +208,53 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
         else:
             return True
 
+    @property
+    def wired_to_power(self) -> bool:
+        if self.storage:
+            return True if self.storage.getItem("wired_to_power") else False
+        else:
+            return False
+
+    @property
+    def has_cloud_recording(self) -> bool:
+        return self.provider.arlo.GetSmartFeatures(self.arlo_device).get("planFeatures", {}).get("eventRecording", False)
+
+    @property
+    def has_spotlight(self) -> bool:
+        return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_SPOTLIGHTS])
+
+    @property
+    def has_floodlight(self) -> bool:
+        return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_FLOODLIGHTS])
+
+    @property
+    def has_siren(self) -> bool:
+        return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_SIRENS])
+
+    @property
+    def has_audio_sensor(self) -> bool:
+        return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_AUDIO_SENSORS])
+
+    @property
+    def has_battery(self) -> bool:
+        return not any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITHOUT_BATTERY])
+
     async def getSettings(self) -> List[Setting]:
+        result = []
+        if self.has_battery:
+            result.append(
+                {
+                    "key": "wired_to_power",
+                    "title": "Plugged In to External Power",
+                    "value": self.wired_to_power,
+                    "description": "Informs Scrypted that this device is plugged in to an external power source. " + \
+                        "Will allow features like persistent prebuffer to work, however will no longer report this device's battery percentage. " + \
+                        "Note that a persistent prebuffer may cause excess battery drain if the external power is not able to charge faster than the battery consumption rate.",
+                    "type": "boolean",
+                },
+            )
         if self._can_push_to_talk():
-            return [
+            result.extend([
                 {
                     "key": "two_way_audio",
                     "title": "(Experimental) Enable native two-way audio",
@@ -110,17 +270,18 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
                         "If enabled, takes precedence over native two-way audio. May use increased system resources.",
                     "type": "boolean",
                 },
-            ]
-        return []
+            ])
+        return result
 
     async def putSetting(self, key, value) -> None:
-        if key in ["webrtc_emulation", "two_way_audio"]:
+        if key in ["webrtc_emulation", "two_way_audio", "wired_to_power"]:
             self.storage.setItem(key, value == "true")
-            await self.provider.discoverDevices()
+            await self.provider.discover_devices()
 
     async def getPictureOptions(self) -> List[ResponsePictureOptions]:
         return []
 
+    @async_print_exception_guard
     async def takePicture(self, options: dict = None) -> MediaObject:
         self.logger.info("Taking picture")
 
@@ -128,7 +289,11 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
         msos = await real_device.getVideoStreamOptions()
         if any(["prebuffer" in m for m in msos]):
             self.logger.info("Getting snapshot from prebuffer")
-            return await real_device.getVideoStream({"refresh": False})
+            try:
+                return await real_device.getVideoStream({"refresh": False})
+            except Exception as e:
+                self.logger.warning(f"Could not fetch from prebuffer due to: {e}")
+                self.logger.warning("Will try to fetch snapshot from Arlo cloud")
 
         pic_url = await asyncio.wait_for(self.provider.arlo.TriggerFullFrameSnapshot(self.arlo_basestation, self.arlo_device), timeout=self.timeout)
         self.logger.debug(f"Got snapshot URL for at {pic_url}")
@@ -180,32 +345,30 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
         }
         return await scrypted_sdk.mediaManager.createFFmpegMediaObject(ffmpeg_input)
 
+    @async_print_exception_guard
     async def startRTCSignalingSession(self, scrypted_session):
-        try:
-            plugin_session = ArloCameraRTCSignalingSession(self)
-            await plugin_session.initialize()
-
-            scrypted_setup = {
-                "type": "offer",
-                "audio": {
-                    "direction": "sendrecv" if self._can_push_to_talk() else "recvonly",
-                },
-                "video": {
-                    "direction": "recvonly",
-                }
-            }
-            plugin_setup = {}
-
-            scrypted_offer = await scrypted_session.createLocalDescription("offer", scrypted_setup, sendIceCandidate=plugin_session.addIceCandidate)
-            self.logger.info(f"Scrypted offer sdp:\n{scrypted_offer['sdp']}")
-            await plugin_session.setRemoteDescription(scrypted_offer, plugin_setup)
-            plugin_answer = await plugin_session.createLocalDescription("answer", plugin_setup, scrypted_session.sendIceCandidate)
-            self.logger.info(f"Scrypted answer sdp:\n{plugin_answer['sdp']}")
-            await scrypted_session.setRemoteDescription(plugin_answer, scrypted_setup)
-
-            return ArloCameraRTCSessionControl(plugin_session)
-        except Exception as e:
-            self.logger.error(e)
+        plugin_session = ArloCameraRTCSignalingSession(self)
+        await plugin_session.initialize()
+
+        scrypted_setup = {
+            "type": "offer",
+            "audio": {
+                "direction": "sendrecv" if self._can_push_to_talk() else "recvonly",
+            },
+            "video": {
+                "direction": "recvonly",
+            }
+        }
+        plugin_setup = {}
+
+        scrypted_offer = await scrypted_session.createLocalDescription("offer", scrypted_setup, sendIceCandidate=plugin_session.addIceCandidate)
+        self.logger.info(f"Scrypted offer sdp:\n{scrypted_offer['sdp']}")
+        await plugin_session.setRemoteDescription(scrypted_offer, plugin_setup)
+        plugin_answer = await plugin_session.createLocalDescription("answer", plugin_setup, scrypted_session.sendIceCandidate)
+        self.logger.info(f"Scrypted answer sdp:\n{plugin_answer['sdp']}")
+        await scrypted_session.setRemoteDescription(plugin_answer, scrypted_setup)
+
+        return ArloCameraRTCSessionControl(plugin_session)
 
     async def startIntercom(self, media) -> None:
         self.logger.info("Starting intercom")
@@ -281,11 +444,36 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, VideoClips, Moti
         clips.reverse()
         return clips
 
+    @async_print_exception_guard
     async def removeVideoClips(self, videoClipIds: List[str]) -> None:
         # Arlo does support deleting, but let's be safe and disable that
         self.logger.error("deleting Arlo video clips is not implemented by this plugin")
         raise Exception("deleting Arlo video clips is not implemented by this plugin")
 
+    async def getDevice(self, nativeId: str) -> ArloDeviceBase:
+        if (nativeId.endswith("spotlight") and self.has_spotlight) or (nativeId.endswith("floodlight") and self.has_floodlight):
+            return self.get_or_create_spotlight_or_floodlight()
+        if nativeId.endswith("vss") and self.has_siren:
+            return self.get_or_create_vss()
+        return None
+
+    def get_or_create_spotlight_or_floodlight(self) -> ArloSpotlight:
+        if self.has_spotlight:
+            light_id = f'{self.arlo_device["deviceId"]}.spotlight'
+            if not self.light:
+                self.light = ArloSpotlight(light_id, self.arlo_device, self.arlo_basestation, self.provider, self)
+        elif self.has_floodlight:
+            light_id = f'{self.arlo_device["deviceId"]}.floodlight'
+            if not self.light:
+                self.light = ArloFloodlight(light_id, self.arlo_device, self.arlo_basestation, self.provider, self)
+        return self.light
+
+    def get_or_create_vss(self) -> ArloSirenVirtualSecuritySystem:
+        if self.has_siren:
+            vss_id = f'{self.arlo_device["deviceId"]}.vss'
+            if not self.vss:
+                self.vss = ArloSirenVirtualSecuritySystem(vss_id, self.arlo_device, self.arlo_basestation, self.provider, self)
+        return self.vss
+
 
 class ArloCameraRTCSignalingSession(BackgroundTaskMixin):
     def __init__(self, camera):
@@ -31,8 +31,4 @@ class ArloDoorbell(ArloCamera, BinarySensor):
     def get_applicable_interfaces(self) -> List[str]:
         camera_interfaces = super().get_applicable_interfaces()
         camera_interfaces.append(ScryptedInterface.BinarySensor.value)
-
-        model_id = self.arlo_device['modelId'].lower()
-        if model_id.startswith("avd1001"):
-            camera_interfaces.remove(ScryptedInterface.Battery.value)
         return camera_interfaces
@@ -10,26 +10,27 @@ from typing import List
 
 import scrypted_sdk
 from scrypted_sdk import ScryptedDeviceBase
-from scrypted_sdk.types import Setting, SettingValue, Settings, DeviceProvider, DeviceDiscovery, ScryptedInterface
+from scrypted_sdk.types import Setting, SettingValue, Settings, DeviceProvider, ScryptedInterface
 
 from .arlo import Arlo
 from .arlo.arlo_async import change_stream_class
 from .arlo.logging import logger as arlo_lib_logger
 from .logging import ScryptedDeviceLoggerMixin
-from .util import BackgroundTaskMixin
+from .util import BackgroundTaskMixin, async_print_exception_guard
 from .camera import ArloCamera
 from .doorbell import ArloDoorbell
 from .basestation import ArloBasestation
 from .base import ArloDeviceBase
 
 
-class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery, ScryptedDeviceLoggerMixin, BackgroundTaskMixin):
+class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, ScryptedDeviceLoggerMixin, BackgroundTaskMixin):
     arlo_cameras = None
     arlo_basestations = None
     _arlo_mfa_code = None
     scrypted_devices = None
     _arlo = None
     _arlo_mfa_complete_auth = None
+    device_discovery_lock: asyncio.Lock = None
 
     plugin_verbosity_choices = {
         "Normal": logging.INFO,
@@ -50,6 +51,7 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
         self.imap = None
         self.imap_signal = None
         self.imap_skip_emails = None
+        self.device_discovery_lock = asyncio.Lock()
 
         self.propagate_verbosity()
         self.propagate_transport()
@@ -188,14 +190,11 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
 
     async def do_arlo_setup(self) -> None:
         try:
-            await self.discoverDevices()
+            await self.discover_devices()
             await self.arlo.Subscribe([
                 (self.arlo_basestations[camera["parentId"]], camera) for camera in self.arlo_cameras.values()
             ])
 
-            for nativeId in self.arlo_cameras.keys():
-                await self.getDevice(nativeId)
-
             self.arlo.event_stream.set_refresh_interval(self.refresh_interval)
         except requests.exceptions.HTTPError as e:
             traceback.print_exc()
@@ -472,11 +471,10 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
             {
                 "group": "General",
                 "key": "plugin_verbosity",
-                "title": "Plugin Verbosity",
-                "description": "Select the verbosity of this plugin. 'Verbose' will show debugging messages, "
-                               "including events received from connected Arlo cameras.",
-                "value": self.plugin_verbosity,
-                "choices": sorted(self.plugin_verbosity_choices.keys()),
+                "title": "Verbose Logging",
+                "description": "Enable this option to show debug messages, including events received from connected Arlo cameras.",
+                "value": self.plugin_verbosity == "Verbose",
+                "type": "boolean",
             },
         ])
@@ -493,13 +491,14 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
         elif key == "force_reauth":
             # force arlo client to be invalidated and reloaded
             self.invalidate_arlo_client()
+        elif key == "plugin_verbosity":
+            self.storage.setItem(key, "Verbose" if value == "true" else "Normal")
+            self.propagate_verbosity()
+            skip_arlo_client = True
         else:
             self.storage.setItem(key, value)
 
-            if key == "plugin_verbosity":
-                self.propagate_verbosity()
-                skip_arlo_client = True
-            elif key == "arlo_transport":
+            if key == "arlo_transport":
                 self.propagate_transport()
                 # force arlo client to be invalidated and reloaded, but
                 # keep any mfa codes
@@ -558,7 +557,12 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
             return False
         return True
 
-    async def discoverDevices(self, duration: int = 0) -> None:
+    @async_print_exception_guard
+    async def discover_devices(self) -> None:
+        async with self.device_discovery_lock:
+            return await self.discover_devices_impl()
+
+    async def discover_devices_impl(self) -> None:
         if not self.arlo:
             raise Exception("Arlo client not connected, cannot discover devices")
 
@@ -573,16 +577,17 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
         basestations = self.arlo.GetDevices(['basestation', 'siren'])
         for basestation in basestations:
             nativeId = basestation["deviceId"]
+            self.logger.debug(f"Adding {nativeId}")
+
             if nativeId in self.arlo_basestations:
+                self.logger.info(f"Skipping basestation {nativeId} ({basestation['modelId']}) as it has already been added")
                 continue
             self.arlo_basestations[nativeId] = basestation
 
-            device = await self.getDevice(nativeId)
+            device = await self.getDevice_impl(nativeId)
             scrypted_interfaces = device.get_applicable_interfaces()
             manifest = device.get_device_manifest()
-            self.logger.info(f"Interfaces for {nativeId} ({basestation['modelId']}): {scrypted_interfaces}")
+            self.logger.debug(f"Interfaces for {nativeId} ({basestation['modelId']}): {scrypted_interfaces}")
 
             # for basestations, we want to add them to the top level DeviceProvider
             provider_to_device_map.setdefault(None, []).append(manifest)
@@ -601,11 +606,13 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
 
         cameras = self.arlo.GetDevices(['camera', "arloq", "arloqs", "doorbell"])
         for camera in cameras:
+            nativeId = camera["deviceId"]
+            self.logger.debug(f"Adding {nativeId}")
+
             if camera["deviceId"] != camera["parentId"] and camera["parentId"] not in self.arlo_basestations:
                 self.logger.info(f"Skipping camera {camera['deviceId']} ({camera['modelId']}) because its basestation was not found")
                 continue
 
-            nativeId = camera["deviceId"]
             if nativeId in self.arlo_cameras:
                 self.logger.info(f"Skipping camera {nativeId} ({camera['modelId']}) as it has already been added")
                 continue
@@ -616,10 +623,10 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
                 # own basestation
                 self.arlo_basestations[camera["deviceId"]] = camera
 
-            device: ArloDeviceBase = await self.getDevice(nativeId)
+            device = await self.getDevice_impl(nativeId)
             scrypted_interfaces = device.get_applicable_interfaces()
             manifest = device.get_device_manifest()
-            self.logger.info(f"Interfaces for {nativeId} ({camera['modelId']}): {scrypted_interfaces}")
+            self.logger.debug(f"Interfaces for {nativeId} ({camera['modelId']}): {scrypted_interfaces}")
 
             if camera["deviceId"] == camera["parentId"]:
                 provider_to_device_map.setdefault(None, []).append(manifest)
@@ -656,6 +663,10 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
             })
 
     async def getDevice(self, nativeId: str) -> ArloDeviceBase:
+        async with self.device_discovery_lock:
+            return await self.getDevice_impl(nativeId)
+
+    async def getDevice_impl(self, nativeId: str) -> ArloDeviceBase:
         ret = self.scrypted_devices.get(nativeId, None)
         if ret is None:
             ret = self.create_device(nativeId)
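Note: sketch of the serialization pattern introduced by device_discovery_lock; a single asyncio.Lock makes discovery and device lookup mutually exclusive, so concurrent callers cannot observe partially-discovered state. The _impl variants run with the lock already held, which is why code inside the lock calls them directly.

import asyncio

class Provider:
    def __init__(self):
        self.device_discovery_lock = asyncio.Lock()

    async def getDevice(self, nativeId):
        async with self.device_discovery_lock:
            return await self.getDevice_impl(nativeId)

    async def getDevice_impl(self, nativeId):
        ...  # actual lookup, as in the diff; safe to call while the lock is held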
@@ -5,6 +5,7 @@ from typing import List, TYPE_CHECKING
 from scrypted_sdk.types import OnOff, SecuritySystemMode, ScryptedInterface, ScryptedDeviceType
 
 from .base import ArloDeviceBase
+from .util import async_print_exception_guard
 
 if TYPE_CHECKING:
     # https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
@@ -25,8 +26,9 @@ class ArloSiren(ArloDeviceBase, OnOff):
     def get_device_type(self) -> str:
         return ScryptedDeviceType.Siren.value
 
-    @ArloDeviceBase.async_print_exception_guard
+    @async_print_exception_guard
     async def turnOn(self) -> None:
+        from .basestation import ArloBasestation
         self.logger.info("Turning on")
 
         if self.vss.securitySystemState["mode"] == SecuritySystemMode.Disarmed.value:
@@ -42,7 +44,12 @@ class ArloSiren(ArloDeviceBase, OnOff):
             }
             return
 
-        self.provider.arlo.SirenOn(self.arlo_device)
+        if isinstance(self.vss.parent, ArloBasestation):
+            self.logger.debug("Parent device is a basestation")
+            self.provider.arlo.SirenOn(self.arlo_basestation)
+        else:
+            self.logger.debug("Parent device is a camera")
+            self.provider.arlo.SirenOn(self.arlo_basestation, self.arlo_device)
 
         self.on = True
         self.vss.securitySystemState = {
@@ -50,10 +57,14 @@ class ArloSiren(ArloDeviceBase, OnOff):
             "triggered": True,
         }
 
-    @ArloDeviceBase.async_print_exception_guard
+    @async_print_exception_guard
     async def turnOff(self) -> None:
+        from .basestation import ArloBasestation
         self.logger.info("Turning off")
-        self.provider.arlo.SirenOff(self.arlo_device)
+        if isinstance(self.vss.parent, ArloBasestation):
+            self.provider.arlo.SirenOff(self.arlo_basestation)
+        else:
+            self.provider.arlo.SirenOff(self.arlo_basestation, self.arlo_device)
         self.on = False
         self.vss.securitySystemState = {
             **self.vss.securitySystemState,
plugins/arlo/src/arlo_plugin/spotlight.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import List, TYPE_CHECKING
+
+from scrypted_sdk.types import OnOff, ScryptedInterface, ScryptedDeviceType
+
+from .base import ArloDeviceBase
+from .util import async_print_exception_guard
+
+if TYPE_CHECKING:
+    # https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
+    from .provider import ArloProvider
+    from .camera import ArloCamera
+
+
+class ArloSpotlight(ArloDeviceBase, OnOff):
+    camera: ArloCamera = None
+
+    def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider, camera: ArloCamera) -> None:
+        super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
+        self.camera = camera
+
+    def get_applicable_interfaces(self) -> List[str]:
+        return [ScryptedInterface.OnOff.value]
+
+    def get_device_type(self) -> str:
+        return ScryptedDeviceType.Light.value
+
+    @async_print_exception_guard
+    async def turnOn(self) -> None:
+        self.logger.info("Turning on")
+        self.provider.arlo.SpotlightOn(self.arlo_basestation, self.arlo_device)
+        self.on = True
+
+    @async_print_exception_guard
+    async def turnOff(self) -> None:
+        self.logger.info("Turning off")
+        self.provider.arlo.SpotlightOff(self.arlo_basestation, self.arlo_device)
+        self.on = False
+
+
+class ArloFloodlight(ArloSpotlight):
+
+    @async_print_exception_guard
+    async def turnOn(self) -> None:
+        self.logger.info("Turning on")
+        self.provider.arlo.FloodlightOn(self.arlo_basestation, self.arlo_device)
+        self.on = True
+
+    @async_print_exception_guard
+    async def turnOff(self) -> None:
+        self.logger.info("Turning off")
+        self.provider.arlo.FloodlightOff(self.arlo_basestation, self.arlo_device)
+        self.on = False
@@ -1,4 +1,5 @@
 import asyncio
+import traceback
 
 
 class BackgroundTaskMixin:
@@ -25,4 +26,14 @@ class BackgroundTaskMixin:
         if not hasattr(self, "background_tasks"):
             return
         for task in self.background_tasks:
-            task.cancel()
+            task.cancel()
+
+
+def async_print_exception_guard(fn):
+    """Decorator to print an exception's stack trace before re-raising the exception."""
+    async def wrapped(*args, **kwargs):
+        try:
+            return await fn(*args, **kwargs)
+        except Exception:
+            traceback.print_exc()
+            raise
+    return wrapped
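Note: usage sketch for the module-level decorator defined above; it prints the full traceback before re-raising, so stack traces stay visible even if a caller swallows the exception.

import asyncio

@async_print_exception_guard  # as defined above
async def flaky():
    raise RuntimeError("boom")

# asyncio.run(flaky())  # prints the traceback, then raises RuntimeError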
@@ -7,21 +7,26 @@ from scrypted_sdk.types import Device, DeviceProvider, Setting, Settings, Settin

 from .base import ArloDeviceBase
 from .siren import ArloSiren
+from .util import async_print_exception_guard

 if TYPE_CHECKING:
     # https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
     from .provider import ArloProvider
+    from .basestation import ArloBasestation
+    from .camera import ArloCamera


-class ArloSirenVirtualSecuritySystem(ArloDeviceBase, SecuritySystem, DeviceProvider):
+class ArloSirenVirtualSecuritySystem(ArloDeviceBase, SecuritySystem, Settings, Readme, DeviceProvider):
     """A virtual, emulated security system that controls when scrypted events can trip the real physical siren."""

     SUPPORTED_MODES = [SecuritySystemMode.AwayArmed.value, SecuritySystemMode.HomeArmed.value, SecuritySystemMode.Disarmed.value]

     siren: ArloSiren = None
+    parent: ArloBasestation | ArloCamera = None

-    def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider) -> None:
+    def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider, parent: ArloBasestation | ArloCamera) -> None:
         super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
+        self.parent = parent
         self.create_task(self.delayed_init())

     @property
@@ -56,7 +61,7 @@ class ArloSirenVirtualSecuritySystem(ArloDeviceBase, SecuritySystem, DeviceProvi
                 }
                 return
             except Exception as e:
-                self.logger.info(f"Delayed init failed, will try again: {e}")
+                self.logger.debug(f"Delayed init failed, will try again: {e}")
                 await asyncio.sleep(0.1)
                 iterations += 1

@@ -129,6 +134,7 @@ If this virtual security system is synced to Homekit, the siren device will be m
         self.siren = ArloSiren(siren_id, self.arlo_device, self.arlo_basestation, self.provider, self)
         return self.siren

+    @async_print_exception_guard
     async def armSecuritySystem(self, mode: SecuritySystemMode) -> None:
         self.logger.info(f"Arming {mode}")
         self.mode = mode
@@ -139,7 +145,7 @@ If this virtual security system is synced to Homekit, the siren device will be m
         if mode == SecuritySystemMode.Disarmed.value:
             await self.get_or_create_siren().turnOff()

-    @ArloDeviceBase.async_print_exception_guard
+    @async_print_exception_guard
     async def disarmSecuritySystem(self) -> None:
         self.logger.info(f"Disarming")
         self.mode = SecuritySystemMode.Disarmed.value
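The delayed_init change above only downgrades the retry log from info to debug; the surrounding loop is a plain bounded retry. A sketch of that retry shape, with load_device_state as a hypothetical stand-in for fetching provisioning state from the Arlo cloud:

import asyncio
import random


async def load_device_state() -> None:
    # hypothetical stand-in for a cloud call that may fail during startup
    if random.random() < 0.5:
        raise ConnectionError("cloud not ready")


async def delayed_init(max_iterations: int = 100) -> None:
    iterations = 0
    while iterations < max_iterations:
        try:
            await load_device_state()
            return
        except Exception as e:
            # debug-level logging keeps this transient startup noise out of the default log
            print(f"Delayed init failed, will try again: {e}")
            await asyncio.sleep(0.1)
            iterations += 1


asyncio.run(delayed_init())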
@@ -1,7 +1,7 @@
 paho-mqtt==1.6.1
 sseclient==0.0.22
-requests
-cachetools
+requests==2.28.2
+cachetools==5.3.0
 scrypted-arlo-go==0.0.1
 --extra-index-url=https://www.piwheels.org/simple/
 --extra-index-url=https://bjia56.github.io/scrypted-arlo-go/
plugins/core/package-lock.json (generated, 4 changes)
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/core",
-  "version": "0.1.103",
+  "version": "0.1.108",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/core",
-      "version": "0.1.103",
+      "version": "0.1.108",
       "license": "Apache-2.0",
       "dependencies": {
         "@scrypted/common": "file:../../common",
@@ -1,6 +1,6 @@
 {
   "name": "@scrypted/core",
-  "version": "0.1.103",
+  "version": "0.1.108",
   "description": "Scrypted Core plugin. Provides the UI, websocket, and engine.io APIs.",
   "author": "Scrypted",
   "license": "Apache-2.0",
@@ -92,6 +92,17 @@ class ScryptedCore extends ScryptedDeviceBase implements HttpRequestHandler, Eng
             this.automationCore = new AutomationCore();
         })();

+        deviceManager.onDeviceDiscovered({
+            name: 'Add to Launcher',
+            nativeId: 'launcher',
+            interfaces: [
+                '@scrypted/launcher-ignore',
+                ScryptedInterface.MixinProvider,
+                ScryptedInterface.Readme,
+            ],
+            type: ScryptedDeviceType.Builtin,
+        });
+
         (async () => {
             await deviceManager.onDeviceDiscovered(
                 {
@@ -23,6 +23,15 @@ export class User extends ScryptedDeviceBase implements Settings, ScryptedUser {
     })

     async getScryptedUserAccessControl(): Promise<ScryptedUserAccessControl> {
+        const usersService = await sdk.systemManager.getComponent('users');
+        const users: DBUser[] = await usersService.getAllUsers();
+        const user = users.find(user => user.username === this.username);
+        if (!user)
+            throw new Error("user not found");
+
+        if (user.admin)
+            return;
+
         const self = sdk.deviceManager.getDeviceState(this.nativeId);

         const ret: ScryptedUserAccessControl = {
@@ -1,3 +1,4 @@
+import { timeoutPromise } from "@scrypted/common/src/promise-utils";
 import { MixinProvider, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, SystemManager } from "@scrypted/types";

 export async function setMixin(systemManager: SystemManager, device: ScryptedDevice, mixinId: string, enabled: boolean) {
@@ -14,19 +15,21 @@ export async function setMixin(systemManager: SystemManager, device: ScryptedDev
     plugins.setMixins(device.id, mixins);
 }

-export function getAllDevices(systemManager: SystemManager) {
-    return Object.keys(systemManager.getSystemState()).map(id => systemManager.getDeviceById(id)).filter(device => !!device);
+export function getAllDevices<T>(systemManager: SystemManager) {
+    return Object.keys(systemManager.getSystemState()).map(id => systemManager.getDeviceById(id) as T & ScryptedDevice).filter(device => !!device);
 }

 export async function getDeviceAvailableMixins(systemManager: SystemManager, device: ScryptedDevice): Promise<(ScryptedDevice & MixinProvider)[]> {
-    const results = await Promise.all(getAllDevices(systemManager).map(async (check) => {
+    const results = await Promise.all(getAllDevices<MixinProvider>(systemManager).map(async (check) => {
         try {
             if (check.interfaces.includes(ScryptedInterface.MixinProvider)) {
-                if (await (check as any as MixinProvider).canMixin(device.type, device.interfaces))
-                    return check as MixinProvider & ScryptedDevice;
+                const canMixin = await timeoutPromise(5000, check.canMixin(device.type, device.interfaces));
+                if (canMixin)
+                    return check;
             }
         }
         catch (e) {
             console.warn(check.name, 'canMixin error', e)
         }
     }));

@@ -47,7 +50,7 @@ export async function getMixinProviderAvailableDevices(systemManager: SystemMana
         devices.map(async (device) => {
             try {
                 if (device.mixins?.includes(mixinProvider.id) || (await mixinProvider.canMixin(device.type, device.interfaces)))
                     return device;
             }
             catch (e) {
             }
@@ -215,6 +215,7 @@ import Notifier from "../interfaces/Notifier.vue";
 import OnOff from "../interfaces/OnOff.vue";
 import Brightness from "../interfaces/Brightness.vue";
 import Battery from "../interfaces/Battery.vue";
+import Charger from "../interfaces/Charger.vue";
 import Lock from "../interfaces/Lock.vue";
 import ColorSettingHsv from "../interfaces/ColorSettingHsv.vue";
 import ColorSettingRgb from "../interfaces/ColorSettingRgb.vue";
@@ -263,6 +264,7 @@ const cardHeaderInterfaces = [
   ScryptedInterface.AudioSensor,
   ScryptedInterface.HumiditySensor,
   ScryptedInterface.Thermometer,
+  ScryptedInterface.Charger,
   ScryptedInterface.Battery,
   ScryptedInterface.Lock,
   ScryptedInterface.OnOff,
@@ -362,6 +364,7 @@ export default {

     Lock,
     OnOff,
+    Charger,
     Battery,
     Thermometer,
     HumiditySensor,
plugins/core/ui/src/interfaces/Charger.vue (new file, 52 lines)
@@ -0,0 +1,52 @@
+<template>
+  <v-tooltip left>
+    <template v-slot:activator="{ on }">
+      <v-icon
+        v-on="on"
+        v-if="lazyValue.chargeState === Charging"
+        class="mr-1 mr-1"
+        small
+      >fa-plug</v-icon>
+      <v-icon
+        v-on="on"
+        v-else-if="lazyValue.chargeState == Trickle"
+        class="mr-1 mr-1"
+        small
+      >fa-plug-circle-minus</v-icon>
+      <v-icon
+        v-on="on"
+        v-else
+        class="mr-1 mr-1"
+        small
+      >fa-plug-circle-xmark</v-icon>
+    </template>
+    <span>{{ chargeText }}</span>
+  </v-tooltip>
+</template>
+
+<script>
+import { ChargeState } from '@scrypted/types';
+import RPCInterface from "./RPCInterface.vue";
+
+export default {
+  mixins: [RPCInterface],
+  data() {
+    return {
+      Charging: ChargeState.Charging,
+      Trickle: ChargeState.Trickle,
+      NotCharging: ChargeState.NotCharging,
+    };
+  },
+  computed: {
+    chargeText() {
+      if (this.lazyValue.chargeState === "trickle") {
+        return "Trickle Charging";
+      }
+      if (this.lazyValue.chargeState === "charging") {
+        return "Charging";
+      }
+      return "Not Charging";
+    },
+  },
+};
+</script>
@@ -81,6 +81,7 @@ export default {
       const mediaManager = this.$scrypted.mediaManager;
       const mo = await mediaManager.createMediaObject(buffer, 'image/*');
       const detected = await this.rpc().detectObjects(mo);
+      console.log(detected);
       this.lastDetection = detected;
     },
     allowDrop(ev) {
@@ -41,8 +41,7 @@
         </template>
       </DevicePicker>
       <DevicePicker v-else-if="lazyValue.type === 'interface'" v-model="lazyValue.value" :multiple="lazyValue.multiple"
-        :readonly="lazyValue.readonly" :devices="interfaces" :title="lazyValue.title"
-        :description="lazyValue.description">
+        :readonly="lazyValue.readonly" :devices="interfaces" :title="lazyValue.title" :description="lazyValue.description">
         <template v-slot:append-outer>
           <v-btn v-if="dirty && device" color="success" @click="save" class="shift-up">
             <v-icon>send</v-icon>
@@ -52,7 +51,7 @@
       <div v-else-if="lazyValue.type === 'clippath'" class="mb-2">
         <v-btn small block @click="editZone">{{ lazyValue.title }} </v-btn>
         <Camera :value="device" :device="device" :clipPathValue="sanitizedClipPathValue" :showDialog="editingZone"
-          :hidePreview="true" @dialog="editingZoneChanged" @clipPath="lazyValue.value = $event"></Camera>
+          :hidePreview="true" @dialog="editingZoneChanged" @clipPath="updateClipPath"></Camera>
       </div>
       <v-textarea v-else-if="lazyValue.type === 'textarea'" v-model="lazyValue.value" outlined persistent-hint
         :hint="lazyValue.description" :label="lazyValue.title">
@@ -88,6 +87,7 @@ export default {
   data() {
     return {
       editingZone: false,
+      clipPathThrottle: null,
     };
   },
   watch: {
@@ -142,7 +142,7 @@ export default {
       );
     },
     set(val) {
-      this.lazyValue.value = val.toString();
+      this.lazyValue.value = !!val;
     },
   },
   dirty() {
@@ -228,6 +228,17 @@ export default {
   },
   methods: {
     onChange() { },
+    updateClipPath(e) {
+      clearTimeout(this.clipPathThrottle);
+      this.clipPathThrottle = setTimeout(() => {
+        this.lazyValue.value = e;
+        this.rpc().putSetting(
+          this.lazyValue.key,
+          this.createInputValue().value
+        );
+        this.onInput();
+      }, 500)
+    },
     editingZoneChanged(value) {
       this.editingZone = value;
       if (!value) {
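updateClipPath coalesces the stream of clipPath events emitted while dragging a zone into a single putSetting call after 500 ms of quiet. The same debounce idea expressed in Python, as an illustration only (the Debouncer class is not plugin code):

import threading


class Debouncer:
    """Run a callback only after `delay` seconds pass without new calls."""

    def __init__(self, delay: float):
        self.delay = delay
        self.timer = None

    def __call__(self, fn, *args):
        if self.timer:
            self.timer.cancel()  # drop the previously scheduled call
        self.timer = threading.Timer(self.delay, fn, args)
        self.timer.start()


save_setting = Debouncer(0.5)
for value in ("zone-v1", "zone-v2", "zone-v3"):
    save_setting(print, value)  # only "zone-v3" is printed, 0.5s after the loop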
@@ -58,6 +58,8 @@ import {
   faLightbulb,
   faToggleOn,
   faPlug,
+  faPlugCircleMinus,
+  faPlugCircleXmark,
   faExclamationTriangle,
   faSun,
   faCode,
@@ -150,6 +152,8 @@ const icons: IconDefinition[] =[
   faLightbulb,
   faToggleOn,
   faPlug,
+  faPlugCircleMinus,
+  faPlugCircleXmark,
   faExclamationTriangle,
   faSun,
   faCode,
plugins/coreml/package-lock.json (generated, 4 changes)
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/coreml",
-  "version": "0.1.5",
+  "version": "0.1.8",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/coreml",
-      "version": "0.1.5",
+      "version": "0.1.8",
       "devDependencies": {
         "@scrypted/sdk": "file:../../sdk"
       }
@@ -41,5 +41,5 @@
   "devDependencies": {
     "@scrypted/sdk": "file:../../sdk"
   },
-  "version": "0.1.5"
+  "version": "0.1.8"
 }
@@ -9,7 +9,7 @@ from PIL import Image
 import asyncio
 import concurrent.futures

-predictExecutor = concurrent.futures.ThreadPoolExecutor(2, "CoreML-Predict")
+predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "CoreML-Predict")

 def parse_label_contents(contents: str):
     lines = contents.splitlines()
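Raising the CoreML predict pool from 2 to 8 workers allows more concurrent predictions; the executor pattern itself is standard asyncio offloading of a blocking call. A minimal sketch of the pattern (the predict function is a hypothetical stand-in for the CoreML model invocation):

import asyncio
import concurrent.futures

predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "CoreML-Predict")


def predict(frame: bytes) -> str:
    # hypothetical blocking model invocation
    return f"{len(frame)} bytes analyzed"


async def detect(frame: bytes) -> str:
    loop = asyncio.get_running_loop()
    # run the blocking call on the pool so the event loop stays responsive
    return await loop.run_in_executor(predictExecutor, predict, frame)


print(asyncio.run(detect(b"\x00" * 640 * 480)))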
@@ -1 +0,0 @@
-../../tensorflow-lite/src/pipeline
@@ -1,10 +1,5 @@
 # plugin
-Pillow>=5.4.1
-PyGObject>=3.30.4
-coremltools~=6.1
-av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
+coremltools

 # sort_oh
 scipy
 filterpy
 numpy
+# pillow for anything not intel linux, pillow-simd is available on x64 linux
+Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
+pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
plugins/homekit/package-lock.json (generated, 48 changes)
@@ -1,25 +1,25 @@
 {
   "name": "@scrypted/homekit",
-  "version": "1.2.20",
+  "version": "1.2.23",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/homekit",
-      "version": "1.2.20",
+      "version": "1.2.23",
       "dependencies": {
         "@koush/werift-src": "file:../../external/werift",
         "check-disk-space": "^3.3.1",
         "hap-nodejs": "^0.11.0",
         "lodash": "^4.17.21",
-        "mkdirp": "^2.1.5"
+        "mkdirp": "^2.1.6"
       },
       "devDependencies": {
         "@scrypted/common": "file:../../common",
         "@scrypted/sdk": "file:../../sdk",
         "@types/debug": "^4.1.7",
-        "@types/lodash": "^4.14.191",
-        "@types/node": "^18.15.5",
+        "@types/lodash": "^4.14.192",
+        "@types/node": "^18.15.11",
         "@types/url-parse": "^1.4.8"
       }
     },
@@ -126,7 +126,7 @@
     },
     "../../sdk": {
       "name": "@scrypted/sdk",
-      "version": "0.2.85",
+      "version": "0.2.86",
       "dev": true,
       "license": "ISC",
       "dependencies": {
@@ -276,9 +276,9 @@
       }
     },
     "node_modules/@types/lodash": {
-      "version": "4.14.191",
-      "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.191.tgz",
-      "integrity": "sha512-BdZ5BCCvho3EIXw6wUCXHe7rS53AIDPLE+JzwgT+OsJk53oBfbSmZZ7CX4VaRoN78N+TJpFi9QPlfIVNmJYWxQ==",
+      "version": "4.14.192",
+      "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.192.tgz",
+      "integrity": "sha512-km+Vyn3BYm5ytMO13k9KTp27O75rbQ0NFw+U//g+PX7VZyjCioXaRFisqSIJRECljcTv73G3i6BpglNGHgUQ5A==",
       "dev": true
     },
     "node_modules/@types/ms": {
@@ -288,9 +288,9 @@
       "dev": true
     },
     "node_modules/@types/node": {
-      "version": "18.15.5",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.5.tgz",
-      "integrity": "sha512-Ark2WDjjZO7GmvsyFFf81MXuGTA/d6oP38anyxWOL6EREyBKAxKoFHwBhaZxCfLRLpO8JgVXwqOwSwa7jRcjew==",
+      "version": "18.15.11",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz",
+      "integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q==",
      "dev": true
     },
     "node_modules/@types/url-parse": {
@@ -856,9 +856,9 @@
       }
     },
     "node_modules/mkdirp": {
-      "version": "2.1.5",
-      "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-2.1.5.tgz",
-      "integrity": "sha512-jbjfql+shJtAPrFoKxHOXip4xS+kul9W3OzfzzrqueWK2QMGon2bFH2opl6W9EagBThjEz+iysyi/swOoVfB/w==",
+      "version": "2.1.6",
+      "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-2.1.6.tgz",
+      "integrity": "sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A==",
       "bin": {
         "mkdirp": "dist/cjs/src/bin.js"
       },
@@ -1276,9 +1276,9 @@
       }
     },
     "@types/lodash": {
-      "version": "4.14.191",
-      "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.191.tgz",
-      "integrity": "sha512-BdZ5BCCvho3EIXw6wUCXHe7rS53AIDPLE+JzwgT+OsJk53oBfbSmZZ7CX4VaRoN78N+TJpFi9QPlfIVNmJYWxQ==",
+      "version": "4.14.192",
+      "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.192.tgz",
+      "integrity": "sha512-km+Vyn3BYm5ytMO13k9KTp27O75rbQ0NFw+U//g+PX7VZyjCioXaRFisqSIJRECljcTv73G3i6BpglNGHgUQ5A==",
       "dev": true
     },
     "@types/ms": {
@@ -1288,9 +1288,9 @@
       "dev": true
     },
     "@types/node": {
-      "version": "18.15.5",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.5.tgz",
-      "integrity": "sha512-Ark2WDjjZO7GmvsyFFf81MXuGTA/d6oP38anyxWOL6EREyBKAxKoFHwBhaZxCfLRLpO8JgVXwqOwSwa7jRcjew==",
+      "version": "18.15.11",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz",
+      "integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q==",
       "dev": true
     },
     "@types/url-parse": {
@@ -1698,9 +1698,9 @@
       "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="
     },
     "mkdirp": {
-      "version": "2.1.5",
-      "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-2.1.5.tgz",
-      "integrity": "sha512-jbjfql+shJtAPrFoKxHOXip4xS+kul9W3OzfzzrqueWK2QMGon2bFH2opl6W9EagBThjEz+iysyi/swOoVfB/w=="
+      "version": "2.1.6",
+      "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-2.1.6.tgz",
+      "integrity": "sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A=="
     },
     "ms": {
       "version": "2.1.2",
@@ -1,6 +1,6 @@
 {
   "name": "@scrypted/homekit",
-  "version": "1.2.20",
+  "version": "1.2.23",
   "description": "HomeKit Plugin for Scrypted",
   "scripts": {
     "scrypted-setup-project": "scrypted-setup-project",
@@ -38,14 +38,14 @@
     "check-disk-space": "^3.3.1",
     "hap-nodejs": "^0.11.0",
     "lodash": "^4.17.21",
-    "mkdirp": "^2.1.5"
+    "mkdirp": "^2.1.6"
   },
   "devDependencies": {
     "@scrypted/common": "file:../../common",
     "@scrypted/sdk": "file:../../sdk",
     "@types/debug": "^4.1.7",
-    "@types/lodash": "^4.14.191",
-    "@types/node": "^18.15.5",
+    "@types/lodash": "^4.14.192",
+    "@types/node": "^18.15.11",
     "@types/url-parse": "^1.4.8"
   }
 }
@@ -303,13 +303,19 @@ addSupportedType({
         }
     }

+    // if the camera is a device provider, merge in child devices and
+    // ensure the devices are skipped by the rest of homekit by
+    // reporting that they've been merged
     if (device.interfaces.includes(ScryptedInterface.DeviceProvider)) {
         // merge in lights
-        const { devices } = mergeOnOffDevicesByType(device as ScryptedDevice as ScryptedDevice & DeviceProvider, accessory, ScryptedDeviceType.Light);
+        mergeOnOffDevicesByType(device as ScryptedDevice as ScryptedDevice & DeviceProvider, accessory, ScryptedDeviceType.Light).devices.forEach(device => {
+            homekitPlugin.mergedDevices.add(device.id)
+        });

-        // ensure child devices are skipped by the rest of homekit by
-        // reporting that they've been merged
-        devices.map(device => homekitPlugin.mergedDevices.add(device.id));
+        // merge in sirens
+        mergeOnOffDevicesByType(device as ScryptedDevice as ScryptedDevice & DeviceProvider, accessory, ScryptedDeviceType.Siren).devices.forEach(device => {
+            homekitPlugin.mergedDevices.add(device.id)
+        });
     }

     return accessory;
@@ -15,9 +15,9 @@ import os from 'os';
 import { getAddressOverride } from '../../address-override';
 import { AudioStreamingCodecType, CameraController, CameraStreamingDelegate, PrepareStreamCallback, PrepareStreamRequest, PrepareStreamResponse, StartStreamRequest, StreamingRequest, StreamRequestCallback, StreamRequestTypes } from '../../hap';
 import type { HomeKitPlugin } from "../../main";
-import { createReturnAudioSdp } from './camera-return-audio';
 import { createSnapshotHandler } from '../camera/camera-snapshot';
 import { getDebugMode } from './camera-debug-mode-storage';
+import { createReturnAudioSdp } from './camera-return-audio';
 import { startCameraStreamFfmpeg } from './camera-streaming-ffmpeg';
 import { CameraStreamingSession } from './camera-streaming-session';
 import { getStreamingConfiguration } from './camera-utils';
@@ -375,6 +375,12 @@ export function createCameraStreamingDelegate(device: ScryptedDevice & VideoCame
     let playing = false;
     session.audioReturn.once('message', async buffer => {
         try {
+            const decrypted = srtpSession.decrypt(buffer);
+            const rtp = RtpPacket.deSerialize(decrypted);
+
+            if (rtp.header.payloadType !== session.startRequest.audio.pt)
+                return;
+
             const { clientPromise, url } = await listenZeroSingleClient();
             const rtspUrl = url.replace('tcp', 'rtsp');
             let sdp = createReturnAudioSdp(session.startRequest.audio);
@@ -64,6 +64,9 @@ export class H264Repacketizer {
     extraPackets = 0;
     fuaMax: number;
     pendingFuA: RtpPacket[];
+    // log whether a stapa sps/pps has been seen.
+    // resets on every idr frame, to trigger codec information
+    // to be resent.
+    seenStapASps = false;
     fuaMin: number;
@@ -402,8 +405,12 @@ export class H264Repacketizer {
             // if this is an idr frame, but no sps has been sent via a stapa, dummy one up.
             // the stream may not contain codec information in stapa or may be sending it
             // in separate sps/pps packets which is not supported by homekit.
-            if (originalNalType === NAL_TYPE_IDR && !this.seenStapASps)
-                this.maybeSendSpsPps(packet, ret);
+            if (originalNalType === NAL_TYPE_IDR) {
+                if (!this.seenStapASps)
+                    this.maybeSendSpsPps(packet, ret);
+                this.seenStapASps = false;
+            }
         }
         else {
             if (this.pendingFuA) {
@@ -486,10 +493,12 @@ export class H264Repacketizer {
             return;
         }

-        if (nalType === NAL_TYPE_IDR && !this.seenStapASps) {
+        if (nalType === NAL_TYPE_IDR) {
             // if this is an idr frame, but no sps has been sent, dummy one up.
             // the stream may not contain sps.
-            this.maybeSendSpsPps(packet, ret);
+            if (!this.seenStapASps)
+                this.maybeSendSpsPps(packet, ret);
+            this.seenStapASps = false;
         }

         this.fragment(packet, ret);
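The change converts the one-shot SPS/PPS injection into per-keyframe bookkeeping: seenStapASps is cleared on every IDR, so codec information is dummied up again for each new keyframe unless a STAP-A already carried it. A small Python sketch of that state machine, with the packet handling simplified to bare NAL type codes (illustrative, not plugin code):

NAL_TYPE_STAPA = 24
NAL_TYPE_IDR = 5


class SpsPpsTracker:
    """Re-send SPS/PPS before each IDR unless a STAP-A already carried it."""

    def __init__(self):
        self.seen_stapa_sps = False

    def on_packet(self, nal_type: int) -> bool:
        """Return True when dummy SPS/PPS must be injected before this packet."""
        if nal_type == NAL_TYPE_STAPA:
            self.seen_stapa_sps = True
            return False
        if nal_type == NAL_TYPE_IDR:
            inject = not self.seen_stapa_sps
            # reset so the next IDR triggers codec information again
            self.seen_stapa_sps = False
            return inject
        return False


tracker = SpsPpsTracker()
print([tracker.on_packet(t) for t in [5, 24, 5, 5]])  # [True, False, False, True]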
@@ -9,7 +9,7 @@ export function probe(device: DummyDevice): boolean {
 }

 export function getService(device: ScryptedDevice & OnOff, accessory: Accessory, serviceType: any): Service {
-    const service = accessory.addService(serviceType, device.name);
+    const service = accessory.addService(serviceType, device.name, device.nativeId);
     service.getCharacteristic(Characteristic.On)
         .on(CharacteristicEventTypes.SET, (value: CharacteristicValue, callback: CharacteristicSetCallback) => {
             callback();
plugins/objectdetector/package-lock.json (generated, 4 changes)
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/objectdetector",
-  "version": "0.0.116",
+  "version": "0.0.122",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/objectdetector",
-      "version": "0.0.116",
+      "version": "0.0.122",
       "license": "Apache-2.0",
       "dependencies": {
         "@scrypted/common": "file:../../common",

@@ -1,6 +1,6 @@
 {
   "name": "@scrypted/objectdetector",
-  "version": "0.0.116",
+  "version": "0.0.122",
   "description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
   "author": "Scrypted",
   "license": "Apache-2.0",
@@ -1,12 +1,32 @@
 import { Deferred } from "@scrypted/common/src/deferred";
+import { addVideoFilterArguments } from "@scrypted/common/src/ffmpeg-helpers";
 import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from "@scrypted/common/src/media-helpers";
 import { readLength, readLine } from "@scrypted/common/src/read-stream";
-import { addVideoFilterArguments } from "@scrypted/common/src/ffmpeg-helpers";
 import sdk, { FFmpegInput, Image, ImageOptions, MediaObject, ScryptedDeviceBase, ScryptedMimeTypes, VideoFrame, VideoFrameGenerator, VideoFrameGeneratorOptions } from "@scrypted/sdk";
 import child_process from 'child_process';
-import sharp from 'sharp';
+import type sharp from 'sharp';
 import { Readable } from 'stream';

+export let sharpLib: (input?:
+    | Buffer
+    | Uint8Array
+    | Uint8ClampedArray
+    | Int8Array
+    | Uint16Array
+    | Int16Array
+    | Uint32Array
+    | Int32Array
+    | Float32Array
+    | Float64Array
+    | string,
+    options?: sharp.SharpOptions) => sharp.Sharp;
+try {
+    sharpLib = require('sharp');
+}
+catch (e) {
+    console.warn('Sharp failed to load. FFmpeg Frame Generator will not function properly.')
+}
+
 async function createVipsMediaObject(image: VipsImage): Promise<VideoFrame & MediaObject> {
     const ret = await sdk.mediaManager.createMediaObject(image, ScryptedMimeTypes.Image, {
         format: null,
@@ -30,7 +50,7 @@ interface RawFrame {
 }

 class VipsImage implements Image {
-    constructor(public image: sharp.Sharp, public width: number, public height: number) {
+    constructor(public image: sharp.Sharp, public width: number, public height: number, public channels: number) {
     }

     toImageInternal(options: ImageOptions) {
@@ -55,12 +75,18 @@ class VipsImage implements Image {

     async toBuffer(options: ImageOptions) {
         const transformed = this.toImageInternal(options);
-        if (options?.format === 'rgb') {
-            transformed.removeAlpha().toFormat('raw');
-        }
-        else if (options?.format === 'jpg') {
+        if (options?.format === 'jpg') {
             transformed.toFormat('jpg');
         }
+        else {
+            if (this.channels === 1 && (options?.format === 'gray' || !options.format))
+                transformed.extractChannel(0);
+            else if (options?.format === 'gray')
+                transformed.toColorspace('b-w');
+            else if (options?.format === 'rgb')
+                transformed.removeAlpha()
+            transformed.raw();
+        }
         return transformed.toBuffer();
     }

@@ -70,12 +96,25 @@ class VipsImage implements Image {
             resolveWithObject: true,
         });

-        const newImage = sharp(data, {
+        const sharpLib = require('sharp') as (input?:
+            | Buffer
+            | Uint8Array
+            | Uint8ClampedArray
+            | Int8Array
+            | Uint16Array
+            | Int16Array
+            | Uint32Array
+            | Int32Array
+            | Float32Array
+            | Float64Array
+            | string,
+            options?) => sharp.Sharp;
+        const newImage = sharpLib(data, {
             raw: info,
         });

         const newMetadata = await newImage.metadata();
-        const newVipsImage = new VipsImage(newImage, newMetadata.width, newMetadata.height);
+        const newVipsImage = new VipsImage(newImage, newMetadata.width, newMetadata.height, newMetadata.channels);
         return newVipsImage;
     }

@@ -90,12 +129,14 @@ class VipsImage implements Image {
 export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements VideoFrameGenerator {
     async *generateVideoFramesInternal(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): AsyncGenerator<VideoFrame & MediaObject, any, unknown> {
         const ffmpegInput = await sdk.mediaManager.convertMediaObjectToJSON<FFmpegInput>(mediaObject, ScryptedMimeTypes.FFmpegInput);
+        const gray = options?.format === 'gray';
+        const channels = gray ? 1 : 3;
         const args = [
             '-hide_banner',
             //'-hwaccel', 'auto',
             ...ffmpegInput.inputArguments,
             '-vcodec', 'pam',
-            '-pix_fmt', 'rgb24',
+            '-pix_fmt', gray ? 'gray' : 'rgb24',
             '-f', 'image2pipe',
             'pipe:3',
         ];
@@ -127,7 +168,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
                 }


-                if (headers['TUPLTYPE'] !== 'RGB')
+                if (headers['TUPLTYPE'] !== 'RGB' && headers['TUPLTYPE'] !== 'GRAYSCALE')
                     throw new Error(`Unexpected TUPLTYPE in PAM stream: ${headers['TUPLTYPE']}`);

                 const width = parseInt(headers['WIDTH']);
@@ -135,7 +176,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
                 if (!width || !height)
                     throw new Error('Invalid dimensions in PAM stream');

-                const length = width * height * 3;
+                const length = width * height * channels;
                 headers.clear();
                 const data = await readLength(readable, length);

@@ -149,7 +190,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
                     });
                 }
                 else {
-                    this.console.warn('skipped frame');
+                    // this.console.warn('skipped frame');
                 }
             }
         }
@@ -169,14 +210,14 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
             const raw = await frameDeferred.promise;
             const { width, height, data } = raw;

-            const image = sharp(data, {
+            const image = sharpLib(data, {
                 raw: {
                     width,
                     height,
-                    channels: 3,
+                    channels,
                 }
             });
-            const vipsImage = new VipsImage(image, width, height);
+            const vipsImage = new VipsImage(image, width, height, channels);
             try {
                 const mo = await createVipsMediaObject(vipsImage);
                 yield mo;
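The sharp import is made optional so the plugin can still load on platforms where the native module fails to build, and the FFmpeg frame generator is only advertised when sharpLib actually loaded. The same guarded-import pattern in Python, with the module name as an illustrative placeholder:

# guarded import: keep the plugin importable even when an optional
# native dependency is unavailable on this platform
try:
    import sharp_equivalent  # hypothetical optional native module
except ImportError:
    sharp_equivalent = None


def advertised_interfaces() -> list:
    # only advertise the frame generator capability when the dependency loaded
    return ["VideoFrameGenerator"] if sharp_equivalent else []


print(advertised_interfaces())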
@@ -1,11 +1,10 @@
-import sdk, { Camera, DeviceProvider, DeviceState, EventListenerRegister, MediaObject, MediaStreamDestination, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, VideoFrame, VideoFrameGenerator } from '@scrypted/sdk';
+import sdk, { Camera, DeviceProvider, DeviceState, EventListenerRegister, MediaObject, MediaStreamDestination, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, VideoFrame, VideoFrameGenerator } from '@scrypted/sdk';
 import { StorageSettings } from '@scrypted/sdk/storage-settings';
 import crypto from 'crypto';
-import cloneDeep from 'lodash/cloneDeep';
 import { AutoenableMixinProvider } from "../../../common/src/autoenable-mixin-provider";
 import { SettingsMixinDeviceBase } from "../../../common/src/settings-mixin";
-import { DenoisedDetectionEntry, DenoisedDetectionState, denoiseDetections } from './denoise';
-import { FFmpegVideoFrameGenerator } from './ffmpeg-videoframes';
+import { DenoisedDetectionState } from './denoise';
+import { FFmpegVideoFrameGenerator, sharpLib } from './ffmpeg-videoframes';
 import { serverSupportsMixinEventMasking } from './server-version';
 import { sleep } from './sleep';
 import { getAllDevices, safeParseJson } from './util';
@@ -19,8 +18,6 @@ const defaultDetectionDuration = 20;
 const defaultDetectionInterval = 60;
 const defaultDetectionTimeout = 60;
 const defaultMotionDuration = 10;
-const defaultScoreThreshold = .2;
-const defaultSecondScoreThreshold = .7;

 const BUILTIN_MOTION_SENSOR_ASSIST = 'Assist';
 const BUILTIN_MOTION_SENSOR_REPLACE = 'Replace';
@@ -44,9 +41,8 @@ type TrackedDetection = ObjectDetectionResult & {
     bestSecondPassScore?: number;
 };

-class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera & MotionSensor & ObjectDetector> implements ObjectDetector, Settings, ObjectDetectionCallbacks {
+class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera & MotionSensor & ObjectDetector> implements ObjectDetector, Settings {
     motionListener: EventListenerRegister;
-    detectorListener: EventListenerRegister;
     motionMixinListener: EventListenerRegister;
     detections = new Map<string, MediaObject>();
     cameraDevice: ScryptedDevice & Camera & VideoCamera & MotionSensor & ObjectDetector;
@@ -65,6 +61,10 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
                 choices,
             }
         },
+        onPut: () => {
+            this.endObjectDetection();
+            this.maybeStartMotionDetection();
+        },
         defaultValue: 'Default',
     },
     motionSensorSupplementation: {
@@ -81,16 +81,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
             this.maybeStartMotionDetection();
         }
     },
-    captureMode: {
-        title: 'Capture Mode',
-        description: 'The method to capture frames for analysis. Video will require more processing power.',
-        choices: [
-            'Default',
-            'Video',
-            'Snapshot',
-        ],
-        defaultValue: 'Default',
-    },
     detectionDuration: {
         title: 'Detection Duration',
         subgroup: 'Advanced',
@@ -121,23 +111,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         defaultValue: defaultDetectionInterval,
         hide: true,
     },
-    scoreThreshold: {
-        title: 'Minimum Detection Confidence',
-        subgroup: 'Advanced',
-        description: 'Higher values eliminate false positives and low quality recognition candidates.',
-        type: 'number',
-        placeholder: '.2',
-        defaultValue: defaultScoreThreshold,
-    },
-    secondScoreThreshold: {
-        title: 'Second Pass Confidence',
-        subgroup: 'Advanced',
-        description: 'Crop and reanalyze a result from the initial detection pass to get more accurate results.',
-        key: 'secondScoreThreshold',
-        type: 'number',
-        defaultValue: defaultSecondScoreThreshold,
-        placeholder: '.7',
-    },
     });
     motionTimeout: NodeJS.Timeout;
     zones = this.getZones();
@@ -178,7 +151,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
             if (this.hasMotionType) {
                 // force a motion detection restart if it quit
                 if (this.motionSensorSupplementation === BUILTIN_MOTION_SENSOR_REPLACE)
-                    await this.startStreamAnalysis();
+                    await this.startPipelineAnalysis();
                 return;
             }
         }, this.storageSettings.values.detectionInterval * 1000);
@@ -216,91 +189,46 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         return ret;
     }

-    async snapshotDetection() {
-        const picture = await this.cameraDevice.takePicture();
-        let detections = await this.objectDetection.detectObjects(picture, {
-            detectionId: this.detectionId,
-            settings: this.getCurrentSettings(),
-        });
-        detections = await this.trackObjects(detections, true);
-        this.reportObjectDetections(detections);
-    }
-
     async maybeStartMotionDetection() {
         if (!this.hasMotionType)
             return;
         if (this.motionSensorSupplementation !== BUILTIN_MOTION_SENSOR_REPLACE)
             return;
-        await this.startStreamAnalysis();
+        await this.startPipelineAnalysis();
     }

     endObjectDetection() {
         this.detectorRunning = false;
         this.objectDetection?.detectObjects(undefined, {
             detectionId: this.detectionId,
             settings: this.getCurrentSettings(),
         });
     }

     bindObjectDetection() {
         if (this.hasMotionType)
             this.motionDetected = false;

         this.detectorRunning = false;
-        this.detectorListener?.removeListener();
-        this.detectorListener = undefined;
         this.endObjectDetection();

         this.maybeStartMotionDetection();
     }

     async register() {
-        const model = await this.objectDetection.getDetectionModel();
-
         if (!this.hasMotionType) {
-            if (model.triggerClasses?.includes('motion')) {
-                this.motionListener = this.cameraDevice.listen(ScryptedInterface.MotionSensor, async () => {
-                    if (!this.cameraDevice.motionDetected) {
-                        if (this.detectorRunning) {
-                            // allow anaysis due to user request.
-                            if (this.analyzeStop > Date.now())
-                                return;
-
-                            this.console.log('motion stopped, cancelling ongoing detection')
-                            this.endObjectDetection();
-                        }
-                        return;
-                    }
-
-                    await this.startStreamAnalysis();
-                });
-            }
-
-            const nonMotion = model.triggerClasses?.find(t => t !== 'motion');
-            if (nonMotion) {
-                this.detectorListener = this.cameraDevice.listen(ScryptedInterface.ObjectDetector, async (s, d, data: ObjectsDetected) => {
-                    if (!model.triggerClasses)
-                        return;
-                    if (!data.detectionId)
-                        return;
-                    const { detections } = data;
-                    if (!detections?.length)
-                        return;
-
-                    const set = new Set(detections.map(d => d.className));
-                    for (const trigger of model.triggerClasses) {
-                        if (trigger === 'motion')
-                            continue;
-
-                        if (set.has(trigger)) {
-                            const jpeg = await this.cameraDevice.getDetectionInput(data.detectionId, data.eventId);
-                            const found = await this.objectDetection.detectObjects(jpeg);
-                            found.detectionId = data.detectionId;
-                            this.handleDetectionEvent(found, undefined, jpeg);
-                        }
-                    }
-                });
-            }
-
-            return;
-        }
+            this.motionListener = this.cameraDevice.listen(ScryptedInterface.MotionSensor, async () => {
+                if (!this.cameraDevice.motionDetected) {
+                    if (this.detectorRunning) {
+                        // allow anaysis due to user request.
+                        if (this.analyzeStop > Date.now())
+                            return;
+
+                        this.console.log('motion stopped, cancelling ongoing detection')
+                        this.endObjectDetection();
+                    }
+                    return;
+                }
+
+                await this.startPipelineAnalysis();
+            });
+
+            return;
+        }
@@ -317,7 +245,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
                 return;
             if (!this.detectorRunning)
                 this.console.log('built in motion sensor started motion, starting video detection.');
-            await this.startStreamAnalysis();
+            await this.startPipelineAnalysis();
             return;
         }
@@ -332,163 +260,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         }
     }

-    async handleDetectionEvent(detection: ObjectsDetected, redetect?: (boundingBox: [number, number, number, number]) => Promise<ObjectDetectionResult[]>, mediaObject?: MediaObject) {
-        this.detectorRunning = detection.running;
-
-        detection = await this.trackObjects(detection);
-
-        // apply the zones to the detections and get a shallow copy list of detections after
-        // exclusion zones have applied
-        const zonedDetections = this.applyZones(detection)
-            .filter(d => {
-                if (!d.zones?.length)
-                    return d.bestSecondPassScore >= this.secondScoreThreshold || d.score >= this.scoreThreshold;
-
-                for (const zone of d.zones || []) {
-                    const zi = this.zoneInfos[zone];
-                    const scoreThreshold = zi?.scoreThreshold || this.scoreThreshold;
-                    const secondScoreThreshold = zi?.secondScoreThreshold || this.secondScoreThreshold;
-                    // keep the object if it passes the score check, or has already passed a second score check.
-                    if (d.bestSecondPassScore >= secondScoreThreshold || d.score >= scoreThreshold)
-                        return true;
-                }
-            });
-
-        let retainImage = false;
-
-        if (!this.hasMotionType && redetect && this.secondScoreThreshold && detection.detections) {
-            const detections = detection.detections as TrackedDetection[];
-            const newOrBetterDetections = zonedDetections.filter(d => d.newOrBetterDetection);
-            detections?.forEach(d => d.newOrBetterDetection = false);
-
-            // anything with a higher pass initial score should be redetected
-            // as it may yield a better second pass score and thus a better thumbnail.
-            await Promise.allSettled(newOrBetterDetections.map(async d => {
-                const maybeUpdateSecondPassScore = (secondPassScore: number) => {
-                    let better = false;
-                    // initialize second pass result
-                    if (!d.bestSecondPassScore) {
-                        better = true;
-                        d.bestSecondPassScore = 0;
-                    }
-                    // retain passing the second pass threshold for first time.
-                    if (d.bestSecondPassScore < this.secondScoreThreshold && secondPassScore >= this.secondScoreThreshold) {
-                        this.console.log('improved', d.id, secondPassScore, d.score);
-                        better = true;
-                        retainImage = true;
-                    }
-                    else if (secondPassScore > d.bestSecondPassScore * 1.1) {
-                        this.console.log('improved', d.id, secondPassScore, d.score);
-                        better = true;
-                        retainImage = true;
-                    }
-                    if (better)
-                        d.bestSecondPassScore = secondPassScore;
-                    return better;
-                }
-
-                // the initial score may be sufficient.
-                if (d.score >= this.secondScoreThreshold) {
-                    maybeUpdateSecondPassScore(d.score);
-                    return;
-                }
-
-                const redetected = await redetect(d.boundingBox);
-                const best = redetected.filter(r => r.className === d.className).sort((a, b) => b.score - a.score)?.[0];
-                if (best) {
-                    if (maybeUpdateSecondPassScore(best.score)) {
-                        d.boundingBox = best.boundingBox;
-                    }
-                }
-            }));
-
-            const secondPassDetections = zonedDetections.filter(d => d.bestSecondPassScore >= this.secondScoreThreshold)
-                .map(d => ({
-                    ...d,
-                    score: d.bestSecondPassScore,
-                }));
-            detection.detections = secondPassDetections;
-        }
-        else {
-            detection.detections = zonedDetections;
-        }
-
-        if (detection.detections) {
-            const trackedDetections = cloneDeep(detection.detections) as TrackedDetection[];
-            for (const d of trackedDetections) {
-                delete d.bestScore;
-                delete d.bestSecondPassScore;
-                delete d.newOrBetterDetection;
-            }
-            detection.detections = trackedDetections;
-        }
-
-        const now = Date.now();
-        if (this.lastDetectionInput + this.storageSettings.values.detectionTimeout * 1000 < Date.now())
-            retainImage = true;
-
-        if (retainImage && mediaObject) {
-            this.lastDetectionInput = now;
-            this.setDetection(detection, mediaObject);
-        }
-
-        this.reportObjectDetections(detection);
-        return retainImage;
-    }
-
-    get scoreThreshold() {
-        return parseFloat(this.storage.getItem('scoreThreshold')) || defaultScoreThreshold;
-    }
-
-    get secondScoreThreshold() {
-        const r = parseFloat(this.storage.getItem('secondScoreThreshold'));
-        if (isNaN(r))
-            return defaultSecondScoreThreshold;
-        return r;
-    }
-
-    async onDetection(detection: ObjectsDetected, redetect?: (boundingBox: [number, number, number, number]) => Promise<ObjectDetectionResult[]>, mediaObject?: MediaObject): Promise<boolean> {
-        // detection.detections = detection.detections?.filter(d => d.score >= this.scoreThreshold);
-        return this.handleDetectionEvent(detection, redetect, mediaObject);
-    }
-
-    async onDetectionEnded(detection: ObjectsDetected): Promise<void> {
-        this.handleDetectionEvent(detection);
-    }
-
-    async startSnapshotAnalysis() {
-        if (this.detectorRunning)
-            return;
-
-        this.detectorRunning = true;
-        this.analyzeStop = Date.now() + this.getDetectionDuration();
-
-        while (this.detectorRunning) {
-            const now = Date.now();
-            if (now > this.analyzeStop)
-                break;
-            try {
-                const mo = await this.mixinDevice.takePicture({
-                    reason: 'event',
-                });
-                const found = await this.objectDetection.detectObjects(mo, {
-                    detectionId: this.detectionId,
-                    duration: this.getDetectionDuration(),
-                    settings: this.getCurrentSettings(),
-                }, this);
-            }
-            catch (e) {
-                this.console.error('snapshot detection error', e);
-            }
-            // cameras tend to only refresh every 1s at best.
-            // maybe get this value from somewhere? or sha the jpeg?
-            const diff = now + 1100 - Date.now();
-            if (diff > 0)
-                await sleep(diff);
-        }
-        this.endObjectDetection();
-    }
-
     async startPipelineAnalysis() {
         if (this.detectorRunning)
             return;
@@ -565,28 +336,14 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
             if (!this.detectorRunning) {
                 break;
             }
-            const now = Date.now();
-            if (now > this.analyzeStop) {
+            if (!this.hasMotionType && Date.now() > this.analyzeStop) {
                 break;
            }

            // apply the zones to the detections and get a shallow copy list of detections after
            // exclusion zones have applied
            const zonedDetections = this.applyZones(detected.detected);
-            const filteredDetections = zonedDetections
-                .filter(d => {
-                    if (!d.zones?.length)
-                        return d.score >= this.scoreThreshold;
-
-                    for (const zone of d.zones || []) {
-                        const zi = this.zoneInfos[zone];
-                        const scoreThreshold = zi?.scoreThreshold || this.scoreThreshold;
-                        if (d.score >= scoreThreshold)
-                            return true;
-                    }
-                });
-
-            detected.detected.detections = filteredDetections;
+            detected.detected.detections = zonedDetections;

            detections++;
            // this.console.warn('dps', detections / (Date.now() - start) * 1000);
@@ -615,79 +372,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         }
     }

-    async startStreamAnalysis() {
-        if (this.newPipeline) {
-            await this.startPipelineAnalysis();
-        }
-        else if (!this.hasMotionType && this.storageSettings.values.captureMode === 'Snapshot') {
-            await this.startSnapshotAnalysis();
-        }
-        else {
-            await this.startVideoDetection();
-        }
-    }
-
-    async extendedObjectDetect(force?: boolean) {
-        if (!this.hasMotionType && this.storageSettings.values.captureMode === 'Snapshot') {
-            this.analyzeStop = Date.now() + this.getDetectionDuration();
-        }
-        else {
-            try {
-                if (!force && !this.motionDetected)
-                    return;
-                await this.objectDetection?.detectObjects(undefined, {
-                    detectionId: this.detectionId,
-                    duration: this.getDetectionDuration(),
-                    settings: this.getCurrentSettings(),
-                }, this);
-            }
-            catch (e) {
-                // ignore any
-            }
-        }
-    }
-
-    async startVideoDetection() {
-        try {
-            const settings = this.getCurrentSettings();
-
-            // prevent stream retrieval noise until notified that the detection is no longer running.
-            if (this.detectorRunning) {
-                const session = await this.objectDetection?.detectObjects(undefined, {
-                    detectionId: this.detectionId,
-                    duration: this.getDetectionDuration(),
-                    settings,
-                }, this);
-                this.detectorRunning = session.running;
-                if (this.detectorRunning)
-                    return;
-            }
-
-            // dummy up the last detection time to prevent the idle timers from purging everything.
-            this.detectionState.lastDetection = Date.now();
-
-            this.detectorRunning = true;
-            let stream: MediaObject;
-
-            stream = await this.cameraDevice.getVideoStream({
-                destination: !this.hasMotionType ? 'local-recorder' : 'low-resolution',
-                // ask rebroadcast to mute audio, not needed.
-                audio: null,
-            });
-            const session = await this.objectDetection?.detectObjects(stream, {
-                detectionId: this.detectionId,
-                duration: this.getDetectionDuration(),
-                settings,
-            }, this);
-
-            this.detectorRunning = session.running;
-        }
-        catch (e) {
-            this.console.log('failure retrieving stream', e);
-            this.detectorRunning = false;
-        }
-    }
-
     normalizeBox(boundingBox: [number, number, number, number], inputDimensions: [number, number]) {
         let [x, y, width, height] = boundingBox;
         let x2 = x + width;
@@ -806,88 +490,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         this.onDeviceEvent(ScryptedInterface.ObjectDetector, detection);
     }

-    async trackObjects(detectionResult: ObjectsDetected, showAll?: boolean) {
-        // do not denoise
-        if (this.hasMotionType) {
-            return detectionResult;
-        }
-
-        if (!detectionResult?.detections) {
-            // detection session ended.
-            return detectionResult;
-        }
-
-        const { detections } = detectionResult;
-
-        const found: DenoisedDetectionEntry<TrackedDetection>[] = [];
-        denoiseDetections<TrackedDetection>(this.detectionState, detections.map(detection => ({
-            get id() {
-                return detection.id;
-            },
-            set id(id) {
-                detection.id = id;
-            },
-            name: detection.className,
-            score: detection.score,
-            detection,
-            get firstSeen() {
-                return detection.history?.firstSeen
-            },
-            set firstSeen(value) {
-                detection.history = detection.history || {
-                    firstSeen: value,
-                    lastSeen: value,
-                };
-                detection.history.firstSeen = value;
-            },
-            get lastSeen() {
-                return detection.history?.lastSeen
-            },
-            set lastSeen(value) {
-                detection.history = detection.history || {
-                    firstSeen: value,
-                    lastSeen: value,
-                };
-                detection.history.lastSeen = value;
-            },
-            boundingBox: detection.boundingBox,
-        })), {
-            timeout: this.storageSettings.values.detectionTimeout * 1000,
-            added: d => {
-                found.push(d);
-                d.detection.bestScore = d.detection.score;
-                d.detection.newOrBetterDetection = true;
-            },
-            removed: d => {
-                this.console.log('expired detection:', `${d.detection.className} (${d.detection.score})`);
-                if (detectionResult.running)
-                    this.extendedObjectDetect();
-            },
-            retained: (d, o) => {
-                if (d.detection.score > o.detection.bestScore) {
-                    d.detection.bestScore = d.detection.score;
-                    d.detection.newOrBetterDetection = true;
-                }
-                else {
-                    d.detection.bestScore = o.detection.bestScore;
-                }
-                d.detection.bestSecondPassScore = o.detection.bestSecondPassScore;
-            },
-            expiring: (d) => {
-            },
-        });
-        if (found.length) {
-            this.console.log('new detection:', found.map(d => `${d.id} ${d.detection.className} (${d.detection.score})`).join(', '));
-            if (detectionResult.running)
-                this.extendedObjectDetect();
-        }
-        if (found.length || showAll) {
-            this.console.log('current detections:', this.detectionState.previousDetections.map(d => `${d.detection.className} (${d.detection.score}, ${d.detection.boundingBox?.join(', ')})`).join(', '));
-        }
-
-        return detectionResult;
-    }
-
     setDetection(detection: ObjectsDetected, detectionInput: MediaObject) {
         if (!detection.detectionId)
             detection.detectionId = crypto.randomBytes(4).toString('hex');
@@ -942,9 +544,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
     }

     get newPipeline() {
-        if (!this.plugin.storageSettings.values.newPipeline)
-            return;
-
         const newPipeline = this.storageSettings.values.newPipeline;
         if (!newPipeline)
             return newPipeline;
@@ -979,8 +578,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         }

         this.storageSettings.settings.motionSensorSupplementation.hide = !this.hasMotionType || !this.mixinDeviceInterfaces.includes(ScryptedInterface.MotionSensor);
-        this.storageSettings.settings.captureMode.hide = this.hasMotionType || !!this.plugin.storageSettings.values.newPipeline;
-        this.storageSettings.settings.newPipeline.hide = !this.plugin.storageSettings.values.newPipeline;
         this.storageSettings.settings.detectionDuration.hide = this.hasMotionType;
         this.storageSettings.settings.detectionTimeout.hide = this.hasMotionType;
         this.storageSettings.settings.motionDuration.hide = !this.hasMotionType;
@@ -988,23 +585,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera

         settings.push(...await this.storageSettings.getSettings());

-        let hideThreshold = true;
-        if (!this.hasMotionType) {
-            let hasInclusionZone = false;
-            for (const zone of Object.keys(this.zones)) {
-                const zi = this.zoneInfos[zone];
-                if (!zi?.exclusion) {
-                    hasInclusionZone = true;
-                    break;
-                }
-            }
-            if (!hasInclusionZone) {
-                hideThreshold = false;
-            }
-        }
-        this.storageSettings.settings.scoreThreshold.hide = hideThreshold;
-        this.storageSettings.settings.secondScoreThreshold.hide = hideThreshold;
-
         settings.push({
             key: 'zones',
             title: 'Zones',
@@ -1048,38 +628,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
             ],
             value: zi?.type || 'Intersect',
         });

-        if (!this.hasMotionType) {
-            settings.push(
-                {
-                    subgroup,
-                    key: `zoneinfo-classes-${name}`,
-                    title: `Detection Classes`,
-                    description: 'The detection classes to match inside this zone. An empty list will match all classes.',
-                    choices: (await this.getObjectTypes())?.classes || [],
-                    value: zi?.classes || [],
-                    multiple: true,
-                },
-                {
-                    subgroup,
-                    title: 'Minimum Detection Confidence',
-                    description: 'Higher values eliminate false positives and low quality recognition candidates.',
-                    key: `zoneinfo-scoreThreshold-${name}`,
-                    type: 'number',
-                    value: zi?.scoreThreshold || this.scoreThreshold,
-                    placeholder: '.2',
-                },
-                {
-                    subgroup,
-                    title: 'Second Pass Confidence',
-                    description: 'Crop and reanalyze a result from the initial detection pass to get more accurate results.',
-                    key: `zoneinfo-secondScoreThreshold-${name}`,
-                    type: 'number',
-                    value: zi?.secondScoreThreshold || this.secondScoreThreshold,
-                    placeholder: '.7',
-                },
-            );
-        }
     }

     if (!this.hasMotionType) {
@@ -1157,7 +705,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         if (key === 'analyzeButton') {
             this.analyzeStop = Date.now() + 60000;
             // await this.snapshotDetection();
-            await this.startStreamAnalysis();
+            await this.startPipelineAnalysis();
         }
         else {
             const settings = this.getCurrentSettings();
@@ -1175,7 +723,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
         this.clearMotionTimeout();
         this.motionListener?.removeListener();
         this.motionMixinListener?.removeListener();
-        this.detectorListener?.removeListener();
         this.endObjectDetection();
     }
 }
@@ -1246,12 +793,6 @@ class ObjectDetectionPlugin extends AutoenableMixinProvider implements Settings,
     currentMixins = new Set<ObjectDetectorMixin>();

     storageSettings = new StorageSettings(this, {
-        newPipeline: {
-            title: 'New Video Pipeline',
-            description: 'Enables the new video pipeline addded on 2023/03/25. If there are issues with motion or object detection, disable this to switch back to the old pipeline. Then reload the plugin.',
-            type: 'boolean',
-            defaultValue: true,
-        },
         activeMotionDetections: {
             title: 'Active Motion Detection Sessions',
             readonly: true,
@@ -1281,9 +822,9 @@ class ObjectDetectionPlugin extends AutoenableMixinProvider implements Settings,
         {
             name: 'FFmpeg Frame Generator',
             type: ScryptedDeviceType.Builtin,
-            interfaces: [
+            interfaces: sharpLib ? [
                 ScryptedInterface.VideoFrameGenerator,
-            ],
+            ] : [],
             nativeId: 'ffmpeg',
         }
     ]
4 plugins/opencv/package-lock.json generated
@@ -1,12 +1,12 @@
{
    "name": "@scrypted/opencv",
    "version": "0.0.69",
    "version": "0.0.74",
    "lockfileVersion": 2,
    "requires": true,
    "packages": {
        "": {
            "name": "@scrypted/opencv",
            "version": "0.0.69",
            "version": "0.0.74",
            "devDependencies": {
                "@scrypted/sdk": "file:../../sdk"
            }

@@ -36,5 +36,5 @@
    "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
    },
    "version": "0.0.69"
    "version": "0.0.74"
}
@@ -1,22 +1,46 @@
from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List, Tuple
import numpy as np

import asyncio
import concurrent.futures
from typing import Any, List, Tuple

import cv2
import imutils
Gst = None
try:
    from gi.repository import Gst
except:
    pass
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting, VideoFrame
import numpy as np
import scrypted_sdk
from PIL import Image
from scrypted_sdk.types import (ObjectDetectionGeneratorSession,
                                ObjectDetectionResult, ObjectsDetected,
                                Setting, VideoFrame)

class OpenCVDetectionSession(DetectionSession):
from detect import DetectPlugin

# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")

async def to_thread(f):
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(toThreadExecutor, f)

async def ensureGrayData(data: bytes, size: Tuple[int, int], format: str):
    if format == 'gray':
        return data

    def convert():
        if format == 'rgba':
            image = Image.frombuffer('RGBA', size, data)
        else:
            image = Image.frombuffer('RGB', size, data)

        try:
            return image.convert('L').tobytes()
        finally:
            image.close()
    return await to_thread(convert)
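
As a quick illustration of how ensureGrayData and to_thread compose, here is a minimal sketch with a hand-built buffer (the 2x2 frame below is hypothetical; real buffers come from videoFrame.toBuffer()):

    import asyncio

    async def demo():
        # four RGB pixels, 12 bytes total
        rgb = bytes([255, 0, 0,  0, 255, 0,  0, 0, 255,  0, 0, 0])
        gray = await ensureGrayData(rgb, (2, 2), 'rgb')
        assert len(gray) == 4  # one luma byte per pixel after the 'L' conversion

    asyncio.run(demo())

The conversion itself runs on the shared executor, so the event loop stays free while Pillow does the pixel work.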


class OpenCVDetectionSession:
    def __init__(self) -> None:
        super().__init__()
        self.cap: cv2.VideoCapture = None
        self.previous_frame: Any = None
        self.curFrame = None

@@ -35,21 +59,6 @@ defaultBlur = 5
class OpenCVPlugin(DetectPlugin):
    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)
        self.color2Gray = None
        self.pixelFormat = "I420"
        self.pixelFormatChannelCount = 1

        if True:
            self.retainAspectRatio = False
            self.color2Gray = None
            self.pixelFormat = "I420"
            self.pixelFormatChannelCount = 1
        else:
            self.retainAspectRatio = True
            self.color2Gray = cv2.COLOR_BGRA2GRAY
            self.pixelFormat = "BGRA"
            self.pixelFormatChannelCount = 4

    def getClasses(self) -> list[str]:
        return ['motion']

@@ -91,9 +100,6 @@ class OpenCVPlugin(DetectPlugin):
        ]

        return settings

    def get_pixel_format(self):
        return self.pixelFormat

    def get_input_format(self) -> str:
        return 'gray'

@@ -110,17 +116,10 @@ class OpenCVPlugin(DetectPlugin):
        blur = int(settings.get('blur', blur))
        return area, threshold, interval, blur

    def detect(self, detection_session: OpenCVDetectionSession, frame, src_size, convert_to_src_size) -> ObjectsDetected:
        settings = detection_session.settings
    def detect(self, frame, settings: Any, detection_session: OpenCVDetectionSession, src_size, convert_to_src_size) -> ObjectsDetected:
        area, threshold, interval, blur = self.parse_settings(settings)

        # see get_detection_input_size on undocumented size requirements for GRAY8
        if self.color2Gray != None:
            detection_session.gray = cv2.cvtColor(
                frame, self.color2Gray, dst=detection_session.gray)
            gray = detection_session.gray
        else:
            gray = frame
        gray = frame
        detection_session.curFrame = cv2.GaussianBlur(
            gray, (blur, blur), 0, dst=detection_session.curFrame)

@@ -154,8 +153,8 @@ class OpenCVPlugin(DetectPlugin):
        # if w * h != contour_area:
        #     print("mismatch w/h", contour_area - w * h)

        x2, y2, _ = convert_to_src_size((x + w, y + h))
        x, y, _ = convert_to_src_size((x, y))
        x2, y2 = convert_to_src_size((x + w, y + h))
        x, y = convert_to_src_size((x, y))
        w = x2 - x + 1
        h = y2 - y + 1

@@ -206,11 +205,24 @@ class OpenCVPlugin(DetectPlugin):
        detection_session.cap = None
        return super().end_session(detection_session)

    async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
        # todo
        raise Exception('can not run motion detection on image')

    async def run_detection_videoframe(self, videoFrame: VideoFrame, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
    async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
        try:
            ds = OpenCVDetectionSession()
            videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
            async for videoFrame in videoFrames:
                detected = await self.run_detection_videoframe(videoFrame, session and session.get('settings'), ds)
                yield {
                    '__json_copy_serialize_children': True,
                    'detected': detected,
                    'videoFrame': videoFrame,
                }
        finally:
            try:
                await videoFrames.aclose()
            except:
                pass
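
A hedged sketch of how a consumer might drain this generator; the videoFrames source here is a stand-in for whatever VideoFrameGenerator the plugin host wires up, not something defined in this file:

    async def consume(plugin: OpenCVPlugin, videoFrames):
        # session settings are optional; an empty dict uses the plugin defaults
        async for result in plugin.generateObjectDetections(videoFrames, session={'settings': {}}):
            detected = result['detected']
            for d in (detected or {}).get('detections', []):
                print(d.get('className'), d.get('boundingBox'))

Note that the generator closes its source in the finally block above, so the consumer does not need to call aclose() on videoFrames itself.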

    async def run_detection_videoframe(self, videoFrame: VideoFrame, settings: Any, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
        width = videoFrame.width
        height = videoFrame.height

@@ -234,64 +246,26 @@ class OpenCVPlugin(DetectPlugin):
            'height': height,
        }

        format = videoFrame.format or 'gray'
        buffer = await videoFrame.toBuffer({
            'resize': resize,
            'format': format,
        })

        def convert_to_src_size(point, normalize = False):
            return point[0] * scale, point[1] * scale, True
        mat = np.ndarray((height, width, self.pixelFormatChannelCount), buffer=buffer, dtype=np.uint8)
        detections = self.detect(
            detection_session, mat, (width, height), convert_to_src_size)
        return detections

    async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
        if avframe.format.name != 'yuv420p' and avframe.format.name != 'yuvj420p':
            mat = avframe.to_ndarray(format='gray8')
        if format == 'gray':
            expectedLength = width * height
            # check if resize could not be completed
            if expectedLength != len(buffer):
                image = Image.frombuffer('L', (videoFrame.width, videoFrame.height), buffer)
                try:
                    buffer = image.resize((width, height), Image.BILINEAR).tobytes()
                finally:
                    image.close()
        else:
            mat = np.ndarray((avframe.height, avframe.width, self.pixelFormatChannelCount), buffer=avframe.planes[0], dtype=np.uint8)
        detections = self.detect(
            detection_session, mat, src_size, convert_to_src_size)
        if not detections or not len(detections['detections']):
            await self.detection_sleep(settings)
            return None, None
        return detections, None
            buffer = await ensureGrayData(buffer, (width, height), format)

    async def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
        buf = gst_sample.get_buffer()
        caps = gst_sample.get_caps()
        # can't trust the width value, compute the stride
        height = caps.get_structure(0).get_value('height')
        width = caps.get_structure(0).get_value('width')
        result, info = buf.map(Gst.MapFlags.READ)
        if not result:
            return None, None
        try:
            mat = np.ndarray(
                (height,
                 width,
                 self.pixelFormatChannelCount),
                buffer=info.data,
                dtype=np.uint8)
            detections = self.detect(
                detection_session, mat, src_size, convert_to_src_size)
            # no point in triggering empty events.
        finally:
            buf.unmap(info)

        if not detections or not len(detections['detections']):
            await self.detection_sleep(settings)
            return None, None
        return detections, None

    def create_detection_session(self):
        return OpenCVDetectionSession()

    async def detection_sleep(self, settings: Any):
        area, threshold, interval, blur = self.parse_settings(settings)
        # it is safe to block here because gstreamer creates a queue thread
        await asyncio.sleep(interval / 1000)

    async def detection_event_notified(self, settings: Any):
        await self.detection_sleep(settings)
        return await super().detection_event_notified(settings)
        def convert_to_src_size(point):
            return point[0] * scale, point[1] * scale
        mat = np.ndarray((height, width, 1), buffer=buffer, dtype=np.uint8)
        detections = self.detect(mat, settings, detection_session, (videoFrame.width, videoFrame.height), convert_to_src_size)
        return detections

@@ -1 +0,0 @@
../../tensorflow-lite/src/pipeline

@@ -3,9 +3,6 @@ numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
PyGObject>=3.30.4; sys_platform != 'win32'
imutils>=0.5.0
# not available on armhf
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# not available on armhf
opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64'
4 plugins/pam-diff/package-lock.json generated
@@ -1,12 +1,12 @@
{
    "name": "@scrypted/pam-diff",
    "version": "0.0.18",
    "version": "0.0.20",
    "lockfileVersion": 2,
    "requires": true,
    "packages": {
        "": {
            "name": "@scrypted/pam-diff",
            "version": "0.0.18",
            "version": "0.0.20",
            "hasInstallScript": true,
            "dependencies": {
                "@types/node": "^16.6.1",

@@ -43,5 +43,5 @@
    "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
    },
    "version": "0.0.18"
    "version": "0.0.20"
}

@@ -9,7 +9,7 @@ import { PassThrough, Writable } from 'stream';
const { mediaManager } = sdk;

const defaultDifference = 9;
const defaultPercentage = 15;
const defaultPercentage = 2;

interface PamDiffSession {
    id: string;
2 plugins/prebuffer-mixin/.vscode/launch.json vendored
@@ -10,7 +10,7 @@
    "port": 10081,
    "request": "attach",
    "skipFiles": [
        "**/plugin-remote-worker.*",
        "**/plugin-console.*",
        "<node_internals>/**"
    ],
    "autoAttachChildProcesses": true,
4 plugins/prebuffer-mixin/package-lock.json generated
@@ -1,12 +1,12 @@
{
    "name": "@scrypted/prebuffer-mixin",
    "version": "0.9.77",
    "version": "0.9.80",
    "lockfileVersion": 2,
    "requires": true,
    "packages": {
        "": {
            "name": "@scrypted/prebuffer-mixin",
            "version": "0.9.77",
            "version": "0.9.80",
            "license": "Apache-2.0",
            "dependencies": {
                "@scrypted/common": "file:../../common",

@@ -1,6 +1,6 @@
{
    "name": "@scrypted/prebuffer-mixin",
    "version": "0.9.77",
    "version": "0.9.80",
    "description": "Video Stream Rebroadcast, Prebuffer, and Management Plugin for Scrypted.",
    "author": "Scrypted",
    "license": "Apache-2.0",
@@ -1,5 +1,7 @@
import { Deferred } from "@scrypted/common/src/deferred";
import { Headers, RtspServer } from "@scrypted/common/src/rtsp-server";
import fs from 'fs';
import { format } from "path";
import { Duplex } from "stream";

// non-standard extension that dumps the rtp payload to a file.

@@ -28,17 +30,49 @@ export class FileRtspServer extends RtspServer {
        ws?.end(() => ws?.destroy());
    }

    write(url: string, requestHeaders: Headers) {
        this.cleanup();
        this.segmentBytesWritten = 0;

    async write(url: string, requestHeaders: Headers) {
        const file = requestHeaders['x-scrypted-rtsp-file'];

        if (!file)
            return this.respond(400, 'Bad Request', requestHeaders, {});

        const truncate = requestHeaders['x-scrypted-rtsp-file-truncate'];

        // this.writeConsole?.log('RTSP WRITE file', file);
        this.writeStream = fs.createWriteStream(file);

        // truncation preparation must happen before cleanup.
        let truncateWriteStream: fs.WriteStream;
        if (truncate) {
            try {
                const d = new Deferred<number>();
                fs.open(truncate, 'w', (e, fd) => {
                    if (e)
                        d.reject(e);
                    else
                        d.resolve(fd);
                });
                const fd = await d.promise;
                try {
                    await fs.promises.rename(truncate, file);
                    truncateWriteStream = fs.createWriteStream(undefined, {
                        fd,
                    })
                    // this.writeConsole?.log('truncating', truncate);
                }
                catch (e) {
                    throw e;
                }
            }
            catch (e) {
                this.writeConsole?.error('RTSP WRITE error during truncate file', truncate, e);
            }
        }

        // everything after this point must be sync due to cleanup potentially causing dangling state.
        this.cleanup();
        this.segmentBytesWritten = 0;

        this.writeStream = truncateWriteStream || fs.createWriteStream(file);
        this.writeStream.on('error', e => {
            this.writeConsole?.error('RTSP WRITE error', e);
        });
@@ -3,7 +3,7 @@ import { AutoenableMixinProvider } from '@scrypted/common/src/autoenable-mixin-p
import { getDebugModeH264EncoderArgs, getH264EncoderArgs } from '@scrypted/common/src/ffmpeg-hardware-acceleration';
import { addVideoFilterArguments } from '@scrypted/common/src/ffmpeg-helpers';
import { handleRebroadcasterClient, ParserOptions, ParserSession, startParserSession } from '@scrypted/common/src/ffmpeg-rebroadcast';
import { closeQuiet, listenZeroSingleClient } from '@scrypted/common/src/listen-cluster';
import { closeQuiet, listenZeroSingleClient, ListenZeroSingleClientTimeoutError } from '@scrypted/common/src/listen-cluster';
import { readLength } from '@scrypted/common/src/read-stream';
import { createRtspParser, findH264NaluType, getNaluTypes, H264_NAL_TYPE_FU_B, H264_NAL_TYPE_IDR, H264_NAL_TYPE_MTAP16, H264_NAL_TYPE_MTAP32, H264_NAL_TYPE_RESERVED0, H264_NAL_TYPE_RESERVED30, H264_NAL_TYPE_RESERVED31, H264_NAL_TYPE_SEI, H264_NAL_TYPE_STAP_B, listenSingleRtspClient, RtspServer, RtspTrack } from '@scrypted/common/src/rtsp-server';
import { addTrackControls, parseSdp } from '@scrypted/common/src/sdp-utils';

@@ -946,23 +946,35 @@ class PrebufferSession {
        const { isActiveClient, container, session, socketPromise, requestedPrebuffer } = options;
        this.console.log('sending prebuffer', requestedPrebuffer);

        // in case the client never connects, do an inactivity check.
        socketPromise.catch(() => this.inactivityCheck(session, false));
        socketPromise.then(socket => {
        let socket: Duplex;

        try {
            socket = await socketPromise;
        }
        catch (e) {
            // in case the client never connects, do an inactivity check.
            this.inactivityCheck(session, false);
            if (e instanceof ListenZeroSingleClientTimeoutError)
                this.console.warn('client connection timed out');
            else
                this.console.error('client connection error', e);
            return;
        }

        if (isActiveClient) {
            this.activeClients++;
            this.printActiveClients();
        }

        socket.once('close', () => {
            if (isActiveClient) {
                this.activeClients++;
                this.activeClients--;
                this.printActiveClients();
            }
            socket.once('close', () => {
                if (isActiveClient) {
                    this.activeClients--;
                    this.printActiveClients();
                }
                this.inactivityCheck(session, isActiveClient);
            })
            this.inactivityCheck(session, isActiveClient);
        });

        handleRebroadcasterClient(socketPromise, {
        handleRebroadcasterClient(socket, {
            // console: this.console,
            connect: (connection) => {
                const now = Date.now();

@@ -1138,7 +1150,7 @@ class PrebufferSession {
        }
        // server.console = this.console;
        await server.handlePlayback();
        server.handleTeardown().finally(() => server.client.destroy());
        server.handleTeardown().catch(() => {}).finally(() => server.client.destroy());
        for (const track of Object.values(server.setupTracks)) {
            if (track.protocol === 'udp') {
                serverPortMap.set(track.codec, track);
@@ -111,7 +111,7 @@ export function createStreamSettings(device: MixinDeviceBase<VideoCamera>) {
        placeholder: '-hwaccel auto',
        choices: Object.keys(getH264DecoderArgs()),
        combobox: true,
        mapPut: (oldValue, newValue) => getH264DecoderArgs()[newValue]?.join(' ') || newValue,
        mapPut: (oldValue, newValue) => getH264DecoderArgs()[newValue]?.join(' ') || newValue || '',
        hide: true,
    },
    videoFilterArguments: {
4 plugins/python-codecs/package-lock.json generated
@@ -1,12 +1,12 @@
{
    "name": "@scrypted/python-codecs",
    "version": "0.1.19",
    "version": "0.1.30",
    "lockfileVersion": 3,
    "requires": true,
    "packages": {
        "": {
            "name": "@scrypted/python-codecs",
            "version": "0.1.19",
            "version": "0.1.30",
            "devDependencies": {
                "@scrypted/sdk": "file:../../sdk"
            }

@@ -1,6 +1,6 @@
{
    "name": "@scrypted/python-codecs",
    "version": "0.1.19",
    "version": "0.1.30",
    "description": "Python Codecs for Scrypted",
    "keywords": [
        "scrypted",
@@ -27,6 +27,8 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
    # stream.codec_context.options['-analyzeduration'] = '0'
    # stream.codec_context.options['-probesize'] = '500000'

    gray = options and options.get('format') == 'gray'

    start = 0
    try:
        for idx, frame in enumerate(container.decode(stream)):

@@ -39,7 +41,12 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
                continue
            # print(frame)
            if vipsimage.pyvips:
                vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
                if gray and frame.format.name.startswith('yuv') and frame.planes and len(frame.planes):
                    vips = vipsimage.new_from_memory(memoryview(frame.planes[0]), frame.width, frame.height, 1)
                elif gray:
                    vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='gray'))
                else:
                    vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
                vipsImage = vipsimage.VipsImage(vips)
                try:
                    mo = await vipsimage.createVipsMediaObject(vipsImage)

@@ -48,7 +55,16 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
                    vipsImage.vipsImage = None
                    vips.invalidate()
            else:
                pil = frame.to_image()
                if gray and frame.format.name.startswith('yuv') and frame.planes and len(frame.planes):
                    pil = pilimage.new_from_memory(memoryview(frame.planes[0]), frame.width, frame.height, 1)
                elif gray:
                    rgb = frame.to_image()
                    try:
                        pil = rgb.convert('L')
                    finally:
                        rgb.close()
                else:
                    pil = frame.to_image()
                pilImage = pilimage.PILImage(pil)
                try:
                    mo = await pilimage.createPILMediaObject(pilImage)
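
The yuv shortcut above relies on the fact that plane 0 of a planar YUV frame is the full-resolution luma (Y) channel, which is already an 8-bit grayscale image, so no colorspace conversion is needed. A rough standalone sketch of the same idea ('clip.mp4' is a placeholder input, and the plane is assumed to be unpadded):

    import av
    from PIL import Image

    container = av.open('clip.mp4')  # placeholder path
    frame = next(container.decode(video=0))
    if frame.format.name.startswith('yuv'):
        # plane 0 of yuv420p/yuvj420p is the width x height Y plane
        gray = Image.frombuffer('L', (frame.width, frame.height), memoryview(frame.planes[0]))
    else:
        gray = frame.to_image().convert('L')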

@@ -1,3 +1,4 @@
import traceback
import asyncio
import scrypted_sdk
from scrypted_sdk import Setting, SettingValue

@@ -6,6 +7,7 @@ import gstreamer
import libav
import vipsimage
import pilimage
import time

Gst = None
try:

@@ -128,19 +130,29 @@ def create_scrypted_plugin():

class CodecFork:
    async def generateVideoFramesGstreamer(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None, h264Decoder: str = None) -> scrypted_sdk.VideoFrame:
        start = time.time()
        try:
            async for data in gstreamer.generateVideoFramesGstreamer(mediaObject, options, filter, h264Decoder):
                yield data
        except Exception as e:
            traceback.print_exc()
            raise
        finally:
            print('gstreamer finished after %s' % (time.time() - start))
            import os
            os._exit(os.EX_OK)
            pass

    async def generateVideoFramesLibav(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
        start = time.time()
        try:
            async for data in libav.generateVideoFramesLibav(mediaObject, options, filter):
                yield data
        except Exception as e:
            traceback.print_exc()
            raise
        finally:
            print('libav finished after %s' % (time.time() - start))
            import os
            os._exit(os.EX_OK)
            pass
@@ -21,19 +21,27 @@ class PILImage(scrypted_sdk.VideoFrame):

        if not options or not options.get('format', None):
            def format():
                bytesArray = io.BytesIO()
                pilImage.pilImage.save(bytesArray, format='JPEG')
                return bytesArray.getvalue()
                return pilImage.pilImage.tobytes()
            return await to_thread(format)
        elif options['format'] == 'rgb':
            def format():
                rgb = pilImage.pilImage
                if rgb.format == 'RGBA':
                    rgb = rgb.convert('RGB')
                return rgb.tobytes()
                rgbx = pilImage.pilImage
                if rgbx.mode != 'RGBA':
                    return rgbx.tobytes()
                rgb = rgbx.convert('RGB')
                try:
                    return rgb.tobytes()
                finally:
                    rgb.close()
            return await to_thread(format)

        return await to_thread(lambda: pilImage.pilImage.write_to_buffer('.' + options['format']))
        def save():
            bytesArray = io.BytesIO()
            pilImage.pilImage.save(bytesArray, format='JPEG')
            # pilImage.pilImage.save(bytesArray, format=options['format'])
            return bytesArray.getvalue()

        return await to_thread(lambda: save())

    async def toPILImage(self, options: scrypted_sdk.ImageOptions = None):
        return await to_thread(lambda: toPILImage(self, options))

@@ -66,7 +74,7 @@ def toPILImage(pilImageWrapper: PILImage, options: scrypted_sdk.ImageOptions = N
    if not width:
        width = pilImage.width * yscale

    pilImage = pilImage.resize((width, height), resample=Image.Resampling.BILINEAR)
    pilImage = pilImage.resize((width, height), resample=Image.BILINEAR)

    return PILImage(pilImage)
@@ -89,6 +97,7 @@ class ImageReader(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.BufferConverter)

    async def convert(self, data: Any, fromMimeType: str, toMimeType: str, options: scrypted_sdk.MediaObjectOptions = None) -> Any:
        pil = Image.open(io.BytesIO(data))
        pil.load()
        return await createPILMediaObject(PILImage(pil))

class ImageWriter(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.BufferConverter):
@@ -1,7 +1,14 @@
# plugin
# needed by libav to_ndarray
numpy>=1.16.2

# gobject introspection for gstreamer.
PyGObject>=3.30.4; sys_platform != 'win32'

# libav doesn't work on armv7
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'

# pyvips is not available on windows, and is preinstalled as part of the installer scripts on
# mac and linux.
pyvips; sys_platform != 'win32'

# in case pyvips fails to load, use a pillow fallback.
@@ -1,5 +1,4 @@
import asyncio
from typing import Any
import concurrent.futures

# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
@@ -6,7 +6,6 @@ try:
except:
    Image = None
    pyvips = None
    pass
from thread import to_thread

class VipsImage(scrypted_sdk.VideoFrame):

@@ -32,6 +31,10 @@ class VipsImage(scrypted_sdk.VideoFrame):
                mem = memoryview(rgb.write_to_memory())
                return mem
            return await to_thread(format)
        elif options['format'] == 'gray':
            def format():
                return memoryview(vipsImage.vipsImage.write_to_memory())
            return await to_thread(format)

        return await to_thread(lambda: vipsImage.vipsImage.write_to_buffer('.' + options['format']))
4 plugins/reolink/package-lock.json generated
@@ -1,12 +1,12 @@
{
    "name": "@scrypted/reolink",
    "version": "0.0.17",
    "version": "0.0.19",
    "lockfileVersion": 2,
    "requires": true,
    "packages": {
        "": {
            "name": "@scrypted/reolink",
            "version": "0.0.17",
            "version": "0.0.19",
            "license": "Apache",
            "dependencies": {
                "@koush/axios-digest-auth": "^0.8.5",

@@ -1,6 +1,6 @@
{
    "name": "@scrypted/reolink",
    "version": "0.0.17",
    "version": "0.0.19",
    "description": "Reolink Plugin for Scrypted",
    "author": "Scrypted",
    "license": "Apache",
@@ -115,6 +115,20 @@ class ReolinkCamera extends RtspSmartCamera implements Camera {
            });
        }

        // rough guesses for rebroadcast stream selection.
        ret[0].video = {
            width: 2560,
            height: 1920,
        }
        ret[1].video = {
            width: 896,
            height: 672,
        }
        ret[2].video = {
            width: 640,
            height: 480,
        }

        const channel = (this.getRtspChannel() + 1).toString().padStart(2, '0');
        const rtspPreviews = [
            `h264Preview_${channel}_main`,
2 plugins/ring/.vscode/settings.json vendored
@@ -1,4 +1,4 @@
{
    "scrypted.debugHost": "koushik-ubuntu",
    "scrypted.debugHost": "127.0.0.1",
}
4 plugins/ring/package-lock.json generated
@@ -1,12 +1,12 @@
{
    "name": "@scrypted/ring",
    "version": "0.0.106",
    "version": "0.0.107",
    "lockfileVersion": 3,
    "requires": true,
    "packages": {
        "": {
            "name": "@scrypted/ring",
            "version": "0.0.106",
            "version": "0.0.107",
            "dependencies": {
                "@koush/ring-client-api": "file:../../external/ring-client-api",
                "@scrypted/common": "file:../../common",

@@ -44,5 +44,5 @@
        "got": "11.8.6",
        "socket.io-client": "^2.5.0"
    },
    "version": "0.0.106"
    "version": "0.0.107"
}
@@ -702,7 +702,6 @@ export class RingCameraDevice extends ScryptedDeviceBase implements DeviceProvid
    }

    async getVideoClips(options?: VideoClipOptions): Promise<VideoClip[]> {
        this.videoClips = new Map<string, VideoClip>;
        const response = await this.camera.videoSearch({
            dateFrom: options.startTime,
            dateTo: options.endTime,
Submodule plugins/sample-cameraprovider updated: 24ee252e63...3b0721d898
@@ -1,118 +1,21 @@
from __future__ import annotations

from asyncio.events import AbstractEventLoop, TimerHandle
from asyncio.futures import Future
from typing import Any, Mapping, Tuple
from typing_extensions import TypedDict

from pipeline import GstPipeline, GstPipelineBase, create_pipeline_sink, safe_set_result
import scrypted_sdk
import json
import asyncio
import time
import os
import binascii
from urllib.parse import urlparse
import threading
from pipeline import run_pipeline
import platform
from .corohelper import run_coro_threadsafe
from PIL import Image
import math
import io
from typing import Any, Tuple

Gst = None
try:
    from gi.repository import Gst
except:
    pass

av = None
try:
    import av
    av.logging.set_level(av.logging.PANIC)
except:
    pass

from scrypted_sdk.types import ObjectDetectionGeneratorSession, ObjectDetectionModel, Setting, FFmpegInput, MediaObject, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionSession, ObjectsDetected, ScryptedInterface, ScryptedMimeTypes

def optional_chain(root, *keys):
    result = root
    for k in keys:
        if isinstance(result, dict):
            result = result.get(k, None)
        else:
            result = getattr(result, k, None)
        if result is None:
            break
    return result
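
For reference, optional_chain walks a mixed path of dict keys and object attributes, returning None as soon as any link is missing, which is how the FFmpegInput metadata is probed later in this file:

    ffmpeg_input = {'mediaStreamOptions': {'video': {'codec': 'h264'}}}
    optional_chain(ffmpeg_input, 'mediaStreamOptions', 'video', 'codec')  # 'h264'
    optional_chain(ffmpeg_input, 'mediaStreamOptions', 'audio', 'codec')  # None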


class DetectionSession:
    id: str
    timerHandle: TimerHandle
    future: Future
    loop: AbstractEventLoop
    settings: Any
    running: bool
    plugin: DetectPlugin
    callbacks: ObjectDetectionCallbacks
    user_callback: Any

    def __init__(self) -> None:
        self.timerHandle = None
        self.future = Future()
        self.running = False
        self.mutex = threading.Lock()
        self.last_sample = time.time()
        self.user_callback = None

    def clearTimeoutLocked(self):
        if self.timerHandle:
            self.timerHandle.cancel()
            self.timerHandle = None

    def clearTimeout(self):
        with self.mutex:
            self.clearTimeoutLocked()

    def timedOut(self):
        self.plugin.end_session(self)

    def setTimeout(self, duration: float):
        with self.mutex:
            self.clearTimeoutLocked()
            self.timerHandle = self.loop.call_later(
                duration, lambda: self.timedOut())


class DetectionSink(TypedDict):
    pipeline: str
    input_size: Tuple[float, float]
import scrypted_sdk
from scrypted_sdk.types import (MediaObject, ObjectDetection,
                                ObjectDetectionCallbacks,
                                ObjectDetectionGeneratorSession,
                                ObjectDetectionModel, ObjectDetectionSession,
                                ObjectsDetected, ScryptedMimeTypes, Setting)


class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)
        self.detection_sessions: Mapping[str, DetectionSession] = {}
        self.session_mutex = threading.Lock()
        self.crop = False
        self.loop = asyncio.get_event_loop()

    async def getSettings(self) -> list[Setting]:
        activeSessions: Setting = {
            'key': 'activeSessions',
            'readonly': True,
            'title': 'Active Detection Sessions',
            'value': len(self.detection_sessions),
        }
        return [
            activeSessions
        ]

    async def putSetting(self, key: str, value: scrypted_sdk.SettingValue) -> None:
        pass

    def getClasses(self) -> list[str]:
        pass

@@ -138,165 +41,21 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
            'settings': [],
        }

        decoderSetting: Setting = {
            'title': "Decoder",
            'description': "The tool used to decode the stream. This may be libav or a gstreamer element.",
            'combobox': True,
            'value': 'Default',
            'placeholder': 'Default',
            'key': 'decoder',
            'subgroup': 'Advanced',
            'choices': [
                'Default',
                'libav',
                'decodebin',
                'vtdec_hw',
                'nvh264dec',
                'vaapih264dec',
            ],
        }

        d['settings'] += self.getModelSettings(settings)
        d['settings'].append(decoderSetting)

        return d

    async def detection_event(self, detection_session: DetectionSession, detection_result: ObjectsDetected, redetect: Any = None, mediaObject = None):
        if not detection_session.running and detection_result.get('running'):
            return

        detection_result['timestamp'] = int(time.time() * 1000)
        if detection_session.callbacks:
            if detection_session.running:
                return await detection_session.callbacks.onDetection(detection_result, redetect, mediaObject)
            else:
                await detection_session.callbacks.onDetectionEnded(detection_result)
        else:
            # legacy path, nuke this pattern in opencv, pam diff, and full tensorflow.
            detection_result['detectionId'] = detection_session.id
            await self.onDeviceEvent(ScryptedInterface.ObjectDetection.value, detection_result)

    def end_session(self, detection_session: DetectionSession):
        print('detection ended', detection_session.id)

        detection_session.clearTimeout()
        # leave detection_session.running as True to avoid race conditions.
        # the removal from detection_sessions will restart it.
        safe_set_result(detection_session.loop, detection_session.future)
        with self.session_mutex:
            self.detection_sessions.pop(detection_session.id, None)

        detection_result: ObjectsDetected = {}
        detection_result['running'] = False

        asyncio.run_coroutine_threadsafe(self.detection_event(detection_session, detection_result), loop=detection_session.loop)

    def create_detection_result_status(self, detection_id: str, running: bool):
        detection_result: ObjectsDetected = {}
        detection_result['detectionId'] = detection_id
        detection_result['running'] = running
        detection_result['timestamp'] = int(time.time() * 1000)
        return detection_result

    def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
        pass

    def get_detection_input_size(self, src_size):
        pass

    def create_detection_session(self):
        return DetectionSession()

    def run_detection_gstsample(self, detection_session: DetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
    async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
        pass

    async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: DetectionSession) -> ObjectsDetected:
        pass

    async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
        pil: Image.Image = avframe.to_image()
        return await self.run_detection_image(detection_session, pil, settings, src_size, convert_to_src_size)

    async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
        pass

    def run_detection_crop(self, detection_session: DetectionSession, sample: Any, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
        print("not implemented")
        pass

    def ensure_session(self, mediaObjectMimeType: str, session: ObjectDetectionSession) -> Tuple[bool, DetectionSession, ObjectsDetected]:
        settings = None
        duration = None
        detection_id = None
        detection_session = None

        if session:
            detection_id = session.get('detectionId', None)
            duration = session.get('duration', None)
            settings = session.get('settings', None)

        is_image = mediaObjectMimeType and mediaObjectMimeType.startswith(
            'image/')

        ending = False
        new_session = False
        with self.session_mutex:
            if not is_image and not detection_id:
                detection_id = binascii.b2a_hex(os.urandom(15)).decode('utf8')

            if detection_id:
                detection_session = self.detection_sessions.get(
                    detection_id, None)

            if duration == None and not is_image:
                ending = True
            elif detection_id and not detection_session:
                if not mediaObjectMimeType:
                    return (False, None, self.create_detection_result_status(detection_id, False))

                new_session = True
                detection_session = self.create_detection_session()
                detection_session.plugin = self
                detection_session.id = detection_id
                detection_session.settings = settings
                loop = asyncio.get_event_loop()
                detection_session.loop = loop
                self.detection_sessions[detection_id] = detection_session

                detection_session.future.add_done_callback(
                    lambda _: self.end_session(detection_session))

        if not ending and detection_session and time.time() - detection_session.last_sample > 30 and not mediaObjectMimeType:
            print('detection session has not received a sample in 30 seconds, terminating',
                  detection_session.id)
            ending = True

        if ending:
            if detection_session:
                self.end_session(detection_session)
            return (False, None, self.create_detection_result_status(detection_id, False))

        if is_image:
            return (False, detection_session, None)

        detection_session.setTimeout(duration / 1000)
        if settings != None:
            detection_session.settings = settings

        if not new_session:
            print("existing session", detection_session.id)
            return (False, detection_session, self.create_detection_result_status(detection_id, detection_session.running))

        return (True, detection_session, None)

    async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
        try:
            videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
            detection_session = self.create_detection_session()
            detection_session.plugin = self
            detection_session.settings = session and session.get('settings')
            async for videoFrame in videoFrames:
                detected = await self.run_detection_videoframe(videoFrame, detection_session)
                detected = await self.run_detection_videoframe(videoFrame, session)
                yield {
                    '__json_copy_serialize_children': True,
                    'detected': detected,

@@ -309,261 +68,10 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
        pass

    async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None, callbacks: ObjectDetectionCallbacks = None) -> ObjectsDetected:
        is_image = mediaObject and (mediaObject.mimeType.startswith('image/') or mediaObject.mimeType.endswith('/x-raw-image'))

        settings = None
        duration = None

        if session:
            duration = session.get('duration', None)
            settings = session.get('settings', None)

        vf: scrypted_sdk.VideoFrame
        if mediaObject and mediaObject.mimeType == ScryptedMimeTypes.Image.value:
            vf: scrypted_sdk.VideoFrame = mediaObject
            return await self.run_detection_videoframe(vf, settings)
            vf = mediaObject
        else:
            vf = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)

        create, detection_session, objects_detected = self.ensure_session(
            mediaObject and mediaObject.mimeType, session)
        if detection_session:
            detection_session.callbacks = callbacks

        if is_image:
            stream = io.BytesIO(bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')))
            image = Image.open(stream)
            if detection_session:
                if not detection_session.user_callback:
                    detection_session.user_callback = self.create_user_callback(self.run_detection_image, detection_session, duration)
                def convert_to_src_size(point, normalize = False):
                    x, y = point
                    return (int(math.ceil(x)), int(math.ceil(y)), True)

                detection_session.running = True
                try:
                    return await detection_session.user_callback(image, image.size, convert_to_src_size)
                finally:
                    detection_session.running = False
            else:
                return await self.run_detection_jpeg(detection_session, bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')), settings)

        if not create:
            # a detection session may have been created, but not started
            # if the initial request was for an image.
            # however, attached sessions should be unchoked, as the pipeline
            # is not managed here.
            if not detection_session or detection_session.running or not mediaObject:
                return objects_detected

        detection_id = detection_session.id
        detection_session.running = True

        print('detection starting', detection_id)
        b = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.FFmpegInput.value)
        s = b.decode('utf8')
        j: FFmpegInput = json.loads(s)

        container = j.get('container', None)
        videosrc = j['url']
        videoCodec = optional_chain(j, 'mediaStreamOptions', 'video', 'codec')

        decoder = settings and settings.get('decoder')
        if decoder == 'Default':
            decoder = None
        if decoder == 'libav' and not av:
            decoder = None
        elif decoder != 'libav' and not Gst:
            decoder = None

        if not decoder:
            if Gst:
                if videoCodec == 'h264':
                    # hw acceleration is "safe" to use on mac, but not
                    # on other hosts where it may crash.
                    # defaults must be safe.
                    if platform.system() == 'Darwin':
                        decoder = 'vtdec_hw'
                    else:
                        decoder = 'avdec_h264'
                else:
                    # decodebin may pick a hardware accelerated decoder, which isn't ideal
                    # so use a known software decoder for h264 and decodebin for anything else.
                    decoder = 'decodebin'
            elif av:
                decoder = 'libav'

        if decoder == 'libav':
            user_callback = self.create_user_callback(self.run_detection_avframe, detection_session, duration)

            async def inference_loop():
                options = {
                    'analyzeduration': '0',
                    'probesize': '500000',
                    'reorder_queue_size': '0',
                }
                container = av.open(videosrc, options = options)
                stream = container.streams.video[0]

                start = 0
                for idx, frame in enumerate(container.decode(stream)):
                    if detection_session.future.done():
                        container.close()
                        break
                    now = time.time()
                    if not start:
                        start = now
                    elapsed = now - start
                    if (frame.time or 0) < elapsed - 0.500:
                        # print('too slow, skipping frame')
                        continue
                    # print(frame)
                    size = (frame.width, frame.height)
                    def convert_to_src_size(point, normalize = False):
                        x, y = point
                        return (int(math.ceil(x)), int(math.ceil(y)), True)
                    await user_callback(frame, size, convert_to_src_size)

            def thread_main():
                loop = asyncio.new_event_loop()
                loop.run_until_complete(inference_loop())

            thread = threading.Thread(target=thread_main)
            thread.start()
            return self.create_detection_result_status(detection_id, True)

        if not Gst:
            raise Exception('Gstreamer is unavailable')

        if videosrc.startswith('tcp://'):
            parsed_url = urlparse(videosrc)
            videosrc = 'tcpclientsrc port=%s host=%s' % (
                parsed_url.port, parsed_url.hostname)
            if container == 'mpegts':
                videosrc += ' ! tsdemux'
            elif container == 'sdp':
                videosrc += ' ! sdpdemux'
            else:
                raise Exception('unknown container %s' % container)
        elif videosrc.startswith('rtsp'):
            videosrc = 'rtspsrc buffer-mode=0 location=%s protocols=tcp latency=0 is-live=false' % videosrc
            if videoCodec == 'h264':
                videosrc += ' ! rtph264depay ! h264parse'

        videosrc += " ! %s" % decoder

        width = optional_chain(j, 'mediaStreamOptions',
                               'video', 'width') or 1920
        height = optional_chain(j, 'mediaStreamOptions',
                                'video', 'height') or 1080
        src_size = (width, height)

        self.run_pipeline(detection_session, duration, src_size, videosrc)

        return self.create_detection_result_status(detection_id, True)

    def get_pixel_format(self):
        return 'RGB'

    def create_pipeline_sink(self, src_size) -> DetectionSink:
        inference_size = self.get_detection_input_size(src_size)
        ret: DetectionSink = {}

        ret['input_size'] = inference_size
        ret['pipeline'] = create_pipeline_sink(
            type(self).__name__, inference_size, self.get_pixel_format())

        return ret

    async def detection_event_notified(self, settings: Any):
        pass

    async def createMedia(self, data: Any) -> MediaObject:
        pass

    def invalidateMedia(self, detection_session: DetectionSession, data: Any):
        pass

    def create_user_callback(self, run_detection: Any, detection_session: DetectionSession, duration: float):
        first_frame = True

        current_data = None
        current_src_size = None
        current_convert_to_src_size = None

        async def redetect(boundingBox: Tuple[float, float, float, float]):
            nonlocal current_data
            nonlocal current_src_size
            nonlocal current_convert_to_src_size
            if not current_data:
                raise Exception('no sample')

            detection_result = await self.run_detection_crop(
                detection_session, current_data, detection_session.settings, current_src_size, current_convert_to_src_size, boundingBox)

            return detection_result['detections']

        async def user_callback(sample, src_size, convert_to_src_size):
            try:
                detection_session.last_sample = time.time()

                nonlocal first_frame
                if first_frame:
                    first_frame = False
                    print("first frame received", detection_session.id)

                detection_result, data = await run_detection(
                    detection_session, sample, detection_session.settings, src_size, convert_to_src_size)
                if detection_result:
                    detection_result['running'] = True

                    mo = None
                    retain = False

                    def maybeInvalidate():
                        if not retain:
                            self.invalidateMedia(detection_session, data)
                        # else:
                        #     print('retaining')

                    mo = await self.createMedia(data)
                    try:
                        nonlocal current_data
                        nonlocal current_src_size
                        nonlocal current_convert_to_src_size
                        try:
                            current_data = data
                            current_src_size = src_size
                            current_convert_to_src_size = convert_to_src_size
                            retain = await run_coro_threadsafe(self.detection_event(detection_session, detection_result, redetect, mo), other_loop=detection_session.loop)
                        finally:
                            current_data = None
                            current_convert_to_src_size = None
                            current_src_size = None
                            maybeInvalidate()
                    except Exception as e:
                        print(e)
                        self.invalidateMedia(detection_session, data)

                # asyncio.run_coroutine_threadsafe(, loop = self.loop).result()
                await self.detection_event_notified(detection_session.settings)

                if not detection_session or duration == None:
                    safe_set_result(detection_session.loop,
                                    detection_session.future)

                return detection_result
            finally:
                pass

        return user_callback

    def run_pipeline(self, detection_session: DetectionSession, duration, src_size, video_input):
        inference_size = self.get_detection_input_size(src_size)

        pipeline = run_pipeline(detection_session.loop, detection_session.future, self.create_user_callback(self.run_detection_gstsample, detection_session, duration),
                                appsink_name=type(self).__name__,
                                appsink_size=inference_size,
                                video_input=video_input,
                                pixel_format=self.get_pixel_format(),
                                crop=self.crop,
                                )
        task = pipeline.run()
        asyncio.ensure_future(task)
        return await self.run_detection_videoframe(vf, session)
@@ -1,315 +0,0 @@
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import threading

from .safe_set_result import safe_set_result
import math
import asyncio

try:
    import gi
    gi.require_version('Gst', '1.0')
    gi.require_version('GstBase', '1.0')

    from gi.repository import GObject, Gst
    GObject.threads_init()
    Gst.init(None)
except:
    pass

class GstPipelineBase:
    def __init__(self, loop: AbstractEventLoop, finished: Future) -> None:
        self.loop = loop
        self.finished = finished
        self.gst = None

    def attach_launch(self, gst):
        self.gst = gst

    def parse_launch(self, pipeline: str):
        self.attach_launch(Gst.parse_launch(pipeline))

        # Set up a pipeline bus watch to catch errors.
        self.bus = self.gst.get_bus()
        self.watchId = self.bus.connect('message', self.on_bus_message)
        self.bus.add_signal_watch()

    def on_bus_message(self, bus, message):
        # seeing the following error on pi 32 bit
        # OverflowError: Python int too large to convert to C long
        t = str(message.type)
        if t == str(Gst.MessageType.EOS):
            safe_set_result(self.loop, self.finished)
        elif t == str(Gst.MessageType.WARNING):
            err, debug = message.parse_warning()
            print('Warning: %s: %s\n' % (err, debug))
        elif t == str(Gst.MessageType.ERROR):
            err, debug = message.parse_error()
            print('Error: %s: %s\n' % (err, debug))
            safe_set_result(self.loop, self.finished)
        return True

    async def run_attached(self):
        try:
            await self.finished
        except:
            pass

    async def attach(self):
        pass

    async def detach(self):
        pass

    async def run(self):
        await self.attach()

        # Run pipeline.
        self.gst.set_state(Gst.State.PLAYING)

        try:
            await self.run_attached()
        finally:
            # Clean up.
            self.bus.remove_signal_watch()
            self.bus.disconnect(self.watchId)
            self.gst.set_state(Gst.State.NULL)
            self.bus = None
            self.watchId = None
            self.gst = None
            await self.detach()


class GstPipeline(GstPipelineBase):
    def __init__(self, loop: AbstractEventLoop, finished: Future, appsink_name: str, user_callback, crop=False):
        super().__init__(loop, finished)
        self.appsink_name = appsink_name
        self.user_callback = user_callback
        self.running = False
        self.gstsample = None
        self.sink_size = None
        self.src_size = None
        self.dst_size = None
        self.pad_size = None
        self.scale_size = None
        self.crop = crop
        self.condition = None

    def attach_launch(self, gst):
        super().attach_launch(gst)

        appsink = self.gst.get_by_name(self.appsink_name)
        appsink.connect('new-preroll', self.on_new_sample, True)
        appsink.connect('new-sample', self.on_new_sample, False)

    async def attach(self):
        # Start inference worker.
        self.running = True
        worker = threading.Thread(target=self.inference_main)
        worker.start()
        while not self.condition:
            await asyncio.sleep(.1)

    async def detach(self):
        async def notifier():
            async with self.condition:
                self.condition.notify_all()
        self.running = False
        asyncio.run_coroutine_threadsafe(notifier(), loop = self.selfLoop)

    def on_new_sample(self, sink, preroll):
        sample = sink.emit('pull-preroll' if preroll else 'pull-sample')
        if not self.sink_size:
            s = sample.get_caps().get_structure(0)
            self.sink_size = (s.get_value('width'), s.get_value('height'))
        self.gstsample = sample
        async def notifier():
            async with self.condition:
                self.condition.notify_all()
        try:
            if self.running:
                asyncio.run_coroutine_threadsafe(notifier(), loop = self.selfLoop).result()
        except Exception as e:
            # now what?
            # print('sample error')
            # print(e)
            pass
        return Gst.FlowReturn.OK

    def get_src_size(self):
        if not self.src_size:
            videoconvert = self.gst.get_by_name('videoconvert')
            structure = videoconvert.srcpads[0].get_current_caps(
            ).get_structure(0)
            _, w = structure.get_int('width')
            _, h = structure.get_int('height')
            self.src_size = (w, h)

            videoscale = self.gst.get_by_name('videoscale')
            structure = videoscale.srcpads[0].get_current_caps(
            ).get_structure(0)
            _, w = structure.get_int('width')
            _, h = structure.get_int('height')
            self.dst_size = (w, h)

            appsink = self.gst.get_by_name(self.appsink_name)
            structure = appsink.sinkpads[0].get_current_caps().get_structure(0)
            _, w = structure.get_int('width')
            _, h = structure.get_int('height')
            self.dst_size = (w, h)

            # the dimension with the higher scale value got cropped or boxed.
            # use the other dimension to figure out the crop/box amount.
            scales = (self.dst_size[0] / self.src_size[0],
                      self.dst_size[1] / self.src_size[1])
            if self.crop:
                scale = max(scales[0], scales[1])
            else:
                scale = min(scales[0], scales[1])
            self.scale_size = scale

            dx = self.src_size[0] * scale
            dy = self.src_size[1] * scale

            px = math.ceil((self.dst_size[0] - dx) / 2)
            py = math.ceil((self.dst_size[1] - dy) / 2)

            self.pad_size = (px, py)

        return self.src_size

    def convert_to_src_size(self, point, normalize=False):
        valid = True
        px, py = self.pad_size
        x, y = point

        if normalize:
            x = max(0, x)
            x = min(x, self.src_size[0] - 1)
            y = max(0, y)
            y = min(y, self.src_size[1] - 1)

        x = (x - px) / self.scale_size
        if x < 0:
            x = 0
            valid = False
        if x >= self.src_size[0]:
            x = self.src_size[0] - 1
            valid = False

        y = (y - py) / self.scale_size
        if y < 0:
            y = 0
            valid = False
        if y >= self.src_size[1]:
            y = self.src_size[1] - 1
            valid = False

        return (int(math.ceil(x)), int(math.ceil(y)), valid)
|
||||
|
||||
|
||||
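Editor's note: get_src_size and convert_to_src_size above implement letterbox math. videoscale fits (or, with crop, fills) the source into the model input; the centered padding is subtracted and the scale divided out to map detections back to source coordinates. A minimal standalone sketch of the same arithmetic, assuming a hypothetical 1920x1080 source and 320x320 model input:

import math

src = (1920, 1080)
dst = (320, 320)
scale = min(dst[0] / src[0], dst[1] / src[1])  # fit (box), as in the non-crop path
pad = (math.ceil((dst[0] - src[0] * scale) / 2),
       math.ceil((dst[1] - src[1] * scale) / 2))  # centered padding, in dst space

def to_src(point):
    # invert the letterbox: remove padding, then undo the scale
    return ((point[0] - pad[0]) / scale, (point[1] - pad[1]) / scale)

# a detection at the center of the model input maps back to the source center
assert [round(v) for v in to_src((160, 160))] == [960, 540]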
    def inference_main(self):
        loop = asyncio.new_event_loop()
        self.selfLoop = loop
        try:
            loop.run_until_complete(self.inference_loop())
        finally:
            loop.close()

    async def inference_loop(self):
        self.condition = asyncio.Condition()
        while self.running:
            async with self.condition:
                while not self.gstsample and self.running:
                    await self.condition.wait()
                if not self.running:
                    return
                gstsample = self.gstsample
                self.gstsample = None
            try:
                await self.user_callback(gstsample, self.get_src_size(), lambda p, normalize=False: self.convert_to_src_size(p, normalize))
            except Exception as e:
                print("callback failure")
                print(e)
                raise


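Editor's note: the handoff between GStreamer's streaming thread and the worker's private asyncio loop is a "latest value wins" pattern: on_new_sample overwrites self.gstsample (stale frames are dropped, not queued) and pokes the worker via run_coroutine_threadsafe; inference_loop waits on the condition, takes the latest sample, and releases the lock before running the callback. A minimal sketch of the same pattern in isolation, with a plain thread standing in for GStreamer:

import asyncio
import threading
import time

latest = None
condition = None
loop = asyncio.new_event_loop()

def producer():
    global latest
    for i in range(5):
        time.sleep(0.05)
        latest = i  # overwrite: stale values are dropped, not queued

        async def notify():
            async with condition:
                condition.notify_all()
        asyncio.run_coroutine_threadsafe(notify(), loop=loop).result()

async def consumer():
    global condition, latest
    condition = asyncio.Condition()
    threading.Thread(target=producer).start()
    for _ in range(5):
        async with condition:
            while latest is None:
                await condition.wait()
            value, latest = latest, None
        print('consumed', value)  # lock released before the slow work

loop.run_until_complete(consumer())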
def get_dev_board_model():
    try:
        model = open('/sys/firmware/devicetree/base/model').read().lower()
        if 'mx8mq' in model:
            return 'mx8mq'
        if 'mt8167' in model:
            return 'mt8167'
    except:
        pass
    return None


def create_pipeline_sink(
        appsink_name,
        appsink_size,
        pixel_format,
        crop=False):
    SINK_ELEMENT = 'appsink name={appsink_name} emit-signals=true max-buffers=-1 drop=true sync=false'.format(
        appsink_name=appsink_name)

    (width, height) = appsink_size

    SINK_CAPS = 'video/x-raw,format={pixel_format}'
    if width and height:
        SINK_CAPS += ',width={width},height={height},pixel-aspect-ratio=1/1'

    sink_caps = SINK_CAPS.format(
        width=width, height=height, pixel_format=pixel_format)
    pipeline = " {sink_caps} ! {sink_element}".format(
        sink_caps=sink_caps,
        sink_element=SINK_ELEMENT)

    return pipeline


def create_pipeline(
        appsink_name,
        appsink_size,
        video_input,
        pixel_format,
        crop=False,
        parse_only=False):
    if parse_only:
        sink = 'appsink name={appsink_name} emit-signals=true sync=false'.format(
            appsink_name=appsink_name)
        PIPELINE = """ {video_input}
            ! {sink}
            """
    else:
        sink = create_pipeline_sink(
            appsink_name, appsink_size, pixel_format, crop=crop)
        if crop:
            PIPELINE = """ {video_input} ! queue leaky=downstream max-size-buffers=0 ! videoconvert name=videoconvert ! aspectratiocrop aspect-ratio=1/1 ! videoscale name=videoscale ! queue leaky=downstream max-size-buffers=0
                ! {sink}
                """
        else:
            PIPELINE = """ {video_input} ! queue leaky=downstream max-size-buffers=0 ! videoconvert name=videoconvert ! videoscale name=videoscale ! queue leaky=downstream max-size-buffers=0
                ! {sink}
                """
    pipeline = PIPELINE.format(video_input=video_input, sink=sink)
    print('Gstreamer pipeline:\n', pipeline)
    return pipeline


def run_pipeline(loop, finished,
                 user_callback,
                 appsink_name,
                 appsink_size,
                 video_input,
                 pixel_format,
                 crop=False,
                 parse_only=False):
    gst = GstPipeline(loop, finished, appsink_name, user_callback, crop=crop)
    pipeline = create_pipeline(
        appsink_name, appsink_size, video_input, pixel_format, crop=crop, parse_only=parse_only)
    gst.parse_launch(pipeline)
    return gst
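Editor's note: a sketch of how these helpers fit together, not a definitive invocation; the videotestsrc launch fragment and the callback body are assumptions for illustration.

async def on_sample(gstsample, src_size, convert_to_src_size):
    # invoked on the worker loop for each non-stale appsink sample
    print('got sample, source size:', src_size)

def start(loop, finished):
    # loop: the plugin's main asyncio loop; finished: a Future resolved on pipeline end
    return run_pipeline(loop, finished, on_sample,
                        appsink_name='appsink',
                        appsink_size=(320, 320),
                        video_input='videotestsrc ! video/x-raw',  # hypothetical input
                        pixel_format='RGB')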
@@ -1,11 +0,0 @@
from asyncio.futures import Future
from asyncio import AbstractEventLoop

def safe_set_result(loop: AbstractEventLoop, future: Future):
    def loop_set_result():
        try:
            if not future.done():
                future.set_result(None)
        except:
            pass
    loop.call_soon_threadsafe(loop_set_result)
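Editor's note: a usage sketch for the helper removed above; the worker thread below is hypothetical. call_soon_threadsafe is used because a Future may only be resolved safely from its own loop's thread.

import asyncio
import threading

async def main():
    loop = asyncio.get_running_loop()
    finished = loop.create_future()
    threading.Thread(target=lambda: safe_set_result(loop, finished)).start()
    await finished  # resolves cleanly even though it was set from another thread

asyncio.run(main())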
@@ -1,37 +1,40 @@
from __future__ import annotations
from scrypted_sdk.types import ObjectDetectionResult, ObjectsDetected, Setting
import io
from PIL import Image
import re
import scrypted_sdk
from typing import Any, List, Tuple, Mapping

import asyncio
import time
from .rectangle import Rectangle, intersect_area, intersect_rect, to_bounding_box, from_bounding_box, combine_rect
import urllib.request
import concurrent.futures
import os
import re
import urllib.request
from typing import Any, List, Tuple

from detect import DetectionSession, DetectPlugin
import scrypted_sdk
from PIL import Image
from scrypted_sdk.types import (ObjectDetectionResult, ObjectDetectionSession,
                                ObjectsDetected, Setting)

from .sort_oh import tracker
import numpy as np
import traceback
from detect import DetectPlugin

try:
    from gi.repository import Gst
except:
    pass
from .rectangle import (Rectangle, combine_rect, from_bounding_box,
                        intersect_area, intersect_rect, to_bounding_box)

class PredictSession(DetectionSession):
    image: Image.Image
    tracker: sort_oh.tracker.Sort_OH
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")

    def __init__(self, start_time: float) -> None:
        super().__init__()
        self.image = None
        self.processed = 0
        self.start_time = start_time
        self.tracker = None
async def to_thread(f):
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(toThreadExecutor, f)

async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
    if format != 'rgba':
        return Image.frombuffer('RGB', size, data)

    def convert():
        rgba = Image.frombuffer('RGBA', size, data)
        try:
            return rgba.convert('RGB')
        finally:
            rgba.close()
    return await to_thread(convert)

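Editor's note: a small usage sketch for the ensureRGBData helper added above; the raw buffer is fabricated. RGBA-to-RGB conversion is CPU-bound, so it runs on the thread pool executor rather than blocking the asyncio thread.

import asyncio

async def example():
    data = bytes(4 * 2 * 2)  # hypothetical 2x2 RGBA frame, 16 bytes
    image = await ensureRGBData(data, (2, 2), 'rgba')
    assert image.mode == 'RGB'

asyncio.run(example())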
def parse_label_contents(contents: str):
    lines = contents.splitlines()
@@ -121,7 +124,6 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
        self.toMimeType = scrypted_sdk.ScryptedMimeTypes.MediaObject.value

        self.crop = False
        self.trackers: Mapping[str, tracker.Sort_OH] = {}

        # periodic restart because there seems to be leaks in tflite or coral API.
        loop = asyncio.get_event_loop()
@@ -148,42 +150,6 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
        mo = await scrypted_sdk.mediaManager.createMediaObject(data, self.fromMimeType)
        return mo

    def end_session(self, detection_session: PredictSession):
        image = detection_session.image
        if image:
            detection_session.image = None
            image.close()

        dps = detection_session.processed / (time.time() - detection_session.start_time)
        print("Detections per second %s" % dps)
        return super().end_session(detection_session)

    def invalidateMedia(self, detection_session: PredictSession, data: RawImage):
        if not data:
            return
        image = data.image
        data.image = None
        if image:
            if not detection_session.image:
                detection_session.image = image
            else:
                image.close()
        data.jpegMediaObject = None

    async def convert(self, data: RawImage, fromMimeType: str, toMimeType: str, options: scrypted_sdk.BufferConvertorOptions = None) -> Any:
        mo = data.jpegMediaObject
        if not mo:
            image = data.image
            if not image:
                raise Exception('data is no longer valid')

            bio = io.BytesIO()
            image.save(bio, format='JPEG')
            jpegBytes = bio.getvalue()
            mo = await scrypted_sdk.mediaManager.createMediaObject(jpegBytes, 'image/jpeg')
            data.jpegMediaObject = mo
        return mo

    def requestRestart(self):
        asyncio.ensure_future(scrypted_sdk.deviceManager.requestRestart())

@@ -210,23 +176,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
            ],
        }

        trackerWindow: Setting = {
            'title': 'Tracker Window',
            'subgroup': 'Advanced',
            'description': 'Internal Setting. Do not change.',
            'key': 'trackerWindow',
            'value': 3,
            'type': 'number',
        }
        trackerCertainty: Setting = {
            'title': 'Tracker Certainty',
            'subgroup': 'Advanced',
            'description': 'Internal Setting. Do not change.',
            'key': 'trackerCertainty',
            'value': .2,
            'type': 'number',
        }
        return [allowList, trackerWindow, trackerCertainty]
        return [allowList]

    def create_detection_result(self, objs: List[Prediction], size, allowList, convert_to_src_size=None) -> ObjectsDetected:
        detections: List[ObjectDetectionResult] = []
@@ -250,27 +200,15 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
        detection_result['detections'] = []
        for detection in detections:
            bb = detection['boundingBox']
            x, y, valid = convert_to_src_size((bb[0], bb[1]), True)
            x2, y2, valid2 = convert_to_src_size(
                (bb[0] + bb[2], bb[1] + bb[3]), True)
            if not valid or not valid2:
                # print("filtering out", detection['className'])
                continue
            x, y = convert_to_src_size((bb[0], bb[1]))
            x2, y2 = convert_to_src_size(
                (bb[0] + bb[2], bb[1] + bb[3]))
            detection['boundingBox'] = (x, y, x2 - x + 1, y2 - y + 1)
            detection_result['detections'].append(detection)

        # print(detection_result)
        return detection_result

    async def run_detection_jpeg(self, detection_session: PredictSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
        stream = io.BytesIO(image_bytes)
        image = Image.open(stream)
        if image.mode == 'RGBA':
            image = image.convert('RGB')

        detections, _ = await self.run_detection_image(detection_session, image, settings, image.size)
        return detections

    def get_detection_input_size(self, src_size):
        # signals to pipeline that any input size is fine
        # previous code used to resize to correct size and run detection that way.
@@ -284,8 +222,8 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
    async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
        pass

    async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: PredictSession) -> ObjectsDetected:
        settings = detection_session and detection_session.settings
    async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
        settings = detection_session and detection_session.get('settings')
        src_size = videoFrame.width, videoFrame.height
        w, h = self.get_input_size()
        iw, ih = src_size
@@ -293,16 +231,13 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
        hs = h / ih
        s = max(ws, hs)
        if ws == 1 and hs == 1:
            def cvss(point, normalize=False):
                return point[0], point[1], True
            def cvss(point):
                return point[0], point[1]

            data = await videoFrame.toBuffer({
                'format': videoFrame.format or 'rgb',
            })
            if videoFrame.format == 'rgba':
                image = Image.frombuffer('RGBA', (w, h), data).convert('RGB')
            else:
                image = Image.frombuffer('RGB', (w, h), data)
            image = await ensureRGBData(data, (w, h), videoFrame.format)
            try:
                ret = await self.detect_once(image, settings, src_size, cvss)
                return ret
@@ -347,19 +282,15 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
                })
            )

            if videoFrame.format == 'rgba':
                first = Image.frombuffer('RGBA', (w, h), firstData).convert('RGB')
            else:
                first = Image.frombuffer('RGB', (w, h), firstData)
            if videoFrame.format == 'rgba':
                second = Image.frombuffer('RGBA', (w, h), secondData).convert('RGB')
            else:
                second = Image.frombuffer('RGB', (w, h), secondData)
            first, second = await asyncio.gather(
                ensureRGBData(firstData, (w, h), videoFrame.format),
                ensureRGBData(secondData, (w, h), videoFrame.format)
            )

            def cvss1(point, normalize=False):
                return point[0] / s, point[1] / s, True
            def cvss2(point, normalize=False):
                return point[0] / s + ow, point[1] / s + oh, True
            def cvss1(point):
                return point[0] / s, point[1] / s
            def cvss2(point):
                return point[0] / s + ow, point[1] / s + oh

            ret1 = await self.detect_once(first, settings, src_size, cvss1)
            first.close()
@@ -395,242 +326,3 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
            ret = ret1
            ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
            return ret

    async def run_detection_image(self, detection_session: PredictSession, image: Image.Image, settings: Any, src_size, convert_to_src_size: Any = None, multipass_crop: Tuple[float, float, float, float] = None):
        (w, h) = self.get_input_size() or image.size
        (iw, ih) = image.size

        if detection_session and not detection_session.tracker:
            t = self.trackers.get(detection_session.id)
            if not t:
                t = tracker.Sort_OH(scene=np.array([iw, ih]))
                trackerCertainty = settings.get('trackerCertainty')
                if not isinstance(trackerCertainty, int):
                    trackerCertainty = .2
                t.conf_three_frame_certainty = trackerCertainty * 3
                trackerWindow = settings.get('trackerWindow')
                if not isinstance(trackerWindow, int):
                    trackerWindow = 3
                t.conf_unmatched_history_size = trackerWindow
                self.trackers[detection_session.id] = t
            detection_session.tracker = t
            # conf_trgt = 0.35
            # conf_objt = 0.75
            # detection_session.tracker.conf_trgt = conf_trgt
            # detection_session.tracker.conf_objt = conf_objt

        # this is a single pass or the second pass. detect once and return results.
        if multipass_crop:
            (l, t, dx, dy) = multipass_crop

            # find center
            cx = l + dx / 2
            cy = t + dy / 2

            # fix aspect ratio on box
            if dx / w > dy / h:
                dy = dx / w * h
            else:
                dx = dy / h * w

            if dx > image.width:
                s = image.width / dx
                dx = image.width
                dy *= s

            if dy > image.height:
                s = image.height / dy
                dy = image.height
                dx *= s

            # crop size to fit input size
            if dx < w:
                dx = w
            if dy < h:
                dy = h

            l = cx - dx / 2
            t = cy - dy / 2
            if l < 0:
                l = 0
            if t < 0:
                t = 0
            if l + dx > iw:
                l = iw - dx
            if t + dy > ih:
                t = ih - dy
            crop_box = (l, t, l + dx, t + dy)
            if dx == w and dy == h:
                input = image.crop(crop_box)
            else:
                input = image.resize((w, h), Image.ANTIALIAS, crop_box)

            def cvss(point, normalize=False):
                unscaled = ((point[0] / w) * dx + l, (point[1] / h) * dy + t)
                converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
                return converted

            ret = await self.detect_once(input, settings, src_size, cvss)
            input.close()
            detection_session.processed = detection_session.processed + 1
            return ret, RawImage(image)

        ws = w / iw
        hs = h / ih
        s = max(ws, hs)
        if ws == 1 and hs == 1:
            def cvss(point, normalize=False):
                converted = convert_to_src_size(point, normalize) if convert_to_src_size else (point[0], point[1], True)
                return converted

            ret = await self.detect_once(image, settings, src_size, cvss)
            if detection_session:
                detection_session.processed = detection_session.processed + 1
        else:
            sw = int(w / s)
            sh = int(h / s)
            first_crop = (0, 0, sw, sh)
            first = image.resize((w, h), Image.ANTIALIAS, first_crop)
            ow = iw - sw
            oh = ih - sh
            second_crop = (ow, oh, ow + sw, oh + sh)
            second = image.resize((w, h), Image.ANTIALIAS, second_crop)

            def cvss1(point, normalize=False):
                unscaled = (point[0] / s, point[1] / s)
                converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
                return converted
            def cvss2(point, normalize=False):
                unscaled = (point[0] / s + ow, point[1] / s + oh)
                converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
                return converted

            ret1 = await self.detect_once(first, settings, src_size, cvss1)
            first.close()
            if detection_session:
                detection_session.processed = detection_session.processed + 1
            ret2 = await self.detect_once(second, settings, src_size, cvss2)
            if detection_session:
                detection_session.processed = detection_session.processed + 1
            second.close()

            two_intersect = intersect_rect(Rectangle(*first_crop), Rectangle(*second_crop))

            def is_same_detection_middle(d1: ObjectDetectionResult, d2: ObjectDetectionResult):
                same, ret = is_same_detection(d1, d2)
                if same:
                    return same, ret

                if d1['className'] != d2['className']:
                    return False, None

                r1 = from_bounding_box(d1['boundingBox'])
                m1 = intersect_rect(two_intersect, r1)
                if not m1:
                    return False, None

                r2 = from_bounding_box(d2['boundingBox'])
                m2 = intersect_rect(two_intersect, r2)
                if not m2:
                    return False, None

                same, ret = is_same_box(to_bounding_box(m1), to_bounding_box(m2))
                if not same:
                    return False, None
                c = to_bounding_box(combine_rect(r1, r2))
                return True, c

            ret = ret1
            ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)

        if detection_session:
            self.track(detection_session, ret)

        if not len(ret['detections']):
            return ret, RawImage(image)

        return ret, RawImage(image)

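Editor's note: when the model input is smaller than the frame, the code above covers the frame with two overlapping tiles (top-left and bottom-right) rather than squashing the whole frame, then dedupes detections that land in the overlap strip. A standalone sketch of the tile geometry, with made-up numbers (1280x720 frame, 320x320 model input):

iw, ih, w, h = 1280, 720, 320, 320
s = max(w / iw, h / ih)               # scale at which one tile fills the model input
sw, sh = int(w / s), int(h / s)       # tile size in source pixels -> 720x720 here
first_crop = (0, 0, sw, sh)           # top-left tile
ow, oh = iw - sw, ih - sh             # offset of the bottom-right tile
second_crop = (ow, oh, ow + sw, oh + sh)
# the tiles overlap horizontally by 2*sw - iw = 160 source pixels;
# detections falling in that strip are candidates for dedupe/merge.
assert first_crop == (0, 0, 720, 720) and second_crop == (560, 0, 1280, 720)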
    def track(self, detection_session: PredictSession, ret: ObjectsDetected):
        detections = ret['detections']
        sort_input = []
        for d in ret['detections']:
            r: ObjectDetectionResult = d
            l, t, w, h = r['boundingBox']
            sort_input.append([l, t, l + w, t + h, r['score']])
        trackers, unmatched_trckr, unmatched_gts = detection_session.tracker.update(np.array(sort_input), [])
        for td in trackers:
            x0, y0, x1, y1, trackID = td[0].item(), td[1].item(), td[2].item(), td[3].item(), td[4].item()
            slop = 0
            obj: ObjectDetectionResult = None
            ta = (x1 - x0) * (y1 - y0)
            box = Rectangle(x0, y0, x1, y1)
            for d in detections:
                if d.get('id'):
                    continue
                ob: ObjectDetectionResult = d
                dx0, dy0, dw, dh = ob['boundingBox']
                dx1 = dx0 + dw
                dy1 = dy0 + dh
                da = dw * dh
                area = intersect_area(Rectangle(dx0, dy0, dx1, dy1), box)
                if not area:
                    continue
                # the intersect area is always going to be smaller than
                # the detection or tracker area.
                # greater numbers, ie approaching 2, is better.
                dslop = area / ta + area / da
                if (dslop > slop):
                    slop = dslop
                    obj = ob
            if obj:
                obj['id'] = str(trackID)
            # this may happen if tracker predicts something is still in the scene
            # but was not detected
            # else:
            #     print('unresolved tracker')
        # for d in detections:
        #     if not d.get('id'):
        #         # this happens if the tracker is not confident in a new detection yet due
        #         # to low score or has not been found in enough frames
        #         if d['className'] == 'person':
        #             print('untracked %s: %s' % (d['className'], d['score']))


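Editor's note: the matching score used above, in isolation; the boxes are made up. For two boxes with intersection area A, the score A/area1 + A/area2 approaches 2 as the boxes converge, so each SORT track claims the best-overlapping unassigned detection.

def slop(b1, b2):
    # b1, b2 are (x0, y0, x1, y1) boxes; returns 0 when they do not intersect
    x0 = max(b1[0], b2[0]); y0 = max(b1[1], b2[1])
    x1 = min(b1[2], b2[2]); y1 = min(b1[3], b2[3])
    if x0 >= x1 or y0 >= y1:
        return 0
    a = (x1 - x0) * (y1 - y0)
    a1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    a2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    return a / a1 + a / a2

assert slop((0, 0, 10, 10), (0, 0, 10, 10)) == 2.0   # identical boxes
assert slop((0, 0, 10, 10), (5, 5, 15, 15)) == 0.5   # quarter overlap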
    async def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
        (ret, _) = await self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
        return ret

    async def run_detection_gstsample(self, detection_session: PredictSession, gstsample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Image.Image]:
        caps = gstsample.get_caps()
        # can't trust the width value, compute the stride
        height = caps.get_structure(0).get_value('height')
        width = caps.get_structure(0).get_value('width')
        gst_buffer = gstsample.get_buffer()
        result, info = gst_buffer.map(Gst.MapFlags.READ)
        if not result:
            return
        try:
            image = detection_session.image
            detection_session.image = None

            if image and (image.width != width or image.height != height):
                image.close()
                image = None
            if image:
                image.frombytes(bytes(info.data))
            else:
                image = Image.frombuffer('RGB', (width, height), bytes(info.data))
        finally:
            gst_buffer.unmap(info)

        try:
            return await self.run_detection_image(detection_session, image, settings, src_size, convert_to_src_size)
        except:
            image.close()
            traceback.print_exc()
            raise

    def create_detection_session(self):
        return PredictSession(start_time=time.time())

@@ -1 +0,0 @@
../../../sort-tracker/sort_oh/libs
@@ -1,16 +1,7 @@
--extra-index-url https://google-coral.github.io/py-repo/

# plugin
numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
pycoral~=2.0
PyGObject>=3.30.4; sys_platform != 'win32'
# libav doesn't work on arm7
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
tflite-runtime==2.5.0.post1

# sort_oh
scipy
filterpy
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

@@ -1,5 +1,4 @@
from __future__ import annotations
import threading
from .common import *
from PIL import Image
from pycoral.adapters import detect

4 plugins/tensorflow/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/tensorflow-lite",
  "version": "0.1.7",
  "version": "0.1.8",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/tensorflow-lite",
      "version": "0.1.7",
      "version": "0.1.8",
      "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
      }

@@ -41,5 +41,5 @@
  "devDependencies": {
    "@scrypted/sdk": "file:../../sdk"
  },
  "version": "0.1.7"
  "version": "0.1.8"
}

@@ -1 +0,0 @@
../../tensorflow-lite/src/pipeline
@@ -1,14 +1,8 @@
# plugin
numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
tensorflow-macos; sys_platform == 'darwin'
tensorflow; sys_platform != 'darwin'
PyGObject>=3.30.4; sys_platform != 'win32'
# not available on armhf
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'

# sort_oh
scipy
filterpy
numpy>=1.16.2

# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

6 plugins/unifi-protect/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/unifi-protect",
  "version": "0.0.131",
  "version": "0.0.132",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/unifi-protect",
      "version": "0.0.131",
      "version": "0.0.132",
      "license": "Apache",
      "dependencies": {
        "@koush/unifi-protect": "file:../../external/unifi-protect",
@@ -61,7 +61,7 @@
    },
    "../../sdk": {
      "name": "@scrypted/sdk",
      "version": "0.2.68",
      "version": "0.2.86",
      "license": "ISC",
      "dependencies": {
        "@babel/preset-typescript": "^7.18.6",
