Mirror of https://github.com/koush/scrypted.git (synced 2026-02-06 23:42:19 +00:00)

Compare commits: 13 commits
| Author | SHA1 | Date |
|---|---|---|
|  | fa86c31340 |  |
|  | 94ded75d40 |  |
|  | 887b61cd7a |  |
|  | 48e3d30987 |  |
|  | 02dba3cd71 |  |
|  | 195769034d |  |
|  | 39c08aa378 |  |
|  | fa8056d38e |  |
|  | 145f116c68 |  |
|  | 15b6f336e4 |  |
|  | 8b46f0a466 |  |
|  | a20cc5cd89 |  |
|  | 3d068929fd |  |
```diff
@@ -129,6 +129,16 @@ export function getNaluTypes(streamChunk: StreamChunk) {
     return getNaluTypesInNalu(streamChunk.chunks[streamChunk.chunks.length - 1].subarray(12))
 }
 
+export function getNaluFragmentInformation(nalu: Buffer) {
+    const naluType = nalu[0] & 0x1f;
+    const fua = naluType === H264_NAL_TYPE_FU_A;
+    return {
+        fua,
+        fuaStart: fua && !!(nalu[1] & 0x80),
+        fuaEnd: fua && !!(nalu[1] & 0x40),
+    }
+}
+
 export function getNaluTypesInNalu(nalu: Buffer, fuaRequireStart = false, fuaRequireEnd = false) {
     const ret = new Set<number>();
     const naluType = nalu[0] & 0x1f;
```
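The new getNaluFragmentInformation helper reads the FU-A fragmentation state straight from the first two payload bytes. A minimal standalone sketch of the same bit math, assuming the RFC 6184 value 28 for H264_NAL_TYPE_FU_A and a hand-built example buffer:

```ts
// Hypothetical FU-A start fragment: 0x7c = NRI 3, NALU type 28 (FU-A);
// 0x85 = start bit set, end bit clear, original NALU type 5 (IDR slice).
const payload = Buffer.from([0x7c, 0x85, 0xde, 0xad]);

const naluType = payload[0] & 0x1f;             // 28, so this is an FU-A
const fua = naluType === 28;                    // H264_NAL_TYPE_FU_A
const fuaStart = fua && !!(payload[1] & 0x80);  // true: first fragment of the NALU
const fuaEnd = fua && !!(payload[1] & 0x40);    // false: more fragments follow
console.log({ fua, fuaStart, fuaEnd });
```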
```diff
@@ -63,7 +63,7 @@ RUN apt-get -y install \
 # which causes weird behavior in python which looks at the arch version
 # which still reports 64bit, even if running in 32bit docker.
 # this scenario is not supported and will be reported at runtime.
-RUN if [ "$(uname -m)" = "armv7l" ]; \
+RUN if [ "$(uname -m)" != "x86_64" ]; \
     then \
     apt-get -y install \
     python3-matplotlib \
```
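The comment above explains why the apt fallback now applies to every non-x86_64 machine: a 32-bit userspace can run on a 64-bit kernel, and Python then sees the kernel's 64-bit machine string. As a rough illustration only (not the check Scrypted itself ships), the same mismatch can be spotted from Node by comparing the kernel architecture with the architecture the process was built for:

```ts
import os from 'os';

// os.machine() reports the kernel architecture (what `uname -m` prints),
// while process.arch reports what the running binary was compiled for.
const kernelArch = os.machine();    // e.g. 'aarch64' on a 64-bit Raspberry Pi kernel
const processArch = process.arch;   // e.g. 'arm' inside a 32-bit container

if (kernelArch === 'aarch64' && processArch === 'arm')
    console.warn('32-bit userspace on a 64-bit kernel; this setup is reported as unsupported at runtime.');
```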
```diff
@@ -95,7 +95,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=full
 
 ################################################################
 # End section generated from template/Dockerfile.full.footer
```
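SCRYPTED_BASE_VERSION works as a cache-busting stamp: per the comment, bumping it whenever the base image changes forces pip and npm to reinstall plugin dependencies. A hypothetical sketch of how such a stamp could be consumed; the marker file name and helper are illustrative, not Scrypted's actual code:

```ts
import fs from 'fs';
import path from 'path';

// Illustrative only: compare the env stamp against a marker left by the previous install.
function needsDependencyReinstall(volumeDir: string): boolean {
    const stamp = process.env.SCRYPTED_BASE_VERSION || '';
    const marker = path.join(volumeDir, '.base-version'); // hypothetical marker file
    const previous = fs.existsSync(marker) ? fs.readFileSync(marker, 'utf8') : '';
    if (previous === stamp)
        return false;            // base image unchanged, keep installed dependencies
    fs.writeFileSync(marker, stamp);
    return true;                 // base image changed, redo pip/npm installs
}
```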
```diff
@@ -42,4 +42,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=lite
```
```diff
@@ -21,4 +21,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=thin
```
```diff
@@ -42,7 +42,7 @@ fi
 WATCHTOWER_HTTP_API_TOKEN=$(echo $RANDOM | md5sum)
 DOCKER_COMPOSE_YML=$SCRYPTED_HOME/docker-compose.yml
 echo "Created $DOCKER_COMPOSE_YML"
-curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum)"/g > $DOCKER_COMPOSE_YML
+curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum | head -c 32)"/g > $DOCKER_COMPOSE_YML
 
 echo "Setting permissions on $SCRYPTED_HOME"
 chown -R $SERVICE_USER $SCRYPTED_HOME
```
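The added `head -c 32` keeps only the 32 hex characters of the digest, so the trailing `  -` that `md5sum` prints no longer ends up in the token substituted into docker-compose.yml. For comparison only (the install script itself stays shell), an equivalent 32-character random token in Node:

```ts
import { randomBytes } from 'crypto';

// 16 random bytes become 32 hex characters, the same shape as the truncated md5 token.
const watchtowerToken = randomBytes(16).toString('hex');
console.log(watchtowerToken, watchtowerToken.length); // ..., 32
```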
```diff
@@ -10,7 +10,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
 
 # changing this forces pip and npm to perform reinstalls.
 # if this base image changes, this version must be updated.
-ENV SCRYPTED_BASE_VERSION=20230322
+ENV SCRYPTED_BASE_VERSION=20230329
+ENV SCRYPTED_DOCKER_FLAVOR=full
 
 ################################################################
 # End section generated from template/Dockerfile.full.footer
```
```diff
@@ -60,7 +60,7 @@ RUN apt-get -y install \
 # which causes weird behavior in python which looks at the arch version
 # which still reports 64bit, even if running in 32bit docker.
 # this scenario is not supported and will be reported at runtime.
-RUN if [ "$(uname -m)" = "armv7l" ]; \
+RUN if [ "$(uname -m)" != "x86_64" ]; \
     then \
     apt-get -y install \
     python3-matplotlib \
```
plugins/coreml/package-lock.json (generated, 4 changed lines)
```diff
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/coreml",
-  "version": "0.1.5",
+  "version": "0.1.8",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/coreml",
-      "version": "0.1.5",
+      "version": "0.1.8",
       "devDependencies": {
         "@scrypted/sdk": "file:../../sdk"
       }
```

```diff
@@ -41,5 +41,5 @@
   "devDependencies": {
     "@scrypted/sdk": "file:../../sdk"
   },
-  "version": "0.1.5"
+  "version": "0.1.8"
 }
```
```diff
@@ -64,6 +64,9 @@ export class H264Repacketizer {
     extraPackets = 0;
     fuaMax: number;
     pendingFuA: RtpPacket[];
+    // log whether a stapa sps/pps has been seen.
+    // resets on every idr frame, to trigger codec information
+    // to be resent.
+    seenStapASps = false;
     fuaMin: number;
 
@@ -402,8 +405,12 @@ export class H264Repacketizer {
             // if this is an idr frame, but no sps has been sent via a stapa, dummy one up.
             // the stream may not contain codec information in stapa or may be sending it
             // in separate sps/pps packets which is not supported by homekit.
-            if (originalNalType === NAL_TYPE_IDR && !this.seenStapASps)
-                this.maybeSendSpsPps(packet, ret);
+            if (originalNalType === NAL_TYPE_IDR) {
+                if (!this.seenStapASps)
+                    this.maybeSendSpsPps(packet, ret);
+                this.seenStapASps = false;
+            }
+
         }
         else {
             if (this.pendingFuA) {
@@ -486,10 +493,12 @@ export class H264Repacketizer {
                 return;
             }
 
-            if (nalType === NAL_TYPE_IDR && !this.seenStapASps) {
+            if (nalType === NAL_TYPE_IDR) {
                 // if this is an idr frame, but no sps has been sent, dummy one up.
                 // the stream may not contain sps.
-                this.maybeSendSpsPps(packet, ret);
+                if (!this.seenStapASps)
+                    this.maybeSendSpsPps(packet, ret);
+                this.seenStapASps = false;
             }
 
             this.fragment(packet, ret);
```
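Taken together, these hunks make the repacketizer clear seenStapASps on every IDR frame, so SPS/PPS get dummied up ahead of the next IDR unless a STAP-A already carried them. A compact sketch of that state machine, with simplified names standing in for the repacketizer internals:

```ts
// Simplified model of the resend-SPS/PPS-on-IDR behavior; not the real H264Repacketizer.
class SpsPpsTracker {
    private seenStapASps = false;

    // called when a STAP-A containing SPS/PPS passes through
    onStapAWithSps() {
        this.seenStapASps = true;
    }

    // returns true when codec information should be dummied up before this IDR frame
    onIdrFrame(): boolean {
        const needSpsPps = !this.seenStapASps;
        // reset so the next IDR triggers a resend unless a STAP-A arrives first
        this.seenStapASps = false;
        return needSpsPps;
    }
}

const tracker = new SpsPpsTracker();
console.log(tracker.onIdrFrame()); // true: nothing seen yet, send SPS/PPS
tracker.onStapAWithSps();
console.log(tracker.onIdrFrame()); // false: the stream already carried them
```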
plugins/opencv/package-lock.json (generated, 4 changed lines)
```diff
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/opencv",
-  "version": "0.0.70",
+  "version": "0.0.72",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/opencv",
-      "version": "0.0.70",
+      "version": "0.0.72",
       "devDependencies": {
         "@scrypted/sdk": "file:../../sdk"
       }
```

```diff
@@ -36,5 +36,5 @@
   "devDependencies": {
     "@scrypted/sdk": "file:../../sdk"
   },
-  "version": "0.0.70"
+  "version": "0.0.72"
 }
```
```diff
@@ -1,22 +1,21 @@
 from __future__ import annotations
-from time import sleep
-from detect import DetectionSession, DetectPlugin
 
 from typing import Any, List, Tuple
 import numpy as np
 import asyncio
 
 import cv2
 import imutils
-Gst = None
-try:
-    from gi.repository import Gst
-except:
-    pass
-from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting, VideoFrame
-from PIL import Image
-import numpy as np
 
-class OpenCVDetectionSession(DetectionSession):
+from detect import DetectPlugin
+
+import scrypted_sdk
+from scrypted_sdk.types import (ObjectDetectionGeneratorSession,
+                                ObjectDetectionResult,
+                                ObjectsDetected, Setting, VideoFrame)
+
+
+class OpenCVDetectionSession:
     def __init__(self) -> None:
         super().__init__()
         self.cap: cv2.VideoCapture = None
         self.previous_frame: Any = None
         self.curFrame = None
@@ -110,8 +109,7 @@ class OpenCVPlugin(DetectPlugin):
         blur = int(settings.get('blur', blur))
         return area, threshold, interval, blur
 
-    def detect(self, detection_session: OpenCVDetectionSession, frame, src_size, convert_to_src_size) -> ObjectsDetected:
-        settings = detection_session.settings
+    def detect(self, frame, settings: Any, detection_session: OpenCVDetectionSession, src_size, convert_to_src_size) -> ObjectsDetected:
         area, threshold, interval, blur = self.parse_settings(settings)
 
         # see get_detection_input_size on undocumented size requirements for GRAY8
@@ -154,8 +152,8 @@ class OpenCVPlugin(DetectPlugin):
             # if w * h != contour_area:
            #     print("mismatch w/h", contour_area - w * h)
 
-            x2, y2, _ = convert_to_src_size((x + w, y + h))
-            x, y, _ = convert_to_src_size((x, y))
+            x2, y2 = convert_to_src_size((x + w, y + h))
+            x, y = convert_to_src_size((x, y))
             w = x2 - x + 1
             h = y2 - y + 1
@@ -206,11 +204,24 @@ class OpenCVPlugin(DetectPlugin):
             detection_session.cap = None
         return super().end_session(detection_session)
 
-    async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
-        # todo
-        raise Exception('can not run motion detection on image')
-
-    async def run_detection_videoframe(self, videoFrame: VideoFrame, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
+    async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
+        try:
+            ds = OpenCVDetectionSession()
+            videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
+            async for videoFrame in videoFrames:
+                detected = await self.run_detection_videoframe(videoFrame, session and session.get('settings'), ds)
+                yield {
+                    '__json_copy_serialize_children': True,
+                    'detected': detected,
+                    'videoFrame': videoFrame,
+                }
+        finally:
+            try:
+                await videoFrames.aclose()
+            except:
+                pass
+
+    async def run_detection_videoframe(self, videoFrame: VideoFrame, settings: Any, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
         width = videoFrame.width
         height = videoFrame.height
 
@@ -238,60 +249,8 @@ class OpenCVPlugin(DetectPlugin):
                 'resize': resize,
             })
 
-        def convert_to_src_size(point, normalize = False):
-            return point[0] * scale, point[1] * scale, True
+        def convert_to_src_size(point):
+            return point[0] * scale, point[1] * scale
         mat = np.ndarray((height, width, self.pixelFormatChannelCount), buffer=buffer, dtype=np.uint8)
-        detections = self.detect(
-            detection_session, mat, (width, height), convert_to_src_size)
+        detections = self.detect(mat, settings, detection_session, (width, height), convert_to_src_size)
         return detections
-
-    async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
-        if avframe.format.name != 'yuv420p' and avframe.format.name != 'yuvj420p':
-            mat = avframe.to_ndarray(format='gray8')
-        else:
-            mat = np.ndarray((avframe.height, avframe.width, self.pixelFormatChannelCount), buffer=avframe.planes[0], dtype=np.uint8)
-        detections = self.detect(
-            detection_session, mat, src_size, convert_to_src_size)
-        if not detections or not len(detections['detections']):
-            await self.detection_sleep(settings)
-            return None, None
-        return detections, None
-
-    async def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
-        buf = gst_sample.get_buffer()
-        caps = gst_sample.get_caps()
-        # can't trust the width value, compute the stride
-        height = caps.get_structure(0).get_value('height')
-        width = caps.get_structure(0).get_value('width')
-        result, info = buf.map(Gst.MapFlags.READ)
-        if not result:
-            return None, None
-        try:
-            mat = np.ndarray(
-                (height,
-                 width,
-                 self.pixelFormatChannelCount),
-                buffer=info.data,
-                dtype=np.uint8)
-            detections = self.detect(
-                detection_session, mat, src_size, convert_to_src_size)
-            # no point in triggering empty events.
-        finally:
-            buf.unmap(info)
-
-        if not detections or not len(detections['detections']):
-            await self.detection_sleep(settings)
-            return None, None
-        return detections, None
-
-    def create_detection_session(self):
-        return OpenCVDetectionSession()
-
-    async def detection_sleep(self, settings: Any):
-        area, threshold, interval, blur = self.parse_settings(settings)
-        # it is safe to block here because gstreamer creates a queue thread
-        await asyncio.sleep(interval / 1000)
-
-    async def detection_event_notified(self, settings: Any):
-        await self.detection_sleep(settings)
-        return await super().detection_event_notified(settings)
```
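The plugin now exposes motion detection as an async generator: it resolves the incoming frame source with connectRPCObject, runs detection per frame, yields { detected, videoFrame } pairs, and closes the source in a finally block. The real implementation is the Python above; the TypeScript sketch below only models that generator contract, and the Detected field names are illustrative:

```ts
// Illustrative model of the generator contract adopted by generateObjectDetections.
interface Detected {
    detections: { className: string; score: number }[];
}

async function* detectionsFromFrames<Frame>(
    videoFrames: AsyncIterable<Frame>,
    detect: (frame: Frame) => Promise<Detected>,
): AsyncGenerator<{ detected: Detected; videoFrame: Frame }> {
    try {
        for await (const videoFrame of videoFrames) {
            // one detection result per incoming frame, paired with the frame itself
            yield { detected: await detect(videoFrame), videoFrame };
        }
    }
    finally {
        // the Python version additionally calls aclose() on the RPC frame source here
    }
}
```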
```diff
@@ -1 +0,0 @@
-../../tensorflow-lite/src/pipeline
```
```diff
@@ -3,10 +3,6 @@ numpy>=1.16.2
 # pillow for anything not intel linux
 Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
 pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
-PyGObject>=3.30.4; sys_platform != 'win32'
 imutils>=0.5.0
 # not available on armhf
-av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
-# not available on armhf
-opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
-
+opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64'
```
plugins/tensorflow/package-lock.json (generated, 4 changed lines)
```diff
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/tensorflow-lite",
-  "version": "0.1.7",
+  "version": "0.1.8",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/tensorflow-lite",
-      "version": "0.1.7",
+      "version": "0.1.8",
       "devDependencies": {
         "@scrypted/sdk": "file:../../sdk"
       }
```

```diff
@@ -41,5 +41,5 @@
   "devDependencies": {
     "@scrypted/sdk": "file:../../sdk"
   },
-  "version": "0.1.7"
+  "version": "0.1.8"
 }
```
plugins/webrtc/package-lock.json (generated, 4 changed lines)
```diff
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/webrtc",
-  "version": "0.1.37",
+  "version": "0.1.38",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/webrtc",
-      "version": "0.1.37",
+      "version": "0.1.38",
       "dependencies": {
         "@scrypted/common": "file:../../common",
         "@scrypted/sdk": "file:../../sdk",
```

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@scrypted/webrtc",
-  "version": "0.1.37",
+  "version": "0.1.38",
   "scripts": {
     "scrypted-setup-project": "scrypted-setup-project",
     "prescrypted-setup-project": "scrypted-package-json",
```
server/package-lock.json (generated, 4 changed lines)
```diff
@@ -1,12 +1,12 @@
 {
   "name": "@scrypted/server",
-  "version": "0.7.37",
+  "version": "0.7.39",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "@scrypted/server",
-      "version": "0.7.37",
+      "version": "0.7.39",
       "license": "ISC",
       "dependencies": {
         "@mapbox/node-pre-gyp": "^1.0.10",
```

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@scrypted/server",
-  "version": "0.7.38",
+  "version": "0.7.41",
   "description": "",
   "dependencies": {
     "@mapbox/node-pre-gyp": "^1.0.10",
```
```diff
@@ -423,7 +423,18 @@ class PluginRemote:
                 pass
 
             if need_pip:
-                shutil.rmtree(python_prefix)
+                try:
+                    for de in os.listdir(plugin_volume):
+                        if de.startswith('linux') or de.startswith('darwin') or de.startswith('win32') or de.startswith('python') or de.startswith('node'):
+                            filePath = os.path.join(plugin_volume, de)
+                            print('Removing old dependencies: %s' % filePath)
+                            try:
+                                shutil.rmtree(filePath)
+                            except:
+                                pass
+                except:
+                    pass
+
                 os.makedirs(python_prefix)
 
                 print('requirements.txt (outdated)')
```
```diff
@@ -311,7 +311,9 @@ export class PluginHost {
 
         this.worker.stdout.on('data', data => console.log(data.toString()));
         this.worker.stderr.on('data', data => console.error(data.toString()));
-        const consoleHeader = `${os.platform()} ${os.arch()} ${os.version()}\nserver version: ${serverVersion}\nplugin version: ${this.pluginId} ${this.packageJson.version}\n`;
+        let consoleHeader = `${os.platform()} ${os.arch()} ${os.version()}\nserver version: ${serverVersion}\nplugin version: ${this.pluginId} ${this.packageJson.version}\n`;
+        if (process.env.SCRYPTED_DOCKER_FLAVOR)
+            consoleHeader += `${process.env.SCRYPTED_DOCKER_FLAVOR}\n`;
         this.consoleServer = createConsoleServer(this.worker.stdout, this.worker.stderr, consoleHeader);
 
         const disconnect = () => {
```