Compare commits


13 Commits

Author         SHA1        Date                        Message
Koushik Dutta  fa86c31340  2023-03-29 12:41:56 -07:00  prerelease
Koushik Dutta  94ded75d40  2023-03-29 12:17:05 -07:00  docker: fix watchtower token
Koushik Dutta  887b61cd7a  2023-03-29 11:58:54 -07:00  prebeta
Koushik Dutta  48e3d30987  2023-03-29 11:58:43 -07:00  server: output docker flavor to logs
Koushik Dutta  02dba3cd71  2023-03-29 11:57:11 -07:00  docker: include flavor in env variable
Koushik Dutta  195769034d  2023-03-29 11:56:50 -07:00  docker: include flavor in env variable
Koushik Dutta  39c08aa378  2023-03-29 10:19:18 -07:00  prebeta
Koushik Dutta  fa8056d38e  2023-03-29 10:18:34 -07:00  python: purge packages on update
Koushik Dutta  145f116c68  2023-03-29 09:37:41 -07:00  webrtc/h264: reset stapa sent flag after every idr frame
Koushik Dutta  15b6f336e4  2023-03-29 08:18:13 -07:00  common: add h264 fragment information parsing
Koushik Dutta  8b46f0a466  2023-03-29 08:17:52 -07:00  opencv: use new pipeline
Koushik Dutta  a20cc5cd89  2023-03-29 08:01:08 -07:00  docker: always install packages for arm
Koushik Dutta  3d068929fd  2023-03-28 19:40:14 -07:00  predict: publish
23 changed files with 101 additions and 111 deletions

View File

@@ -129,6 +129,16 @@ export function getNaluTypes(streamChunk: StreamChunk) {
    return getNaluTypesInNalu(streamChunk.chunks[streamChunk.chunks.length - 1].subarray(12))
}
export function getNaluFragmentInformation(nalu: Buffer) {
    const naluType = nalu[0] & 0x1f;
    const fua = naluType === H264_NAL_TYPE_FU_A;
    return {
        fua,
        fuaStart: fua && !!(nalu[1] & 0x80),
        fuaEnd: fua && !!(nalu[1] & 0x40),
    }
}
export function getNaluTypesInNalu(nalu: Buffer, fuaRequireStart = false, fuaRequireEnd = false) {
    const ret = new Set<number>();
    const naluType = nalu[0] & 0x1f;
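
For reference, a minimal standalone sketch of the same FU-A classification. The payload layout and the FU-A type value 28 come from RFC 6184, not from this diff; only the Node Buffer type is assumed:

    // The low 5 bits of the first payload byte are the NAL unit type.
    // For FU-A (type 28), the second byte is the FU header, whose S (0x80)
    // and E (0x40) bits mark the first and last fragment of the NAL unit.
    const H264_NAL_TYPE_FU_A = 28;

    function describeFragment(nalu: Buffer) {
        const naluType = nalu[0] & 0x1f;
        const fua = naluType === H264_NAL_TYPE_FU_A;
        return {
            fua,
            fuaStart: fua && !!(nalu[1] & 0x80),
            fuaEnd: fua && !!(nalu[1] & 0x40),
        };
    }

    // Example: 0x7c carries NAL type 28 (FU-A); an FU header of 0x40 marks
    // the final fragment of a fragmented NAL unit.
    console.log(describeFragment(Buffer.from([0x7c, 0x40])));
    // -> { fua: true, fuaStart: false, fuaEnd: true }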

View File

@@ -63,7 +63,7 @@ RUN apt-get -y install \
# which causes weird behavior in python which looks at the arch version
# which still reports 64bit, even if running in 32bit docker.
# this scenario is not supported and will be reported at runtime.
RUN if [ "$(uname -m)" = "armv7l" ]; \
RUN if [ "$(uname -m)" != "x86_64" ]; \
then \
apt-get -y install \
python3-matplotlib \
@@ -95,7 +95,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=full
################################################################
# End section generated from template/Dockerfile.full.footer

View File

@@ -42,4 +42,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=lite

View File

@@ -21,4 +21,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=thin

View File

@@ -42,7 +42,7 @@ fi
WATCHTOWER_HTTP_API_TOKEN=$(echo $RANDOM | md5sum)
DOCKER_COMPOSE_YML=$SCRYPTED_HOME/docker-compose.yml
echo "Created $DOCKER_COMPOSE_YML"
curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum)"/g > $DOCKER_COMPOSE_YML
curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum | head -c 32)"/g > $DOCKER_COMPOSE_YML
echo "Setting permissions on $SCRYPTED_HOME"
chown -R $SERVICE_USER $SCRYPTED_HOME
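
Why the added head -c 32: echo $RANDOM | md5sum prints the 32-character hex digest followed by two spaces and a "-" (md5sum's placeholder for stdin), so the previous substitution leaked that suffix into the watchtower token written to docker-compose.yml. head -c 32 keeps only the digest. A hedged Node/TypeScript equivalent of producing such a token, for illustration only (the install script itself stays shell-based):

    import { createHash } from 'crypto';

    // Emulate `echo $RANDOM | md5sum | head -c 32`: a 32-character lowercase
    // hex token with no trailing "  -" filename marker.
    const token = createHash('md5')
        .update(`${Math.floor(Math.random() * 32768)}\n`)
        .digest('hex');
    console.log(token, token.length); // <32 hex characters> 32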

View File

@@ -10,7 +10,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=full
################################################################
# End section generated from template/Dockerfile.full.footer

View File

@@ -60,7 +60,7 @@ RUN apt-get -y install \
# which causes weird behavior in python which looks at the arch version
# which still reports 64bit, even if running in 32bit docker.
# this scenario is not supported and will be reported at runtime.
RUN if [ "$(uname -m)" = "armv7l" ]; \
RUN if [ "$(uname -m)" != "x86_64" ]; \
then \
apt-get -y install \
python3-matplotlib \

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.1.5",
"version": "0.1.8",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.1.5",
"version": "0.1.8",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -41,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.5"
"version": "0.1.8"
}

View File

@@ -64,6 +64,9 @@ export class H264Repacketizer {
    extraPackets = 0;
    fuaMax: number;
    pendingFuA: RtpPacket[];
    // log whether a stapa sps/pps has been seen.
    // resets on every idr frame, to trigger codec information
    // to be resent.
    seenStapASps = false;
    fuaMin: number;
@@ -402,8 +405,12 @@ export class H264Repacketizer {
            // if this is an idr frame, but no sps has been sent via a stapa, dummy one up.
            // the stream may not contain codec information in stapa or may be sending it
            // in separate sps/pps packets which is not supported by homekit.
            if (originalNalType === NAL_TYPE_IDR && !this.seenStapASps)
                this.maybeSendSpsPps(packet, ret);
            if (originalNalType === NAL_TYPE_IDR) {
                if (!this.seenStapASps)
                    this.maybeSendSpsPps(packet, ret);
                this.seenStapASps = false;
            }
        }
        else {
            if (this.pendingFuA) {
@@ -486,10 +493,12 @@ export class H264Repacketizer {
            return;
        }
        if (nalType === NAL_TYPE_IDR && !this.seenStapASps) {
        if (nalType === NAL_TYPE_IDR) {
            // if this is an idr frame, but no sps has been sent, dummy one up.
            // the stream may not contain sps.
            this.maybeSendSpsPps(packet, ret);
            if (!this.seenStapASps)
                this.maybeSendSpsPps(packet, ret);
            this.seenStapASps = false;
        }
        this.fragment(packet, ret);
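
The net effect: every IDR frame now acts as a checkpoint. If no STAP-A carrying SPS/PPS has been observed since the previous IDR, codec information is synthesized via maybeSendSpsPps, and the flag is cleared so the decision is re-made at the next IDR. A reduced sketch of that state machine (NAL_TYPE_IDR = 5 per the H.264 spec; the surrounding repacketizer class is simplified away):

    const NAL_TYPE_IDR = 5;

    class SpsPpsGate {
        // set when a STAP-A containing SPS/PPS passes through; cleared on every IDR.
        seenStapASps = false;

        onStapAWithSpsPps() {
            this.seenStapASps = true;
        }

        onNalu(nalType: number, sendSpsPps: () => void) {
            if (nalType !== NAL_TYPE_IDR)
                return;
            // no codec info announced since the last IDR: dummy one up so
            // receivers (e.g. HomeKit) can decode this GOP.
            if (!this.seenStapASps)
                sendSpsPps();
            // re-evaluate at the next IDR.
            this.seenStapASps = false;
        }
    }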

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/opencv",
"version": "0.0.70",
"version": "0.0.72",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/opencv",
"version": "0.0.70",
"version": "0.0.72",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -36,5 +36,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.70"
"version": "0.0.72"
}

View File

@@ -1,22 +1,21 @@
from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List, Tuple
import numpy as np
import asyncio
import cv2
import imutils
Gst = None
try:
from gi.repository import Gst
except:
pass
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting, VideoFrame
from PIL import Image
import numpy as np
class OpenCVDetectionSession(DetectionSession):
from detect import DetectPlugin
import scrypted_sdk
from scrypted_sdk.types import (ObjectDetectionGeneratorSession,
ObjectDetectionResult,
ObjectsDetected, Setting, VideoFrame)
class OpenCVDetectionSession:
def __init__(self) -> None:
super().__init__()
self.cap: cv2.VideoCapture = None
self.previous_frame: Any = None
self.curFrame = None
@@ -110,8 +109,7 @@ class OpenCVPlugin(DetectPlugin):
blur = int(settings.get('blur', blur))
return area, threshold, interval, blur
def detect(self, detection_session: OpenCVDetectionSession, frame, src_size, convert_to_src_size) -> ObjectsDetected:
settings = detection_session.settings
def detect(self, frame, settings: Any, detection_session: OpenCVDetectionSession, src_size, convert_to_src_size) -> ObjectsDetected:
area, threshold, interval, blur = self.parse_settings(settings)
# see get_detection_input_size on undocumented size requirements for GRAY8
@@ -154,8 +152,8 @@ class OpenCVPlugin(DetectPlugin):
# if w * h != contour_area:
# print("mismatch w/h", contour_area - w * h)
x2, y2, _ = convert_to_src_size((x + w, y + h))
x, y, _ = convert_to_src_size((x, y))
x2, y2 = convert_to_src_size((x + w, y + h))
x, y = convert_to_src_size((x, y))
w = x2 - x + 1
h = y2 - y + 1
@@ -206,11 +204,24 @@ class OpenCVPlugin(DetectPlugin):
detection_session.cap = None
return super().end_session(detection_session)
async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
# todo
raise Exception('can not run motion detection on image')
async def run_detection_videoframe(self, videoFrame: VideoFrame, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
try:
ds = OpenCVDetectionSession()
videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
async for videoFrame in videoFrames:
detected = await self.run_detection_videoframe(videoFrame, session and session.get('settings'), ds)
yield {
'__json_copy_serialize_children': True,
'detected': detected,
'videoFrame': videoFrame,
}
finally:
try:
await videoFrames.aclose()
except:
pass
async def run_detection_videoframe(self, videoFrame: VideoFrame, settings: Any, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
width = videoFrame.width
height = videoFrame.height
@@ -238,60 +249,8 @@ class OpenCVPlugin(DetectPlugin):
'resize': resize,
})
def convert_to_src_size(point, normalize = False):
return point[0] * scale, point[1] * scale, True
def convert_to_src_size(point):
return point[0] * scale, point[1] * scale
mat = np.ndarray((height, width, self.pixelFormatChannelCount), buffer=buffer, dtype=np.uint8)
detections = self.detect(
detection_session, mat, (width, height), convert_to_src_size)
detections = self.detect(mat, settings, detection_session, (width, height), convert_to_src_size)
return detections
async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
if avframe.format.name != 'yuv420p' and avframe.format.name != 'yuvj420p':
mat = avframe.to_ndarray(format='gray8')
else:
mat = np.ndarray((avframe.height, avframe.width, self.pixelFormatChannelCount), buffer=avframe.planes[0], dtype=np.uint8)
detections = self.detect(
detection_session, mat, src_size, convert_to_src_size)
if not detections or not len(detections['detections']):
await self.detection_sleep(settings)
return None, None
return detections, None
async def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
buf = gst_sample.get_buffer()
caps = gst_sample.get_caps()
# can't trust the width value, compute the stride
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
result, info = buf.map(Gst.MapFlags.READ)
if not result:
return None, None
try:
mat = np.ndarray(
(height,
width,
self.pixelFormatChannelCount),
buffer=info.data,
dtype=np.uint8)
detections = self.detect(
detection_session, mat, src_size, convert_to_src_size)
# no point in triggering empty events.
finally:
buf.unmap(info)
if not detections or not len(detections['detections']):
await self.detection_sleep(settings)
return None, None
return detections, None
def create_detection_session(self):
return OpenCVDetectionSession()
async def detection_sleep(self, settings: Any):
area, threshold, interval, blur = self.parse_settings(settings)
# it is safe to block here because gstreamer creates a queue thread
await asyncio.sleep(interval / 1000)
async def detection_event_notified(self, settings: Any):
await self.detection_sleep(settings)
return await super().detection_event_notified(settings)
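
This rewrite replaces the gstreamer/av per-frame entry points with a single generateObjectDetections async generator: each incoming VideoFrame yields a result of the shape { detected, videoFrame }, and the try/finally with aclose() guarantees the upstream frame source is shut down even if the consumer stops iterating early. A generic TypeScript sketch of the same pattern (the Python diff uses scrypted_sdk.sdk.connectRPCObject and aclose(); the names below are illustrative, not SDK API):

    // Wrap a frame stream in an async generator and always close the source,
    // mirroring the try/finally + aclose() structure of the Python change.
    async function* generateDetections<TFrame, TResult>(
        frames: AsyncGenerator<TFrame>,
        detect: (frame: TFrame) => Promise<TResult>,
    ) {
        try {
            for await (const frame of frames) {
                yield { detected: await detect(frame), videoFrame: frame };
            }
        }
        finally {
            // counterpart of videoFrames.aclose(): release the upstream source
            // even if the consumer breaks out of the loop early.
            await frames.return(undefined).catch(() => { });
        }
    }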

View File

@@ -1 +0,0 @@
../../tensorflow-lite/src/pipeline

View File

@@ -3,10 +3,6 @@ numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
PyGObject>=3.30.4; sys_platform != 'win32'
imutils>=0.5.0
# not available on armhf
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# not available on armhf
opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64'

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.7",
"version": "0.1.8",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.7",
"version": "0.1.8",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -41,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.7"
"version": "0.1.8"
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/webrtc",
"version": "0.1.37",
"version": "0.1.38",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/webrtc",
"version": "0.1.37",
"version": "0.1.38",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/webrtc",
"version": "0.1.37",
"version": "0.1.38",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/server",
"version": "0.7.37",
"version": "0.7.39",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/server",
"version": "0.7.37",
"version": "0.7.39",
"license": "ISC",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/server",
"version": "0.7.38",
"version": "0.7.41",
"description": "",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",

View File

@@ -423,7 +423,18 @@ class PluginRemote:
                pass
        if need_pip:
            shutil.rmtree(python_prefix)
            try:
                for de in os.listdir(plugin_volume):
                    if de.startswith('linux') or de.startswith('darwin') or de.startswith('win32') or de.startswith('python') or de.startswith('node'):
                        filePath = os.path.join(plugin_volume, de)
                        print('Removing old dependencies: %s' % filePath)
                        try:
                            shutil.rmtree(filePath)
                        except:
                            pass
            except:
                pass
            os.makedirs(python_prefix)
            print('requirements.txt (outdated)')
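
The purge walks the plugin volume and removes any stale per-platform dependency trees (directory names beginning with linux, darwin, win32, python, or node) before the python prefix is recreated, so an updated plugin never mixes packages installed against an older base image. A hedged TypeScript rendering of the same cleanup, for illustration (the directory-name prefixes come from the diff; paths and error handling are illustrative):

    import * as fs from 'fs';
    import * as path from 'path';

    // Remove stale per-platform dependency directories from a plugin volume,
    // mirroring the Python cleanup added in this change.
    function purgeOldDependencies(pluginVolume: string) {
        const stalePrefixes = ['linux', 'darwin', 'win32', 'python', 'node'];
        for (const entry of fs.readdirSync(pluginVolume)) {
            if (!stalePrefixes.some(prefix => entry.startsWith(prefix)))
                continue;
            const filePath = path.join(pluginVolume, entry);
            console.log(`Removing old dependencies: ${filePath}`);
            // best effort, as in the Python version: ignore failures.
            try {
                fs.rmSync(filePath, { recursive: true, force: true });
            }
            catch {
            }
        }
    }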

View File

@@ -311,7 +311,9 @@ export class PluginHost {
        this.worker.stdout.on('data', data => console.log(data.toString()));
        this.worker.stderr.on('data', data => console.error(data.toString()));
        const consoleHeader = `${os.platform()} ${os.arch()} ${os.version()}\nserver version: ${serverVersion}\nplugin version: ${this.pluginId} ${this.packageJson.version}\n`;
        let consoleHeader = `${os.platform()} ${os.arch()} ${os.version()}\nserver version: ${serverVersion}\nplugin version: ${this.pluginId} ${this.packageJson.version}\n`;
        if (process.env.SCRYPTED_DOCKER_FLAVOR)
            consoleHeader += `${process.env.SCRYPTED_DOCKER_FLAVOR}\n`;
        this.consoleServer = createConsoleServer(this.worker.stdout, this.worker.stderr, consoleHeader);
        const disconnect = () => {