Fix per-frame RPC GC churn: stop creating a new RPC-proxied media object for every generated video frame; instead reuse a single image media object per generator session and yield lightweight, copy-serialized (`__json_copy_serialize_children`) VideoFrame wrappers around it.

This commit is contained in:
Koushik Dutta
2023-05-12 20:26:11 -07:00
parent d8f3edee1e
commit 205fdb0222
18 changed files with 150 additions and 95 deletions

View File

@@ -13,7 +13,7 @@
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.2.85",
"version": "0.2.101",
"dev": true,
"license": "ISC",
"dependencies": {

View File

@@ -39,10 +39,7 @@
"Settings",
"MixinProvider"
],
"realfs": true,
"pluginDependencies": [
"@scrypted/python-codecs"
]
"realfs": true
},
"optionalDependencies": {},
"dependencies": {

View File

@@ -12,16 +12,13 @@ interface RawFrame {
data: Buffer;
}
async function createRawImageMediaObject(image: RawImage): Promise<VideoFrame & MediaObject> {
async function createRawImageMediaObject(image: RawImage): Promise<Image & MediaObject> {
const ret = await sdk.mediaManager.createMediaObject(image, ScryptedMimeTypes.Image, {
format: null,
timestamp: 0,
width: image.width,
height: image.height,
queued: 0,
toBuffer: (options: ImageOptions) => image.toBuffer(options),
toImage: (options: ImageOptions) => image.toImage(options),
flush: async () => { },
});
return ret;
@@ -50,7 +47,7 @@ class RawImage implements Image, RawFrame {
}
export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements VideoFrameGenerator {
async *generateVideoFramesInternal(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): AsyncGenerator<VideoFrame & MediaObject, any, unknown> {
async *generateVideoFramesInternal(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame) => Promise<boolean>): AsyncGenerator<VideoFrame, any, unknown> {
const ffmpegInput = await sdk.mediaManager.convertMediaObjectToJSON<FFmpegInput>(mediaObject, ScryptedMimeTypes.FFmpegInput);
const gray = options?.format === 'gray';
const channels = gray ? 1 : 3;
@@ -138,6 +135,8 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
try {
reader();
const flush = async () => { };
while (!finished) {
frameDeferred = new Deferred();
const raw = await frameDeferred.promise;
@@ -145,8 +144,14 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
const rawImage = new RawImage(data, width, height, format);
try {
const mo = await createRawImageMediaObject(rawImage);
yield mo;
const image = await createRawImageMediaObject(rawImage);
yield {
__json_copy_serialize_children: true,
timestamp: 0,
queued: 0,
image,
flush,
};
}
finally {
rawImage.data = undefined;
@@ -163,7 +168,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
}
async generateVideoFrames(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): Promise<AsyncGenerator<VideoFrame & MediaObject, any, unknown>> {
async generateVideoFrames(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): Promise<AsyncGenerator<VideoFrame, any, unknown>> {
return this.generateVideoFramesInternal(mediaObject, options, filter);
}
}

View File

@@ -27,19 +27,16 @@ catch (e) {
console.warn('Sharp failed to load. FFmpeg Frame Generator will not function properly.')
}
async function createVipsMediaObject(image: VipsImage): Promise<VideoFrame & MediaObject> {
async function createVipsMediaObject(image: VipsImage): Promise<Image & MediaObject> {
const ret = await sdk.mediaManager.createMediaObject(image, ScryptedMimeTypes.Image, {
format: null,
timestamp: 0,
width: image.width,
height: image.height,
queued: 0,
toBuffer: (options: ImageOptions) => image.toBuffer(options),
toImage: async (options: ImageOptions) => {
const newImage = await image.toVipsImage(options);
return createVipsMediaObject(newImage);
},
flush: async () => {},
});
return ret;
@@ -129,7 +126,7 @@ class VipsImage implements Image {
}
export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements VideoFrameGenerator {
async *generateVideoFramesInternal(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): AsyncGenerator<VideoFrame & MediaObject, any, unknown> {
async *generateVideoFramesInternal(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame) => Promise<boolean>): AsyncGenerator<VideoFrame, any, unknown> {
const ffmpegInput = await sdk.mediaManager.convertMediaObjectToJSON<FFmpegInput>(mediaObject, ScryptedMimeTypes.FFmpegInput);
const gray = options?.format === 'gray';
const channels = gray ? 1 : 3;
@@ -207,6 +204,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
try {
reader();
const flush = async () => { };
while (!finished) {
frameDeferred = new Deferred();
const raw = await frameDeferred.promise;
@@ -221,8 +219,14 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
});
const vipsImage = new VipsImage(image, width, height, channels);
try {
const mo = await createVipsMediaObject(vipsImage);
yield mo;
const image = await createVipsMediaObject(vipsImage);
yield {
__json_copy_serialize_children: true,
timestamp: 0,
queued: 0,
image,
flush,
};
}
finally {
vipsImage.image = undefined;
@@ -240,7 +244,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
}
async generateVideoFrames(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): Promise<AsyncGenerator<VideoFrame & MediaObject, any, unknown>> {
async generateVideoFrames(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame) => Promise<boolean>): Promise<AsyncGenerator<VideoFrame, any, unknown>> {
return this.generateVideoFramesInternal(mediaObject, options, filter);
}
}

View File

@@ -1,6 +1,6 @@
import { Deferred } from '@scrypted/common/src/deferred';
import { sleep } from '@scrypted/common/src/sleep';
import sdk, { Camera, DeviceProvider, DeviceState, EventListenerRegister, MediaObject, MediaStreamDestination, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionGeneratorResult, ObjectDetectionModel, ObjectDetectionTypes, ObjectDetectionZone, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, VideoFrame, VideoFrameGenerator } from '@scrypted/sdk';
import sdk, { Camera, DeviceProvider, DeviceState, EventListenerRegister, Image, MediaObject, MediaStreamDestination, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionGeneratorResult, ObjectDetectionModel, ObjectDetectionTypes, ObjectDetectionZone, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, VideoFrame, VideoFrameGenerator } from '@scrypted/sdk';
import { StorageSettings } from '@scrypted/sdk/storage-settings';
import crypto from 'crypto';
import { AutoenableMixinProvider } from "../../../common/src/autoenable-mixin-provider";
@@ -297,7 +297,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
async createFrameGenerator(signal: Deferred<void>, options: {
snapshotPipeline: boolean,
suppress?: boolean,
}, updatePipelineStatus: (status: string) => void) {
}, updatePipelineStatus: (status: string) => void): Promise<AsyncGenerator<VideoFrame, any, unknown>> {
let frameGenerator: string = this.frameGenerator;
if (!this.hasMotionType && options.snapshotPipeline) {
@@ -311,6 +311,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
const self = this;
return (async function* gen() {
try {
const flush = async () => {};
while (!signal.finished) {
const now = Date.now();
const sleeper = async () => {
@@ -318,7 +319,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (diff > 0)
await sleep(diff);
};
let image: MediaObject & VideoFrame;
let image: Image & MediaObject;
try {
updatePipelineStatus('takePicture');
const mo = await self.cameraDevice.takePicture({
@@ -335,7 +336,13 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
// self.console.log('yield')
updatePipelineStatus('processing image');
yield image;
yield {
__json_copy_serialize_children: true,
timestamp: now,
queued: 0,
flush,
image,
};
// self.console.log('done yield')
await sleeper();
}
@@ -462,8 +469,9 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (detected.detected.detectionId) {
updatePipelineStatus('creating jpeg');
// const start = Date.now();
const vf = await sdk.connectRPCObject(detected.videoFrame);
const jpeg = await vf.toBuffer({
let { image } = detected.videoFrame;
image = await sdk.connectRPCObject(image);
const jpeg = await image.toBuffer({
format: 'jpg',
});
const mo = await sdk.mediaManager.createMediaObject(jpeg, 'image/jpeg');
@@ -980,12 +988,18 @@ class ObjectDetectionPlugin extends AutoenableMixinProvider implements Settings,
shouldUseSnapshotPipeline() {
this.pruneOldStatistics();
// find any concurrent cameras with as many or more that had passable results
for (const [k, v] of this.objectDetectionStatistics.entries()) {
if (v.dps > 2 && k >= this.statsSnapshotConcurrent)
return false;
}
// find any concurrent camera with less or as many that had struggle bus
for (const [k, v] of this.objectDetectionStatistics.entries()) {
// check the stats history to see if any sessions
// with same or lower number of cameras were on the struggle bus.
if (v.dps < 2 && k <= this.statsSnapshotConcurrent)
return true;
}
return false;
}

View File

@@ -5,12 +5,12 @@
// "scrypted.serverRoot": "/home/pi/.scrypted",
// docker installation
"scrypted.debugHost": "koushik-ubuntu",
"scrypted.serverRoot": "/server",
// "scrypted.debugHost": "koushik-ubuntu",
// "scrypted.serverRoot": "/server",
// local checkout
// "scrypted.debugHost": "127.0.0.1",
// "scrypted.serverRoot": "/Users/koush/.scrypted",
"scrypted.debugHost": "127.0.0.1",
"scrypted.serverRoot": "/Users/koush/.scrypted",
// "scrypted.debugHost": "koushik-windows",
// "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",

View File

@@ -30,7 +30,8 @@
"Settings"
],
"pluginDependencies": [
"@scrypted/objectdetector"
"@scrypted/objectdetector",
"@scrypted/python-codecs"
]
},
"devDependencies": {

View File

@@ -212,7 +212,7 @@ class OpenCVPlugin(DetectPlugin):
settings['session'] = OpenCVDetectionSession()
return super().generateObjectDetections(videoFrames, detection_session)
async def run_detection_videoframe(self, videoFrame: VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
async def run_detection_image(self, videoFrame: scrypted_sdk.Image, detection_session: ObjectDetectionSession) -> ObjectsDetected:
width = videoFrame.width
height = videoFrame.height

View File

@@ -70,8 +70,10 @@ TUPLTYPE RGB
ENDHDR
`;
const buffer = await videoFrame.toBuffer({
resize: (videoFrame.width !== width || videoFrame.height !== height) ? {
const { image } = videoFrame;
const buffer = await image.toBuffer({
resize: (image.width !== width || image.height !== height) ? {
width,
height,
} : undefined,

View File

@@ -0,0 +1,14 @@
import scrypted_sdk
import time
async def flush():
pass
def createVideoFrame(image) -> scrypted_sdk.VideoFrame:
return {
'__json_copy_serialize_children': True,
'image': image,
'queued': 0,
'timestamp': time.time() * 1000,
'flush': flush,
}

View File

@@ -6,6 +6,7 @@ from urllib.parse import urlparse
import vipsimage
import pilimage
import platform
from generator_common import createVideoFrame
Gst = None
try:
@@ -87,6 +88,11 @@ async def generateVideoFramesGstreamer(mediaObject: scrypted_sdk.MediaObject, op
videosrc += ' ! {decoder} ! queue leaky=downstream max-size-buffers=0 ! videoconvert ! {videorate} {videocaps}'.format(decoder=decoder, videocaps=videocaps, videorate=videorate)
gst, gen = await createPipelineIterator(videosrc)
vipsImage: vipsimage.VipsImage = None
pilImage: pilimage.PILImage = None
mo: scrypted_sdk.MediaObject = None
async for gstsample in gen():
caps = gstsample.get_caps()
height = caps.get_structure(0).get_value('height')
@@ -99,19 +105,27 @@ async def generateVideoFramesGstreamer(mediaObject: scrypted_sdk.MediaObject, op
try:
if vipsimage.pyvips:
vips = vipsimage.new_from_memory(info.data, width, height, bands)
vipsImage = vipsimage.VipsImage(vips)
try:
if not mo:
vipsImage = vipsimage.VipsImage(vips)
mo = await vipsimage.createVipsMediaObject(vipsImage)
yield mo
vipsImage.vipsImage = vips
try:
yield createVideoFrame(mo)
finally:
vipsImage.vipsImage = None
vips.invalidate()
else:
pil = pilimage.new_from_memory(info.data, width, height, bands)
pilImage = pilimage.PILImage(pil)
try:
if not mo:
pilImage = pilimage.PILImage(pil)
mo = await pilimage.createPILMediaObject(pilImage)
yield mo
pilImage.pilImage = pil
try:
yield createVideoFrame(mo)
finally:
pilImage.pilImage = None
pil.close()

View File

@@ -3,6 +3,7 @@ import scrypted_sdk
from typing import Any
import vipsimage
import pilimage
from generator_common import createVideoFrame
av = None
try:
@@ -30,6 +31,10 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
start = 0
try:
vipsImage: vipsimage.VipsImage = None
pilImage: pilimage.PILImage = None
mo: scrypted_sdk.MediaObject = None
for idx, frame in enumerate(container.decode(stream)):
now = time.time()
if not start:
@@ -46,10 +51,14 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='gray'))
else:
vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
vipsImage = vipsimage.VipsImage(vips)
try:
if not mo:
vipsImage = vipsimage.VipsImage(vips)
mo = await vipsimage.createVipsMediaObject(vipsImage)
yield mo
vipsImage.vipsImage = vips
try:
yield createVideoFrame(mo)
finally:
vipsImage.vipsImage = None
vips.invalidate()
@@ -64,10 +73,14 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
rgb.close()
else:
pil = frame.to_image()
pilImage = pilimage.PILImage(pil)
try:
if not mo:
pilImage = pilimage.PILImage(pil)
mo = await pilimage.createPILMediaObject(pilImage)
yield mo
pilImage.pilImage = pil
try:
yield createVideoFrame(mo)
finally:
pilImage.pilImage = None
pil.close()

View File

@@ -2,7 +2,6 @@ import scrypted_sdk
from typing import Any
from thread import to_thread
import io
import time
try:
from PIL import Image
@@ -10,7 +9,7 @@ except:
# Image = None
pass
class PILImage(scrypted_sdk.VideoFrame):
class PILImage(scrypted_sdk.Image):
def __init__(self, pilImage: Image.Image) -> None:
super().__init__()
self.pilImage = pilImage
@@ -91,7 +90,6 @@ def toPILImage(pilImageWrapper: PILImage, options: scrypted_sdk.ImageOptions = N
async def createPILMediaObject(image: PILImage):
ret = await scrypted_sdk.mediaManager.createMediaObject(image, scrypted_sdk.ScryptedMimeTypes.Image.value, {
'timestamp': time.time() * 1000,
'format': None,
'width': image.width,
'height': image.height,

View File

@@ -1,4 +1,5 @@
import scrypted_sdk
import asyncio
from typing import Any
try:
import pyvips
@@ -7,9 +8,8 @@ except:
Image = None
pyvips = None
from thread import to_thread
import time
class VipsImage(scrypted_sdk.VideoFrame):
class VipsImage(scrypted_sdk.Image):
def __init__(self, vipsImage: Image) -> None:
super().__init__()
self.vipsImage = vipsImage
@@ -91,7 +91,6 @@ def toVipsImage(vipsImageWrapper: VipsImage, options: scrypted_sdk.ImageOptions
async def createVipsMediaObject(image: VipsImage):
ret = await scrypted_sdk.mediaManager.createMediaObject(image, scrypted_sdk.ScryptedMimeTypes.Image.value, {
'timestamp': time.time() * 1000,
'format': None,
'width': image.width,
'height': image.height,

View File

@@ -47,15 +47,16 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
def get_detection_input_size(self, src_size):
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
async def run_detection_image(self, videoFrame: scrypted_sdk.Image, detection_session: ObjectDetectionSession) -> ObjectsDetected:
pass
async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
try:
videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
videoFrame: scrypted_sdk.VideoFrame
async for videoFrame in videoFrames:
videoFrame = await scrypted_sdk.sdk.connectRPCObject(videoFrame)
detected = await self.run_detection_videoframe(videoFrame, session)
image = await scrypted_sdk.sdk.connectRPCObject(videoFrame['image'])
detected = await self.run_detection_image(image, session)
yield {
'__json_copy_serialize_children': True,
'detected': detected,
@@ -68,10 +69,10 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
pass
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None) -> ObjectsDetected:
vf: scrypted_sdk.VideoFrame
if mediaObject and mediaObject.mimeType == ScryptedMimeTypes.Image.value:
vf = await scrypted_sdk.sdk.connectRPCObject(mediaObject)
image: scrypted_sdk.Image
if mediaObject.mimeType == ScryptedMimeTypes.Image.value:
image = await scrypted_sdk.sdk.connectRPCObject(mediaObject)
else:
vf = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)
image = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)
return await self.run_detection_videoframe(vf, session)
return await self.run_detection_image(image, session)

View File

@@ -186,9 +186,9 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
async def run_detection_image(self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession) -> ObjectsDetected:
settings = detection_session and detection_session.get('settings')
src_size = videoFrame.width, videoFrame.height
src_size = image.width, image.height
w, h = self.get_input_size()
input_aspect_ratio = w / h
iw, ih = src_size
@@ -210,16 +210,16 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
'height': h,
}
data = await videoFrame.toBuffer({
data = await image.toBuffer({
'resize': resize,
'format': videoFrame.format or 'rgb',
'format': image.format or 'rgb',
})
image = await ensureRGBData(data, (w, h), videoFrame.format)
single = await ensureRGBData(data, (w, h), image.format)
try:
ret = await self.detect_once(image, settings, src_size, cvss)
ret = await self.detect_once(single, settings, src_size, cvss)
return ret
finally:
image.close()
single.close()
sw = int(w / s)
sh = int(h / s)
@@ -231,7 +231,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
second_crop = (ow, oh, ow + sw, oh + sh)
firstData, secondData = await asyncio.gather(
videoFrame.toBuffer({
image.toBuffer({
'resize': {
'width': w,
'height': h,
@@ -242,9 +242,9 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
'width': sw,
'height': sh,
},
'format': videoFrame.format or 'rgb',
'format': image.format or 'rgb',
}),
videoFrame.toBuffer({
image.toBuffer({
'resize': {
'width': w,
'height': h,
@@ -255,13 +255,13 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
'width': sw,
'height': sh,
},
'format': videoFrame.format or 'rgb',
'format': image.format or 'rgb',
})
)
first, second = await asyncio.gather(
ensureRGBData(firstData, (w, h), videoFrame.format),
ensureRGBData(secondData, (w, h), videoFrame.format)
ensureRGBData(firstData, (w, h), image.format),
ensureRGBData(secondData, (w, h), image.format)
)
def cvss1(point):

View File

@@ -500,7 +500,7 @@ class NotifierOptions(TypedDict):
class ObjectDetectionGeneratorResult(TypedDict):
__json_copy_serialize_children: Any
detected: ObjectsDetected
videoFrame: Union[VideoFrame, MediaObject]
videoFrame: VideoFrame
pass
class ObjectDetectionGeneratorSession(TypedDict):
@@ -955,7 +955,7 @@ class OauthClient:
class ObjectDetection:
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None) -> ObjectsDetected:
pass
async def generateObjectDetections(self, videoFrames: AsyncGenerator, session: ObjectDetectionGeneratorSession) -> ObjectDetectionGeneratorResult:
async def generateObjectDetections(self, videoFrames: VideoFrame, session: ObjectDetectionGeneratorSession) -> ObjectDetectionGeneratorResult:
pass
async def getDetectionModel(self, settings: Any = None) -> ObjectDetectionModel:
pass
@@ -1198,7 +1198,7 @@ class VideoClips:
pass
class VideoFrameGenerator:
async def generateVideoFrames(self, mediaObject: MediaObject, options: VideoFrameGeneratorOptions = None, filter: Any = None) -> AsyncGenerator:
async def generateVideoFrames(self, mediaObject: MediaObject, options: VideoFrameGeneratorOptions = None, filter: Any = None) -> VideoFrame:
pass
pass
@@ -2585,17 +2585,12 @@ class HttpResponse:
pass
class VideoFrame:
format: ImageFormat
height: float
__json_copy_serialize_children: Any
image: Union[Image, MediaObject]
queued: float
timestamp: float
width: float
async def flush(self, count: float = None) -> None:
pass
async def toBuffer(self, options: ImageOptions = None) -> bytearray:
pass
async def toImage(self, options: ImageOptions = None) -> Union[Image, MediaObject]:
pass
pass
class Image:

View File

@@ -1318,13 +1318,9 @@ export interface ObjectDetectionModel extends ObjectDetectionTypes {
triggerClasses?: string[];
prebuffer?: number;
}
export interface ObjectDetectionCallbacks {
onDetection(detection: ObjectsDetected, redetect?: (boundingBox: [number, number, number, number]) => Promise<ObjectDetectionResult[]>, mediaObject?: MediaObject): Promise<boolean>;
onDetectionEnded(detection: ObjectsDetected): Promise<void>;
}
export interface ObjectDetectionGeneratorResult {
__json_copy_serialize_children: true,
videoFrame: VideoFrame & MediaObject;
__json_copy_serialize_children: true;
videoFrame: VideoFrame;
detected: ObjectsDetected;
}
export interface ObjectDetectionZone {
@@ -1338,7 +1334,7 @@ export interface ObjectDetectionZone {
* E.g. TensorFlow, OpenCV, or a Coral TPU.
*/
export interface ObjectDetection {
generateObjectDetections(videoFrames: AsyncGenerator<VideoFrame & MediaObject, void>, session: ObjectDetectionGeneratorSession): Promise<AsyncGenerator<ObjectDetectionGeneratorResult, void>>;
generateObjectDetections(videoFrames: AsyncGenerator<VideoFrame, void>, session: ObjectDetectionGeneratorSession): Promise<AsyncGenerator<ObjectDetectionGeneratorResult, void>>;
detectObjects(mediaObject: MediaObject, session?: ObjectDetectionSession): Promise<ObjectsDetected>;
getDetectionModel(settings?: { [key: string]: any }): Promise<ObjectDetectionModel>;
}
@@ -1368,9 +1364,11 @@ export interface Image {
toBuffer(options?: ImageOptions): Promise<Buffer>;
toImage(options?: ImageOptions): Promise<Image & MediaObject>;
}
export interface VideoFrame extends Image {
export interface VideoFrame {
__json_copy_serialize_children: true;
timestamp: number;
queued: number;
image: Image & MediaObject;
flush(count?: number): Promise<void>;
}
export interface VideoFrameGeneratorOptions extends ImageOptions {
@@ -1378,7 +1376,7 @@ export interface VideoFrameGeneratorOptions extends ImageOptions {
fps?: number;
}
export interface VideoFrameGenerator {
generateVideoFrames(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): Promise<AsyncGenerator<VideoFrame & MediaObject>>;
generateVideoFrames(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame) => Promise<boolean>): Promise<AsyncGenerator<VideoFrame>>;
}
/**
* Logger is exposed via log.* to allow writing to the Scrypted log.