openvino: add new model; use Hugging Face as the model source

This commit is contained in:
Koushik Dutta
2026-01-10 15:02:20 -08:00
parent af8abb6072
commit 38bac58fc6
8 changed files with 52 additions and 41 deletions

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/openvino",
"version": "0.1.192",
"version": "0.1.193",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/openvino",
"version": "0.1.192",
"version": "0.1.193",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -50,5 +50,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.192"
"version": "0.1.193"
}

View File

@@ -1,5 +1,6 @@
from __future__ import annotations
import os
import asyncio
import concurrent.futures
import json
@@ -168,15 +169,8 @@ class OpenVINOPlugin(
ovmodel = "best-converted"
model_version = "v7"
xmlFile = self.downloadFile(
f"https://huggingface.co/scrypted/plugin-models/resolve/main/openvino/{model}/{ovmodel}.xml",
f"{model_version}/{model}/{ovmodel}.xml",
)
self.downloadFile(
f"https://huggingface.co/scrypted/plugin-models/resolve/main/openvino/{model}/{ovmodel}.bin",
f"{model_version}/{model}/{ovmodel}.bin",
)
model_path = self.downloadHuggingFaceModelLocalFallback(model)
xmlFile = os.path.join(model_path, f"{ovmodel}.xml")
try:
self.compiled_model = self.core.compile_model(xmlFile, mode)

View File

@@ -1,11 +1,12 @@
from __future__ import annotations
import asyncio
import os
import numpy as np
import openvino as ov
from PIL import Image
import openvino as ov
from ov import async_infer
from predict.face_recognize import FaceRecognizeDetection
@@ -20,18 +21,12 @@ class OpenVINOFaceRecognition(FaceRecognizeDetection):
super().__init__(plugin=plugin, nativeId=nativeId)
def downloadModel(self, model: str):
scrypted_yolov9 = "scrypted_yolov9" in model
inception = "inception" in model
ovmodel = "best-converted" if scrypted_yolov9 else "best"
model_version = "v8"
xmlFile = self.downloadFile(
f"https://huggingface.co/scrypted/plugin-models/resolve/main/openvino/{model}/{ovmodel}.xml",
f"{model_version}/{model}/{ovmodel}.xml",
)
self.downloadFile(
f"https://huggingface.co/scrypted/plugin-models/resolve/main/openvino/{model}/{ovmodel}.bin",
f"{model_version}/{model}/{ovmodel}.bin",
)
ovmodel = "best-converted" if not inception else "best"
if not inception:
model = model + "_int8"
model_path = self.downloadHuggingFaceModelLocalFallback(model)
xmlFile = os.path.join(model_path, f"{ovmodel}.xml")
if inception:
model = self.plugin.core.read_model(xmlFile)
model.reshape([1, 3, 160, 160])

View File

@@ -1,10 +1,11 @@
from __future__ import annotations
import asyncio
import os
import numpy as np
import openvino as ov
import openvino as ov
from ov import async_infer
from predict.text_recognize import TextRecognition
@@ -17,15 +18,8 @@ textRecognizePrepare, textRecognizePredict = async_infer.create_executors(
class OpenVINOTextRecognition(TextRecognition):
def downloadModel(self, model: str):
ovmodel = "best"
model_version = "v6"
xmlFile = self.downloadFile(
f"https://huggingface.co/scrypted/plugin-models/resolve/main/openvino/{model}/{ovmodel}.xml",
f"{model_version}/{model}/{ovmodel}.xml",
)
self.downloadFile(
f"https://huggingface.co/scrypted/plugin-models/resolve/main/openvino/{model}/{ovmodel}.bin",
f"{model_version}/{model}/{ovmodel}.bin",
)
model_path = self.downloadHuggingFaceModelLocalFallback(model)
xmlFile = os.path.join(model_path, f"{ovmodel}.xml")
if "vgg" in model:
model = self.plugin.core.read_model(xmlFile)
# this reshape causes a crash on GPU but causes a crash if NOT used with NPU...

View File

@@ -20,6 +20,10 @@ import common.colors
from detect import DetectPlugin
from predict.rectangle import Rectangle
cache_dir = os.path.join(os.environ["SCRYPTED_PLUGIN_VOLUME"], "files", "hf")
os.makedirs(cache_dir, exist_ok=True)
os.environ['HF_HUB_CACHE'] = cache_dir
original_getaddrinfo = socket.getaddrinfo
# Sort the results to put IPv4 addresses first
@@ -84,6 +88,32 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.ClusterForkInterface, scrypted_sd
if not self.plugin and not self.forked:
asyncio.ensure_future(self.startCluster(), loop=self.loop)
def downloadHuggingFaceModel(self, model: str, local_files_only: bool = False) -> str:
    """Fetch a model directory from the scrypted/plugin-models Hugging Face repo.

    Downloads (or resolves from the local HF cache when ``local_files_only``
    is True) only the files under ``<plugin>/<model>/`` in the repo, then
    returns the absolute path of that model directory inside the snapshot.
    """
    from huggingface_hub import snapshot_download

    # pluginId is "<scope>/<plugin>"; the repo is laid out by plugin name.
    plugin_dir = self.pluginId.split('/')[1]
    snapshot_root = snapshot_download(
        repo_id="scrypted/plugin-models",
        allow_patterns=f"{plugin_dir}/{model}/*",
        local_files_only=local_files_only,
    )
    return os.path.join(snapshot_root, plugin_dir, model)
def downloadHuggingFaceModelLocalFallback(self, model: str) -> str:
    """Download a model, falling back to the local Hugging Face cache.

    First attempts an online download/refresh; if that raises (network or
    firewall issues), logs the failure and retries in offline mode so a
    previously cached copy can still be used.
    """
    try:
        refreshed_path = self.downloadHuggingFaceModel(model)
    except Exception:
        # Online fetch failed; log and fall through to the offline cache.
        traceback.print_exc()
        print("Unable to download model:", model)
        print('This may be due to network or firewall issues.')
        print("Trying model from Hugging Face Hub (offline):", model)
        return self.downloadHuggingFaceModel(model, local_files_only=True)
    print("Downloaded/refreshed model:", model)
    return refreshed_path
def downloadFile(self, url: str, filename: str):
try:
filesPath = os.path.join(os.environ["SCRYPTED_PLUGIN_VOLUME"], "files")

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
import asyncio
import base64
import os
from typing import Tuple
import scrypted_sdk
@@ -25,30 +24,26 @@ class ClipEmbedding(PredictPlugin, scrypted_sdk.TextEmbedding, scrypted_sdk.Imag
self.minThreshold = 0.5
self.model = self.initModel()
cache_dir = os.path.join(os.environ["SCRYPTED_PLUGIN_VOLUME"], "files", "hf")
os.makedirs(cache_dir, exist_ok=True)
self.processor = None
print("Loading CLIP processor from local cache.")
try:
self.processor = CLIPProcessor.from_pretrained(
hf_id,
cache_dir=cache_dir,
local_files_only=True,
)
print("Loaded CLIP processor from local cache.")
except Exception:
print("CLIP processor not available in local cache yet.")
asyncio.ensure_future(self.refreshClipProcessor(hf_id, cache_dir), loop=self.loop)
asyncio.ensure_future(self.refreshClipProcessor(hf_id), loop=self.loop)
async def refreshClipProcessor(self, hf_id: str, cache_dir: str):
async def refreshClipProcessor(self, hf_id: str):
try:
print("Refreshing CLIP processor cache (online).")
processor = await asyncio.to_thread(
CLIPProcessor.from_pretrained,
hf_id,
cache_dir=cache_dir,
)
self.processor = processor
print("Refreshed CLIP processor cache.")

View File

@@ -13,4 +13,7 @@ openvino==2024.5.0
Pillow==10.3.0
opencv-python-headless==4.10.0.84
# clip processor
transformers==4.52.4
# model downloads
huggingface-hub