"""Pydantic request/response models for the inference stream manager API."""
from typing import List, Optional, Union
from pydantic import BaseModel, Field
from inference.core.interfaces.camera.video_source import (
BufferConsumptionStrategy,
BufferFillingStrategy,
)
class UDPSinkConfiguration(BaseModel):
    """Describes the UDP socket that pipeline predictions are sent to."""

    # Discriminator identifying this sink-configuration variant.
    type: str = Field(
        default="udp_sink",
        description="Type identifier field. Must be `udp_sink`",
    )
    # Target address of the UDP sink.
    host: str = Field(description="Host of UDP sink.")
    port: int = Field(description="Port of UDP sink.")
class ObjectDetectionModelConfiguration(BaseModel):
    """Optional overrides of object-detection post-processing parameters.

    Every override defaults to ``None``, in which case the model default or
    the InferencePipeline host environment setting is used instead.
    """

    # Discriminator identifying this model-configuration variant.
    type: str = Field(
        default="object-detection",
        description="Type identifier field. Must be `object-detection`",
    )
    class_agnostic_nms: Optional[bool] = Field(
        default=None,
        description="Flag to decide if class agnostic NMS to be applied. If not given, default or InferencePipeline host env will be used.",
    )
    confidence: Optional[float] = Field(
        default=None,
        description="Confidence threshold for predictions. If not given, default or InferencePipeline host env will be used.",
    )
    iou_threshold: Optional[float] = Field(
        default=None,
        description="IoU threshold of post-processing. If not given, default or InferencePipeline host env will be used.",
    )
    max_candidates: Optional[int] = Field(
        default=None,
        description="Max candidates in post-processing. If not given, default or InferencePipeline host env will be used.",
    )
    max_detections: Optional[int] = Field(
        default=None,
        description="Max detections in post-processing. If not given, default or InferencePipeline host env will be used.",
    )
class PipelineInitialisationRequest(BaseModel):
    """Payload required to spawn a new InferencePipeline instance."""

    # Model and video input selection.
    model_id: str = Field(description="Roboflow model id")
    video_reference: Union[str, int] = Field(
        description="Reference to video source - either stream, video file or device. It must be accessible from the host running inference stream"
    )
    # Where predictions are emitted.
    sink_configuration: UDPSinkConfiguration = Field(
        description="Configuration of the sink."
    )
    api_key: Optional[str] = Field(default=None, description="Roboflow API key")
    max_fps: Optional[Union[float, int]] = Field(
        default=None,
        description="Limit of FPS in video processing.",
    )
    # Buffer strategies are passed as raw strings; valid values are the
    # members of the corresponding enums listed in each description.
    source_buffer_filling_strategy: Optional[str] = Field(
        default=None,
        description=f"`source_buffer_filling_strategy` parameter of Inference Pipeline (see docs). One of {[e.value for e in BufferFillingStrategy]}",
    )
    source_buffer_consumption_strategy: Optional[str] = Field(
        default=None,
        description=f"`source_buffer_consumption_strategy` parameter of Inference Pipeline (see docs). One of {[e.value for e in BufferConsumptionStrategy]}",
    )
    # Falls back to an all-defaults configuration when not provided.
    model_configuration: ObjectDetectionModelConfiguration = Field(
        default_factory=ObjectDetectionModelConfiguration,
        description="Configuration of the model",
    )
    active_learning_enabled: Optional[bool] = Field(
        default=None,
        description="Flag to decide if Active Learning middleware should be enabled. If not given - env variable `ACTIVE_LEARNING_ENABLED` will be used (with default `True`).",
    )
class CommandContext(BaseModel):
    """Identifies the request and pipeline a command response refers to."""

    # Both identifiers are optional: some commands are not tied to a
    # particular request or pipeline.
    request_id: Optional[str] = Field(
        default=None,
        description="Server-side request ID",
    )
    pipeline_id: Optional[str] = Field(
        default=None,
        description="Identifier of pipeline connected to operation",
    )
class CommandResponse(BaseModel):
    """Base response for management commands: a status plus its context."""

    status: str = Field(description="Operation status")
    context: CommandContext = Field(description="Context of the command.")
class InferencePipelineStatusResponse(CommandResponse):
    """Command response carrying a pipeline status report."""

    # Free-form status report; structure is determined by the pipeline.
    report: dict
class ListPipelinesResponse(CommandResponse):
    """Command response enumerating the currently active pipelines."""

    pipelines: List[str] = Field(description="List IDs of active pipelines")