import time
import uuid
from collections.abc import Iterator
from typing import Literal

from llama_index.llms import ChatResponse, CompletionResponse
from pydantic import BaseModel, Field

from private_gpt.server.chunks.chunks_service import Chunk


class OpenAIDelta(BaseModel):
    """A piece of completion that needs to be concatenated to get the full message."""

    content: str | None


class OpenAIMessage(BaseModel):
    """Inference result, with the source of the message.

    Role could be the assistant or system
    (providing a default response, not AI generated).
    """

    role: Literal["assistant", "system", "user"] = Field(default="user")
    content: str | None


class OpenAIChoice(BaseModel):
    """Response from AI.

    Either the delta or the message will be present, but never both.
    Sources used will be returned in case context retrieval was enabled.
    """

    finish_reason: str | None = Field(examples=["stop"])
    delta: OpenAIDelta | None = None
    message: OpenAIMessage | None = None
    sources: list[Chunk] | None = None
    index: int = 0
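
# Illustration (assumed values): a mid-stream choice carries only a delta, while a
# final choice carries only a full message, never both.
#   OpenAIChoice(delta=OpenAIDelta(content="Hel"), finish_reason=None)
#   OpenAIChoice(message=OpenAIMessage(role="assistant", content="Hello"),
#                finish_reason="stop")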


class OpenAICompletion(BaseModel):
    """Clone of OpenAI Completion model.

    For more information see: https://platform.openai.com/docs/api-reference/chat/object
    """

    id: str
    object: Literal["completion", "completion.chunk"] = Field(default="completion")
    created: int = Field(..., examples=[1623340000])
    model: Literal["private-gpt"]
    choices: list[OpenAIChoice]

    @classmethod
    def from_text(
        cls,
        text: str | None,
        finish_reason: str | None = None,
        sources: list[Chunk] | None = None,
    ) -> "OpenAICompletion":
        return OpenAICompletion(
            id=str(uuid.uuid4()),
            object="completion",
            created=int(time.time()),
            model="private-gpt",
            choices=[
                OpenAIChoice(
                    message=OpenAIMessage(role="assistant", content=text),
                    finish_reason=finish_reason,
                    sources=sources,
                )
            ],
        )

    @classmethod
    def json_from_delta(
        cls,
        *,
        text: str | None,
        finish_reason: str | None = None,
        sources: list[Chunk] | None = None,
    ) -> str:
        chunk = OpenAICompletion(
            id=str(uuid.uuid4()),
            object="completion.chunk",
            created=int(time.time()),
            model="private-gpt",
            choices=[
                OpenAIChoice(
                    delta=OpenAIDelta(content=text),
                    finish_reason=finish_reason,
                    sources=sources,
                )
            ],
        )
        return chunk.model_dump_json()
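
# Example serialized form (the id and created values are illustrative; the rest
# follows from the model definitions above):
#   OpenAICompletion.from_text("Hello", finish_reason="stop").model_dump_json()
#   -> {"id": "...", "object": "completion", "created": 1623340000,
#       "model": "private-gpt",
#       "choices": [{"finish_reason": "stop", "delta": null,
#                    "message": {"role": "assistant", "content": "Hello"},
#                    "sources": null, "index": 0}]}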


def to_openai_response(
    response: str | ChatResponse, sources: list[Chunk] | None = None
) -> OpenAICompletion:
    """Wrap a plain string or a llama-index ChatResponse in an OpenAICompletion."""
    if isinstance(response, ChatResponse):
        return OpenAICompletion.from_text(response.delta, finish_reason="stop")
    else:
        return OpenAICompletion.from_text(
            response, finish_reason="stop", sources=sources
        )


def to_openai_sse_stream(
    response_generator: Iterator[str | CompletionResponse | ChatResponse],
    sources: list[Chunk] | None = None,
) -> Iterator[str]:
    """Format a token stream as server-sent events, OpenAI style.

    Each chunk becomes a "data: <json>" frame followed by a blank line; the
    stream ends with a final stop chunk and a "data: [DONE]" sentinel.
    """
    for response in response_generator:
        if isinstance(response, CompletionResponse | ChatResponse):
            yield f"data: {OpenAICompletion.json_from_delta(text=response.delta)}\n\n"
        else:
            yield f"data: {OpenAICompletion.json_from_delta(text=response, sources=sources)}\n\n"
    yield f"data: {OpenAICompletion.json_from_delta(text=None, finish_reason='stop')}\n\n"
    yield "data: [DONE]\n\n"