# inference.py

"""Helpers for blocking and streaming chat completions via hf_client."""

from typing import Dict, Generator, List, Optional
from hf_client import get_inference_client

def chat_completion(
    model_id: str,
    messages: List[Dict[str, str]],
    provider: Optional[str] = None,
    max_tokens: int = 4096
) -> str:
    """
    Send a chat completion request to the appropriate inference provider.

    Args:
        model_id: The model identifier to use.
        messages: A list of OpenAI-style {'role': '...', 'content': '...'} messages.
        provider: Optional override for provider; uses model default if None.
        max_tokens: Maximum tokens to generate.

    Returns:
        The assistant's response content.
    """
    # "auto" lets hf_client fall back to the model's default provider.
    client = get_inference_client(model_id, provider or "auto")
    response = client.chat.completions.create(
        model=model_id,
        messages=messages,
        max_tokens=max_tokens
    )
    return response.choices[0].message.content
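
# Usage sketch for chat_completion (hedged: the model id below is a placeholder,
# and hf_client must already be configured with valid credentials):
#
#     reply = chat_completion(
#         "meta-llama/Llama-3.1-8B-Instruct",
#         [{"role": "user", "content": "Say hello in one sentence."}],
#     )
#     print(reply)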


def stream_chat_completion(
    model_id: str,
    messages: List[Dict[str, str]],
    provider: Optional[str] = None,
    max_tokens: int = 4096
) -> Generator[str, None, None]:
    """
    Generator for streaming chat completions.
    Yields partial message chunks as strings.
    """
    client = get_inference_client(model_id, provider or "auto")
    stream = client.chat.completions.create(
        model=model_id,
        messages=messages,
        max_tokens=max_tokens,
        stream=True
    )
    for chunk in stream:
        # Some providers send keep-alive or final chunks with an empty
        # choices list; skip those before reading the delta.
        if not chunk.choices:
            continue
        delta = getattr(chunk.choices[0].delta, "content", None)
        if delta:
            yield delta
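

# Minimal streaming demo: a sketch that assumes hf_client is configured with
# valid credentials. The model id below is a placeholder, not one pinned by
# this module.
if __name__ == "__main__":
    demo_messages = [{"role": "user", "content": "Say hello in one sentence."}]
    # Print partial chunks as they arrive instead of waiting for the full reply.
    for piece in stream_chat_completion(
        "meta-llama/Llama-3.1-8B-Instruct", demo_messages
    ):
        print(piece, end="", flush=True)
    print()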