Spaces:
Sleeping
Sleeping
File size: 1,788 Bytes
224e4de e382003 224e4de 887083d 224e4de ce63277 224e4de 887083d ce63277 887083d 224e4de ce63277 224e4de 93d5af1 224e4de 887083d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
import os
from pathlib import Path
import requests
from repository import ModelRoles, Model, Repository
class OndemandRepository(Repository):
session_url = "https://api.on-demand.io/chat/v1/sessions"
def __init__(self, model_info: Model, system_message: str = None, log_to_file: Path = None):
self.model_info = model_info
self.system_message = system_message
self.log_to_file = log_to_file
self.session_id = None
def init(self):
self._start_new_session_if_needed()
def _start_new_session_if_needed(self):
if not self.session_id:
headers = {"apiKey": os.getenv("API_KEY")}
session_body = {"pluginIds": [], "externalUserId": "virtualDAM", "modelConfigs":
{"temperature": 0, "fulfillmentPrompt": self.system_message}
}
response = requests.post(self.session_url, headers=headers, json=session_body)
response_data = response.json()
self.session_id = response_data["data"]["id"]
def get_model_roles(self) -> ModelRoles:
return self.model_info.roles
def get_model_info(self) -> Model:
return self.model_info
def send_prompt(self, prompt: str, add_to_history: bool = None) -> dict[str, str]:
self._start_new_session_if_needed()
headers = {"apiKey": os.getenv("API_KEY")}
body = {'endpointId': 'predefined-openai-gpt4o', 'query': prompt, 'pluginIds': [], 'responseMode': 'sync'}
url = f'https://api.on-demand.io/chat/v1/sessions/{self.session_id}/query'
response = requests.post(url, headers=headers, json=body)
return {"content": response.json()["data"]["answer"]}
def get_message_history(self) -> list[dict[str, str]]:
return []
|