DeMaking committed on
Commit 3578fad (verified)
1 Parent(s): 7487c49

Update app.py

Files changed (1)
  1. app.py +86 -25
app.py CHANGED
@@ -1,68 +1,126 @@
- import subprocess
  import os
  import logging
  import time
  from fastapi import FastAPI, Request
- from transformers import pipeline
  from huggingface_hub import InferenceClient, login
  import langid
- import asyncio

- # Environment variables
- HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

  if not HF_HUB_TOKEN:
      raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

  login(token=HF_HUB_TOKEN)
  client = InferenceClient(api_key=HF_HUB_TOKEN)

  app = FastAPI()


- # Function to detect language
- def detect_language(user_input):
      try:
          lang, _ = langid.classify(user_input)
-         return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
      except Exception as e:
-         logging.error(f"Language detection error: {e}")
          return "unsupported"


- # Function to generate response
- def generate_response(text):
-     language = detect_language(text)

      if language == "hebrew":
-         content = "תענה בקצרה אבל תשתף את תהליך קבלת ההחלטות שלך, " + text
          model = "microsoft/Phi-3.5-mini-instruct"
      elif language == "english":
-         content = "keep it short but tell your decision making process, " + text
          model = "mistralai/Mistral-Nemo-Instruct-2407"
      else:
          return "Sorry, I only support Hebrew and English."
-
-     messages = [{"role": "user", "content": content}]

-     completion = client.chat.completions.create(
-         model=model,
-         messages=messages,
-         max_tokens=2048,
-         temperature=0.5,
-         top_p=0.7
-     )
-     return completion.choices[0].message.content


  @app.post("/generate_response")
  async def generate_text(request: Request):
      try:
          data = await request.json()
          text = data.get("text", "").strip()
          if not text:
              return {"error": "No text provided"}
-
          response = generate_response(text)
          return {"response": response}
      except Exception as e:
@@ -72,6 +130,9 @@ async def generate_text(request: Request):

  @app.get("/")
  async def root():
      return {"message": "Decision Helper API is running!"}

  import os
  import logging
  import time
  from fastapi import FastAPI, Request
+ # from transformers import pipeline
  from huggingface_hub import InferenceClient, login
  import langid
+ # import asyncio


+ # Configure logging
+ logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ # Get Hugging Face API token from environment variable
+ HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
  if not HF_HUB_TOKEN:
      raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

+
+ # Login and initialize the client
  login(token=HF_HUB_TOKEN)
  client = InferenceClient(api_key=HF_HUB_TOKEN)

+
+ # Create FastAPI app
  app = FastAPI()


+ # # Function to detect language
+ # def detect_language(user_input):
+ #     try:
+ #         lang, _ = langid.classify(user_input)
+ #         return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
+ #     except Exception as e:
+ #         logging.error(f"Language detection error: {e}")
+ #         return "unsupported"
+
+ def detect_language(user_input: str) -> str:
+     """
+     Detects the language of the input text.
+     Returns "hebrew" for Hebrew, "english" for English, and "unsupported" otherwise.
+     """
      try:
          lang, _ = langid.classify(user_input)
+         if lang == "he":
+             return "hebrew"
+         elif lang == "en":
+             return "english"
+         else:
+             return "unsupported"
      except Exception as e:
+         logger.error(f"Language detection error: {e}")
          return "unsupported"


+ # # Function to generate response
+ # def generate_response(text):
+ #     language = detect_language(text)

+ #     if language == "hebrew":
+ #         content = "תענה בקצרה אבל תשתף את תהליך קבלת ההחלטות שלך, " + text
+ #         model = "microsoft/Phi-3.5-mini-instruct"
+ #     elif language == "english":
+ #         content = "keep it short but tell your decision making process, " + text
+ #         model = "mistralai/Mistral-Nemo-Instruct-2407"
+ #     else:
+ #         return "Sorry, I only support Hebrew and English."
+
+ #     messages = [{"role": "user", "content": content}]
+
+ #     completion = client.chat.completions.create(
+ #         model=model,
+ #         messages=messages,
+ #         max_tokens=2048,
+ #         temperature=0.5,
+ #         top_p=0.7
+ #     )
+ #     return completion.choices[0].message.content
+
+
+ def generate_response(text: str) -> str:
+     """
+     Generates a response by selecting a prompt and model based on the language.
+     Uses the Hugging Face Inference API to get a chat completion.
+     """
+     language = detect_language(text)
      if language == "hebrew":
+         prompt = "תענה בקצרה אבל תשתף את תהליך קבלת ההחלטות שלך, " + text
          model = "microsoft/Phi-3.5-mini-instruct"
      elif language == "english":
+         prompt = "keep it short but tell your decision making process, " + text
          model = "mistralai/Mistral-Nemo-Instruct-2407"
      else:
          return "Sorry, I only support Hebrew and English."

+     messages = [{"role": "user", "content": prompt}]
+     try:
+         completion = client.chat.completions.create(
+             model=model,
+             messages=messages,
+             max_tokens=2048,
+             temperature=0.5,
+             top_p=0.7
+         )
+         return completion.choices[0].message.content
+     except Exception as e:
+         logger.error(f"Error generating response: {e}")
+         return "Error: Could not generate response."


  @app.post("/generate_response")
  async def generate_text(request: Request):
+     """
+     Endpoint to generate a response from the chat model.
+     Expects a JSON with a "text" field.
+     """
      try:
          data = await request.json()
          text = data.get("text", "").strip()
          if not text:
              return {"error": "No text provided"}
          response = generate_response(text)
          return {"response": response}
      except Exception as e:

  @app.get("/")
  async def root():
+     """
+     Root endpoint to check that the API is running.
+     """
      return {"message": "Decision Helper API is running!"}
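For reference, the Hebrew prompt used above translates roughly to "Answer briefly, but share your decision-making process." Below is a minimal client sketch for exercising the two endpoints defined in this commit; the base URL, the port, and the use of uvicorn and the requests package are assumptions for local testing, not part of the change itself.

# Hypothetical local smoke test for the Decision Helper API above (not part of the commit).
# Assumes the app is served locally, e.g. with `uvicorn app:app --port 8000`,
# and that the requests package is installed; adjust BASE_URL to the real deployment.
import requests

BASE_URL = "http://localhost:8000"  # assumed local address

# Health check against the root endpoint
print(requests.get(f"{BASE_URL}/").json())
# expected: {"message": "Decision Helper API is running!"}

# English input is routed by generate_response to mistralai/Mistral-Nemo-Instruct-2407
payload = {"text": "Should I take the train or drive to work tomorrow?"}
resp = requests.post(f"{BASE_URL}/generate_response", json=payload)
print(resp.json())  # {"response": "..."} on success, {"error": "..."} otherwise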