ZennethKenneth committed (verified)
Commit 11954b4 · Parent(s): f93e307

update app.py

Files changed (1):
  1. app.py +24 -17
app.py CHANGED
@@ -19,25 +19,32 @@ For more information on `huggingface_hub` Inference API support, please check th
 # text_generator = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", use_auth_token=hf_token, trust_remote_code=True)
 
 def authenticate_and_generate(message, history, system_message, max_tokens, temperature, top_p):
-    # Initialize the text-generation pipeline with the provided token
-    text_generator = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", use_auth_token=hf_token, trust_remote_code=True)
-
-    # Ensure that system_message is a string
-    system_message = str(system_message)
-
-    # Construct the prompt with system message, history, and user input
-    history_str = "\n".join([f"User: {str(msg[0])}\nAssistant: {str(msg[1])}" for msg in history if isinstance(msg, (tuple, list)) and len(msg) == 2])
-    prompt = system_message + "\n" + history_str
-    prompt += f"\nUser: {message}\nAssistant:"
+    try:
+        # Initialize the text-generation pipeline with the provided token
+        text_generator = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct", use_auth_token=hf_token, trust_remote_code=True)
+
+        if text_generator.tokenizer is None:
+            raise RuntimeError("Failed to load the tokenizer. Ensure the model and API token are correct.")
+
+        # Ensure that system_message is a string
+        system_message = str(system_message)
+
+        # Construct the prompt with system message, history, and user input
+        history_str = "\n".join([f"User: {str(msg[0])}\nAssistant: {str(msg[1])}" for msg in history if isinstance(msg, (tuple, list)) and len(msg) == 2])
+        prompt = system_message + "\n" + history_str
+        prompt += f"\nUser: {message}\nAssistant:"
 
-    # Generate a response using the model
-    response = text_generator(prompt, max_length=max_tokens, temperature=temperature, top_p=top_p, do_sample=True, truncation=True)
+        # Generate a response using the model
+        response = text_generator(prompt, max_length=max_tokens, temperature=temperature, top_p=top_p, do_sample=True, truncation=True)
 
-    # Extract the generated text from the response list
-    assistant_response = response[0]['generated_text']
-    # Optionally trim the assistant response if it includes the prompt again
-    assistant_response = assistant_response.split("Assistant:", 1)[-1].strip()
-    return assistant_response
+        # Extract the generated text from the response list
+        assistant_response = response[0]['generated_text']
+        # Optionally trim the assistant response if it includes the prompt again
+        assistant_response = assistant_response.split("Assistant:", 1)[-1].strip()
+        return assistant_response
+
+    except Exception as e:
+        return str(e)  # Return the error message for debugging
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
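
The updated function's signature (message, history, system_message, max_tokens, temperature, top_p) matches the callback shape gr.ChatInterface expects when given four additional inputs. The rest of app.py is not shown in this diff, so the following is only a minimal sketch of how the function might be wired up per the ChatInterface docs linked above; the widget choices, slider ranges, and default system message are illustrative assumptions, not part of this commit:

# Hypothetical wiring for the authenticate_and_generate function defined above.
# All values below are illustrative assumptions, not taken from this commit.
import gradio as gr

demo = gr.ChatInterface(
    authenticate_and_generate,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Two caveats against the committed code itself: passing max_length=max_tokens to the pipeline caps the prompt plus the generated text, so max_new_tokens is usually the parameter intended for a "tokens to generate" slider; and recent transformers releases deprecate use_auth_token in favor of token.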