# Hugging Face Spaces status banner ("Spaces: Sleeping") captured by the page
# scrape — preserved as a comment so the module parses.
# Flask + Twilio webhook setup for an SMS chatbot ("Sema AI") backed by a
# Hugging Face hosted Mistral model.
from flask import Flask, request, jsonify
from twilio.twiml.messaging_response import MessagingResponse
from huggingface_hub import InferenceClient

app = Flask(__name__)

print("\nHello welcome to Sema AI\n", flush=True)

# Shared inference client for the hosted instruct model.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

# Per-sender chat history: phone number -> list of (user_message, bot_reply).
# NOTE(review): in-memory only — lost on restart and unbounded; confirm this
# is acceptable for the deployment.
conversation_history = {}
def format_prompt(message, history):
    """Build a Mistral-instruct prompt from prior turns plus the new message.

    Args:
        message: The new user message to append as the final [INST] block.
        history: Iterable of (user_prompt, bot_response) pairs; each prior
            turn is wrapped as ``[INST] ... [/INST] reply</s>``.

    Returns:
        The full prompt string, opened with the ``<s>`` BOS marker.
    """
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
def generate(prompt, history, temperature=0.9, max_new_tokens=100, top_p=0.95, repetition_penalty=1.0):
    """Generate a model reply for `prompt`, conditioned on chat `history`.

    Args:
        prompt: The new user message.
        history: List of (user_prompt, bot_response) pairs for context.
        temperature, max_new_tokens, top_p, repetition_penalty: Sampling
            parameters forwarded to the inference endpoint.

    Returns:
        The concatenated generated text (empty string if no tokens arrive).

    Note: ``seed`` is pinned to 42, so sampling is reproducible — the same
    prompt/history always yields the same reply.
    """
    formatted_prompt = format_prompt(prompt, history)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    response = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    # With stream=True + details=True the client yields token objects; guard
    # with hasattr so non-token stream items (e.g. final details) are skipped.
    output = ""
    for token in response:
        if hasattr(token, 'token') and hasattr(token.token, 'text'):
            output += token.token.text
    return output
@app.route("/sms", methods=["POST"])
def sms():
    """Twilio SMS webhook: reply to an inbound text with a model response.

    Reads ``From``/``Body`` from the POSTed form, generates a reply using the
    sender's stored conversation history, records the turn, and returns TwiML.

    Fix: the original file defined this view but never registered a route, so
    Twilio could never reach it — the ``@app.route`` decorator restores that.
    """
    # Create the TwiML response before the try block so the except branch can
    # always attach an error message to it (previously a failure before this
    # line would raise NameError inside except).
    resp = MessagingResponse()
    try:
        sender_number = request.form['From']
        inb_msg = request.form['Body'].strip()
        print(inb_msg, flush=True)
        history = conversation_history.get(sender_number, [])
        output = generate(inb_msg, history)
        history.append((inb_msg, output))
        conversation_history[sender_number] = history
        resp.message(output)
    except Exception as e:
        # Broad catch is deliberate at this webhook boundary: always return
        # valid TwiML to Twilio instead of a 500.
        print(f"Error processing SMS: {str(e)}")
        resp.message("Sorry, an error occurred. Please try again later.")
    return str(resp)
if __name__ == "__main__":
    # debug=True enables the Werkzeug reloader and interactive debugger —
    # development only. NOTE(review): disable before exposing this publicly.
    app.run(debug=True, port=5000)