# Week 3 Assignment - Integrate a Traditional Chatbot with an AI Service (Transformers) - Praveen Kumar Parimi
# Importing the required libraries, including transformers
import gradio as gr
from transformers import AutoProcessor, AutoModelForImageTextToText, pipeline
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
model = AutoModelForImageTextToText.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
tokenizer = processor.tokenizer  # the processor bundles the text tokenizer used below
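# Note: with no extra arguments, this 3B-parameter checkpoint loads in full precision
# on the default device, which can be slow on CPU. On GPU hardware one could pass
# standard from_pretrained kwargs such as device_map="auto" (requires the accelerate
# package) and torch_dtype="auto"; left as a comment here since hardware varies.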
def chat(input_text, history=None):
    # Minimal manual generation loop kept from the traditional-chatbot version.
    history = history or []  # avoid a mutable default argument
    history.append(input_text)
    prompt = "\n".join(history) + "\nAI:"  # simple conversational format
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=512, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(output[:, inputs.input_ids.shape[-1]:][0], skip_special_tokens=True)
    history.append(f"AI: {response}")
    return response, history
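# Quick smoke test for the helper above (commented out so running this file only
# builds the UI; uncomment to try a single turn locally):
# reply, hist = chat("Hello, who are you?")
# print(reply)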
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
print("starting Praveen's smarter chatbot...")
"""
The transformer model used here is Microsoft-trained Phi-3.5-mini-instruct
"""
#model_name = "microsoft/Phi-3.5-mini-instruct"
chat_model = pipeline("text-generation", model=model_name)
print("defining the chat_response function")
def chat_response(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens
):
    print("Inside chat_response progressing...")
    messages = [{"role": "system", "content": system_message}]
    print("System messages:", messages)
    # Replay the prior turns so the model sees the whole conversation,
    # not just the latest message.
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    print("Messages after adding the user message:", messages)
    # Passing the accumulated messages to Phi-3.5-mini-instruct to get smarter responses
    response = chat_model(messages, max_new_tokens=max_tokens)
    print("Response received from model:", response)
    return response[0]['generated_text'][-1]['content']
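# For chat-style (list-of-messages) input, the text-generation pipeline returns a
# list with one dict per input; its 'generated_text' field holds the full message
# list, so the last entry is the assistant's reply, hence the
# [0]['generated_text'][-1]['content'] indexing above.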
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    chat_response,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
    ],
)
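# gr.ChatInterface passes the additional_inputs positionally after (message, history),
# so the Textbox maps to system_message and the Slider to max_tokens in chat_response.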
if __name__ == "__main__":
    demo.launch()