# Week 3 Assignment - Integrate Traditional Chatbot with AI Service Project (Transformers) - Praveen Kumar Parimi

# Importing the required libraries, including transformers
import gradio as gr
from huggingface_hub import InferenceClient  # optional: call a hosted model instead of a local one
import torch

# Use a pipeline as a high-level helper
from transformers import pipeline

model_name = "bmas10/DeepSeek-Llama-8-GGUF"

# Load the model once through a text-generation pipeline. Passing the model name
# (rather than a bare AutoModel instance, which has no language-modeling head and
# no paired tokenizer) lets the pipeline assemble everything generation needs.
# (GGUF repositories may additionally require a gguf_file argument naming the weight file.)
chat_model = pipeline("text-generation", model=model_name)

# Quick smoke test: ask one question and print the raw pipeline output
messages = [
    {"role": "user", "content": "Who are you?"},
]
print(chat_model(messages))
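# A minimal sketch (an assumption, not part of the assignment) of how the torch
# import above can be put to use: pick a half-precision dtype and let the
# pipeline place the weights automatically, if a GPU is available.
# chat_model = pipeline(
#     "text-generation",
#     model=model_name,
#     torch_dtype=torch.bfloat16,  # assumes the hardware supports bfloat16
#     device_map="auto",           # requires the accelerate package
# )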
 

# Lower-level alternative kept for reference: tokenize the running history and
# call model.generate directly. To use it, load a tokenizer and a causal-LM
# model first, and note the mutable default argument, which accumulates
# history across calls.
#def chat(input_text, history=[]):
#    history.append(input_text)
#    prompt = "\n".join(history) + "\nAI:"  # Simple conversational format
#    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#    output = model.generate(**inputs, max_new_tokens=512, pad_token_id=tokenizer.eos_token_id)
#    response = tokenizer.decode(output[:, inputs.input_ids.shape[-1]:][0], skip_special_tokens=True)
#    history.append(f"AI: {response}")
#    return response, history
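# If you want to try the manual path above, something like this (a sketch,
# assuming the checkpoint is loadable as a causal LM) would provide the
# tokenizer and model objects it expects:
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForCausalLM.from_pretrained(model_name)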

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
print("starting Praveen's smarter chatbot...")
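# A minimal sketch of the InferenceClient imported above, as an alternative to
# running the model locally (assumes the chosen model is served by the hosted
# Inference API):
# client = InferenceClient("microsoft/Phi-3.5-mini-instruct")
# completion = client.chat_completion(
#     messages=[{"role": "user", "content": "Who are you?"}],
#     max_tokens=128,
# )
# print(completion.choices[0].message.content)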

"""
The transformer model used here is bmas10/DeepSeek-Llama-8-GGUF; uncomment the
lines below to swap in Microsoft's Phi-3.5-mini-instruct instead.
"""

#model_name = "microsoft/Phi-3.5-mini-instruct"
#chat_model = pipeline("text-generation", model=model_name)

print("defining the chat_response function")

def chat_response(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens    
):

    print("Inside chat_response progressing...") 
    
    messages = [{"role": "system", "content": system_message}]

    print ("System Messages", messages)
    
    messages.append({"role": "user", "content": message})
    
    print ("Messages after adding user messages", messages)
    
    response = chat_model(messages)  #Passing system and user messages to the transformer model Phi-3.5-mini-instruct to get smarter responses
      
    print("Response received from model",response)
    
    return response[-1]['generated_text'][-1]['content']
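# Example call outside Gradio (values are illustrative):
# reply = chat_response("Who are you?", [], "You are a friendly Chatbot.", 256)
# print(reply)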
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""

# Each additional input is passed to chat_response after (message, history),
# in the order listed here
demo = gr.ChatInterface(
    chat_response,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    ],
)


if __name__ == "__main__":
    demo.launch()
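    # Assumption (not in the original): share=True creates a temporary public
    # URL, useful when testing from another machine.
    # demo.launch(share=True)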