krishna-k committed on
Commit
dc337fb
·
verified ·
1 Parent(s): d01fb77

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -13
app.py CHANGED
@@ -1,19 +1,70 @@
1
- from transformers import pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import gradio as gr
3
 
 
 
 
 
4
 
5
- chatbot = pipeline("text-generation", model="unsloth/DeepSeek-R1-GGUF", trust_remote_code=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- def chat_with_bot(user_input):
8
- # Generate a response from the chatbot model
9
- response = chatbot(user_input)
10
- return response[0]['generated_text']
11
 
12
- interface = gr.Interface(
13
- fn=chat_with_bot, # Function to call for processing the input
14
- inputs=gr.Textbox(label="Enter your message"), # User input (text)
15
- outputs=gr.Textbox(label="Chatbot Response", lines=10), # Model output (text)
16
- title="Chat with DeepSeek", # Optional: Add a title to your interface
17
- description="Chat with an AI model powered by DeepSeek!" # Optional: Add a description
 
 
 
 
 
 
18
  )
19
- interface.launch()
 
 
 
1
+ # from transformers import pipeline
2
+ # import gradio as gr
3
+
4
+
5
+ # chatbot = pipeline("text-generation", model="unsloth/DeepSeek-R1-GGUF", trust_remote_code=True)
6
+
7
+ # def chat_with_bot(user_input):
8
+ # # Generate a response from the chatbot model
9
+ # response = chatbot(user_input)
10
+ # return response[0]['generated_text']
11
+
12
+ # interface = gr.Interface(
13
+ # fn=chat_with_bot, # Function to call for processing the input
14
+ # inputs=gr.Textbox(label="Enter your message"), # User input (text)
15
+ # outputs=gr.Textbox(label="Chatbot Response", lines=10), # Model output (text)
16
+ # title="Chat with DeepSeek", # Optional: Add a title to your interface
17
+ # description="Chat with an AI model powered by DeepSeek!" # Optional: Add a description
18
+ # )
19
+ # interface.launch()
20
+
21
+
22
+
23
# Load the model and tokenizer from Hugging Face.
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# NOTE(review): loading happens at import time — the first run needs network
# access to download the checkpoint, and every request reuses these instances.
model_name = "unsloth/Llama-3.2-3B-Instruct"  # Replace with your model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
30
 
31
# Function to generate text
def generate_text(input_text, max_length=100, temperature=0.7, top_p=0.9):
    """Generate a continuation of ``input_text`` with the loaded model.

    Args:
        input_text: Prompt string to continue.
        max_length: Total token budget (prompt + continuation tokens).
        temperature: Softmax temperature used while sampling.
        top_p: Nucleus-sampling probability cutoff.

    Returns:
        The decoded model output as a string, special tokens stripped.
        Note the prompt itself is included in the decoded sequence.
    """
    # Tokenize the input text into model-ready tensors.
    inputs = tokenizer(input_text, return_tensors="pt")

    # BUG FIX: without do_sample=True, generate() decodes greedily and the
    # temperature/top_p arguments are silently ignored, so the UI sliders
    # had no effect. Forwarding the attention mask and an explicit
    # pad_token_id also avoids generation warnings for models whose pad
    # token is unset.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated token ids back into text.
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
49
 
50
# Gradio Interface
def gradio_interface(input_text, max_length, temperature, top_p):
    """Adapter between the Gradio widgets and generate_text().

    Forwards the widget values unchanged and returns the generated string.
    """
    return generate_text(input_text, max_length, temperature, top_p)
54
 
55
# Create the Gradio app.
# Input widgets, declared in the positional order gradio_interface expects.
prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Input Prompt")
length_slider = gr.Slider(minimum=10, maximum=500, value=100, step=10, label="Max Length")
temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top-p (Nucleus Sampling)")

app = gr.Interface(
    fn=gradio_interface,  # Function to call
    inputs=[prompt_box, length_slider, temperature_slider, top_p_slider],
    outputs=gr.Textbox(lines=10, label="Generated Text"),
    title="Text Generation with Hugging Face Transformers",
    description="Generate text using a Hugging Face model.",
)

# Launch the app
app.launch()