Walid-Ahmed committed on
Commit
05cc7ca
·
verified ·
1 Parent(s): e7730f7

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -0
app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+
4
+
5
import functools

import gradio as gr
import spaces  # fix: was "import space" — the decorator below is referenced as spaces.GPU
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
9
+
10
+ def load_model(model_name):
11
+ device = "cuda" if torch.cuda.is_available() else "cpu"
12
+ model = AutoModelForCausalLM.from_pretrained(
13
+ model_name,
14
+ device_map=device,
15
+ torch_dtype="auto",
16
+ trust_remote_code=True,
17
+ )
18
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
19
+ generator = pipeline(
20
+ "text-generation",
21
+ model=model,
22
+ tokenizer=tokenizer,
23
+ return_full_text=False,
24
+ max_new_tokens=500,
25
+ do_sample=False
26
+ )
27
+ return generator
28
+
29
+ @spaces.GPU
30
+ def generate_text(prompt, model_name):
31
+ generator = load_model(model_name)
32
+ messages = [{"role": "user", "content": prompt}]
33
+ output = generator(messages)
34
+ return output[0]["generated_text"]
35
+
36
+ # Create Gradio interface
37
+ demo = gr.Interface(
38
+ fn=generate_text,
39
+ inputs=[
40
+ gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
41
+ gr.Dropdown(
42
+ choices=["Qwen/Qwen2.5-1.5B-Instruct","microsoft/Phi-3-mini-4k-instruct", "ALLaM-AI/ALLaM-7B-Instruct-preview"],
43
+ label="Choose Model",
44
+ value="ALLaM-AI/ALLaM-7B-Instruct-preview"
45
+ )
46
+ ],
47
+ outputs=gr.Textbox(label="Generated Text"),
48
+ title="Text Generator",
49
+ description="Enter a prompt and generate text using one of the available models.",
50
+ examples=[
51
+ ["Tell me a funny joke about chickens.", "microsoft/Phi-3-mini-4k-instruct"],
52
+ ["أخبرني نكتة مضحكة عن الدجاج.", "ALLaM-AI/ALLaM-7B-Instruct-preview"]
53
+ ]
54
+ )
55
+
56
+ demo.launch()