OscarFAI committed
Commit 6f8934c · 1 Parent(s): 5268164
Files changed (2)
  1. app.py +138 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,138 @@
+ import gradio as gr
+ import os
+ import spaces
+ # Consolidated transformers import (the duplicate GemmaTokenizer import was unused)
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ from threading import Thread
+
+ # Read the Hugging Face access token from the environment, if one is set
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
+
+ DESCRIPTION = '''
+ <div>
+ <h1 style="text-align: center;">mistralai/Ministral-8B-Instruct-2410</h1>
+ </div>
+ '''
+
+ LICENSE = """
+ <p/>
+
+ ---
+ """
+
+ PLACEHOLDER = """
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+    <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Ministral-8B-Instruct-2410</h1>
+    <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
+ </div>
+ """
+
+
+ css = """
+ h1 {
+   text-align: center;
+   display: block;
+ }
+
+ #duplicate-button {
+   margin: auto;
+   color: white;
+   background: #1565c0;
+   border-radius: 100vh;
+ }
+ """
+
+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Ministral-8B-Instruct-2410")
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Ministral-8B-Instruct-2410", device_map="auto")
+ # Stop generation on the tokenizer's own EOS token. The Llama-3-specific
+ # "<|eot_id|>" token does not exist in the Ministral vocabulary, so looking
+ # it up would not return a valid stop id.
+ terminators = [tokenizer.eos_token_id]
+
+ @spaces.GPU(duration=120)
+ def chat_ministral_8b(message: str,
+                       history: list,
+                       temperature: float,
+                       max_new_tokens: int
+                       ):
+     """
+     Stream a response from the Ministral-8B-Instruct model.
+     Args:
+         message (str): The input message.
+         history (list): The conversation history used by ChatInterface.
+         temperature (float): The sampling temperature.
+         max_new_tokens (int): The maximum number of new tokens to generate.
+     Yields:
+         str: The response text generated so far.
+     """
+     conversation = []
+     for user, assistant in history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
+
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         temperature=temperature,
+         eos_token_id=terminators,
+     )
+     # Force greedy decoding (do_sample=False) when temperature is 0; sampling at temperature 0 would crash.
+     if temperature == 0:
+         generate_kwargs['do_sample'] = False
+
+     # Generate on a background thread so tokens can be yielded as they stream in
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+
+
+ # Gradio block
+ chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
+
+ with gr.Blocks(fill_height=True, css=css) as demo:
+
+     gr.Markdown(DESCRIPTION)
+     gr.ChatInterface(
+         fn=chat_ministral_8b,
+         chatbot=chatbot,
+         fill_height=True,
+         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+         additional_inputs=[
+             gr.Slider(minimum=0,
+                       maximum=1,
+                       step=0.1,
+                       value=0.8,
+                       label="Temperature",
+                       render=False),
+             gr.Slider(minimum=128,
+                       maximum=4096,
+                       step=1,
+                       value=4096,
+                       label="Max new tokens",
+                       render=False),
+         ],
+         examples=[
+             ['How to set up a human base on Mars? Give a short answer.'],
+             ['Explain the theory of relativity to me like I’m 8 years old.'],
+             ['What is 9,000 * 9,000?'],
+             ['Write a pun-filled happy birthday message to my friend Alex.'],
+             ['Justify why a penguin might make a good king of the jungle.']
+         ],
+         cache_examples=False,
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
+
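A minimal sketch of how the streaming handler above can be exercised outside the Space (not part of the commit; it assumes the file is importable as `app`, that the model fits in local memory, and that the `spaces` GPU decorator acts as a pass-through outside a ZeroGPU environment):

from app import chat_ministral_8b  # importing app.py loads the tokenizer and model

last = ""
for partial in chat_ministral_8b("Name three prime numbers.",  # hypothetical prompt
                                 history=[],                   # empty tuple-format history
                                 temperature=0.7,
                                 max_new_tokens=64):
    last = partial  # each yield is the full response text accumulated so far
print(last)

Because the generator yields the accumulated text rather than per-token deltas, the last yielded value is the complete reply, which is also what gr.ChatInterface renders as it streams.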
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ huggingface_hub
+ accelerate
+ transformers
+ SentencePiece
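For a run outside Spaces, this list is likely a subset: torch should arrive transitively via accelerate, while gradio and spaces are assumed to be provided by the Space runtime when deployed. A hedged local-run sketch:

huggingface_hub
accelerate        # pulls in torch as a dependency
transformers
SentencePiece
gradio            # assumed provided by the Spaces SDK when deployed; install for local runs
spaces            # ZeroGPU helper; its GPU decorator is assumed to no-op locally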