ajsbsd committed
Commit 6d6c49f · verified · 1 Parent(s): 32b74a3

Create app.py

Files changed (1):
  1. app.py  +196 -0
app.py ADDED
import gradio as gr
import torch
import os
import time

# --- Try to import ctransformers for GGUF, provide helpful message if not found ---
# We try to import ctransformers first as it's the preferred method for ZeroCPU efficiency
try:
    from ctransformers import AutoModelForCausalLM as AutoModelForCausalLM_GGUF
    # We still need AutoTokenizer from transformers for standard tokenizing
    from transformers import AutoTokenizer, AutoModelForCausalLM
    GGUF_AVAILABLE = True
except ImportError:
    GGUF_AVAILABLE = False
    print("WARNING: 'ctransformers' not found. This app relies on it for efficient CPU inference.")
    print("Please install it with: pip install ctransformers transformers")
    # If ctransformers isn't available, fall back to standard transformers loading, which is slower on CPU.
    from transformers import AutoTokenizer, AutoModelForCausalLM

# --- Configuration for Models and Generation ---
# Original model (for reference, or if a GPU is detected, though ZeroCPU is the target)
ORIGINAL_MODEL_ID = "HuggingFaceTB/SmolLM2-360M-Instruct"

# !!! IMPORTANT !!! For efficient ZeroCPU (CPU-only) inference, a GGUF-quantized model is HIGHLY RECOMMENDED.
# SmolLM2-360M-Instruct does NOT have a readily available GGUF version from common providers,
# so for ZeroCPU deployment this app uses a small, widely available GGUF model by default.
# If a GGUF build of SmolLM2 becomes available, update these two values:
GGUF_MODEL_ID = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"  # Recommended GGUF placeholder for ZeroCPU
GGUF_MODEL_FILENAME = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"  # Corresponding GGUF file name

# --- Generation Parameters ---
MAX_NEW_TOKENS = 256
TEMPERATURE = 0.7
TOP_K = 50
TOP_P = 0.95
DO_SAMPLE = True  # Sampling (rather than greedy decoding) gives varied responses

# Global model and tokenizer variables
model = None
tokenizer = None
model_is_gguf = False  # Tracks whether the loaded model is a ctransformers GGUF model
device = "cpu"  # Explicitly set to CPU for ZeroCPU deployment

# --- Model Loading Function ---
def load_model_for_zerocpu():
    global model, tokenizer, model_is_gguf, device

    # Attempt to load the GGUF model first for efficiency on ZeroCPU
    if GGUF_AVAILABLE:
        print(f"Attempting to load GGUF model '{GGUF_MODEL_ID}' (file: '{GGUF_MODEL_FILENAME}') for ZeroCPU...")
        try:
            model = AutoModelForCausalLM_GGUF.from_pretrained(
                GGUF_MODEL_ID,
                model_file=GGUF_MODEL_FILENAME,
                model_type="llama",  # TinyLlama, like most common GGUF chat models, is Llama-based
                gpu_layers=0         # Ensures it runs on CPU, not GPU
            )
            # Use the tokenizer from the original SmolLM2 model for chat template consistency
            tokenizer = AutoTokenizer.from_pretrained(ORIGINAL_MODEL_ID)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            model_is_gguf = True  # Generation must go through the ctransformers API
            print(f"GGUF model '{GGUF_MODEL_ID}' loaded successfully for CPU.")
            return  # Exit the function if the GGUF model loaded successfully
        except Exception as e:
            print(f"WARNING: Could not load GGUF model '{GGUF_MODEL_ID}' from '{GGUF_MODEL_FILENAME}': {e}")
            print(f"Falling back to standard Hugging Face model '{ORIGINAL_MODEL_ID}' for CPU (will be slower without GGUF quantization).")
            # Fall through to the standard Hugging Face loading path below
    else:
        print("WARNING: ctransformers is not available. Loading the standard Hugging Face model directly.")

    # Fallback: load the standard Hugging Face model (slower on CPU without GGUF quantization)
    print(f"Loading standard Hugging Face model '{ORIGINAL_MODEL_ID}' for CPU...")
    try:
        model = AutoModelForCausalLM.from_pretrained(ORIGINAL_MODEL_ID)
        tokenizer = AutoTokenizer.from_pretrained(ORIGINAL_MODEL_ID)
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        model.to(device)  # Explicitly move the model to CPU
        model_is_gguf = False
        print(f"Standard model '{ORIGINAL_MODEL_ID}' loaded successfully on CPU.")
    except Exception as e:
        print(f"CRITICAL ERROR: Could not load standard model '{ORIGINAL_MODEL_ID}' on CPU: {e}")
        print("Please ensure the model ID is correct, you have enough RAM, and dependencies are installed.")
        model = None      # Indicate failure to load
        tokenizer = None  # Indicate failure to load

# --- Inference Function for Gradio ChatInterface ---
def predict_chat(message: str, history: list):
    # 'history' is a list of lists, where each inner list is [user_message, bot_message]
    # 'message' is the current user input

    if model is None or tokenizer is None:
        yield "Error: Model or tokenizer failed to load. Please check the Space logs for details."
        return

    # Build the full conversation history for the model's chat template
    # (skip None entries, e.g. the seeded greeting that has no user turn)
    messages = [{"role": "system", "content": "You are a friendly chatbot."}]
    for human_msg, ai_msg in history:
        if human_msg is not None:
            messages.append({"role": "user", "content": human_msg})
        if ai_msg is not None:
            messages.append({"role": "assistant", "content": ai_msg})
    messages.append({"role": "user", "content": message})  # Add the current user message

    generated_text = ""

    start_time = time.time()  # Start timing for the current turn

    if model_is_gguf:  # ctransformers (GGUF) models use a different generation API
        # For ctransformers (GGUF), manually construct a simple prompt string
        prompt_input = ""
        for msg in messages:
            if msg["role"] == "system":
                prompt_input += f"{msg['content']}\n"
            elif msg["role"] == "user":
                prompt_input += f"User: {msg['content']}\n"
            elif msg["role"] == "assistant":
                prompt_input += f"Assistant: {msg['content']}\n"
        prompt_input += "Assistant:"  # Instruct the model to generate the assistant's response

        # Stream text from the GGUF model; sampling is controlled by temperature/top_k/top_p
        for token in model(
            prompt_input,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            top_k=TOP_K,
            top_p=TOP_P,
            repetition_penalty=1.1,  # Common for GGUF models
            stop=["User:", "\nUser", "\n#", "\n##", "<|endoftext|>"],  # Common stop sequences
            stream=True
        ):
            generated_text += token
            yield generated_text  # Yield the partial response for streaming in Gradio

    else:  # Standard Hugging Face transformers model was loaded (slower on CPU)
        # Apply the tokenizer's chat template
        input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)

        # Generate the response
        outputs = model.generate(
            inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            top_k=TOP_K,
            top_p=TOP_P,
            do_sample=DO_SAMPLE,
            pad_token_id=tokenizer.pad_token_id  # Important for generation
        )
        # Decode only the newly generated tokens
        generated_text = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).strip()
        yield generated_text  # Yield the full response at once (transformers' generate doesn't stream by default)

    end_time = time.time()
    print(f"Inference Time for this turn: {end_time - start_time:.2f} seconds")


# --- Gradio Interface Setup ---
if __name__ == "__main__":
    # Load the model globally when the Gradio app starts
    load_model_for_zerocpu()

    # Define a custom startup message for the chatbot
    initial_chatbot_message = (
        "Hello! I'm an AI assistant. I'm currently running in a CPU-only "
        "environment for efficient demonstration. How can I help you today?"
    )

    demo = gr.ChatInterface(
        fn=predict_chat,  # The function that handles chat prediction
        # The chat display area, pre-seeded with the assistant's startup message
        chatbot=gr.Chatbot(height=500, value=[[None, initial_chatbot_message]]),
        textbox=gr.Textbox(
            placeholder="Ask me a question...",
            container=False,
            scale=7
        ),
        title="SmolLM2-360M-Instruct (or TinyLlama GGUF) on ZeroCPU",
        description=(
            f"This Space demonstrates an LLM with efficient CPU-only inference. "
            f"**Note:** for ZeroCPU, this app prioritizes `{GGUF_MODEL_ID}` (a GGUF-quantized model "
            f"like TinyLlama), which performs much better on CPU than `{ORIGINAL_MODEL_ID}` without "
            f"GGUF quantization. Expect varied responses on each run because sampling is enabled."
        ),
        theme="soft",
        examples=[  # Pre-defined examples for quick testing
            ["What is the capital of France?"],
            ["Can you tell me a fun fact about outer space?"],
            ["What's the best way to stay motivated?"],
        ],
        cache_examples=False,  # Ensures examples run inference each time instead of being served from a cache
        clear_btn="Clear Chat"  # Button to clear the conversation
    )

    # Launch the Gradio app
    # `share=True` creates a public link (useful for local testing, but not needed on HF Spaces)
    # `server_name="0.0.0.0"` and `server_port=7860` are the typical defaults on HF Spaces
    demo.launch()