import gradio as gr
import torch
import os
import time

from transformers import AutoTokenizer, AutoModelForCausalLM

# --- Try to import ctransformers for GGUF; print a helpful message if it is not found ---
try:
    from ctransformers import AutoModelForCausalLM as AutoModelForCausalLM_GGUF
    GGUF_AVAILABLE = True
except ImportError:
    GGUF_AVAILABLE = False
    print("WARNING: 'ctransformers' not found. This app relies on it for efficient CPU inference.")
    print("Please install it with: pip install ctransformers transformers")
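
# NOTE (assumption): when deployed as a Hugging Face Space, requirements.txt
# would also need to list roughly: gradio, torch, transformers, ctransformers.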

# --- Configuration for Models and Generation ---
ORIGINAL_MODEL_ID = "HuggingFaceTB/SmolLM2-360M-Instruct"
GGUF_MODEL_ID = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
GGUF_MODEL_FILENAME = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
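# Q4_K_M is a 4-bit GGUF quantization that keeps file size and RAM use low
# while retaining reasonable output quality on CPU.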

# --- Generation Parameters ---
MAX_NEW_TOKENS = 256
TEMPERATURE = 0.7
TOP_K = 50
TOP_P = 0.95
DO_SAMPLE = True
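# TEMPERATURE, TOP_K, and TOP_P control sampling randomness; DO_SAMPLE=True makes
# the transformers path sample instead of decoding greedily, so answers vary between runs.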

# Global model, tokenizer, and backend flag
model = None
tokenizer = None
model_is_gguf = False  # True when the GGUF/ctransformers backend is in use
device = "cpu"

# --- Model Loading Function ---
def load_model_for_zerocpu():
    global model, tokenizer, model_is_gguf, device
    if GGUF_AVAILABLE:
        print(f"Attempting to load GGUF model '{GGUF_MODEL_ID}' (file: '{GGUF_MODEL_FILENAME}') for ZeroCPU...")
        try:
            model = AutoModelForCausalLM_GGUF.from_pretrained(
                GGUF_MODEL_ID,
                model_file=GGUF_MODEL_FILENAME,
                model_type="llama",
                gpu_layers=0
            )
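            # ctransformers tokenizes internally; the Hugging Face tokenizer below is
            # loaded so the rest of the app still has a tokenizer object available.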
            tokenizer = AutoTokenizer.from_pretrained(ORIGINAL_MODEL_ID)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            model_is_gguf = True
            print(f"GGUF model '{GGUF_MODEL_ID}' loaded successfully for CPU.")
            return
        except Exception as e:
            print(f"WARNING: Could not load GGUF model '{GGUF_MODEL_ID}' from '{GGUF_MODEL_FILENAME}': {e}")
            print(f"Falling back to standard Hugging Face model '{ORIGINAL_MODEL_ID}' for CPU (slower without GGUF quantization).")
    else:
        print("WARNING: ctransformers is not available. Loading the standard Hugging Face model directly.")
| print(f"Loading standard Hugging Face model '{ORIGINAL_MODEL_ID}' for CPU...") | |
| try: | |
| model = AutoModelForCausalLM.from_pretrained(ORIGINAL_MODEL_ID) | |
| tokenizer = AutoTokenizer.from_pretrained(ORIGINAL_MODEL_ID) | |
| if tokenizer.pad_token is None: | |
| tokenizer.pad_token = tokenizer.eos_token | |
| model.to(device) | |
| print(f"Standard model '{ORIGINAL_MODEL_ID}' loaded successfully on CPU.") | |
| except Exception as e: | |
| print(f"CRITICAL ERROR: Could not load standard model '{ORIGINAL_MODEL_ID}' on CPU: {e}") | |
| print("Please ensure the model ID is correct, you have enough RAM, and dependencies are installed.") | |
| model = None | |
| tokenizer = None | |

# --- Inference Function for Gradio ChatInterface ---
def predict_chat(message: str, history: list):
    if model is None or tokenizer is None:
        yield "Error: Model or tokenizer failed to load. Please check the Space logs for details."
        return

    # With type='messages' on the chatbot, history already arrives as a list of
    # {"role": ..., "content": ...} dictionaries.
    messages = [{"role": "system", "content": "You are a friendly chatbot."}] + history
    messages.append({"role": "user", "content": message})

    generated_text = ""
    start_time = time.time()

    if model_is_gguf:
        prompt_input = ""
        for msg in messages:
            if msg["role"] == "system":
                prompt_input += f"{msg['content']}\n"
            elif msg["role"] == "user":
                prompt_input += f"User: {msg['content']}\n"
            elif msg["role"] == "assistant":
                prompt_input += f"Assistant: {msg['content']}\n"
        prompt_input += "Assistant:"
        for token in model(
            prompt_input,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            top_k=TOP_K,
            top_p=TOP_P,
            repetition_penalty=1.1,
            stop=["User:", "\nUser", "\n#", "\n##", "<|endoftext|>"],
            stream=True
        ):
            generated_text += token
            yield generated_text
    else:
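        # Standard transformers path: format the conversation with the model's chat
        # template, then generate the whole reply in one pass (no token streaming).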
        input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
        outputs = model.generate(
            inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            top_k=TOP_K,
            top_p=TOP_P,
            do_sample=DO_SAMPLE,
            pad_token_id=tokenizer.pad_token_id
        )
        generated_text = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).strip()
        yield generated_text

    end_time = time.time()
    print(f"Inference Time for this turn: {end_time - start_time:.2f} seconds")

# --- Gradio Interface Setup ---
if __name__ == "__main__":
    load_model_for_zerocpu()

    # Initial message for the chatbot in the 'messages' format
    initial_messages = [{"role": "assistant", "content": (
        "Hello! I'm an AI assistant. I'm currently running in a CPU-only "
        "environment for efficient demonstration. How can I help you today?"
    )}]
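
    # ChatInterface streams whatever predict_chat yields; with a messages-type
    # chatbot it passes the conversation history to the function as role/content dictionaries.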
    demo = gr.ChatInterface(
        fn=predict_chat,
        # Define the chatbot here, with type='messages' and its initial value in that format
        chatbot=gr.Chatbot(height=500, type='messages', value=initial_messages),
        textbox=gr.Textbox(
            placeholder="Ask me a question...",
            container=False,
            scale=7
        ),
        title="SmolLM2-360M-Instruct (or TinyLlama GGUF) on ZeroCPU",
        description=(
            f"This Space demonstrates an LLM for efficient CPU-only inference. "
            f"**Note:** For ZeroCPU, this app prioritizes `{GGUF_MODEL_ID}` (a GGUF-quantized model "
            f"like TinyLlama) because it runs much faster on CPU than `{ORIGINAL_MODEL_ID}` "
            f"without GGUF quantization. Expect responses to vary between runs because sampling is enabled."
        ),
        theme="soft",
        examples=[
            ["What is the capital of France?"],
            ["Can you tell me a fun fact about outer space?"],
            ["What's the best way to stay motivated?"],
        ],
        cache_examples=False,
        clear_btn="Clear Chat"
    )

    demo.launch()