import torch
import torch.nn as nn
import torch.quantization  # <--- use the older namespace for the default qconfig helpers
from transformers import AutoTokenizer
from model import TransformerModel
import gradio as gr

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")


def load_quantized_model(checkpoint_path):
    model = TransformerModel(
        vocab_size=49152,
        hidden_size=576,
        num_hidden_layers=30,
        num_attention_heads=9,
        intermediate_size=1536,
        num_key_value_heads=3,
        max_position_embeddings=2048,
        rms_norm_eps=1e-5,
        hidden_act="silu",
        tie_word_embeddings=True,
    )
    # Dynamic (weight-only) quantization for the embedding tables.
    # nn.Embedding only supports the float_qparams weight-only qconfig, so pass
    # an explicit qconfig_spec rather than the bare {nn.Embedding} set.
    embedding_qconfig = {nn.Embedding: torch.quantization.float_qparams_weight_only_qconfig}
    model.embed_tokens = torch.quantization.quantize_dynamic(
        model.embed_tokens, embedding_qconfig, dtype=torch.qint8
    )
    model.embed_positions = torch.quantization.quantize_dynamic(
        model.embed_positions, embedding_qconfig, dtype=torch.qint8
    )

    # Static quantization config for the rest of the model ("fbgemm" targets x86 CPUs)
    model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
    model = torch.quantization.prepare(model, inplace=False)

    # Calibration: run forward passes over representative sample data so the
    # observers inserted by prepare() can record activation ranges before
    # conversion. A minimal illustrative pass is sketched below.
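    # Illustrative calibration sketch: the hard-coded prompts are placeholders,
    # not a real calibration set; replace them with data representative of the
    # prompts the model will actually see.
    calibration_prompts = ["Once upon a time", "The weather today is"]
    with torch.no_grad():
        for text in calibration_prompts:
            model(tokenizer.encode(text, return_tensors="pt"))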

    model = torch.quantization.convert(model, inplace=False)

    # Load the quantized checkpoint. The state_dict must come from a model that
    # went through the same quantization steps, so its keys match the converted modules.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint)
    model.eval()
    return model


# Load the quantized model
model = load_quantized_model("quantized_model.pt")


# Function to generate text
def generate_text(prompt, max_length=50, temperature=1.0, top_k=50):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_length=max_length,
            temperature=temperature,
            top_k=top_k,
            do_sample=True,
        )
    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return generated_text
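

# Optional sanity check (illustrative): uncomment to try one generation in the
# console before launching the UI.
# print(generate_text("Once upon a time", max_length=30))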


# Gradio Interface
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
        gr.Slider(minimum=10, maximum=200, value=50, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=2.0, value=1.0, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, value=50, label="Top-k Sampling"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generation with Quantized SMOL-LM2",
    description="Generate text using a quantized version of the SMOL-LM2 model.",
)

# Launch the app
interface.launch()