import torch

torch.backends.quantized.engine = 'fbgemm'  # ensure we use fbgemm
print("PyTorch version:", torch.__version__)
print("Supported quantized engines:", torch.backends.quantized.supported_engines)
import torch.nn as nn
import torch.quantization  # <--- Use the older namespace for default_qconfig
from transformers import AutoTokenizer
from model import TransformerModel
import gradio as gr
from torch.ao.quantization.qconfig import float_qparams_weight_only_qconfig

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")


def load_quantized_model(checkpoint_path):
    model = TransformerModel(
        vocab_size=49152,
        hidden_size=576,
        num_hidden_layers=30,
        num_attention_heads=9,
        intermediate_size=1536,
        num_key_value_heads=3,
        max_position_embeddings=2048,
        rms_norm_eps=1e-5,
        hidden_act="silu",
        tie_word_embeddings=True,
    )
    # This qconfig is typically for your other layers
    default_qconfig = torch.quantization.get_default_qconfig("fbgemm")
    model.qconfig = default_qconfig

    # For embeddings, force the specialized weight-only config (embedding
    # layers take integer indices, not quantized activations, so the
    # default qconfig would fail on them):
    model.embed_tokens.qconfig = float_qparams_weight_only_qconfig
    model.embed_positions.qconfig = float_qparams_weight_only_qconfig
    # Put the model in eval mode, then prepare, calibrate, and convert
    model.eval()
    model = torch.quantization.prepare(model, inplace=False)

    # Calibration pass here...
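    # A minimal calibration sketch (assumption: a few representative prompts
    # are enough to populate the activation observers; these prompts and the
    # bare `model(ids)` forward call are illustrative, not from the original).
    calibration_prompts = ["Hello world.", "Once upon a time, there was a"]
    with torch.no_grad():
        for prompt in calibration_prompts:
            ids = tokenizer.encode(prompt, return_tensors="pt")
            model(ids)  # forward pass lets the observers record activation ranges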
    model = torch.quantization.convert(model, inplace=False)

    # Restore the quantized weights (assumes checkpoint_path holds the
    # state_dict of the converted model)
    model.load_state_dict(torch.load(checkpoint_path, map_location="cpu"))
    return model


# Load the quantized model
model = load_quantized_model("quantized_model.pt")


# Function to generate text
def generate_text(prompt, max_length=50, temperature=1.0, top_k=50):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_length=max_length,
            temperature=temperature,
            top_k=top_k,
            do_sample=True,
        )
    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return generated_text
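
# Quick local sanity check (commented out so the Space doesn't run it on
# startup; the prompt is just an example):
# print(generate_text("Once upon a time", max_length=30))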


# Gradio Interface
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your prompt here..."),
        gr.Slider(minimum=10, maximum=200, value=50, step=1, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top-k Sampling"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generation with Quantized SMOL-LM2",
    description="Generate text using a quantized version of the SMOL-LM2 model.",
)

# Launch the app
interface.launch()