import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# Read the Hugging Face token from the environment rather than hardcoding
# a credential in source (the original embedded a literal "hf_..." token).
# HF_TOKEN is the conventional variable name used by huggingface_hub.
login(token=os.environ["HF_TOKEN"])

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# from_pretrained expects the checkpoint *directory*, not the
# pytorch_model.bin file inside it.
model_dir = "finetuning/output"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir).to(device)

def get_response(prompt, max_new_tokens=50):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Greedy decoding; equivalent to the original temperature=0.0001 with
    # do_sample=True, but deterministic and without the sampling overhead.
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
    # decode already returns a Python str; the original response.toString()
    # would raise AttributeError, since str has no toString method.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
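
# Minimal usage sketch for get_response. The prompt string below is
# illustrative only; it is not part of the original script.
if __name__ == "__main__":
    print(get_response("Question: What is the capital of France?\nAnswer:"))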