import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# Authenticate with your own Hugging Face access token. Never hard-code a real
# token in source; read it from the environment (e.g. export HF_TOKEN=hf_...).
login(token=os.environ["HF_TOKEN"])

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
# Move the model to the selected device so generation runs on GPU when available.
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf").to(device)


def get_response(prompt, max_new_tokens=50):
    # Tokenize the prompt and place the tensors on the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Greedy decoding replaces the original temperature=0.0001 / do_sample=True
    # combination, which was effectively deterministic anyway.
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
    # decode() already returns a Python string; there is no .toString() in Python.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
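
# Minimal usage sketch (assumptions: the model weights have finished downloading
# and your account has been granted access to the gated Llama-2 repo; the prompt
# below is purely illustrative).
if __name__ == "__main__":
    print(get_response("Explain what a tokenizer does in one sentence."))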