# (scrape artifact — not source code) File size: 1,224 Bytes
# (scrape artifact — not source code) 5f87533 7f51a5a 5f87533 7f51a5a 2856ccc 44131c9 5f87533 b61717e 5f87533 0f96a52 |
# (scrape artifact — not source code) 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments, Trainer, pipeline
from peft import PeftModel, PeftConfig
from huggingface_hub import login
import bitsandbytes as bnb
import torch
import time
import pandas as pd
import numpy as np
import streamlit as st
# Streamlit page chrome: wide layout with the sidebar open on load.
# Must be the first Streamlit call in the script.
PAGE_CONFIG = {
    "page_title": "Code Generation",
    "page_icon": "🤖",
    "layout": "wide",
    "initial_sidebar_state": "expanded",
}
st.set_page_config(**PAGE_CONFIG)
import os

# SECURITY FIX: the Hugging Face API token was hard-coded in source control,
# which leaks the credential to anyone who can read the repo. Read it from the
# environment instead (export HF_TOKEN=... before launching the app).
# NOTE(review): the previously committed token should be revoked on the
# Hugging Face account — removing it from source does not un-leak it.
_hf_token = os.environ.get("HF_TOKEN")
if _hf_token:
    login(token=_hf_token, add_to_git_credential=True)
else:
    # Surface a clear message in the UI rather than failing later with an
    # opaque authentication error from the Hub.
    st.warning("HF_TOKEN environment variable is not set; Hugging Face Hub "
               "login was skipped. Gated/private models may fail to load.")

st.title("Code Generation")
st.write('MODEL: TinyPixel/Llama-2-7B-bf16-sharded')
# Quantization recipe: 4-bit NF4 weights with nested (double) quantization to
# shave additional memory, while matmuls run in bfloat16.
COMPUTE_DTYPE = torch.bfloat16

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=COMPUTE_DTYPE,
)
# Base checkpoint (sharded bf16 Llama-2-7B) loaded with the 4-bit config,
# then the fine-tuned LoRA adapter stacked on top for inference only.
model_name = 'TinyPixel/Llama-2-7B-bf16-sharded'

peft_model_base = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Adapter weights are frozen (is_trainable=False): inference only.
adapter_repo = 'red1xe/Llama-2-7B-codeGPT'
peft_model = PeftModel.from_pretrained(
    peft_model_base,
    adapter_repo,
    torch_dtype=torch.bfloat16,
    is_trainable=False,
)
# (scrape artifact — not source code) |