ashioyajotham committed on
Commit b24a077 · 1 Parent(s): e5b4db8

Update app.py

Files changed (1): app.py +3 -2
app.py CHANGED

@@ -9,7 +9,8 @@ dataset = load_dataset(dataset_name, split="train")
 # We will be loading the Falcon 7B model, applying 4bit quantization to it, and then adding LoRA adapters to the model.
 import torch
 
-from transformers import FalconForCausalLM, AutoTokenizer, BitsAndBytesConfig
+#from transformers import FalconForCausalLM, AutoTokenizer, BitsAndBytesConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 # Defining the name of the Falcon model
 model_name = "ybelkada/falcon-7b-sharded-bf16"
@@ -22,7 +23,7 @@ bnb_4bit_compute_dtype=torch.float16,
 )
 
 # Loading the Falcon model with quantization configuration
-model = FalconForCausalLM.from_pretrained(
+model = AutoModelForCausalLM.from_pretrained(
     model_name,
     quantization_config=bnb_config,
     trust_remote_code=True
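
For context, here is a minimal sketch of the end-to-end pattern app.py's comment describes: loading the sharded Falcon 7B checkpoint in 4-bit via BitsAndBytesConfig, then adding LoRA adapters with peft. The specific bnb_4bit_* values and LoRA hyperparameters (r, lora_alpha, target_modules, lora_dropout) are assumptions for illustration; the diff only confirms bnb_4bit_compute_dtype=torch.float16 and the from_pretrained arguments.

# A minimal sketch, assuming the peft library is installed. The LoRA
# hyperparameters and most bnb_4bit_* settings below are illustrative,
# not copied from app.py.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model_name = "ybelkada/falcon-7b-sharded-bf16"

# 4-bit quantization config; NF4 with float16 compute is a common choice (assumed)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# AutoModelForCausalLM resolves the concrete model class from the
# checkpoint's config, which is why the commit swaps it in for the
# hard-coded FalconForCausalLM.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Prepare the quantized model for k-bit training, then attach LoRA adapters
model = prepare_model_for_kbit_training(model)
lora_config = LoraConfig(
    r=16,                                # assumed rank
    lora_alpha=32,                       # assumed scaling factor
    target_modules=["query_key_value"],  # Falcon's fused attention projection
    lora_dropout=0.05,                   # assumed dropout
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)

Because the checkpoint is loaded with trust_remote_code=True, AutoModelForCausalLM can also dispatch to modeling code that ships inside the repo itself, so the import no longer needs to track a specific transformers class name.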