ehristoforu committed · verified
Commit dd8a264 · 1 Parent(s): 237a953

Update app.py

Files changed (1):
  1. app.py +4 -6
app.py CHANGED
@@ -22,18 +22,16 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 HF_TOKEN = os.getenv("HF_TOKEN")
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_name = "tiiuae/Falcon3-10B-Instruct"
+model_name = "ehristoforu/FalconThink-10B-IT"
 
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
-    torch_dtype="auto",
+    torch_dtype="fp16",
     device_map="auto"
 )
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/falconthink-10b-lora")
-merged_model = peft_model.merge_and_unload()
-merged_model.save_pretrained("./falconthink")
+model.save_pretrained("./falconthink")
 
 from huggingface_hub import HfApi
 
@@ -43,7 +41,7 @@ api = HfApi()
 
 api.upload_folder(
     folder_path="./falconthink",
-    repo_id="ehristoforu/FalconThink-10B-IT",
+    repo_id="ehristoforu/FT-10B-fp16",
     repo_type="model",
     token=HF_TOKEN,
 )
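
For readers following the change, here is a minimal standalone sketch of the flow app.py has after this commit: load the already-merged FalconThink checkpoint (instead of merging the LoRA at runtime), re-save it locally, and push the folder to the Hub. The repo ids and the ./falconthink folder come from the diff; the dtype handling and the tokenizer save are my assumptions, not part of the commit. In particular, transformers resolves string dtypes via getattr(torch, ...), and there is no torch.fp16, so the committed torch_dtype="fp16" would likely error; torch.float16 is used below instead.

import os

import torch
from huggingface_hub import HfApi
from transformers import AutoModelForCausalLM, AutoTokenizer

HF_TOKEN = os.getenv("HF_TOKEN")
model_name = "ehristoforu/FalconThink-10B-IT"

# Load the merged model in half precision, sharded across available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # assumption: "fp16" in the commit was meant as float16
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Re-serialize the weights locally before uploading.
model.save_pretrained("./falconthink")
tokenizer.save_pretrained("./falconthink")  # not in the diff; added so the folder is self-contained

# Push the local folder to the target model repo (create the repo first if it does not exist).
api = HfApi()
api.upload_folder(
    folder_path="./falconthink",
    repo_id="ehristoforu/FT-10B-fp16",
    repo_type="model",
    token=HF_TOKEN,
)

Note that with device_map="auto" the separate device variable in app.py is not needed for model placement; accelerate decides where each layer lives.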