ehristoforu committed · verified
Commit de8a18e · Parent: e9ef9de

Update app.py

Files changed (1):
  1. app.py (+7, -7)
app.py CHANGED
@@ -22,7 +22,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 HF_TOKEN = os.getenv("HF_TOKEN")
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_name = "Qwen/Qwen2.5-3B-Instruct"
+model_name = "TheDrummer/MS-Interleaved-Upscale-39B"
 
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
@@ -31,12 +31,12 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/think-lora-qwen-r64")
-merged_model = peft_model.merge_and_unload()
-merged_model.save_pretrained("./coolqwen")
+#peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/think-lora-qwen-r64")
+#merged_model = peft_model.merge_and_unload()
+#merged_model.save_pretrained("./coolqwen")
 #model.save_pretrained("./coolqwen")
-tokenizer.save_pretrained("./coolqwen")
-
+#tokenizer.save_pretrained("./coolqwen")
+'''
 from huggingface_hub import HfApi
 
 api = HfApi()
@@ -49,7 +49,7 @@ api.upload_folder(
     repo_type="model",
     token=HF_TOKEN,
 )
-
+'''
 
 @spaces.GPU(duration=60)
 def generate(
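
For reference, the lines this commit comments out previously merged a LoRA adapter into its base model at startup and uploaded the merged weights to the Hub. Below is a minimal standalone sketch of that merge-and-upload flow, assembled from the code visible in the diff. The folder_path and repo_id arguments to upload_folder fall outside the hunks shown, so the repo_id here is a placeholder, and the tokenizer repo is assumed to be the Qwen2.5-3B-Instruct checkpoint used before this change.

import os

from huggingface_hub import HfApi
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

HF_TOKEN = os.getenv("HF_TOKEN")

# Load the base model with the LoRA adapter attached, then fold the adapter
# weights into the base weights so the result is a plain, adapter-free model.
peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/think-lora-qwen-r64")
merged_model = peft_model.merge_and_unload()

# Save the merged weights and the tokenizer to a local folder.
merged_model.save_pretrained("./coolqwen")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B-Instruct")  # assumed base repo
tokenizer.save_pretrained("./coolqwen")

# Push the folder to a model repo on the Hub. The repo_id is a placeholder;
# the real target repo is not visible in this diff.
api = HfApi()
api.upload_folder(
    folder_path="./coolqwen",
    repo_id="your-username/coolqwen",  # placeholder, not from the diff
    repo_type="model",
    token=HF_TOKEN,
)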