alfredplpl committed on
Commit
f036be1
·
verified ·
1 Parent(s): 83268c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -15,12 +15,12 @@ repo = "stabilityai/stable-diffusion-3.5-large"
15
  t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.bfloat16, token=os.environ["TOKEN"]).to(device)
16
 
17
  model = AutoModelForCausalLM.from_pretrained(
18
- "microsoft/Phi-3.5-mini-instruct",
19
  device_map="cuda",
20
  torch_dtype=torch.bfloat16,
21
  trust_remote_code=True,
22
  )
23
- tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")
24
  upsampler = pipeline(
25
  "text-generation",
26
  model=model,
@@ -28,7 +28,7 @@ upsampler = pipeline(
28
  )
29
 
30
  generation_args = {
31
- "max_new_tokens": 300,
32
  "return_full_text": False,
33
  "temperature": 0.7,
34
  "do_sample": True,
 
15
  t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.bfloat16, token=os.environ["TOKEN"]).to(device)
16
 
17
  model = AutoModelForCausalLM.from_pretrained(
18
+ "llm-jp/llm-jp-3-1.8b-instruct",
19
  device_map="cuda",
20
  torch_dtype=torch.bfloat16,
21
  trust_remote_code=True,
22
  )
23
+ tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-3-1.8b-instruct")
24
  upsampler = pipeline(
25
  "text-generation",
26
  model=model,
 
28
  )
29
 
30
  generation_args = {
31
+ "max_new_tokens": 70,
32
  "return_full_text": False,
33
  "temperature": 0.7,
34
  "do_sample": True,