JiantaoLin committed on
Commit
5ca2c01
·
1 Parent(s): 5eec2a3
pipeline/pipeline_config/default.yaml CHANGED
@@ -6,7 +6,7 @@ flux:
6
  redux: "black-forest-labs/FLUX.1-Redux-dev"
7
  num_inference_steps: 20
8
  seed: 42
9
- device: 'cuda'
10
 
11
  multiview:
12
  base_model: "sudo-ai/zero123plus-v1.2"
@@ -14,20 +14,20 @@ multiview:
14
  unet: "./checkpoint/zero123++/flexgen_19w.ckpt"
15
  num_inference_steps: 50
16
  seed: 42
17
- device: 'cuda'
18
 
19
  reconstruction:
20
  model_config: "./models/lrm/config/PRM_inference.yaml"
21
  base_model: "./checkpoint/lrm/final_ckpt.ckpt"
22
- device: 'cuda'
23
 
24
  caption:
25
  base_model: "multimodalart/Florence-2-large-no-flash-attn"
26
- device: 'cuda'
27
 
28
  llm:
29
  base_model: "Qwen/Qwen2-7B-Instruct"
30
- device: 'cuda'
31
 
32
  use_zero_gpu: false # for huggingface demo only
33
  3d_bundle_templates: './init_3d_Bundle'
 
6
  redux: "black-forest-labs/FLUX.1-Redux-dev"
7
  num_inference_steps: 20
8
  seed: 42
9
+ device: 'cuda:0'
10
 
11
  multiview:
12
  base_model: "sudo-ai/zero123plus-v1.2"
 
14
  unet: "./checkpoint/zero123++/flexgen_19w.ckpt"
15
  num_inference_steps: 50
16
  seed: 42
17
+ device: 'cuda:1'
18
 
19
  reconstruction:
20
  model_config: "./models/lrm/config/PRM_inference.yaml"
21
  base_model: "./checkpoint/lrm/final_ckpt.ckpt"
22
+ device: 'cuda:1'
23
 
24
  caption:
25
  base_model: "multimodalart/Florence-2-large-no-flash-attn"
26
+ device: 'cuda:1'
27
 
28
  llm:
29
  base_model: "Qwen/Qwen2-7B-Instruct"
30
+ device: 'cuda:1'
31
 
32
  use_zero_gpu: false # for huggingface demo only
33
  3d_bundle_templates: './init_3d_Bundle'