linoyts (HF Staff) committed
Commit 0135c48 · verified · 1 Parent(s): 77756bc

update to 0.9.8
configs/ltxv-13b-0.9.8-dev-fp8.yaml ADDED
@@ -0,0 +1,34 @@
+ pipeline_type: multi-scale
+ checkpoint_path: "ltxv-13b-0.9.8-dev-fp8.safetensors"
+ downscale_factor: 0.6666666
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+ decode_timestep: 0.05
+ decode_noise_scale: 0.025
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+ precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision"
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+ prompt_enhancement_words_threshold: 120
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+ stochastic_sampling: false
+
+ first_pass:
+   guidance_scale: [1, 1, 6, 8, 6, 1, 1]
+   stg_scale: [0, 0, 4, 4, 4, 2, 1]
+   rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1]
+   guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]
+   skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]]
+   num_inference_steps: 30
+   skip_final_inference_steps: 3
+   cfg_star_rescale: true
+
+ second_pass:
+   guidance_scale: [1]
+   stg_scale: [1]
+   rescaling_scale: [1]
+   guidance_timesteps: [1.0]
+   skip_block_list: [27]
+   num_inference_steps: 30
+   skip_initial_inference_steps: 17
+   cfg_star_rescale: true
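
The configs in this commit are plain YAML: flat top-level keys plus nested first_pass / second_pass blocks. A minimal sketch of reading one with PyYAML, for orientation only; the actual LTX-Video inference entry point is not part of this diff, so the loading path and key handling below are assumptions:

import yaml

# Load the dev-fp8 config added above (path relative to the repo root).
with open("configs/ltxv-13b-0.9.8-dev-fp8.yaml") as f:
    config = yaml.safe_load(f)

assert config["pipeline_type"] == "multi-scale"

# Per-pass settings live in the nested mappings.
first_pass = config["first_pass"]
second_pass = config["second_pass"]
print(first_pass["num_inference_steps"], first_pass["skip_final_inference_steps"])
print(second_pass["num_inference_steps"], second_pass["skip_initial_inference_steps"])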
configs/ltxv-13b-0.9.8-dev.yaml ADDED
@@ -0,0 +1,34 @@
+ pipeline_type: multi-scale
+ checkpoint_path: "ltxv-13b-0.9.8-dev.safetensors"
+ downscale_factor: 0.6666666
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+ decode_timestep: 0.05
+ decode_noise_scale: 0.025
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+ precision: "bfloat16"
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+ prompt_enhancement_words_threshold: 120
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+ stochastic_sampling: false
+
+ first_pass:
+   guidance_scale: [1, 1, 6, 8, 6, 1, 1]
+   stg_scale: [0, 0, 4, 4, 4, 2, 1]
+   rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1]
+   guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]
+   skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]]
+   num_inference_steps: 30
+   skip_final_inference_steps: 3
+   cfg_star_rescale: true
+
+ second_pass:
+   guidance_scale: [1]
+   stg_scale: [1]
+   rescaling_scale: [1]
+   guidance_timesteps: [1.0]
+   skip_block_list: [27]
+   num_inference_steps: 30
+   skip_initial_inference_steps: 17
+   cfg_star_rescale: true
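
Both dev configs above share the multi-scale layout: downscale_factor ≈ 2/3 implies the first pass renders at roughly two-thirds of the target resolution, after which ltxv-spatial-upscaler-0.9.8.safetensors upscales the intermediate result for the second-pass refinement. A small arithmetic sketch of that relationship; the target resolution and the rounding to multiples of 32 are hypothetical, not taken from this commit:

# Hypothetical target size; the configs themselves do not pin one.
target_w, target_h = 1216, 704
downscale_factor = 0.6666666

def round_to_32(x: float) -> int:
    # Assumed rounding to a model-friendly multiple; not specified in the configs.
    return int(round(x / 32)) * 32

first_pass_size = (round_to_32(target_w * downscale_factor),
                   round_to_32(target_h * downscale_factor))
print(first_pass_size)  # (800, 480) for these hypothetical numbers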
configs/ltxv-13b-0.9.8-distilled-fp8.yaml ADDED
@@ -0,0 +1,29 @@
+ pipeline_type: multi-scale
+ checkpoint_path: "ltxv-13b-0.9.8-distilled-fp8.safetensors"
+ downscale_factor: 0.6666666
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+ decode_timestep: 0.05
+ decode_noise_scale: 0.025
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+ precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision"
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+ prompt_enhancement_words_threshold: 120
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+ stochastic_sampling: false
+
+ first_pass:
+   timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
+
+ second_pass:
+   timesteps: [0.9094, 0.7250, 0.4219]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
+   tone_map_compression_ratio: 0.6
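
Note the schedule difference between the dev and distilled variants: the dev configs derive their schedules from num_inference_steps plus skip_final_inference_steps / skip_initial_inference_steps, while the distilled configs pin each pass to an explicit timesteps list. A quick sketch of the resulting step counts, assuming the skip_* keys simply trim a 30-step schedule at its ends (that interpretation is an assumption, not stated in this diff):

# Dev configs: schedules derived from step counts.
dev_first_steps = 30 - 3    # num_inference_steps - skip_final_inference_steps   -> 27
dev_second_steps = 30 - 17  # num_inference_steps - skip_initial_inference_steps -> 13

# Distilled configs: explicit timesteps per pass.
distilled_first = [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
distilled_second = [0.9094, 0.7250, 0.4219]

print(dev_first_steps, dev_second_steps)            # 27 13
print(len(distilled_first), len(distilled_second))  # 7 3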
configs/ltxv-13b-0.9.8-distilled.yaml ADDED
@@ -0,0 +1,29 @@
+ pipeline_type: multi-scale
+ checkpoint_path: "ltxv-13b-0.9.8-distilled.safetensors"
+ downscale_factor: 0.6666666
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+ decode_timestep: 0.05
+ decode_noise_scale: 0.025
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+ precision: "bfloat16"
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+ prompt_enhancement_words_threshold: 120
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+ stochastic_sampling: false
+
+ first_pass:
+   timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
+
+ second_pass:
+   timesteps: [0.9094, 0.7250, 0.4219]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
+   tone_map_compression_ratio: 0.6
configs/ltxv-2b-0.9.8-distilled-fp8.yaml ADDED
@@ -0,0 +1,28 @@
+ pipeline_type: multi-scale
+ checkpoint_path: "ltxv-2b-0.9.8-distilled-fp8.safetensors"
+ downscale_factor: 0.6666666
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+ decode_timestep: 0.05
+ decode_noise_scale: 0.025
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+ precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision"
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+ prompt_enhancement_words_threshold: 120
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+ stochastic_sampling: false
+
+ first_pass:
+   timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
+
+ second_pass:
+   timesteps: [0.9094, 0.7250, 0.4219]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
configs/ltxv-2b-0.9.8-distilled.yaml ADDED
@@ -0,0 +1,28 @@
+ pipeline_type: multi-scale
+ checkpoint_path: "ltxv-2b-0.9.8-distilled.safetensors"
+ downscale_factor: 0.6666666
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+ decode_timestep: 0.05
+ decode_noise_scale: 0.025
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+ precision: "bfloat16"
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+ prompt_enhancement_words_threshold: 120
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+ stochastic_sampling: false
+
+ first_pass:
+   timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]
+
+ second_pass:
+   timesteps: [0.9094, 0.7250, 0.4219]
+   guidance_scale: 1
+   stg_scale: 0
+   rescaling_scale: 1
+   skip_block_list: [42]