EthanZyh committed
Commit 85c9b8b · 1 Parent(s): 01a383f

add envelope of Text2World, add config.json

config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "architectures": [
+     "DiffusionText2World"
+   ],
+   "auto_map": {
+     "AutoConfig": "cosmos1.models.diffusion.inference.text2world_hf.DiffusionText2WorldConfig",
+     "AutoModel": "cosmos1.models.diffusion.inference.text2world_hf.DiffusionText2World"
+   },
+   "model_type": "AutoModel"
+ }
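
With the auto_map entries above, the wrapper defined in text2world_hf.py can in principle be loaded through the Hugging Face Auto classes, provided remote code execution is enabled. A minimal sketch; the repository id is a placeholder, and the explicit config argument mirrors the from_pretrained override added below:

from transformers import AutoConfig, AutoModel

# "your-namespace/your-cosmos-repo" is a placeholder repo id; trust_remote_code=True is
# required because auto_map points at code shipped inside the repository.
repo_id = "your-namespace/your-cosmos-repo"
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, config=config, trust_remote_code=True)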
cosmos1/models/diffusion/inference/text2world_hf.py ADDED
@@ -0,0 +1,107 @@
+ import os
+ import argparse
+
+ import torch
+ from transformers import PreTrainedModel, PretrainedConfig
+
+ from .inference_utils import add_common_arguments, validate_args
+ from .world_generation_pipeline import DiffusionText2WorldGenerationPipeline
+ from ....utils import log, misc
+ from ....utils.io import read_prompts_from_file, save_video
+
+
+ class DiffusionText2WorldConfig(PretrainedConfig):
+     model_type = "DiffusionText2World"
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self.diffusion_transformer_dir = kwargs.get("diffusion_transformer_dir", "Cosmos-1.0-Diffusion-7B-Text2World")
+         self.prompt_upsampler_dir = kwargs.get("prompt_upsampler_dir", "Cosmos-1.0-Prompt-Upsampler-12B-Text2World")
+         self.word_limit_to_skip_upsampler = kwargs.get("word_limit_to_skip_upsampler", 250)
+         self.checkpoint_dir = kwargs.get("checkpoint_dir", "checkpoints")
+         self.tokenizer_dir = kwargs.get("tokenizer_dir", "Cosmos-1.0-Tokenizer-CV8x8x8")
+         self.video_save_name = kwargs.get("video_save_name", "output")
+         self.video_save_folder = kwargs.get("video_save_folder", "outputs/")
+         self.prompt = kwargs.get("prompt", None)
+         self.batch_input_path = kwargs.get("batch_input_path", None)
+         self.negative_prompt = kwargs.get("negative_prompt", None)
+         self.num_steps = kwargs.get("num_steps", 35)
+         self.guidance = kwargs.get("guidance", 7)
+         self.num_video_frames = kwargs.get("num_video_frames", 121)
+         self.height = kwargs.get("height", 704)
+         self.width = kwargs.get("width", 1280)
+         self.fps = kwargs.get("fps", 24)
+         self.seed = kwargs.get("seed", 1)
+         self.disable_prompt_upsampler = kwargs.get("disable_prompt_upsampler", False)
+         self.offload_diffusion_transformer = kwargs.get("offload_diffusion_transformer", False)
+         self.offload_tokenizer = kwargs.get("offload_tokenizer", False)
+         self.offload_text_encoder_model = kwargs.get("offload_text_encoder_model", False)
+         self.offload_prompt_upsampler = kwargs.get("offload_prompt_upsampler", False)
+         self.offload_guardrail_models = kwargs.get("offload_guardrail_models", False)
+
+
+ class DiffusionText2World(PreTrainedModel):
+     config_class = DiffusionText2WorldConfig
+
+     def __init__(self, config=DiffusionText2WorldConfig()):
+         super().__init__(config)
+         # Inference only: disable autograd globally. TODO: do we need this?
+         torch.set_grad_enabled(False)
+         self.config = config
+         inference_type = "text2world"
+         # validate_args expects an argparse.Namespace, so convert the config first.
+         validate_args(argparse.Namespace(**config.to_dict()), inference_type)
+         self.pipeline = DiffusionText2WorldGenerationPipeline(config)
+
+     def forward(self, prompt):
+         cfg = self.config
+         # Handle multiple prompts if a prompt file is provided
+         if cfg.batch_input_path:
+             log.info(f"Reading batch inputs from path: {cfg.batch_input_path}")
+             prompts = read_prompts_from_file(cfg.batch_input_path)
+         else:
+             # Single prompt case
+             prompts = [{"prompt": cfg.prompt}]
+
+         os.makedirs(cfg.video_save_folder, exist_ok=True)
+         for i, input_dict in enumerate(prompts):
+             current_prompt = input_dict.get("prompt", None)
+             if current_prompt is None:
+                 log.critical("Prompt is missing, skipping world generation.")
+                 continue
+
+             # Generate video
+             generated_output = self.pipeline.generate(current_prompt, cfg.negative_prompt, cfg.word_limit_to_skip_upsampler)
+             if generated_output is None:
+                 log.critical("Guardrail blocked text2world generation.")
+                 continue
+             video, prompt = generated_output
+
+             if cfg.batch_input_path:
+                 video_save_path = os.path.join(cfg.video_save_folder, f"{i}.mp4")
+                 prompt_save_path = os.path.join(cfg.video_save_folder, f"{i}.txt")
+             else:
+                 video_save_path = os.path.join(cfg.video_save_folder, f"{cfg.video_save_name}.mp4")
+                 prompt_save_path = os.path.join(cfg.video_save_folder, f"{cfg.video_save_name}.txt")
+
+             # Save video
+             save_video(
+                 video=video,
+                 fps=cfg.fps,
+                 H=cfg.height,
+                 W=cfg.width,
+                 video_save_quality=5,
+                 video_save_path=video_save_path,
+             )
+
+             # Save the (possibly upsampled) prompt to a text file alongside the video
+             with open(prompt_save_path, "wb") as f:
+                 f.write(prompt.encode("utf-8"))
+
+             log.info(f"Saved video to {video_save_path}")
+             log.info(f"Saved prompt to {prompt_save_path}")
+
+     def save_pretrained(self, save_directory, **kwargs):
+         # Weights come from the Cosmos checkpoints, so there is nothing to save here.
+         pass
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+         # A config must be supplied via kwargs; the pipeline loads the weights itself.
+         config = kwargs["config"]
+         model = cls(config)
+         return model
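
The wrapper can also be used directly, without the Auto machinery. Note that forward reads the prompt (or batch_input_path) from the config rather than from its argument, and writes the generated video and final prompt under video_save_folder. A minimal usage sketch, assuming the Cosmos checkpoints referenced by the default config are already present under ./checkpoints; the prompt text is illustrative:

from cosmos1.models.diffusion.inference.text2world_hf import (
    DiffusionText2World,
    DiffusionText2WorldConfig,
)

# Build a config for a single-prompt run; unspecified fields keep their defaults.
config = DiffusionText2WorldConfig(prompt="A robot arm assembles a wooden chair in a sunlit workshop.")
model = DiffusionText2World(config)

# Writes outputs/output.mp4 and outputs/output.txt.
model(config.prompt)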