OmPrakashSingh1704 committed on
Commit
76d8c6a
·
1 Parent(s): c10fc76
options/Video_model/__pycache__/Model.cpython-310.pyc CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ
 
options/Video_model/utils.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from diffusers.loaders.lora import LoraLoaderMixin
3
+ from typing import Dict, Union
4
+ import numpy as np
5
+ import imageio
6
+
7
def load_lora_weights(unet, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs):
    """Load LoRA weights into ``unet`` from a checkpoint reference or a state dict.

    Parameters
    ----------
    unet :
        The UNet model the LoRA layers are injected into.
    pretrained_model_name_or_path_or_dict : str or dict[str, torch.Tensor]
        A model id / local path resolvable by ``LoraLoaderMixin.lora_state_dict``,
        or an already-loaded state dict.
    adapter_name : str, optional
        Name under which the adapter is registered; ``None`` uses the default.
    **kwargs :
        Forwarded to ``LoraLoaderMixin.lora_state_dict``.

    Raises
    ------
    ValueError
        If any checkpoint key is neither a LoRA nor a DoRA-scale parameter.
    """
    # Work on a copy so a caller-supplied dict is never mutated in place.
    source = pretrained_model_name_or_path_or_dict
    if isinstance(source, dict):
        source = source.copy()

    # Resolve and validate the checkpoint into a state dict plus network alphas.
    weights, network_alphas = LoraLoaderMixin.lora_state_dict(source, **kwargs)

    # Strip the 'base_model.model.' prefix if it was left in the saved keys.
    weights = {key.replace('base_model.model.', ''): value for key, value in weights.items()}

    # Every remaining key must look like a LoRA (or DoRA-scale) parameter.
    if not all("lora" in key or "dora_scale" in key for key in weights):
        raise ValueError("Invalid LoRA checkpoint.")

    LoraLoaderMixin.load_lora_into_unet(
        weights,
        network_alphas=network_alphas,
        unet=unet,
        low_cpu_mem_usage=True,
        adapter_name=adapter_name,
    )
31
+
32
def save_video(frames, save_path, fps, quality=9):
    """Encode a sequence of frames into a video file via imageio.

    Parameters
    ----------
    frames : iterable
        Frames convertible with ``np.array`` (e.g. PIL images or ndarrays).
    save_path : str
        Output path; container/codec is inferred from the file extension.
    fps : int or float
        Playback frame rate passed to the writer.
    quality : int, optional
        imageio/FFmpeg quality setting (0 worst .. 10 best). Defaults to 9.
    """
    writer = imageio.get_writer(save_path, fps=fps, quality=quality)
    try:
        for frame in frames:
            # imageio expects ndarray data; convert PIL images etc. here.
            writer.append_data(np.array(frame))
    finally:
        # Close even if conversion/encoding fails mid-stream, so the
        # underlying FFmpeg process and file handle are always released.
        writer.close()