code (string, 66-870k chars) | docstring (string, 19-26.7k chars) | func_name (string, 1-138 chars) | language (1 class) | repo (string, 7-68 chars) | path (string, 5-324 chars) | url (string, 46-389 chars) | license (7 classes) |
---|---|---|---|---|---|---|---|
def resize_crop(video: torch.Tensor, oh: int, ow: int):
"""
Resize, center crop and normalize for decord loaded video (torch.Tensor type)
Parameters:
video - video to process (torch.Tensor): Tensor from `reader.get_batch(frame_ids)`, in shape of (T, H, W, C)
oh - target height (int)
ow - target width (int)
Returns:
The processed video (torch.Tensor): Normalized tensor range [-1, 1], in shape of (C, T, H, W)
Raises:
"""
# permute ([t, h, w, c] -> [t, c, h, w])
video = video.permute(0, 3, 1, 2)
# resize and crop
ih, iw = video.shape[2:]
if ih != oh or iw != ow:
# resize
scale = max(ow / iw, oh / ih)
video = F.interpolate(
video,
size=(round(scale * ih), round(scale * iw)),
mode='bicubic',
antialias=True
)
assert video.size(3) >= ow and video.size(2) >= oh
# center crop
x1 = (video.size(3) - ow) // 2
y1 = (video.size(2) - oh) // 2
video = video[:, :, y1:y1 + oh, x1:x1 + ow]
# permute ([t, c, h, w] -> [c, t, h, w]) and normalize
video = video.transpose(0, 1).float().div_(127.5).sub_(1.)
return video
|
Resize, center crop and normalize for decord loaded video (torch.Tensor type)
Parameters:
video - video to process (torch.Tensor): Tensor from `reader.get_batch(frame_ids)`, in shape of (T, H, W, C)
oh - target height (int)
ow - target width (int)
Returns:
The processed video (torch.Tensor): Normalized tensor range [-1, 1], in shape of (C, T, H, W)
Raises:
|
resize_crop
|
python
|
ali-vilab/VACE
|
vace/models/utils/preprocessor.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/utils/preprocessor.py
|
Apache-2.0
|
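A minimal usage sketch of the `resize_crop` function above, using a random tensor in place of a decord `reader.get_batch(frame_ids)` result; the frame count, input resolution, and the 480x832 target are illustrative assumptions, not values from the repo.
import torch
import torch.nn.functional as F  # resize_crop relies on F.interpolate

# Stand-in for reader.get_batch(frame_ids): 16 RGB frames of 360x640 (illustrative).
video = torch.randint(0, 256, (16, 360, 640, 3), dtype=torch.uint8).float()
out = resize_crop(video, oh=480, ow=832)
print(out.shape)  # torch.Size([3, 16, 480, 832]); values roughly in [-1, 1]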
def __init__(
self,
config,
checkpoint_dir,
device_id=0,
rank=0,
t5_fsdp=False,
dit_fsdp=False,
use_usp=False,
t5_cpu=False,
):
r"""
Initializes the Wan text-to-video generation model components.
Args:
config (EasyDict):
Object containing model parameters initialized from config.py
checkpoint_dir (`str`):
Path to directory containing model checkpoints
device_id (`int`, *optional*, defaults to 0):
Id of target GPU device
rank (`int`, *optional*, defaults to 0):
Process rank for distributed training
t5_fsdp (`bool`, *optional*, defaults to False):
Enable FSDP sharding for T5 model
dit_fsdp (`bool`, *optional*, defaults to False):
Enable FSDP sharding for DiT model
use_usp (`bool`, *optional*, defaults to False):
Enable distribution strategy of USP.
t5_cpu (`bool`, *optional*, defaults to False):
Whether to place T5 model on CPU. Only works without t5_fsdp.
"""
self.device = torch.device(f"cuda:{device_id}")
self.config = config
self.rank = rank
self.t5_cpu = t5_cpu
self.num_train_timesteps = config.num_train_timesteps
self.param_dtype = config.param_dtype
shard_fn = partial(shard_model, device_id=device_id)
self.text_encoder = T5EncoderModel(
text_len=config.text_len,
dtype=config.t5_dtype,
device=torch.device('cpu'),
checkpoint_path=os.path.join(checkpoint_dir, config.t5_checkpoint),
tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),
shard_fn=shard_fn if t5_fsdp else None)
self.vae_stride = config.vae_stride
self.patch_size = config.patch_size
self.vae = WanVAE(
vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint),
device=self.device)
logging.info(f"Creating VaceWanModel from {checkpoint_dir}")
self.model = VaceWanModel.from_pretrained(checkpoint_dir)
self.model.eval().requires_grad_(False)
if use_usp:
from xfuser.core.distributed import \
get_sequence_parallel_world_size
from .distributed.xdit_context_parallel import (usp_attn_forward,
usp_dit_forward,
usp_dit_forward_vace)
for block in self.model.blocks:
block.self_attn.forward = types.MethodType(
usp_attn_forward, block.self_attn)
for block in self.model.vace_blocks:
block.self_attn.forward = types.MethodType(
usp_attn_forward, block.self_attn)
self.model.forward = types.MethodType(usp_dit_forward, self.model)
self.model.forward_vace = types.MethodType(usp_dit_forward_vace, self.model)
self.sp_size = get_sequence_parallel_world_size()
else:
self.sp_size = 1
if dist.is_initialized():
dist.barrier()
if dit_fsdp:
self.model = shard_fn(self.model)
else:
self.model.to(self.device)
self.sample_neg_prompt = config.sample_neg_prompt
self.vid_proc = VaceVideoProcessor(downsample=tuple([x * y for x, y in zip(config.vae_stride, self.patch_size)]),
min_area=480 * 832,
max_area=480 * 832,
min_fps=self.config.sample_fps,
max_fps=self.config.sample_fps,
zero_start=True,
seq_len=32760,
keep_last=True)
|
Initializes the Wan text-to-video generation model components.
Args:
config (EasyDict):
Object containing model parameters initialized from config.py
checkpoint_dir (`str`):
Path to directory containing model checkpoints
device_id (`int`, *optional*, defaults to 0):
Id of target GPU device
rank (`int`, *optional*, defaults to 0):
Process rank for distributed training
t5_fsdp (`bool`, *optional*, defaults to False):
Enable FSDP sharding for T5 model
dit_fsdp (`bool`, *optional*, defaults to False):
Enable FSDP sharding for DiT model
use_usp (`bool`, *optional*, defaults to False):
Enable distribution strategy of USP.
t5_cpu (`bool`, *optional*, defaults to False):
Whether to place T5 model on CPU. Only works without t5_fsdp.
|
__init__
|
python
|
ali-vilab/VACE
|
vace/models/wan/wan_vace.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/wan/wan_vace.py
|
Apache-2.0
|
def generate(self,
input_prompt,
input_frames,
input_masks,
input_ref_images,
size=(1280, 720),
frame_num=81,
context_scale=1.0,
shift=5.0,
sample_solver='unipc',
sampling_steps=50,
guide_scale=5.0,
n_prompt="",
seed=-1,
offload_model=True):
r"""
Generates video frames from text prompt using diffusion process.
Args:
input_prompt (`str`):
Text prompt for content generation
size (tuple[`int`], *optional*, defaults to (1280, 720)):
Controls video resolution, (width,height).
frame_num (`int`, *optional*, defaults to 81):
How many frames to sample from a video. The number should be 4n+1
shift (`float`, *optional*, defaults to 5.0):
Noise schedule shift parameter. Affects temporal dynamics
sample_solver (`str`, *optional*, defaults to 'unipc'):
Solver used to sample the video.
sampling_steps (`int`, *optional*, defaults to 50):
Number of diffusion sampling steps. Higher values improve quality but slow generation
guide_scale (`float`, *optional*, defaults to 5.0):
Classifier-free guidance scale. Controls prompt adherence vs. creativity
n_prompt (`str`, *optional*, defaults to ""):
Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`
seed (`int`, *optional*, defaults to -1):
Random seed for noise generation. If -1, use random seed.
offload_model (`bool`, *optional*, defaults to True):
If True, offloads models to CPU during generation to save VRAM
Returns:
torch.Tensor:
Generated video frames tensor. Dimensions: (C, N, H, W) where:
- C: Color channels (3 for RGB)
- N: Number of frames (81)
- H: Frame height (from size)
- W: Frame width (from size)
"""
# preprocess
# F = frame_num
# target_shape = (self.vae.model.z_dim, (F - 1) // self.vae_stride[0] + 1,
# size[1] // self.vae_stride[1],
# size[0] // self.vae_stride[2])
#
# seq_len = math.ceil((target_shape[2] * target_shape[3]) /
# (self.patch_size[1] * self.patch_size[2]) *
# target_shape[1] / self.sp_size) * self.sp_size
if n_prompt == "":
n_prompt = self.sample_neg_prompt
seed = seed if seed >= 0 else random.randint(0, sys.maxsize)
seed_g = torch.Generator(device=self.device)
seed_g.manual_seed(seed)
if not self.t5_cpu:
self.text_encoder.model.to(self.device)
context = self.text_encoder([input_prompt], self.device)
context_null = self.text_encoder([n_prompt], self.device)
if offload_model:
self.text_encoder.model.cpu()
else:
context = self.text_encoder([input_prompt], torch.device('cpu'))
context_null = self.text_encoder([n_prompt], torch.device('cpu'))
context = [t.to(self.device) for t in context]
context_null = [t.to(self.device) for t in context_null]
# vace context encode
z0 = self.vace_encode_frames(input_frames, input_ref_images, masks=input_masks)
m0 = self.vace_encode_masks(input_masks, input_ref_images)
z = self.vace_latent(z0, m0)
target_shape = list(z0[0].shape)
target_shape[0] = int(target_shape[0] / 2)
noise = [
torch.randn(
target_shape[0],
target_shape[1],
target_shape[2],
target_shape[3],
dtype=torch.float32,
device=self.device,
generator=seed_g)
]
seq_len = math.ceil((target_shape[2] * target_shape[3]) /
(self.patch_size[1] * self.patch_size[2]) *
target_shape[1] / self.sp_size) * self.sp_size
@contextmanager
def noop_no_sync():
yield
no_sync = getattr(self.model, 'no_sync', noop_no_sync)
# evaluation mode
with amp.autocast(dtype=self.param_dtype), torch.no_grad(), no_sync():
if sample_solver == 'unipc':
sample_scheduler = FlowUniPCMultistepScheduler(
num_train_timesteps=self.num_train_timesteps,
shift=1,
use_dynamic_shifting=False)
sample_scheduler.set_timesteps(
sampling_steps, device=self.device, shift=shift)
timesteps = sample_scheduler.timesteps
elif sample_solver == 'dpm++':
sample_scheduler = FlowDPMSolverMultistepScheduler(
num_train_timesteps=self.num_train_timesteps,
shift=1,
use_dynamic_shifting=False)
sampling_sigmas = get_sampling_sigmas(sampling_steps, shift)
timesteps, _ = retrieve_timesteps(
sample_scheduler,
device=self.device,
sigmas=sampling_sigmas)
else:
raise NotImplementedError("Unsupported solver.")
# sample videos
latents = noise
arg_c = {'context': context, 'seq_len': seq_len}
arg_null = {'context': context_null, 'seq_len': seq_len}
for _, t in enumerate(tqdm(timesteps)):
latent_model_input = latents
timestep = [t]
timestep = torch.stack(timestep)
self.model.to(self.device)
noise_pred_cond = self.model(
latent_model_input, t=timestep, vace_context=z, vace_context_scale=context_scale, **arg_c)[0]
noise_pred_uncond = self.model(
latent_model_input, t=timestep, vace_context=z, vace_context_scale=context_scale,**arg_null)[0]
noise_pred = noise_pred_uncond + guide_scale * (
noise_pred_cond - noise_pred_uncond)
temp_x0 = sample_scheduler.step(
noise_pred.unsqueeze(0),
t,
latents[0].unsqueeze(0),
return_dict=False,
generator=seed_g)[0]
latents = [temp_x0.squeeze(0)]
x0 = latents
if offload_model:
self.model.cpu()
torch.cuda.empty_cache()
if self.rank == 0:
videos = self.decode_latent(x0, input_ref_images)
del noise, latents
del sample_scheduler
if offload_model:
gc.collect()
torch.cuda.synchronize()
if dist.is_initialized():
dist.barrier()
return videos[0] if self.rank == 0 else None
|
Generates video frames from text prompt using diffusion process.
Args:
input_prompt (`str`):
Text prompt for content generation
size (tuple[`int`], *optional*, defaults to (1280, 720)):
Controls video resolution, (width,height).
frame_num (`int`, *optional*, defaults to 81):
How many frames to sample from a video. The number should be 4n+1
shift (`float`, *optional*, defaults to 5.0):
Noise schedule shift parameter. Affects temporal dynamics
sample_solver (`str`, *optional*, defaults to 'unipc'):
Solver used to sample the video.
sampling_steps (`int`, *optional*, defaults to 50):
Number of diffusion sampling steps. Higher values improve quality but slow generation
guide_scale (`float`, *optional*, defaults to 5.0):
Classifier-free guidance scale. Controls prompt adherence vs. creativity
n_prompt (`str`, *optional*, defaults to ""):
Negative prompt for content exclusion. If not given, use `config.sample_neg_prompt`
seed (`int`, *optional*, defaults to -1):
Random seed for noise generation. If -1, use random seed.
offload_model (`bool`, *optional*, defaults to True):
If True, offloads models to CPU during generation to save VRAM
Returns:
torch.Tensor:
Generated video frames tensor. Dimensions: (C, N, H, W) where:
- C: Color channels (3 for RGB)
- N: Number of frames (81)
- H: Frame height (from size)
- W: Frame width (from size)
|
generate
|
python
|
ali-vilab/VACE
|
vace/models/wan/wan_vace.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/wan/wan_vace.py
|
Apache-2.0
|
def usp_dit_forward(
self,
x,
t,
vace_context,
context,
seq_len,
vace_context_scale=1.0,
clip_fea=None,
y=None,
):
"""
x: A list of videos each with shape [C, T, H, W].
t: [B].
context: A list of text embeddings each with shape [L, C].
"""
# params
device = self.patch_embedding.weight.device
if self.freqs.device != device:
self.freqs = self.freqs.to(device)
# if y is not None:
# x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
# embeddings
x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
grid_sizes = torch.stack(
[torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
x = [u.flatten(2).transpose(1, 2) for u in x]
seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
assert seq_lens.max() <= seq_len
x = torch.cat([
torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1)
for u in x
])
# time embeddings
with amp.autocast(dtype=torch.float32):
e = self.time_embedding(
sinusoidal_embedding_1d(self.freq_dim, t).float())
e0 = self.time_projection(e).unflatten(1, (6, self.dim))
assert e.dtype == torch.float32 and e0.dtype == torch.float32
# context
context_lens = None
context = self.text_embedding(
torch.stack([
torch.cat([u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
for u in context
]))
# if clip_fea is not None:
# context_clip = self.img_emb(clip_fea) # bs x 257 x dim
# context = torch.concat([context_clip, context], dim=1)
# arguments
kwargs = dict(
e=e0,
seq_lens=seq_lens,
grid_sizes=grid_sizes,
freqs=self.freqs,
context=context,
context_lens=context_lens)
# Context Parallel
x = torch.chunk(
x, get_sequence_parallel_world_size(),
dim=1)[get_sequence_parallel_rank()]
hints = self.forward_vace(x, vace_context, seq_len, kwargs)
kwargs['hints'] = hints
kwargs['context_scale'] = vace_context_scale
for block in self.blocks:
x = block(x, **kwargs)
# head
x = self.head(x, e)
# Context Parallel
x = get_sp_group().all_gather(x, dim=1)
# unpatchify
x = self.unpatchify(x, grid_sizes)
return [u.float() for u in x]
|
x: A list of videos each with shape [C, T, H, W].
t: [B].
context: A list of text embeddings each with shape [L, C].
|
usp_dit_forward
|
python
|
ali-vilab/VACE
|
vace/models/wan/distributed/xdit_context_parallel.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/wan/distributed/xdit_context_parallel.py
|
Apache-2.0
|
def forward(
self,
x,
t,
vace_context,
context,
seq_len,
vace_context_scale=1.0,
clip_fea=None,
y=None,
):
r"""
Forward pass through the diffusion model
Args:
x (List[Tensor]):
List of input video tensors, each with shape [C_in, F, H, W]
t (Tensor):
Diffusion timesteps tensor of shape [B]
context (List[Tensor]):
List of text embeddings each with shape [L, C]
seq_len (`int`):
Maximum sequence length for positional encoding
clip_fea (Tensor, *optional*):
CLIP image features for image-to-video mode
y (List[Tensor], *optional*):
Conditional video inputs for image-to-video mode, same shape as x
Returns:
List[Tensor]:
List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
"""
# if self.model_type == 'i2v':
# assert clip_fea is not None and y is not None
# params
device = self.patch_embedding.weight.device
if self.freqs.device != device:
self.freqs = self.freqs.to(device)
# if y is not None:
# x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
# embeddings
x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
grid_sizes = torch.stack(
[torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
x = [u.flatten(2).transpose(1, 2) for u in x]
seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
assert seq_lens.max() <= seq_len
x = torch.cat([
torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))],
dim=1) for u in x
])
# time embeddings
with amp.autocast(dtype=torch.float32):
e = self.time_embedding(
sinusoidal_embedding_1d(self.freq_dim, t).float())
e0 = self.time_projection(e).unflatten(1, (6, self.dim))
assert e.dtype == torch.float32 and e0.dtype == torch.float32
# context
context_lens = None
context = self.text_embedding(
torch.stack([
torch.cat(
[u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
for u in context
]))
# if clip_fea is not None:
# context_clip = self.img_emb(clip_fea) # bs x 257 x dim
# context = torch.concat([context_clip, context], dim=1)
# arguments
kwargs = dict(
e=e0,
seq_lens=seq_lens,
grid_sizes=grid_sizes,
freqs=self.freqs,
context=context,
context_lens=context_lens)
hints = self.forward_vace(x, vace_context, seq_len, kwargs)
kwargs['hints'] = hints
kwargs['context_scale'] = vace_context_scale
for block in self.blocks:
x = block(x, **kwargs)
# head
x = self.head(x, e)
# unpatchify
x = self.unpatchify(x, grid_sizes)
return [u.float() for u in x]
|
Forward pass through the diffusion model
Args:
x (List[Tensor]):
List of input video tensors, each with shape [C_in, F, H, W]
t (Tensor):
Diffusion timesteps tensor of shape [B]
context (List[Tensor]):
List of text embeddings each with shape [L, C]
seq_len (`int`):
Maximum sequence length for positional encoding
clip_fea (Tensor, *optional*):
CLIP image features for image-to-video mode
y (List[Tensor], *optional*):
Conditional video inputs for image-to-video mode, same shape as x
Returns:
List[Tensor]:
List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
|
forward
|
python
|
ali-vilab/VACE
|
vace/models/wan/modules/model.py
|
https://github.com/ali-vilab/VACE/blob/master/vace/models/wan/modules/model.py
|
Apache-2.0
|
def get_html_video_template(file_url_path, file_name, width="auto", height="auto"):
"""
Generate an HTML code snippet for embedding and downloading a video.
Parameters:
file_url_path (str): The URL or path to the video file.
file_name (str): The name of the video file.
width (str, optional): The width of the video. Defaults to "auto".
height (str, optional): The height of the video. Defaults to "auto".
Returns:
str: The generated HTML code snippet.
"""
html = f'''
<div style="display: flex; flex-direction: column; align-items: center;">
<video width="{width}" height="{height}" style="max-height: 100%;" controls>
<source src="{file_url_path}" type="video/mp4">
Your browser does not support the video tag.
</video>
<a href="{file_url_path}" download="{file_name}" style="margin-top: 10px;">
<button style="font-size: 1em; padding: 10px; border: none; cursor: pointer; color: white; background: #007bff;">Download Video</button>
</a>
</div>
'''
return html
|
Generate an HTML code snippet for embedding and downloading a video.
Parameters:
file_url_path (str): The URL or path to the video file.
file_name (str): The name of the video file.
width (str, optional): The width of the video. Defaults to "auto".
height (str, optional): The height of the video. Defaults to "auto".
Returns:
str: The generated HTML code snippet.
|
get_html_video_template
|
python
|
RayVentura/ShortGPT
|
gui/ui_components_html.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_components_html.py
|
MIT
|
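A hedged usage example for `get_html_video_template`; the URL, file name, and dimensions are placeholders, not values from the repo.
snippet = get_html_video_template(
    "https://example.com/videos/demo.mp4",  # placeholder URL
    "demo.mp4",
    width="640",
    height="360",
)
print(snippet)  # <div ...><video width="640" ...> ... Download Video ...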
def __verify_and_add_youtube_asset(self, asset_name, yt_url, type):
'''Verify and add a youtube asset to the database'''
self.__validate_asset_name(asset_name)
self.__validate_youtube_url(yt_url)
return self.__add_youtube_asset(asset_name, yt_url, type)
|
Verify and add a youtube asset to the database
|
__verify_and_add_youtube_asset
|
python
|
RayVentura/ShortGPT
|
gui/ui_tab_asset_library.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_tab_asset_library.py
|
MIT
|
def __get_asset_embed(self, data, row):
'''Get the embed html for the asset at the given row'''
embed_height = 300
embed_width = 300
asset_link = data.iloc[row]['link']
embed_html = ''
if 'youtube.com' in asset_link:
asset_link_split = asset_link.split('?v=')
if asset_link_split[0] == asset_link:
asset_link_split = asset_link.split('/')
# if the last character is a /, remove it
if asset_link_split[-1] == '/':
asset_link_split = asset_link_split[:-1]
asset_link_split = asset_link_split[-1]
else:
asset_link_split = asset_link_split[-1]
asset_link = f"https://youtube.com/embed/{asset_link_split}"
embed_html = f'<iframe width="{embed_width}" height="{embed_height}" src="{asset_link}"></iframe>'
elif 'public/' in asset_link:
asset_link = f"http://localhost:31415/gradio_api/file={asset_link}"
file_ext = asset_link.split('.')[-1]
if file_ext in ['mp3', 'wav', 'ogg']:
audio_type = 'audio/mpeg' if file_ext == 'mp3' else f'audio/{file_ext}'
embed_html = f'<audio controls><source src="{asset_link}" type="{audio_type}">Your browser does not support the audio tag.</audio>'
elif file_ext in ['mp4', 'webm', 'ogg', 'mov']:
video_type = 'video/mp4' if file_ext == 'mp4' else f'video/{file_ext}'
embed_html = f'<video width="{embed_width}" height="{embed_height}" style="max-height: 100%;" controls><source src="{asset_link}" type="{video_type}">Your browser does not support the video tag.</video>'
elif file_ext in ['jpg', 'jpeg', 'png', 'gif']:
embed_html = f'<img src="{asset_link}" width="{embed_width}" height="{embed_height}">'
else:
embed_html = 'Unsupported file type'
return embed_html
|
Get the embed html for the asset at the given row
|
__get_asset_embed
|
python
|
RayVentura/ShortGPT
|
gui/ui_tab_asset_library.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_tab_asset_library.py
|
MIT
|
def __verify_and_upload_local_asset(self, upload_type, upload_name, video_path, audio_path, image_path):
'''Verify and upload a local asset to the database'''
self.__validate_asset_name(upload_name)
path_dict = {
AssetType.VIDEO.value: video_path,
AssetType.BACKGROUND_VIDEO.value: video_path,
AssetType.AUDIO.value: audio_path,
AssetType.BACKGROUND_MUSIC.value: audio_path,
AssetType.IMAGE.value: image_path
}
if not os.path.exists(path_dict[upload_type]):
raise gr.Error(f'The file does not exist at the given path.')
return self.__upload_local_asset(upload_type, upload_name, video_path, audio_path, image_path)
|
Verify and upload a local asset to the database
|
__verify_and_upload_local_asset
|
python
|
RayVentura/ShortGPT
|
gui/ui_tab_asset_library.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_tab_asset_library.py
|
MIT
|
def on_show(self, button_text, textbox, button):
'''Show or hide the API key'''
if button_text == "Show":
return gr.update(type="text"), gr.update(value="Hide")
return gr.update(type="password"), gr.update(value="Show")
|
Show or hide the API key
|
on_show
|
python
|
RayVentura/ShortGPT
|
gui/ui_tab_config.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_tab_config.py
|
MIT
|
def save_keys(self, openai_key, eleven_key, pexels_key, gemini_key):
'''Save the keys in the database'''
if (self.api_key_manager.get_api_key("OPENAI_API_KEY") != openai_key):
self.api_key_manager.set_api_key("OPENAI_API_KEY", openai_key)
if (self.api_key_manager.get_api_key("PEXELS_API_KEY") != pexels_key):
self.api_key_manager.set_api_key("PEXELS_API_KEY", pexels_key)
if (self.api_key_manager.get_api_key('ELEVENLABS_API_KEY') != eleven_key):
self.api_key_manager.set_api_key("ELEVENLABS_API_KEY", eleven_key)
new_eleven_voices = AssetComponentsUtils.getElevenlabsVoices()
return gr.update(value=openai_key),\
gr.update(value=eleven_key),\
gr.update(value=pexels_key),\
gr.update(value=gemini_key),\
gr.update(choices=new_eleven_voices),\
gr.update(choices=new_eleven_voices)
if (self.api_key_manager.get_api_key("GEMINI_API_KEY") != gemini_key):
self.api_key_manager.set_api_key("GEMINI_API_KEY", gemini_key)
return gr.update(value=openai_key),\
gr.update(value=eleven_key),\
gr.update(value=pexels_key),\
gr.update(value=gemini_key),\
gr.update(visible=True),\
gr.update(visible=True)
|
Save the keys in the database
|
save_keys
|
python
|
RayVentura/ShortGPT
|
gui/ui_tab_config.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_tab_config.py
|
MIT
|
def get_eleven_remaining(self,):
'''Get the remaining characters from ElevenLabs API'''
if (self.eleven_labs_api):
try:
return self.eleven_labs_api.get_remaining_characters()
except Exception as e:
return e.args[0]
return ""
|
Get the remaining characters from ElevenLabs API
|
get_eleven_remaining
|
python
|
RayVentura/ShortGPT
|
gui/ui_tab_config.py
|
https://github.com/RayVentura/ShortGPT/blob/master/gui/ui_tab_config.py
|
MIT
|
def get_voices(self):
'''Get the list of voices available'''
url = self.url_base + 'voices'
headers = {'accept': 'application/json'}
if self.api_key:
headers['xi-api-key'] = self.api_key
response = requests.get(url, headers=headers)
self.voices = {voice['name']: voice['voice_id'] for voice in response.json()['voices']}
return self.voices
|
Get the list of voices available
|
get_voices
|
python
|
RayVentura/ShortGPT
|
shortGPT/api_utils/eleven_api.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/api_utils/eleven_api.py
|
MIT
|
def get_remaining_characters(self):
'''Get the number of characters remaining'''
url = self.url_base + 'user'
headers = {'accept': '*/*', 'xi-api-key': self.api_key, 'Content-Type': 'application/json'}
response = requests.get(url, headers=headers)
if response.status_code == 200:
sub = response.json()['subscription']
return sub['character_limit'] - sub['character_count']
else:
raise Exception(response.json()['detail']['message'])
|
Get the number of characters remaining
|
get_remaining_characters
|
python
|
RayVentura/ShortGPT
|
shortGPT/api_utils/eleven_api.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/api_utils/eleven_api.py
|
MIT
|
def sync_local_assets(cls):
"""
Loads all local assets from the static-assets folder into the database.
"""
local_assets = cls.local_assets._get()
local_paths = {asset['path'] for asset in local_assets.values()}
for path in Path('public').rglob('*'):
if path.is_file() and str(path) not in local_paths:
cls._add_local_asset_from_path(path)
|
Loads all local assets from the static-assets folder into the database.
|
sync_local_assets
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def get_asset_link(cls, key: str) -> str:
"""
Get the link to an asset.
Args:
key (str): Name of the asset.
Returns:
str: Link to the asset.
"""
if key in cls.local_assets._get():
return cls._update_local_asset_timestamp_and_get_link(key)
elif key in cls.remote_assets._get():
return cls._get_remote_asset_link(key)
else:
raise ValueError(f"Asset '{key}' does not exist in the database.")
|
Get the link to an asset.
Args:
key (str): Name of the asset.
Returns:
str: Link to the asset.
|
get_asset_link
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def get_asset_duration(cls, key: str) -> str:
"""
Get the duration of an asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
"""
if key in cls.local_assets._get():
return cls._get_local_asset_duration(key)
elif key in cls.remote_assets._get():
return cls._get_remote_asset_duration(key)
else:
raise ValueError(f"Asset '{key}' does not exist in the database.")
|
Get the duration of an asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
|
get_asset_duration
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _remove_local_asset(cls, name: str):
"""
Remove a local asset from the database.
Args:
name (str): Name of the asset.
"""
asset = cls.local_assets._get(name)
if 'required' not in asset:
try:
Path(asset['path']).unlink()
except FileNotFoundError as e:
print(f"File not found: {e}")
cls.local_assets._delete(name)
|
Remove a local asset from the database.
Args:
name (str): Name of the asset.
|
_remove_local_asset
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _add_local_asset_from_path(cls, path: Path):
"""
Add a local asset to the database from a file path.
Args:
path (Path): Path to the asset.
"""
file_ext = path.suffix
if file_ext in AUDIO_EXTENSIONS:
asset_type = AssetType.AUDIO
elif file_ext in IMAGE_EXTENSIONS:
asset_type = AssetType.IMAGE
elif file_ext in VIDEO_EXTENSIONS:
asset_type = AssetType.VIDEO
else:
asset_type = AssetType.OTHER
cls.local_assets._save({
path.stem: {
"path": str(path),
"type": asset_type.value,
"ts": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
})
|
Add a local asset to the database from a file path.
Args:
path (Path): Path to the asset.
|
_add_local_asset_from_path
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _update_local_asset_timestamp_and_get_link(cls, key: str) -> str:
"""
Update the timestamp of a local asset and get its link.
Args:
key (str): Name of the asset.
Returns:
str: Link to the asset.
"""
asset = cls.local_assets._get(key)
asset['ts'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cls.local_assets._save({key: asset})
return asset['path']
|
Update the timestamp of a local asset and get its link.
Args:
key (str): Name of the asset.
Returns:
str: Link to the asset.
|
_update_local_asset_timestamp_and_get_link
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _get_remote_asset_link(cls, key: str) -> str:
"""
Get the link to a remote asset.
Args:
key (str): Name of the asset.
Returns:
str: Link to the asset.
"""
asset = cls.remote_assets._get(key)
asset['ts'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cls.remote_assets._save({key: asset})
if 'youtube' in asset['url']:
return cls._get_youtube_asset_link(key, asset)
return asset['url']
|
Get the link to a remote asset.
Args:
key (str): Name of the asset.
Returns:
str: Link to the asset.
|
_get_remote_asset_link
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _get_local_asset_duration(cls, key: str) -> str:
"""
Get the duration of a local asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
"""
asset = cls.local_assets._get(key)
asset['ts'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cls.local_assets._save({key: asset})
if 'duration' not in asset or asset['duration'] is None:
_, duration = cls._update_local_asset_duration(key)
return duration
return asset['duration']
|
Get the duration of a local asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
|
_get_local_asset_duration
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _get_remote_asset_duration(cls, key: str) -> str:
"""
Get the duration of a remote asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
"""
asset = cls.remote_assets._get(key)
asset['ts'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cls.remote_assets._save({key: asset})
if 'duration' in asset and asset['duration'] is not None:
return asset['duration']
_, duration = cls._update_youtube_asset_duration(key)
return duration
|
Get the duration of a remote asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
|
_get_remote_asset_duration
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _update_local_asset_duration(cls, key: str) -> str:
"""
Update the duration of a local asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
"""
asset = cls.local_assets._get(key)
path = Path(asset['path'])
if any(t in asset['type'] for t in ['audio', 'video', 'music']):
_, duration = get_asset_duration(str(path))
asset['duration'] = duration
else:
duration = None
cls.local_assets._save({key: asset})
return str(path), duration
|
Update the duration of a local asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
|
_update_local_asset_duration
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _update_youtube_asset_duration(cls, key: str) -> str:
"""
Update the duration of a Youtube asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
"""
asset = cls.remote_assets._get(key)
youtube_url = asset['url']
remote_url, duration = get_asset_duration(youtube_url, isVideo="video" in asset['type'])
asset.update({
"remote_url": base64.b64encode(remote_url.encode()).decode('utf-8'),
"duration": duration,
})
cls.remote_assets._save({key: asset})
return remote_url, duration
|
Update the duration of a Youtube asset.
Args:
key (str): Name of the asset.
Returns:
str: Duration of the asset.
|
_update_youtube_asset_duration
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def _get_youtube_asset_link(cls, key: str, asset: dict) -> str:
"""
Get the link to a Youtube asset.
Args:
key (str): Name of the asset.
asset (dict): Asset data.
Returns:
str: Link to the asset.
"""
if any(t in asset['type'] for t in ['audio', 'music']):
local_audio_file, duration = downloadYoutubeAudio(asset['url'], f"public/{key}.wav")
cls.local_assets._save({
key: {
'path': local_audio_file,
'duration': duration,
'type': 'audio',
'ts': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
})
return local_audio_file
if 'remote_url' in asset:
asset['remote_url'] = base64.b64decode(asset['remote_url']).decode('utf-8')
expire_timestamp_match = re.search(r"expire=(\d+)", asset['remote_url'])
not_expired = expire_timestamp_match and int(expire_timestamp_match.group(1)) > time.time() + 1800
if not_expired and asset.get('duration') is not None :
return asset['remote_url']
remote_url, _ = cls._update_youtube_asset_duration(key)
return remote_url
|
Get the link to a Youtube asset.
Args:
key (str): Name of the asset.
asset (dict): Asset data.
Returns:
str: Link to the asset.
|
_get_youtube_asset_link
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/asset_db.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/asset_db.py
|
MIT
|
def read_yaml_config(file_path: str) -> dict:
"""Reads and returns the contents of a YAML file as dictionary"""
with open(file_path, 'r') as file:
contents = yaml.safe_load(file)
return contents
|
Reads and returns the contents of a YAML file as dictionary
|
read_yaml_config
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/config.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/config.py
|
MIT
|
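A short usage sketch for `read_yaml_config`; the file name matches the `public.yaml` used elsewhere in this repo, but the example contents are assumed.
# public.yaml is assumed to look roughly like:
#   local-assets:
#     subscribe animation: public/subscribe-animation.mp4
config = read_yaml_config("public.yaml")
print(config.get("local-assets"))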
def load_editing_assets() -> dict:
"""Loads all local assets from the static-assets folder specified in the yaml_config"""
yaml_config = read_yaml_config("public.yaml")
if yaml_config['local-assets'] == None:
yaml_config['local-assets'] = {}
# Create a copy of the dictionary before iterating over it
local_paths = []
if yaml_config['local-assets'] != {}:
local_assets = yaml_config['local-assets'].copy()
# Removing local paths that don't exist
for key in local_assets:
asset = local_assets[key]
if(type(asset) == str):
filePath = local_assets[key]
else:
filePath = local_assets[key]['path']
if not os.path.exists(filePath):
del yaml_config['local-assets'][key]
else:
local_paths.append(filePath)
folder_path = 'public'
for foldername, subfolders, filenames in os.walk(folder_path):
for filename in filenames:
file_path = os.path.join(foldername, filename).replace("\\", "/")
if not file_path in local_paths:
yaml_config['local-assets'][filename] = file_path
write_yaml_config("public.yaml", yaml_config)
return yaml_config
|
Loads all local assets from the static-assets folder specified in the yaml_config
|
load_editing_assets
|
python
|
RayVentura/ShortGPT
|
shortGPT/config/config.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/config/config.py
|
MIT
|
def extract_random_clip_from_video(video_url, video_duration, clip_duration, output_file):
"""Extracts a clip from a video using a signed URL.
Args:
video_url (str): The signed URL of the video.
video_duration (int): Duration of the video in seconds.
clip_duration (int): The duration of the clip in seconds.
output_file (str): The output file path for the extracted clip.
"""
if not video_duration:
raise Exception("Could not get video duration")
if not video_duration*0.7 > 120:
raise Exception("Video too short")
start_time = video_duration*0.15 + random.random()* (0.7*video_duration-clip_duration)
command = [
'ffmpeg',
'-loglevel', 'error',
'-ss', str(start_time),
'-t', str(clip_duration),
'-i', video_url,
'-c:v', 'libx264',
'-preset', 'ultrafast',
output_file
]
subprocess.run(command, check=True)
if not os.path.exists(output_file):
raise Exception("Random clip failed to be written")
return output_file
|
Extracts a clip from a video using a signed URL.
Args:
video_url (str): The signed URL of the video.
video_duration (int): Duration of the video in seconds.
clip_duration (int): The duration of the clip in seconds.
output_file (str): The output file path for the extracted clip.
|
extract_random_clip_from_video
|
python
|
RayVentura/ShortGPT
|
shortGPT/editing_utils/handle_videos.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/editing_utils/handle_videos.py
|
MIT
|
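An illustrative call to `extract_random_clip_from_video`; it assumes ffmpeg is on PATH and the URL is reachable, and every value below is a placeholder.
clip_path = extract_random_clip_from_video(
    video_url="https://example.com/source.mp4",  # placeholder signed URL
    video_duration=600,   # seconds; must satisfy 0.7 * duration > 120
    clip_duration=30,
    output_file="clip.mp4",
)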
def _generateScript(self):
"""
Implements Abstract parent method to generate the script for the reddit short
"""
self.logger("Generating reddit question & entertaining story")
self._db_script, _ = self.__getRealisticStory(max_tries=1)
self._db_reddit_question = reddit_gpt.getQuestionFromThread(
self._db_script)
|
Implements Abstract parent method to generate the script for the reddit short
|
_generateScript
|
python
|
RayVentura/ShortGPT
|
shortGPT/engine/reddit_short_engine.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/engine/reddit_short_engine.py
|
MIT
|
def _prepareCustomAssets(self):
"""
Override parent method to generate custom reddit image asset
"""
self.logger("Rendering short: (3/4) preparing custom reddit image...")
self.verifyParameters(question=self._db_reddit_question,)
title, header, n_comments, n_upvotes = reddit_gpt.generateRedditPostMetadata(
self._db_reddit_question)
imageEditingEngine = EditingEngine()
imageEditingEngine.ingestFlow(Flow.WHITE_REDDIT_IMAGE_FLOW, {
"username_text": header,
"ncomments_text": n_comments,
"nupvote_text": n_upvotes,
"question_text": title
})
imageEditingEngine.renderImage(
self.dynamicAssetDir+"redditThreadImage.png")
self._db_reddit_thread_image = self.dynamicAssetDir+"redditThreadImage.png"
|
Override parent method to generate custom reddit image asset
|
_prepareCustomAssets
|
python
|
RayVentura/ShortGPT
|
shortGPT/engine/reddit_short_engine.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/engine/reddit_short_engine.py
|
MIT
|
def _editAndRenderShort(self):
"""
Override parent method to customize video rendering sequence by adding a Reddit image
"""
self.verifyParameters(
voiceover_audio_url=self._db_audio_path,
video_duration=self._db_background_video_duration,
music_url=self._db_background_music_url)
outputPath = self.dynamicAssetDir+"rendered_video.mp4"
if not (os.path.exists(outputPath)):
self.logger("Rendering short: Starting automated editing...")
videoEditor = EditingEngine()
videoEditor.addEditingStep(EditingStep.ADD_VOICEOVER_AUDIO, {
'url': self._db_audio_path})
videoEditor.addEditingStep(EditingStep.ADD_BACKGROUND_MUSIC, {'url': self._db_background_music_url,
'loop_background_music': self._db_voiceover_duration,
"volume_percentage": 0.11})
videoEditor.addEditingStep(EditingStep.CROP_1920x1080, {
'url': self._db_background_trimmed})
videoEditor.addEditingStep(EditingStep.ADD_SUBSCRIBE_ANIMATION, {'url': AssetDatabase.get_asset_link('subscribe animation')})
if self._db_watermark:
videoEditor.addEditingStep(EditingStep.ADD_WATERMARK, {
'text': self._db_watermark})
videoEditor.addEditingStep(EditingStep.ADD_REDDIT_IMAGE, {
'url': self._db_reddit_thread_image})
caption_type = EditingStep.ADD_CAPTION_SHORT_ARABIC if self._db_language == Language.ARABIC.value else EditingStep.ADD_CAPTION_SHORT
for timing, text in self._db_timed_captions:
videoEditor.addEditingStep(caption_type, {'text': text.upper(),
'set_time_start': timing[0],
'set_time_end': timing[1]})
if self._db_num_images:
for timing, image_url in self._db_timed_image_urls:
videoEditor.addEditingStep(EditingStep.SHOW_IMAGE, {'url': image_url,
'set_time_start': timing[0],
'set_time_end': timing[1]})
videoEditor.renderVideo(outputPath, logger= self.logger if self.logger is not self.default_logger else None)
self._db_video_path = outputPath
|
Override parent method to customize video rendering sequence by adding a Reddit image
|
_editAndRenderShort
|
python
|
RayVentura/ShortGPT
|
shortGPT/engine/reddit_short_engine.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/engine/reddit_short_engine.py
|
MIT
|
def getVideoSearchQueriesTimed(captions_timed):
"""
Generate timed video search queries based on caption timings.
Returns list of [time_range, search_queries] pairs.
"""
err = ""
for _ in range(4):
try:
# Get total video duration from last caption
end_time = captions_timed[-1][0][1]
# Load and prepare prompt
chat, system = gpt_utils.load_local_yaml_prompt('prompt_templates/editing_generate_videos.yaml')
prompt = chat.replace("<<TIMED_CAPTIONS>>", f"{captions_timed}")
# Get response and parse JSON
res = gpt_utils.llm_completion(chat_prompt=prompt, system=system)
data = extractJsonFromString(res)
# Convert to expected format
formatted_queries = []
for segment in data["video_segments"]:
time_range = segment["time_range"]
queries = segment["queries"]
# Validate time range
if not (0 <= time_range[0] < time_range[1] <= end_time):
continue
# Ensure exactly 3 queries
while len(queries) < 3:
queries.append(queries[-1])
queries = queries[:3]
formatted_queries.append([time_range, queries])
# Verify coverage
if not formatted_queries:
raise ValueError("Generated segments don't cover full video duration")
return formatted_queries
except Exception as e:
err = str(e)
print(f"Error generating video search queries {err}")
raise Exception(f"Failed to generate video search queries {err}")
|
Generate timed video search queries based on caption timings.
Returns list of [time_range, search_queries] pairs.
|
getVideoSearchQueriesTimed
|
python
|
RayVentura/ShortGPT
|
shortGPT/gpt/gpt_editing.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/gpt/gpt_editing.py
|
MIT
|
def num_tokens_from_messages(texts, model="gpt-4o-mini"):
"""Returns the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-4o-mini": # note: future models may deviate from this
if isinstance(texts, str):
texts = [texts]
score = 0
for text in texts:
score += 4 + len(encoding.encode(text))
return score
else:
raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information""")
|
Returns the number of tokens used by a list of messages.
|
num_tokens_from_messages
|
python
|
RayVentura/ShortGPT
|
shortGPT/gpt/gpt_utils.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/gpt/gpt_utils.py
|
MIT
|
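A quick sketch of `num_tokens_from_messages`; it requires the tiktoken package, and the messages are made up for illustration.
msgs = ["You are a helpful assistant.", "Summarize this video script in one line."]
print(num_tokens_from_messages(msgs, model="gpt-4o-mini"))
# Under this heuristic, each message costs 4 tokens of overhead plus its encoded length.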
def display_header():
'''Display the header of the CLI'''
CLI.display_green_text('''
.d88888b dP dP .88888. 888888ba d888888P .88888. 888888ba d888888P
88. "' 88 88 d8' `8b 88 `8b 88 d8' `88 88 `8b 88
`Y88888b. 88aaaaa88 88 88 88aaaa8P' 88 88 88aaaa8P' 88
`8b 88 88 88 88 88 `8b. 88 88 YP88 88 88
d8' .8P 88 88 Y8. .8P 88 88 88 Y8. .88 88 88
Y88888P dP dP `8888P' dP dP dP `88888' dP dP
''')
CLI.display_green_text("Welcome to ShortGPT! This is an experimental AI framework to automate all aspects of content creation.")
print("")
CLI.display_requirements_check()
|
Display the header of the CLI
|
display_header
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/cli.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/cli.py
|
MIT
|
def display_requirements_check():
'''Display information about the system and requirements'''
print("Checking requirements...")
requirements_manager = Requirements()
print(" - Requirements : List of requirements and installed version:")
all_req_versions = requirements_manager.get_all_requirements_versions()
for req_name, req_version in all_req_versions.items():
if req_version is None:
CLI.display_red_text(f"---> Error : {req_name} is not installed")
print(f"{req_name}=={req_version}")
print("")
# Skipping for now, because it assumes packages have the same name as the python import itself, which is often not true.
# if not requirements_manager.is_all_requirements_installed():
# CLI.display_red_text("Error : Some requirements are missing")
# print("Please install the missing requirements using the following command :")
# print("pip install -r requirements.txt")
# print("")
# requirements_manager.get_all_requirements_not_installed()
# print("")
|
Display information about the system and requirements
|
display_requirements_check
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/cli.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/cli.py
|
MIT
|
def display_error(error_message, stack_trace):
'''Display an error message in the console'''
print(CLI.bcolors.FAIL + "ERROR : " + error_message + CLI.bcolors.ENDC)
print(stack_trace)
print("If the problem persists, don't hesitate to contact our support. We're here to assist you.")
print("Get Help on Discord : https://discord.gg/qn2WJaRH")
|
Display an error message in the console
|
display_error
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/cli.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/cli.py
|
MIT
|
def get_list_requirements(self):
'''Get the list of requirements packages from requirements.txt'''
with open(self.requirements_path) as f:
requirements = f.read().splitlines()
# remove comments and empty lines
requirements = [line for line in requirements if not line.startswith('#')]
requirements = [line for line in requirements if line.strip()]
# drop requirements installed from URLs/VCS (git+, git, http(s), ssh)
requirements = [line for line in requirements
                if not line.startswith(('git+', 'git', 'http', 'https', 'ssh'))]
# sort alphabetically
requirements.sort()
return requirements
|
Get the list of requirements packages from requirements.txt
|
get_list_requirements
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/requirements.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/requirements.py
|
MIT
|
def is_all_requirements_installed(self):
'''Check if all requirements are installed'''
requirements = self.get_list_requirements()
for requirement in requirements:
if not self.is_requirement_installed(requirement):
return False
return True
|
Check if all requirements are installed
|
is_all_requirements_installed
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/requirements.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/requirements.py
|
MIT
|
def get_all_requirements_versions(self):
'''Get the versions of all requirements'''
requirements = self.get_list_requirements()
versions = {}
for requirement in requirements:
versions[requirement] = self.get_version(requirement)
return versions
|
Get the versions of all requirements
|
get_all_requirements_versions
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/requirements.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/requirements.py
|
MIT
|
def get_all_requirements_not_installed(self):
'''Get the list of all requirements not installed'''
requirements = self.get_list_requirements()
not_installed = {}
for requirement in requirements:
# if version is None then the package is not installed
if self.get_version(requirement) is None:
not_installed[requirement] = self.get_version(requirement)
return not_installed
|
Get the list of all requirements not installed
|
get_all_requirements_not_installed
|
python
|
RayVentura/ShortGPT
|
shortGPT/utils/requirements.py
|
https://github.com/RayVentura/ShortGPT/blob/master/shortGPT/utils/requirements.py
|
MIT
|
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
# Usernames can't be shorter than minlen
if len(username) < minlen:
return False
# Usernames can only use letters, numbers, dots and underscores
if not re.match('^[a-z0-9._]*$', username):
return False
# Usernames can't begin with a number
if username[0].isnumeric():
return False
return True
|
Checks if the received username matches the required conditions.
|
validate_user
|
python
|
google/it-cert-automation-practice
|
Course3/Lab4/validations.py
|
https://github.com/google/it-cert-automation-practice/blob/master/Course3/Lab4/validations.py
|
Apache-2.0
|
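A few illustrative checks of `validate_user` (its module is expected to `import re`); the inputs are chosen here to exercise each rule.
print(validate_user("blue.kale", 3))   # True
print(validate_user("1egg", 1))        # False: starts with a number
print(validate_user("red_quinoa", 4))  # True
print(validate_user("Ki#wi", 4))       # False: uppercase letters and '#' fail the regex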
def __getitem__(self, idx):
"""
Output:
- target: dict of multiple items
- boxes: Tensor[num_box, 4]. \
Init type: x0,y0,x1,y1. unnormalized data.
Final type: cx,cy,w,h. normalized data.
"""
try:
img, target = super(CocoDetection, self).__getitem__(idx)
except:
print("Error idx: {}".format(idx))
idx += 1
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = {'image_id': image_id, 'annotations': target}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
# convert to needed format
if self.aux_target_hacks is not None:
for hack_runner in self.aux_target_hacks:
target, img = hack_runner(target, img=img)
return img, target
|
Output:
- target: dict of multiple items
- boxes: Tensor[num_box, 4]. Init type: x0,y0,x1,y1. unnormalized data.
Final type: cx,cy,w,h. normalized data.
|
__getitem__
|
python
|
IDEA-Research/DINO
|
datasets/coco.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/coco.py
|
Apache-2.0
|
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
return p.imgIds, evalImgs
|
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
|
evaluate
|
python
|
IDEA-Research/DINO
|
datasets/coco_eval.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/coco_eval.py
|
Apache-2.0
|
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
row = self.tsv.seek(index)
image_data = base64.b64decode(row[-1])
image = Image.open(io.BytesIO(image_data))
image = image.convert('RGB')
target = int(row[1])
if self.transform is not None:
img = self.transform(image)
else:
img = image
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
|
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
|
__getitem__
|
python
|
IDEA-Research/DINO
|
datasets/dataset.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/dataset.py
|
Apache-2.0
|
def slcopytree(src, dst, symlinks=False, ignore=None, copy_function=shutil.copyfile,
ignore_dangling_symlinks=False):
"""
modified from shutil.copytree without copystat.
Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, shutil.copyfile() is used, but any
function that supports the same signature (like shutil.copy2()) can be used.
"""
errors = []
if os.path.isdir(src):
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
if os.path.isdir(srcname):
slcopytree(srcname, dstname, symlinks, ignore,
copy_function)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
slcopytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
else:
copy_function(src, dst)
if errors:
raise Error(errors)
return dst
|
modified from shutil.copytree without copystat.
Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
|
slcopytree
|
python
|
IDEA-Research/DINO
|
datasets/data_util.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/data_util.py
|
Apache-2.0
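A minimal, hedged sketch of the ignore-callable contract described above; since slcopytree shares the signature of shutil.copytree, the standard-library function is used here so the snippet runs without the DINO repo on the path.

import os
import shutil
import tempfile

def ignore_hidden(src, names):
    # Return the subset of `names` (children of `src`) that should be skipped.
    return {n for n in names if n.startswith('.')}

src = tempfile.mkdtemp()
open(os.path.join(src, 'keep.txt'), 'w').close()
open(os.path.join(src, '.skip'), 'w').close()
dst = os.path.join(tempfile.mkdtemp(), 'copy')  # destination must not already exist
shutil.copytree(src, dst, ignore=ignore_hidden)
print(sorted(os.listdir(dst)))  # ['keep.txt']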
|
def intersect(boxes1, boxes2):
'''
Find intersection of every box combination between two sets of box
boxes1: bounding boxes 1, a tensor of dimensions (n1, 4)
boxes2: bounding boxes 2, a tensor of dimensions (n2, 4)
Out: Intersection each of boxes1 with respect to each of boxes2,
a tensor of dimensions (n1, n2)
'''
n1 = boxes1.size(0)
n2 = boxes2.size(0)
max_xy = torch.min(boxes1[:, 2:].unsqueeze(1).expand(n1, n2, 2),
boxes2[:, 2:].unsqueeze(0).expand(n1, n2, 2))
min_xy = torch.max(boxes1[:, :2].unsqueeze(1).expand(n1, n2, 2),
boxes2[:, :2].unsqueeze(0).expand(n1, n2, 2))
inter = torch.clamp(max_xy - min_xy , min=0) # (n1, n2, 2)
return inter[:, :, 0] * inter[:, :, 1] #(n1, n2)
|
Find intersection of every box combination between two sets of box
boxes1: bounding boxes 1, a tensor of dimensions (n1, 4)
boxes2: bounding boxes 2, a tensor of dimensions (n2, 4)
Out: Intersection each of boxes1 with respect to each of boxes2,
a tensor of dimensions (n1, n2)
|
intersect
|
python
|
IDEA-Research/DINO
|
datasets/random_crop.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/random_crop.py
|
Apache-2.0
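A small worked example of the same pairwise intersection, written inline with broadcasting so it runs without importing the repo (a sketch of the computation, not the repo's API).

import torch

boxes1 = torch.tensor([[0., 0., 4., 4.]])                      # (n1, 4) in xyxy
boxes2 = torch.tensor([[2., 2., 6., 6.], [5., 5., 7., 7.]])    # (n2, 4)
max_xy = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])   # upper-right of the overlap
min_xy = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])   # lower-left of the overlap
inter = (max_xy - min_xy).clamp(min=0).prod(dim=-1)
print(inter)  # tensor([[4., 0.]]): a 2x2 overlap with the first box, none with the second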
|
def random_crop(image, boxes, labels, difficulties=None):
'''
image: A PIL image
boxes: Bounding boxes, a tensor of dimensions (#objects, 4)
labels: labels of object, a tensor of dimensions (#objects)
difficulties: difficulties of detected objects, a tensor of dimensions (#objects)
Out: cropped image, new boxes, new labels, new difficulties
'''
if type(image) == PIL.Image.Image:
image = F.to_tensor(image)
original_h = image.size(1)
original_w = image.size(2)
while True:
mode = random.choice([0.1, 0.3, 0.5, 0.9, None])
if mode is None:
return F.to_pil_image(image), boxes, labels, difficulties
new_image = image
new_boxes = boxes
new_difficulties = difficulties
new_labels = labels
for _ in range(50):
# Crop dimensions: [0.3, 1] of original dimensions
new_h = random.uniform(0.3*original_h, original_h)
new_w = random.uniform(0.3*original_w, original_w)
# Aspect ratio constraint b/t .5 & 2
if new_h/new_w < 0.5 or new_h/new_w > 2:
continue
#Crop coordinate
left = random.uniform(0, original_w - new_w)
right = left + new_w
top = random.uniform(0, original_h - new_h)
bottom = top + new_h
crop = torch.FloatTensor([int(left), int(top), int(right), int(bottom)])
# Calculate IoU between the crop and the bounding boxes
overlap = find_IoU(crop.unsqueeze(0), boxes) #(1, #objects)
overlap = overlap.squeeze(0)
# If not a single bounding box has an IoU greater than the minimum, try again
if overlap.shape[0] == 0:
continue
if overlap.max().item() < mode:
continue
#Crop
new_image = image[:, int(top):int(bottom), int(left):int(right)] #(3, new_h, new_w)
#Center of bounding boxes
center_bb = (boxes[:, :2] + boxes[:, 2:])/2.0
#Find bounding boxes whose centers lie inside the crop
center_in_crop = (center_bb[:, 0] >left) * (center_bb[:, 0] < right
) *(center_bb[:, 1] > top) * (center_bb[:, 1] < bottom) #( #objects)
if not center_in_crop.any():
continue
#take matching bounding box
new_boxes = boxes[center_in_crop, :]
#take matching labels
new_labels = labels[center_in_crop]
#take matching difficulties
if difficulties is not None:
new_difficulties = difficulties[center_in_crop]
else:
new_difficulties = None
#Use the box left and top corner or the crop's
new_boxes[:, :2] = torch.max(new_boxes[:, :2], crop[:2])
#adjust to crop
new_boxes[:, :2] -= crop[:2]
new_boxes[:, 2:] = torch.min(new_boxes[:, 2:],crop[2:])
#adjust to crop
new_boxes[:, 2:] -= crop[:2]
return F.to_pil_image(new_image), new_boxes, new_labels, new_difficulties
|
image: A PIL image
boxes: Bounding boxes, a tensor of dimensions (#objects, 4)
labels: labels of object, a tensor of dimensions (#objects)
difficulties: difficulties of detected objects, a tensor of dimensions (#objects)
Out: cropped image, new boxes, new labels, new difficulties
|
random_crop
|
python
|
IDEA-Research/DINO
|
datasets/random_crop.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/random_crop.py
|
Apache-2.0
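An inline sketch of the "box center inside the crop" test used above, with made-up boxes so it runs standalone.

import torch

boxes = torch.tensor([[10., 10., 50., 50.], [200., 200., 260., 260.]])  # xyxy
left, top, right, bottom = 0., 0., 100., 100.                           # crop window
center = (boxes[:, :2] + boxes[:, 2:]) / 2.0
center_in_crop = (center[:, 0] > left) & (center[:, 0] < right) & \
                 (center[:, 1] > top) & (center[:, 1] < bottom)
print(center_in_crop)  # tensor([ True, False])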
|
def __call__(self, img, target):
"""
img (PIL Image or Tensor): Image to be adjusted.
"""
_contrast_factor = ((random.random() + 1.0) / 2.0) * self.contrast_factor
img = F.adjust_contrast(img, _contrast_factor)
return img, target
|
img (PIL Image or Tensor): Image to be adjusted.
|
__call__
|
python
|
IDEA-Research/DINO
|
datasets/sltransform.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/sltransform.py
|
Apache-2.0
|
def __call__(self, img, target):
"""
img (PIL Image or Tensor): Image to be adjusted.
"""
_brightness_factor = ((random.random() + 1.0) / 2.0) * self.brightness_factor
img = F.adjust_brightness(img, _brightness_factor)
return img, target
|
img (PIL Image or Tensor): Image to be adjusted.
|
__call__
|
python
|
IDEA-Research/DINO
|
datasets/sltransform.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/sltransform.py
|
Apache-2.0
|
def lighting_noise(image):
'''
color channel swap in image
image: A PIL image
'''
new_image = image
perms = ((0, 1, 2), (0, 2, 1), (1, 0, 2),
(1, 2, 0), (2, 0, 1), (2, 1, 0))
swap = perms[random.randint(0, len(perms)- 1)]
new_image = F.to_tensor(new_image)
new_image = new_image[swap, :, :]
new_image = F.to_pil_image(new_image)
return new_image
|
color channel swap in image
image: A PIL image
|
lighting_noise
|
python
|
IDEA-Research/DINO
|
datasets/sltransform.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/sltransform.py
|
Apache-2.0
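An inline sketch of the same channel-swap step applied to a tensor directly, skipping the PIL round trip (illustrative only).

import random
import torch

img = torch.rand(3, 32, 32)   # C, H, W tensor standing in for the converted PIL image
perms = ((0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0))
swap = random.choice(perms)
swapped = img[list(swap), :, :]   # reorder the color channels
print(swap, swapped.shape)        # e.g. (2, 0, 1) torch.Size([3, 32, 32])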
|
def rotate(image, boxes, angle):
'''
Rotate image and bounding box
image: A PIL image (w, h)
boxes: A tensors of dimensions (#objects, 4)
Out: rotated image (w, h), rotated boxes
'''
new_image = image.copy()
new_boxes = boxes.clone()
#Rotate image, expand = True
w = image.width
h = image.height
cx = w/2
cy = h/2
new_image = new_image.rotate(angle, expand=True)
angle = np.radians(angle)
alpha = np.cos(angle)
beta = np.sin(angle)
#Get affine matrix
AffineMatrix = torch.tensor([[alpha, beta, (1-alpha)*cx - beta*cy],
[-beta, alpha, beta*cx + (1-alpha)*cy]])
#Rotation boxes
box_width = (boxes[:,2] - boxes[:,0]).reshape(-1,1)
box_height = (boxes[:,3] - boxes[:,1]).reshape(-1,1)
#Get corners for boxes
x1 = boxes[:,0].reshape(-1,1)
y1 = boxes[:,1].reshape(-1,1)
x2 = x1 + box_width
y2 = y1
x3 = x1
y3 = y1 + box_height
x4 = boxes[:,2].reshape(-1,1)
y4 = boxes[:,3].reshape(-1,1)
corners = torch.stack((x1,y1,x2,y2,x3,y3,x4,y4), dim= 1)
# corners.reshape(-1, 8) #Tensors of dimensions (#objects, 8)
corners = corners.reshape(-1,2) #Tensors of dimension (4* #objects, 2)
corners = torch.cat((corners, torch.ones(corners.shape[0], 1)), dim= 1) #(Tensors of dimension (4* #objects, 3))
cos = np.abs(AffineMatrix[0, 0])
sin = np.abs(AffineMatrix[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
AffineMatrix[0, 2] += (nW / 2) - cx
AffineMatrix[1, 2] += (nH / 2) - cy
#Apply affine transform
rotate_corners = torch.mm(AffineMatrix, corners.t().to(torch.float64)).t()
rotate_corners = rotate_corners.reshape(-1,8)
x_corners = rotate_corners[:,[0,2,4,6]]
y_corners = rotate_corners[:,[1,3,5,7]]
#Get (x_min, y_min, x_max, y_max)
x_min, _ = torch.min(x_corners, dim= 1)
x_min = x_min.reshape(-1, 1)
y_min, _ = torch.min(y_corners, dim= 1)
y_min = y_min.reshape(-1, 1)
x_max, _ = torch.max(x_corners, dim= 1)
x_max = x_max.reshape(-1, 1)
y_max, _ = torch.max(y_corners, dim= 1)
y_max = y_max.reshape(-1, 1)
new_boxes = torch.cat((x_min, y_min, x_max, y_max), dim= 1)
scale_x = new_image.width / w
scale_y = new_image.height / h
#Resize new image to (w, h)
new_image = new_image.resize((w, h))
#Resize boxes
new_boxes /= torch.Tensor([scale_x, scale_y, scale_x, scale_y])
new_boxes[:, 0] = torch.clamp(new_boxes[:, 0], 0, w)
new_boxes[:, 1] = torch.clamp(new_boxes[:, 1], 0, h)
new_boxes[:, 2] = torch.clamp(new_boxes[:, 2], 0, w)
new_boxes[:, 3] = torch.clamp(new_boxes[:, 3], 0, h)
return new_image, new_boxes
|
Rotate image and bounding box
image: A PIL image (w, h)
boxes: A tensors of dimensions (#objects, 4)
Out: rotated image (w, h), rotated boxes
|
rotate
|
python
|
IDEA-Research/DINO
|
datasets/sltransform.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/sltransform.py
|
Apache-2.0
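A quick numeric check of the affine matrix built above, for a 90-degree rotation of a 100x60 image. This is an inline sketch only, and it shows the corner position before the expand-translation terms are added.

import numpy as np
import torch

w, h, angle = 100, 60, 90
cx, cy = w / 2, h / 2
a = np.radians(angle)
alpha, beta = np.cos(a), np.sin(a)
M = torch.tensor([[alpha, beta, (1 - alpha) * cx - beta * cy],
                  [-beta, alpha, beta * cx + (1 - alpha) * cy]])
corner = torch.tensor([100., 0., 1.], dtype=M.dtype)  # top-right corner in homogeneous coords
print(M @ corner)  # ~tensor([ 20., -20.]): (100, 0) rotated about the image center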
|
def __call__(self, img, target, p=1.0):
"""
Input:
target['boxes']: xyxy, unnormalized data.
"""
boxes_raw = target['boxes']
labels_raw = target['labels']
img_np = np.array(img)
if self.transform and random.random() < p:
new_res = self.transform(image=img_np, bboxes=boxes_raw, class_labels=labels_raw) # transformed
boxes_new = torch.Tensor(new_res['bboxes']).to(boxes_raw.dtype).reshape_as(boxes_raw)
img_np = new_res['image']
labels_new = torch.Tensor(new_res['class_labels']).to(labels_raw.dtype)
img_new = Image.fromarray(img_np)
target['boxes'] = boxes_new
target['labels'] = labels_new
return img_new, target
|
Input:
target['boxes']: xyxy, unnormalized data.
|
__call__
|
python
|
IDEA-Research/DINO
|
datasets/sltransform.py
|
https://github.com/IDEA-Research/DINO/blob/master/datasets/sltransform.py
|
Apache-2.0
|
def register(self, module_build_function, module_name=None, force=False):
"""Register a module build function.
Args:
module_build_function (callable): Module build function to be registered.
"""
if not inspect.isfunction(module_build_function):
raise TypeError('module_build_function must be a function, but got {}'.format(
type(module_build_function)))
if module_name is None:
module_name = module_build_function.__name__
if not force and module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
self._module_dict[module_name] = module_build_function
return module_build_function
|
Register a module build function.
Args:
module_build_function (callable): Module build function to be registered.
|
register
|
python
|
IDEA-Research/DINO
|
models/registry.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/registry.py
|
Apache-2.0
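A self-contained sketch of the registry pattern this method implements; the real class in models/registry.py exposes the same register method plus lookup helpers, and the MiniRegistry name below is made up for illustration.

import inspect

class MiniRegistry:
    def __init__(self, name):
        self.name = name
        self._module_dict = {}

    def register(self, module_build_function, module_name=None, force=False):
        if not inspect.isfunction(module_build_function):
            raise TypeError('module_build_function must be a function, but got {}'.format(
                type(module_build_function)))
        if module_name is None:
            module_name = module_build_function.__name__
        if not force and module_name in self._module_dict:
            raise KeyError('{} is already registered in {}'.format(module_name, self.name))
        self._module_dict[module_name] = module_build_function
        return module_build_function

MODELS = MiniRegistry('models')

@MODELS.register
def build_toy_model(args=None):
    return 'toy'

print(list(MODELS._module_dict))  # ['build_toy_model']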
|
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*\text{num_heads}, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, out_dim=self.vdim)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, out_dim=self.vdim)
|
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the position
with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*\text{num_heads}, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/attention.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/attention.py
|
Apache-2.0
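A shape sketch using the stock torch.nn.MultiheadAttention, which follows the same (L, N, E) convention documented above; DINO's subclass mainly adds a separate value dimension (out_dim), so treat this as illustrative rather than a run of the repo's class.

import torch
import torch.nn as nn

L, S, N, E, H = 5, 7, 2, 16, 4
mha = nn.MultiheadAttention(embed_dim=E, num_heads=H)    # batch_first=False -> (L, N, E)
q = torch.rand(L, N, E)
k = torch.rand(S, N, E)
v = torch.rand(S, N, E)
key_padding_mask = torch.zeros(N, S, dtype=torch.bool)   # True would mask a key position
out, weights = mha(q, k, v, key_padding_mask=key_padding_mask)
print(out.shape, weights.shape)  # torch.Size([5, 2, 16]) torch.Size([2, 5, 7])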
|
def multi_head_attention_forward(query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
out_dim: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,
out_proj_weight, out_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward, tens_ops, query, key, value,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,
out_proj_bias, training=training, key_padding_mask=key_padding_mask,
need_weights=need_weights, attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
v_head_dim = out_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
q = query * scaling
k = key
v = value
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, v_head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == v_head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
# attn_output_weights = softmax(
# attn_output_weights, dim=-1)
attn_output_weights = softmax(
attn_output_weights - attn_output_weights.max(dim=-1, keepdim=True)[0], dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, v_head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, out_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
|
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
|
multi_head_attention_forward
|
python
|
IDEA-Research/DINO
|
models/dino/attention.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/attention.py
|
Apache-2.0
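A tiny inline sketch of the masking rule applied above: boolean masks are filled with -inf before the softmax, while float masks would simply be added to the attention logits.

import torch
import torch.nn.functional as F

scores = torch.zeros(1, 3, 4)                              # (bsz*num_heads, tgt_len, src_len)
bool_mask = torch.tensor([[False, False, True, True]])     # mask out the last two key positions
scores = scores.masked_fill(bool_mask.unsqueeze(1), float('-inf'))
attn = F.softmax(scores, dim=-1)
print(attn[0, 0])  # tensor([0.5000, 0.5000, 0.0000, 0.0000])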
|
def build_backbone(args):
"""
Useful args:
- backbone: backbone name
- lr_backbone:
- dilation
- return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
- backbone_freeze_keywords:
- use_checkpoint: for swin only for now
"""
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
if not train_backbone:
raise ValueError("Please set lr_backbone > 0")
return_interm_indices = args.return_interm_indices
assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]
backbone_freeze_keywords = args.backbone_freeze_keywords
use_checkpoint = getattr(args, 'use_checkpoint', False)
if args.backbone in ['resnet50', 'resnet101']:
backbone = Backbone(args.backbone, train_backbone, args.dilation,
return_interm_indices,
batch_norm=FrozenBatchNorm2d)
bb_num_channels = backbone.num_channels
elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:
pretrain_img_size = int(args.backbone.split('_')[-2])
backbone = build_swin_transformer(args.backbone, \
pretrain_img_size=pretrain_img_size, \
out_indices=tuple(return_interm_indices), \
dilation=args.dilation, use_checkpoint=use_checkpoint)
# freeze some layers
if backbone_freeze_keywords is not None:
for name, parameter in backbone.named_parameters():
for keyword in backbone_freeze_keywords:
if keyword in name:
parameter.requires_grad_(False)
break
if "backbone_dir" in args:
pretrained_dir = args.backbone_dir
PTDICT = {
'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',
'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',
'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',
}
pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])
checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']
from collections import OrderedDict
def key_select_function(keyname):
if 'head' in keyname:
return False
if args.dilation and 'layers.3' in keyname:
return False
return True
_tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})
_tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)
print(str(_tmp_st_output))
bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
elif args.backbone in ['convnext_xlarge_22k']:
backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)
bb_num_channels = backbone.dims[4 - len(return_interm_indices):]
else:
raise NotImplementedError("Unknown backbone {}".format(args.backbone))
assert len(bb_num_channels) == len(return_interm_indices), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"
model = Joiner(backbone, position_embedding)
model.num_channels = bb_num_channels
assert isinstance(bb_num_channels, List), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels))
return model
|
Useful args:
- backbone: backbone name
- lr_backbone:
- dilation
- return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
- backbone_freeze_keywords:
- use_checkpoint: for swin only for now
|
build_backbone
|
python
|
IDEA-Research/DINO
|
models/dino/backbone.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/backbone.py
|
Apache-2.0
|
def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None):
"""
Input:
- srcs: List of multi features [bs, ci, hi, wi]
- masks: List of multi masks [bs, hi, wi]
- refpoint_embed: [bs, num_dn, 4]. None in infer
- pos_embeds: List of multi pos embeds [bs, ci, hi, wi]
- tgt: [bs, num_dn, d_model]. None in infer
"""
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2) # bs, hw, c
mask = mask.flatten(1) # bs, hw
pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
if self.num_feature_levels > 1 and self.level_embed is not None:
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
else:
lvl_pos_embed = pos_embed
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# two stage
enc_topk_proposals = enc_refpoint_embed = None
#########################################################
# Begin Encoder
#########################################################
memory, enc_intermediate_output, enc_intermediate_refpoints = self.encoder(
src_flatten,
pos=lvl_pos_embed_flatten,
level_start_index=level_start_index,
spatial_shapes=spatial_shapes,
valid_ratios=valid_ratios,
key_padding_mask=mask_flatten,
ref_token_index=enc_topk_proposals, # bs, nq
ref_token_coord=enc_refpoint_embed, # bs, nq, 4
)
#########################################################
# End Encoder
# - memory: bs, \sum{hw}, c
# - mask_flatten: bs, \sum{hw}
# - lvl_pos_embed_flatten: bs, \sum{hw}, c
# - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
# - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
#########################################################
if self.two_stage_type =='standard':
if self.two_stage_learn_wh:
input_hw = self.two_stage_wh_embedding.weight[0]
else:
input_hw = None
output_memory, output_proposals = gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes, input_hw)
output_memory = self.enc_output_norm(self.enc_output(output_memory))
if self.two_stage_pat_embed > 0:
bs, nhw, _ = output_memory.shape
# output_memory: bs, n, 256; self.pat_embed_for_2stage: k, 256
output_memory = output_memory.repeat(1, self.two_stage_pat_embed, 1)
_pats = self.pat_embed_for_2stage.repeat_interleave(nhw, 0)
output_memory = output_memory + _pats
output_proposals = output_proposals.repeat(1, self.two_stage_pat_embed, 1)
if self.two_stage_add_query_num > 0:
assert refpoint_embed is not None
output_memory = torch.cat((output_memory, tgt), dim=1)
output_proposals = torch.cat((output_proposals, refpoint_embed), dim=1)
enc_outputs_class_unselected = self.enc_out_class_embed(output_memory)
enc_outputs_coord_unselected = self.enc_out_bbox_embed(output_memory) + output_proposals # (bs, \sum{hw}, 4) unsigmoid
topk = self.num_queries
topk_proposals = torch.topk(enc_outputs_class_unselected.max(-1)[0], topk, dim=1)[1] # bs, nq
# gather boxes
refpoint_embed_undetach = torch.gather(enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
refpoint_embed_ = refpoint_embed_undetach.detach()
init_box_proposal = torch.gather(output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)).sigmoid() # sigmoid
# gather tgt
tgt_undetach = torch.gather(output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model))
if self.embed_init_tgt:
tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model
else:
tgt_ = tgt_undetach.detach()
if refpoint_embed is not None:
refpoint_embed=torch.cat([refpoint_embed,refpoint_embed_],dim=1)
tgt=torch.cat([tgt,tgt_],dim=1)
else:
refpoint_embed,tgt=refpoint_embed_,tgt_
elif self.two_stage_type == 'no':
tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model
refpoint_embed_ = self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, 4
if refpoint_embed is not None:
refpoint_embed=torch.cat([refpoint_embed,refpoint_embed_],dim=1)
tgt=torch.cat([tgt,tgt_],dim=1)
else:
refpoint_embed,tgt=refpoint_embed_,tgt_
if self.num_patterns > 0:
tgt_embed = tgt.repeat(1, self.num_patterns, 1)
refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1)
tgt_pat = self.patterns.weight[None, :, :].repeat_interleave(self.num_queries, 1) # 1, n_q*n_pat, d_model
tgt = tgt_embed + tgt_pat
init_box_proposal = refpoint_embed_.sigmoid()
else:
raise NotImplementedError("unknown two_stage_type {}".format(self.two_stage_type))
#########################################################
# End preparing tgt
# - tgt: bs, NQ, d_model
# - refpoint_embed(unsigmoid): bs, NQ, d_model
#########################################################
#########################################################
# Begin Decoder
#########################################################
hs, references = self.decoder(
tgt=tgt.transpose(0, 1),
memory=memory.transpose(0, 1),
memory_key_padding_mask=mask_flatten,
pos=lvl_pos_embed_flatten.transpose(0, 1),
refpoints_unsigmoid=refpoint_embed.transpose(0, 1),
level_start_index=level_start_index,
spatial_shapes=spatial_shapes,
valid_ratios=valid_ratios,tgt_mask=attn_mask)
#########################################################
# End Decoder
# hs: n_dec, bs, nq, d_model
# references: n_dec+1, bs, nq, query_dim
#########################################################
#########################################################
# Begin postprocess
#########################################################
if self.two_stage_type == 'standard':
if self.two_stage_keep_all_tokens:
hs_enc = output_memory.unsqueeze(0)
ref_enc = enc_outputs_coord_unselected.unsqueeze(0)
init_box_proposal = output_proposals
else:
hs_enc = tgt_undetach.unsqueeze(0)
ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0)
else:
hs_enc = ref_enc = None
#########################################################
# End postprocess
# hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None
# ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None
#########################################################
return hs, references, hs_enc, ref_enc, init_box_proposal
# hs: (n_dec, bs, nq, d_model)
# references: sigmoid coordinates. (n_dec+1, bs, bq, 4)
# hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None
# ref_enc: sigmoid coordinates. \
# (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None
|
Input:
- srcs: List of multi features [bs, ci, hi, wi]
- masks: List of multi masks [bs, hi, wi]
- refpoint_embed: [bs, num_dn, 4]. None in infer
- pos_embeds: List of multi pos embeds [bs, ci, hi, wi]
- tgt: [bs, num_dn, d_model]. None in infer
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/deformable_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/deformable_transformer.py
|
Apache-2.0
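An inline sketch of the multi-level flattening done at the top of this forward pass, with random feature maps standing in for the backbone outputs.

import torch

bs, c = 2, 256
feats = [torch.rand(bs, c, 32, 32), torch.rand(bs, c, 16, 16)]          # two pyramid levels
spatial_shapes = torch.as_tensor([f.shape[-2:] for f in feats], dtype=torch.long)
src_flatten = torch.cat([f.flatten(2).transpose(1, 2) for f in feats], dim=1)  # bs, sum(hw), c
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)),
                               spatial_shapes.prod(1).cumsum(0)[:-1]))
print(src_flatten.shape, level_start_index.tolist())  # torch.Size([2, 1280, 256]) [0, 1024]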
|
def forward(self,
src: Tensor,
pos: Tensor,
spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor,
key_padding_mask: Tensor,
ref_token_index: Optional[Tensor]=None,
ref_token_coord: Optional[Tensor]=None
):
"""
Input:
- src: [bs, sum(hi*wi), 256]
- pos: pos embed for src. [bs, sum(hi*wi), 256]
- spatial_shapes: h,w of each level [num_level, 2]
- level_start_index: [num_level] start point of level in sum(hi*wi).
- valid_ratios: [bs, num_level, 2]
- key_padding_mask: [bs, sum(hi*wi)]
- ref_token_index: bs, nq
- ref_token_coord: bs, nq, 4
Intermediate:
- reference_points: [bs, sum(hi*wi), num_level, 2]
Outputs:
- output: [bs, sum(hi*wi), 256]
"""
if self.two_stage_type in ['no', 'standard', 'enceachlayer', 'enclayer1']:
assert ref_token_index is None
output = src
# preparation and reshape
if self.num_layers > 0:
if self.deformable_encoder:
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
intermediate_output = []
intermediate_ref = []
if ref_token_index is not None:
out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, self.d_model))
intermediate_output.append(out_i)
intermediate_ref.append(ref_token_coord)
# main process
for layer_id, layer in enumerate(self.layers):
# main process
dropflag = False
if self.enc_layer_dropout_prob is not None:
prob = random.random()
if prob < self.enc_layer_dropout_prob[layer_id]:
dropflag = True
if not dropflag:
if self.deformable_encoder:
output = layer(src=output, pos=pos, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, key_padding_mask=key_padding_mask)
else:
output = layer(src=output.transpose(0, 1), pos=pos.transpose(0, 1), key_padding_mask=key_padding_mask).transpose(0, 1)
if ((layer_id == 0 and self.two_stage_type in ['enceachlayer', 'enclayer1']) \
or (self.two_stage_type == 'enceachlayer')) \
and (layer_id != self.num_layers - 1):
output_memory, output_proposals = gen_encoder_output_proposals(output, key_padding_mask, spatial_shapes)
output_memory = self.enc_norm[layer_id](self.enc_proj[layer_id](output_memory))
# gather boxes
topk = self.num_queries
enc_outputs_class = self.class_embed[layer_id](output_memory)
ref_token_index = torch.topk(enc_outputs_class.max(-1)[0], topk, dim=1)[1] # bs, nq
ref_token_coord = torch.gather(output_proposals, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, 4))
output = output_memory
# aux loss
if (layer_id != self.num_layers - 1) and ref_token_index is not None:
out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, self.d_model))
intermediate_output.append(out_i)
intermediate_ref.append(ref_token_coord)
if self.norm is not None:
output = self.norm(output)
if ref_token_index is not None:
intermediate_output = torch.stack(intermediate_output) # n_enc/n_enc-1, bs, \sum{hw}, d_model
intermediate_ref = torch.stack(intermediate_ref)
else:
intermediate_output = intermediate_ref = None
return output, intermediate_output, intermediate_ref
|
Input:
- src: [bs, sum(hi*wi), 256]
- pos: pos embed for src. [bs, sum(hi*wi), 256]
- spatial_shapes: h,w of each level [num_level, 2]
- level_start_index: [num_level] start point of level in sum(hi*wi).
- valid_ratios: [bs, num_level, 2]
- key_padding_mask: [bs, sum(hi*wi)]
- ref_token_index: bs, nq
- ref_token_coord: bs, nq, 4
Intermediate:
- reference_points: [bs, sum(hi*wi), num_level, 2]
Outputs:
- output: [bs, sum(hi*wi), 256]
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/deformable_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/deformable_transformer.py
|
Apache-2.0
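An inline sketch of the gather pattern used above to pull the top-k token features out of the flattened encoder output.

import torch

bs, hw, d_model, nq = 2, 100, 256, 5
output = torch.rand(bs, hw, d_model)                      # flattened encoder tokens
scores = torch.rand(bs, hw)                               # per-token max class score
ref_token_index = torch.topk(scores, nq, dim=1)[1]        # bs, nq
out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, d_model))
print(out_i.shape)  # torch.Size([2, 5, 256])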
|
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
# for memory
level_start_index: Optional[Tensor] = None, # num_levels
spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
valid_ratios: Optional[Tensor] = None,
):
"""
Input:
- tgt: nq, bs, d_model
- memory: hw, bs, d_model
- pos: hw, bs, d_model
- refpoints_unsigmoid: nq, bs, 2/4
- valid_ratios/spatial_shapes: bs, nlevel, 2
"""
output = tgt
intermediate = []
reference_points = refpoints_unsigmoid.sigmoid()
ref_points = [reference_points]
for layer_id, layer in enumerate(self.layers):
# preprocess ref points
if self.training and self.decoder_query_perturber is not None and layer_id != 0:
reference_points = self.decoder_query_perturber(reference_points)
if self.deformable_decoder:
if reference_points.shape[-1] == 4:
reference_points_input = reference_points[:, :, None] \
* torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]
query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2
else:
query_sine_embed = gen_sineembed_for_position(reference_points) # nq, bs, 256*2
reference_points_input = None
# conditional query
raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256
pos_scale = self.query_scale(output) if self.query_scale is not None else 1
query_pos = pos_scale * raw_query_pos
if not self.deformable_decoder:
query_sine_embed = query_sine_embed[..., :self.d_model] * self.query_pos_sine_scale(output)
# modulated HW attentions
if not self.deformable_decoder and self.modulate_hw_attn:
refHW_cond = self.ref_anchor_head(output).sigmoid() # nq, bs, 2
query_sine_embed[..., self.d_model // 2:] *= (refHW_cond[..., 0] / reference_points[..., 2]).unsqueeze(-1)
query_sine_embed[..., :self.d_model // 2] *= (refHW_cond[..., 1] / reference_points[..., 3]).unsqueeze(-1)
# random drop some layers if needed
dropflag = False
if self.dec_layer_dropout_prob is not None:
prob = random.random()
if prob < self.dec_layer_dropout_prob[layer_id]:
dropflag = True
if not dropflag:
output = layer(
tgt = output,
tgt_query_pos = query_pos,
tgt_query_sine_embed = query_sine_embed,
tgt_key_padding_mask = tgt_key_padding_mask,
tgt_reference_points = reference_points_input,
memory = memory,
memory_key_padding_mask = memory_key_padding_mask,
memory_level_start_index = level_start_index,
memory_spatial_shapes = spatial_shapes,
memory_pos = pos,
self_attn_mask = tgt_mask,
cross_attn_mask = memory_mask
)
# iter update
if self.bbox_embed is not None:
reference_before_sigmoid = inverse_sigmoid(reference_points)
delta_unsig = self.bbox_embed[layer_id](output)
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = outputs_unsig.sigmoid()
# select # ref points
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
nq_now = new_reference_points.shape[0]
select_number = self.dec_layer_number[layer_id + 1]
if nq_now != select_number:
class_unselected = self.class_embed[layer_id](output) # nq, bs, 91
topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs
new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
if self.rm_detach and 'dec' in self.rm_detach:
reference_points = new_reference_points
else:
reference_points = new_reference_points.detach()
if self.use_detached_boxes_dec_out:
ref_points.append(reference_points)
else:
ref_points.append(new_reference_points)
intermediate.append(self.norm(output))
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
if nq_now != select_number:
output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid
return [
[itm_out.transpose(0, 1) for itm_out in intermediate],
[itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points]
]
|
Input:
- tgt: nq, bs, d_model
- memory: hw, bs, d_model
- pos: hw, bs, d_model
- refpoints_unsigmoid: nq, bs, 2/4
- valid_ratios/spatial_shapes: bs, nlevel, 2
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/deformable_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/deformable_transformer.py
|
Apache-2.0
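An inline sketch of the iterative box-refinement step: add the predicted offsets in unsigmoided space, then squash back to (0, 1). A simplified inverse_sigmoid is defined below so the snippet runs alone; DINO uses an equivalent helper from its utils.

import torch

def inverse_sigmoid(x, eps=1e-5):
    # Clamped logit, as commonly used for box refinement.
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

reference_points = torch.tensor([[[0.5, 0.5, 0.2, 0.2]]])   # nq, bs, 4 in (0, 1)
delta_unsig = torch.tensor([[[0.1, -0.1, 0.0, 0.0]]])       # predicted unsigmoided offsets
new_reference_points = (delta_unsig + inverse_sigmoid(reference_points)).sigmoid()
print(new_reference_points)  # ~[[[0.5250, 0.4750, 0.2000, 0.2000]]]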
|
def __init__(self, backbone, transformer, num_classes, num_queries,
aux_loss=False, iter_update=False,
query_dim=2,
random_refpoints_xy=False,
fix_refpoints_hw=-1,
num_feature_levels=1,
nheads=8,
# two stage
two_stage_type='no', # ['no', 'standard']
two_stage_add_query_num=0,
dec_pred_class_embed_share=True,
dec_pred_bbox_embed_share=True,
two_stage_class_embed_share=True,
two_stage_bbox_embed_share=True,
decoder_sa_type = 'sa',
num_patterns = 0,
dn_number = 100,
dn_box_noise_scale = 0.4,
dn_label_noise_ratio = 0.5,
dn_labelbook_size = 100,
):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
Conditional DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
fix_refpoints_hw: -1 (default): learn w and h for each box separately
>0 : given fixed number
-2 : learn a shared w and h
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.num_classes = num_classes
self.hidden_dim = hidden_dim = transformer.d_model
self.num_feature_levels = num_feature_levels
self.nheads = nheads
self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim)
# setting query dim
self.query_dim = query_dim
assert query_dim == 4
self.random_refpoints_xy = random_refpoints_xy
self.fix_refpoints_hw = fix_refpoints_hw
# for dn training
self.num_patterns = num_patterns
self.dn_number = dn_number
self.dn_box_noise_scale = dn_box_noise_scale
self.dn_label_noise_ratio = dn_label_noise_ratio
self.dn_labelbook_size = dn_labelbook_size
# prepare input projection layers
if num_feature_levels > 1:
num_backbone_outs = len(backbone.num_channels)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!"
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.backbone = backbone
self.aux_loss = aux_loss
self.box_pred_damping = box_pred_damping = None
self.iter_update = iter_update
assert iter_update, "Why not iter_update?"
# prepare pred layers
self.dec_pred_class_embed_share = dec_pred_class_embed_share
self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
# prepare class & box embed
_class_embed = nn.Linear(hidden_dim, num_classes)
_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
# init the two embed layers
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
_class_embed.bias.data = torch.ones(self.num_classes) * bias_value
nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
if dec_pred_bbox_embed_share:
box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]
else:
box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)]
if dec_pred_class_embed_share:
class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]
else:
class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)]
self.bbox_embed = nn.ModuleList(box_embed_layerlist)
self.class_embed = nn.ModuleList(class_embed_layerlist)
self.transformer.decoder.bbox_embed = self.bbox_embed
self.transformer.decoder.class_embed = self.class_embed
# two stage
self.two_stage_type = two_stage_type
self.two_stage_add_query_num = two_stage_add_query_num
assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type)
if two_stage_type != 'no':
if two_stage_bbox_embed_share:
assert dec_pred_class_embed_share and dec_pred_bbox_embed_share
self.transformer.enc_out_bbox_embed = _bbox_embed
else:
self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)
if two_stage_class_embed_share:
assert dec_pred_class_embed_share and dec_pred_bbox_embed_share
self.transformer.enc_out_class_embed = _class_embed
else:
self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)
self.refpoint_embed = None
if self.two_stage_add_query_num > 0:
self.init_ref_points(two_stage_add_query_num)
self.decoder_sa_type = decoder_sa_type
assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']
if decoder_sa_type == 'ca_label':
self.label_embedding = nn.Embedding(num_classes, hidden_dim)
for layer in self.transformer.decoder.layers:
layer.label_embedding = self.label_embedding
else:
for layer in self.transformer.decoder.layers:
layer.label_embedding = None
self.label_embedding = None
self._reset_parameters()
|
Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
Conditional DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
fix_refpoints_hw: -1 (default): learn w and h for each box separately
>0 : given fixed number
-2 : learn a shared w and h
|
__init__
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
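A quick check of the focal-loss bias initialization used above: with prior_prob = 0.01 every class logit starts out predicting roughly a 1% foreground probability.

import math
import torch

prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
print(bias_value)                               # -4.595...
print(torch.sigmoid(torch.tensor(bias_value)))  # tensor(0.0100)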
|
def forward(self, samples: NestedTensor, targets:List=None):
""" The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x num_classes]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, width, height). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, poss = self.backbone(samples)
srcs = []
masks = []
for l, feat in enumerate(features):
src, mask = feat.decompose()
srcs.append(self.input_proj[l](src))
masks.append(mask)
assert mask is not None
if self.num_feature_levels > len(srcs):
_len_srcs = len(srcs)
for l in range(_len_srcs, self.num_feature_levels):
if l == _len_srcs:
src = self.input_proj[l](features[-1].tensors)
else:
src = self.input_proj[l](srcs[-1])
m = samples.mask
mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
srcs.append(src)
masks.append(mask)
poss.append(pos_l)
if self.dn_number > 0 or targets is not None:
input_query_label, input_query_bbox, attn_mask, dn_meta =\
prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale),
training=self.training,num_queries=self.num_queries,num_classes=self.num_classes,
hidden_dim=self.hidden_dim,label_enc=self.label_enc)
else:
assert targets is None
input_query_bbox = input_query_label = attn_mask = dn_meta = None
hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask)
# In case num object=0
hs[0] += self.label_enc.weight[0,0]*0.0
# deformable-detr-like anchor update
# reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4
outputs_coord_list = []
for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)):
layer_delta_unsig = layer_bbox_embed(layer_hs)
layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
layer_outputs_unsig = layer_outputs_unsig.sigmoid()
outputs_coord_list.append(layer_outputs_unsig)
outputs_coord_list = torch.stack(outputs_coord_list)
outputs_class = torch.stack([layer_cls_embed(layer_hs) for
layer_cls_embed, layer_hs in zip(self.class_embed, hs)])
if self.dn_number > 0 and dn_meta is not None:
outputs_class, outputs_coord_list = \
dn_post_process(outputs_class, outputs_coord_list,
dn_meta,self.aux_loss,self._set_aux_loss)
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list)
# for encoder output
if hs_enc is not None:
# prepare intermediate outputs
interm_coord = ref_enc[-1]
interm_class = self.transformer.enc_out_class_embed(hs_enc[-1])
out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord}
out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal}
# prepare enc outputs
if hs_enc.shape[0] > 1:
enc_outputs_coord = []
enc_outputs_class = []
for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])):
layer_enc_delta_unsig = layer_box_embed(layer_hs_enc)
layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc)
layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid()
layer_enc_outputs_class = layer_class_embed(layer_hs_enc)
enc_outputs_coord.append(layer_enc_outputs_coord)
enc_outputs_class.append(layer_enc_outputs_class)
out['enc_outputs'] = [
{'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord)
]
out['dn_meta'] = dn_meta
return out
|
The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x num_classes]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, width, height). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
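A minimal usage sketch for the dict returned above; `model` is assumed to be an already constructed DINO instance, and the score threshold is purely illustrative (evaluation normally goes through PostProcess instead).
import torch

model.eval()                                  # CDN query construction is only needed in training
images = torch.randn(2, 3, 480, 640)          # a plain tensor (or list) is wrapped into a NestedTensor
out = model(images)
logits = out['pred_logits']                   # [batch_size, num_queries, num_classes]
boxes = out['pred_boxes']                     # [batch_size, num_queries, 4], normalized cxcywh
scores, labels = logits.sigmoid().max(-1)     # sigmoid, since classification is trained with focal loss
keep = scores > 0.3                           # illustrative cut-off only
kept_boxes = boxes[keep]                      # [num_kept, 4]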
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (Binary focal loss)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1],
dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:,:,:-1]
loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
return losses
|
Classification loss (Binary focal loss)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
|
loss_labels
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
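A standalone sketch of the one-hot construction used above: the extra "no-object" column absorbs the padding index before being sliced off, so unmatched queries end up with an all-zero target row. Only the shapes and the scatter_ trick are shown; the matcher and the focal loss itself are out of scope here.
import torch

num_classes, bs, num_queries = 3, 2, 5
target_classes = torch.full((bs, num_queries), num_classes, dtype=torch.int64)  # all "no-object"
target_classes[0, 2] = 1                                                        # one matched query, class 1
onehot = torch.zeros(bs, num_queries, num_classes + 1)                          # extra column for index num_classes
onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
onehot = onehot[:, :, :-1]                                                      # drop the no-object column
print(onehot[0, 2], onehot[0, 0])   # tensor([0., 1., 0.]) and tensor([0., 0., 0.])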
def loss_cardinality(self, outputs, targets, indices, num_boxes):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
|
Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
|
loss_cardinality
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
box_ops.box_cxcywh_to_xyxy(src_boxes),
box_ops.box_cxcywh_to_xyxy(target_boxes)))
losses['loss_giou'] = loss_giou.sum() / num_boxes
# calculate the x,y and h,w loss
with torch.no_grad():
losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes
losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes
return losses
|
Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
|
loss_boxes
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
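A toy check of the two box terms above, using torchvision's ops in place of the repo's box_ops helpers purely to keep the snippet self-contained (that substitution is an assumption, not the repo's actual import).
import torch
import torch.nn.functional as F
from torchvision.ops import box_convert, generalized_box_iou

pred = torch.tensor([[0.50, 0.50, 0.20, 0.20]])   # cxcywh, normalized
tgt  = torch.tensor([[0.55, 0.50, 0.20, 0.20]])
num_boxes = 1
loss_bbox = F.l1_loss(pred, tgt, reduction='none').sum() / num_boxes            # 0.05
giou = torch.diag(generalized_box_iou(box_convert(pred, 'cxcywh', 'xyxy'),
                                      box_convert(tgt, 'cxcywh', 'xyxy')))
loss_giou = (1 - giou).sum() / num_boxes                                        # ~0.4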
def loss_masks(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(src_masks)
target_masks = target_masks[tgt_idx]
# upsample predictions to the target size
src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
mode="bilinear", align_corners=False)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(src_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
|
Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
|
loss_masks
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
def forward(self, outputs, targets, return_indices=False):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
return_indices: used for vis. if True, the layer0-5 indices will be returned as well.
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
device=next(iter(outputs.values())).device
indices = self.matcher(outputs_without_aux, targets)
if return_indices:
indices0_copy = indices
indices_list = []
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
# prepare for dn loss
dn_meta = outputs['dn_meta']
if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta:
output_known_lbs_bboxes,single_pad, scalar = self.prep_for_dn(dn_meta)
dn_pos_idx = []
dn_neg_idx = []
for i in range(len(targets)):
if len(targets[i]['labels']) > 0:
                    t = torch.arange(len(targets[i]['labels'])).long().cuda()  # per-GT indices for this image
t = t.unsqueeze(0).repeat(scalar, 1)
tgt_idx = t.flatten()
output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t
output_idx = output_idx.flatten()
else:
output_idx = tgt_idx = torch.tensor([]).long().cuda()
dn_pos_idx.append((output_idx, tgt_idx))
dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx))
output_known_lbs_bboxes=dn_meta['output_known_lbs_bboxes']
l_dict = {}
for loss in self.losses:
kwargs = {}
if 'labels' in loss:
kwargs = {'log': False}
l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes*scalar,**kwargs))
l_dict = {k + f'_dn': v for k, v in l_dict.items()}
losses.update(l_dict)
else:
l_dict = dict()
l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda')
losses.update(l_dict)
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for idx, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
if return_indices:
indices_list.append(indices)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_{idx}': v for k, v in l_dict.items()}
losses.update(l_dict)
if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta:
aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx]
l_dict={}
for loss in self.losses:
kwargs = {}
if 'labels' in loss:
kwargs = {'log': False}
l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar,
**kwargs))
l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()}
losses.update(l_dict)
else:
l_dict = dict()
l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda')
l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda')
l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda')
l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda')
l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda')
l_dict = {k + f'_{idx}': v for k, v in l_dict.items()}
losses.update(l_dict)
# interm_outputs loss
if 'interm_outputs' in outputs:
interm_outputs = outputs['interm_outputs']
indices = self.matcher(interm_outputs, targets)
if return_indices:
indices_list.append(indices)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_interm': v for k, v in l_dict.items()}
losses.update(l_dict)
# enc output loss
if 'enc_outputs' in outputs:
for i, enc_outputs in enumerate(outputs['enc_outputs']):
indices = self.matcher(enc_outputs, targets)
if return_indices:
indices_list.append(indices)
for loss in self.losses:
if loss == 'masks':
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
if return_indices:
indices_list.append(indices0_copy)
return losses, indices_list
return losses
|
This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
return_indices: used for vis. if True, the layer0-5 indices will be returned as well.
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
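A sketch of a training step around the criterion above, assuming `model`, `criterion` and its `weight_dict` are built as in DETR-style engines (the `weight_dict` attribute name is an assumption here, not shown in this record).
outputs = model(samples, targets)        # targets are forwarded so the CDN queries can be built
loss_dict = criterion(outputs, targets)  # keys like 'loss_ce', 'loss_bbox', 'loss_giou', '*_dn', '*_0', ...
losses = sum(loss_dict[k] * criterion.weight_dict[k]
             for k in loss_dict if k in criterion.weight_dict)
losses.backward()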
def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
num_select = self.num_select
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
if not_to_xyxy:
boxes = out_bbox
else:
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
if test:
assert not not_to_xyxy
boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2]
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4))
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
if self.nms_iou_threshold > 0:
item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)]
results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)]
else:
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
return results
|
Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/dino.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dino.py
|
Apache-2.0
|
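A minimal post-processing sketch, assuming `postprocess` is an instance of the module above, `outputs` is the raw model dict, and each target carries an 'orig_size' (h, w) entry as in DETR-style COCO loaders.
import torch

orig_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)   # [batch_size, 2], assumed key
results = postprocess(outputs, orig_sizes)
# each results[i] holds 'scores', 'labels' and absolute xyxy 'boxes' for the top num_select queries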
def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):
"""
    A major difference of DINO from DN-DETR is that the authors process the pattern embedding in the detector's
    forward function and use a learnable tgt embedding, so we change this function a little bit.
:param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale
:param training: if it is training or inference
    :param num_queries: number of queries
:param num_classes: number of classes
:param hidden_dim: transformer hidden dim
:param label_enc: encode labels in dn
:return:
"""
if training:
targets, dn_number, label_noise_ratio, box_noise_scale = dn_args
# positive and negative dn queries
dn_number = dn_number * 2
known = [(torch.ones_like(t['labels'])).cuda() for t in targets]
batch_size = len(known)
known_num = [sum(k) for k in known]
if int(max(known_num)) == 0:
dn_number = 1
else:
if dn_number >= 100:
dn_number = dn_number // (int(max(known_num) * 2))
elif dn_number < 1:
dn_number = 1
if dn_number == 0:
dn_number = 1
unmask_bbox = unmask_label = torch.cat(known)
labels = torch.cat([t['labels'] for t in targets])
boxes = torch.cat([t['boxes'] for t in targets])
batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])
known_indice = torch.nonzero(unmask_label + unmask_bbox)
known_indice = known_indice.view(-1)
known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)
known_labels = labels.repeat(2 * dn_number, 1).view(-1)
known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)
known_bboxs = boxes.repeat(2 * dn_number, 1)
known_labels_expaned = known_labels.clone()
known_bbox_expand = known_bboxs.clone()
if label_noise_ratio > 0:
p = torch.rand_like(known_labels_expaned.float())
chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob
new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here
known_labels_expaned.scatter_(0, chosen_indice, new_label)
single_pad = int(max(known_num))
pad_size = int(single_pad * 2 * dn_number)
positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)
positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)
positive_idx = positive_idx.flatten()
negative_idx = positive_idx + len(boxes)
if box_noise_scale > 0:
known_bbox_ = torch.zeros_like(known_bboxs)
known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2
known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2
diff = torch.zeros_like(known_bboxs)
diff[:, :2] = known_bboxs[:, 2:] / 2
diff[:, 2:] = known_bboxs[:, 2:] / 2
rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0
rand_part = torch.rand_like(known_bboxs)
rand_part[negative_idx] += 1.0
rand_part *= rand_sign
known_bbox_ = known_bbox_ + torch.mul(rand_part,
diff).cuda() * box_noise_scale
known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)
known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2
known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]
m = known_labels_expaned.long().to('cuda')
input_label_embed = label_enc(m)
input_bbox_embed = inverse_sigmoid(known_bbox_expand)
padding_label = torch.zeros(pad_size, hidden_dim).cuda()
padding_bbox = torch.zeros(pad_size, 4).cuda()
input_query_label = padding_label.repeat(batch_size, 1, 1)
input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)
map_known_indice = torch.tensor([]).to('cuda')
if len(known_num):
map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]
map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()
if len(known_bid):
input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed
input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed
tgt_size = pad_size + num_queries
attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0
        # matching queries cannot see the reconstruction (denoising) part
attn_mask[pad_size:, :pad_size] = True
        # denoising groups cannot see each other
for i in range(dn_number):
if i == 0:
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True
if i == dn_number - 1:
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True
else:
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True
attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True
dn_meta = {
'pad_size': pad_size,
'num_dn_group': dn_number,
}
else:
input_query_label = None
input_query_bbox = None
attn_mask = None
dn_meta = None
return input_query_label, input_query_bbox, attn_mask, dn_meta
|
    A major difference of DINO from DN-DETR is that the authors process the pattern embedding in the detector's
    forward function and use a learnable tgt embedding, so we change this function a little bit.
:param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale
:param training: if it is training or inference
    :param num_queries: number of queries
:param num_classes: number of classes
:param hidden_dim: transformer hidden dim
:param label_enc: encode labels in dn
:return:
|
prepare_for_cdn
|
python
|
IDEA-Research/DINO
|
models/dino/dn_components.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dn_components.py
|
Apache-2.0
|
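A self-contained illustration, with toy sizes, of the attention mask assembled above: True means "blocked", each denoising group only attends to itself, and the matching queries never see the denoising part. The variable names are renamed for clarity and are not the function's own.
import torch

single_pad, dn_groups, num_queries = 2, 2, 3     # 2 GT slots per group, 2 groups (each pos+neg), 3 matching queries
pad_size = single_pad * 2 * dn_groups
tgt_size = pad_size + num_queries
attn_mask = torch.zeros(tgt_size, tgt_size, dtype=torch.bool)
attn_mask[pad_size:, :pad_size] = True           # matching queries cannot attend to any dn query
for i in range(dn_groups):
    lo, hi = single_pad * 2 * i, single_pad * 2 * (i + 1)
    attn_mask[lo:hi, :lo] = True                 # block earlier groups
    attn_mask[lo:hi, hi:pad_size] = True         # block later groups
print(attn_mask.int())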
def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):
"""
post process of dn after output from the transformer
put the dn part in the dn_meta
"""
if dn_meta and dn_meta['pad_size'] > 0:
output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]
output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]
outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]
outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]
out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}
if aux_loss:
out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)
dn_meta['output_known_lbs_bboxes'] = out
return outputs_class, outputs_coord
|
post process of dn after output from the transformer
put the dn part in the dn_meta
|
dn_post_process
|
python
|
IDEA-Research/DINO
|
models/dino/dn_components.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/dn_components.py
|
Apache-2.0
|
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, focal_alpha = 0.25):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
self.focal_alpha = focal_alpha
|
Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
|
__init__
|
python
|
IDEA-Research/DINO
|
models/dino/matcher.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/matcher.py
|
Apache-2.0
|
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = self.focal_alpha
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
|
Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/matcher.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/matcher.py
|
Apache-2.0
|
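A toy end-to-end check of the assignment step above, assuming `linear_sum_assignment` comes from scipy.optimize as in DETR-style matchers; with 3 queries and 2 targets the cheapest one-to-one pairing is recovered.
import torch
from scipy.optimize import linear_sum_assignment

cost = torch.tensor([[0.9, 0.2],    # query 0 vs targets 0, 1
                     [0.1, 0.7],    # query 1
                     [0.5, 0.4]])   # query 2
row_ind, col_ind = linear_sum_assignment(cost.numpy())
print(row_ind, col_ind)             # [0 1] [1 0]: query 0 -> target 1, query 1 -> target 0 (total cost 0.3)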
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, focal_alpha = 0.25):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
self.focal_alpha = focal_alpha
|
Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
|
__init__
|
python
|
IDEA-Research/DINO
|
models/dino/matcher.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/matcher.py
|
Apache-2.0
|
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = self.focal_alpha
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1)
sizes = [len(v["boxes"]) for v in targets]
indices = []
device = C.device
for i, (c, _size) in enumerate(zip(C.split(sizes, -1), sizes)):
weight_mat = c[i]
idx_i = weight_mat.min(0)[1]
idx_j = torch.arange(_size).to(device)
indices.append((idx_i, idx_j))
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
|
Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/matcher.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/matcher.py
|
Apache-2.0
|
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
|
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
|
dice_loss
|
python
|
IDEA-Research/DINO
|
models/dino/segmentation.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/segmentation.py
|
Apache-2.0
|
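A quick numeric sanity check with the function above in scope: a confident, correct mask drives the dice loss toward 0, while a confidently wrong one is heavily penalized.
import torch

target      = torch.tensor([[1., 1., 0., 0.]])
good_logits = torch.tensor([[ 10.,  10., -10., -10.]])   # sigmoid ~= target
bad_logits  = torch.tensor([[-10., -10.,  10.,  10.]])   # sigmoid ~= 1 - target
print(dice_loss(good_logits, target, num_boxes=1))        # ~0.0
print(dice_loss(bad_logits,  target, num_boxes=1))        # ~0.8 (1 - 1/5 with the +1 smoothing)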
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Defaults to 0.25; a negative value disables the weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
|
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Defaults to 0.25; a negative value disables the weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
|
sigmoid_focal_loss
|
python
|
IDEA-Research/DINO
|
models/dino/segmentation.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/segmentation.py
|
Apache-2.0
|
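A small demonstration of the focal down-weighting with the function above in scope: an easy, correctly classified example contributes far less than a confidently wrong one.
import torch

targets = torch.tensor([[1., 0.]])
easy = torch.tensor([[ 4., -4.]])    # confident and correct
hard = torch.tensor([[-1.,  1.]])    # wrong on both positions
print(sigmoid_focal_loss(easy, targets, num_boxes=1))   # ~3e-6
print(sigmoid_focal_loss(hard, targets, num_boxes=1))   # ~0.35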
def __init__(self, is_thing_map, threshold=0.85):
"""
Parameters:
            is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
the class is a thing (True) or a stuff (False) class
threshold: confidence threshold: segments with confidence lower than this will be deleted
"""
super().__init__()
self.threshold = threshold
self.is_thing_map = is_thing_map
|
Parameters:
            is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether
the class is a thing (True) or a stuff (False) class
threshold: confidence threshold: segments with confidence lower than this will be deleted
|
__init__
|
python
|
IDEA-Research/DINO
|
models/dino/segmentation.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/segmentation.py
|
Apache-2.0
|
def forward(self, outputs, processed_sizes, target_sizes=None):
""" This function computes the panoptic prediction from the model's predictions.
Parameters:
outputs: This is a dict coming directly from the model. See the model doc for the content.
processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
model, ie the size after data augmentation but before batching.
target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
of each prediction. If left to None, it will default to the processed_sizes
"""
if target_sizes is None:
target_sizes = processed_sizes
assert len(processed_sizes) == len(target_sizes)
out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"]
assert len(out_logits) == len(raw_masks) == len(target_sizes)
preds = []
def to_tuple(tup):
if isinstance(tup, tuple):
return tup
return tuple(tup.cpu().tolist())
for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
):
# we filter empty queries and detection below threshold
scores, labels = cur_logits.softmax(-1).max(-1)
keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold)
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
cur_scores = cur_scores[keep]
cur_classes = cur_classes[keep]
cur_masks = cur_masks[keep]
cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])
h, w = cur_masks.shape[-2:]
assert len(cur_boxes) == len(cur_classes)
# It may be that we have several predicted masks for the same stuff class.
# In the following, we track the list of masks ids for each stuff class (they are merged later on)
cur_masks = cur_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
for k, label in enumerate(cur_classes):
if not self.is_thing_map[label.item()]:
stuff_equiv_classes[label.item()].append(k)
def get_ids_area(masks, scores, dedup=False):
# This helper function creates the final panoptic segmentation image
# It also returns the area of the masks that appears on the image
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
final_h, final_w = to_tuple(target_size)
seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))
seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
np_seg_img = (
torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()
)
m_id = torch.from_numpy(rgb2id(np_seg_img))
area = []
for i in range(len(scores)):
area.append(m_id.eq(i).sum().item())
return area, seg_img
area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
if cur_classes.numel() > 0:
                # We now filter empty masks as long as we find some
while True:
filtered_small = torch.as_tensor(
[area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
)
if filtered_small.any().item():
cur_scores = cur_scores[~filtered_small]
cur_classes = cur_classes[~filtered_small]
cur_masks = cur_masks[~filtered_small]
area, seg_img = get_ids_area(cur_masks, cur_scores)
else:
break
else:
cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
segments_info = []
for i, a in enumerate(area):
cat = cur_classes[i].item()
segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a})
del cur_classes
with io.BytesIO() as out:
seg_img.save(out, format="PNG")
predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
preds.append(predictions)
return preds
|
This function computes the panoptic prediction from the model's predictions.
Parameters:
outputs: This is a dict coming directly from the model. See the model doc for the content.
processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the
model, ie the size after data augmentation but before batching.
target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size
of each prediction. If left to None, it will default to the processed_sizes
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/segmentation.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/segmentation.py
|
Apache-2.0
|
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
|
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
|
window_partition
|
python
|
IDEA-Research/DINO
|
models/dino/swin_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/swin_transformer.py
|
Apache-2.0
|
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
|
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
|
window_reverse
|
python
|
IDEA-Research/DINO
|
models/dino/swin_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/swin_transformer.py
|
Apache-2.0
|
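A round-trip sanity check with the two helpers above in scope: partitioning a feature map whose sides are multiples of the window size and then reversing it recovers the input exactly.
import torch

B, H, W, C, ws = 2, 8, 8, 4, 4
x = torch.randn(B, H, W, C)
windows = window_partition(x, ws)          # [B * (H//ws) * (W//ws), ws, ws, C] == [8, 4, 4, 4]
restored = window_reverse(windows, ws, H, W)
assert torch.equal(restored, x)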
def forward(self, x, mask=None):
""" Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
|
Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/swin_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/swin_transformer.py
|
Apache-2.0
|
def forward(self, x, mask_matrix):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
mask_matrix: Attention mask for cyclic shift.
"""
B, L, C = x.shape
H, W = self.H, self.W
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
|
Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
mask_matrix: Attention mask for cyclic shift.
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/swin_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/swin_transformer.py
|
Apache-2.0
|
def forward(self, x, H, W):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
|
Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/swin_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/swin_transformer.py
|
Apache-2.0
|
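A standalone illustration of the 2x2 neighbourhood gathering performed above; the module itself then applies LayerNorm and a 4C -> 2C linear reduction, which are omitted here.
import torch

B, H, W, C = 1, 4, 4, 8
x = torch.randn(B, H * W, C).view(B, H, W, C)
x0, x1 = x[:, 0::2, 0::2, :], x[:, 1::2, 0::2, :]
x2, x3 = x[:, 0::2, 1::2, :], x[:, 1::2, 1::2, :]
merged = torch.cat([x0, x1, x2, x3], -1).view(B, -1, 4 * C)
print(merged.shape)                        # torch.Size([1, 4, 32]): 4x4 tokens become 2x2, C becomes 4C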
def forward(self, x, H, W):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x_down = self.downsample(x, H, W)
Wh, Ww = (H + 1) // 2, (W + 1) // 2
return x, H, W, x_down, Wh, Ww
else:
return x, H, W, x, H, W
|
Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/swin_transformer.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/swin_transformer.py
|
Apache-2.0
|
def forward(self, srcs, masks, pos_embeds, query_embed=None):
"""
Input:
- srcs: List([bs, c, h, w])
- masks: List([bs, h, w])
"""
assert self.two_stage or query_embed is not None
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2) # bs, hw, c
mask = mask.flatten(1) # bs, hw
pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
# prepare input for decoder
bs, _, c = memory.shape
if self.two_stage:
output_memory, output_proposals = self.gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)
# hack implementation for two-stage Deformable DETR
enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)
enc_outputs_coord_unact = self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals
topk = self.two_stage_num_proposals
topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
topk_coords_unact = torch.gather(enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
topk_coords_unact = topk_coords_unact.detach()
reference_points = topk_coords_unact.sigmoid()
init_reference_out = reference_points
pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))
query_embed, tgt = torch.split(pos_trans_out, c, dim=2)
elif self.use_dab:
reference_points = query_embed[..., self.d_model:].sigmoid()
tgt = query_embed[..., :self.d_model]
tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
init_reference_out = reference_points
else:
query_embed, tgt = torch.split(query_embed, c, dim=1)
query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)
tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
reference_points = self.reference_points(query_embed).sigmoid()
            # bs, num_queries, 2
init_reference_out = reference_points
# decoder
hs, inter_references = self.decoder(tgt, reference_points, memory,
spatial_shapes, level_start_index, valid_ratios,
query_pos=query_embed if not self.use_dab else None,
src_padding_mask=mask_flatten)
inter_references_out = inter_references
if self.two_stage:
return hs, init_reference_out, inter_references_out, enc_outputs_class, enc_outputs_coord_unact
return hs, init_reference_out, inter_references_out, None, None
|
Input:
- srcs: List([bs, c, h, w])
- masks: List([bs, h, w])
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/transformer_deformable.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/transformer_deformable.py
|
Apache-2.0
|
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
"""
Input:
- src: [bs, sum(hi*wi), 256]
- spatial_shapes: h,w of each level [num_level, 2]
- level_start_index: [num_level] start point of level in sum(hi*wi).
- valid_ratios: [bs, num_level, 2]
- pos: pos embed for src. [bs, sum(hi*wi), 256]
- padding_mask: [bs, sum(hi*wi)]
        Intermediate:
            - reference_points: [bs, sum(hi*wi), num_level, 2]
"""
output = src
# bs, sum(hi*wi), 256
if self.num_layers > 0:
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
for _, layer in enumerate(self.layers):
output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
|
Input:
- src: [bs, sum(hi*wi), 256]
- spatial_shapes: h,w of each level [num_level, 2]
- level_start_index: [num_level] start point of level in sum(hi*wi).
- valid_ratios: [bs, num_level, 2]
- pos: pos embed for src. [bs, sum(hi*wi), 256]
- padding_mask: [bs, sum(hi*wi)]
        Intermediate:
            - reference_points: [bs, sum(hi*wi), num_level, 2]
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/transformer_deformable.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/transformer_deformable.py
|
Apache-2.0
|
def gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor, learnedwh=None):
"""
Input:
- memory: bs, \sum{hw}, d_model
- memory_padding_mask: bs, \sum{hw}
- spatial_shapes: nlevel, 2
- learnedwh: 2
Output:
- output_memory: bs, \sum{hw}, d_model
- output_proposals: bs, \sum{hw}, 4
"""
N_, S_, C_ = memory.shape
base_scale = 4.0
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
if learnedwh is not None:
wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0 ** lvl)
else:
wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += (H_ * W_)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
return output_memory, output_proposals
|
Input:
- memory: bs, \sum{hw}, d_model
- memory_padding_mask: bs, \sum{hw}
- spatial_shapes: nlevel, 2
- learnedwh: 2
Output:
- output_memory: bs, \sum{hw}, d_model
- output_proposals: bs, \sum{hw}, 4
|
gen_encoder_output_proposals
|
python
|
IDEA-Research/DINO
|
models/dino/utils.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/utils.py
|
Apache-2.0
|
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Defaults to 0.25; a negative value disables the weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
|
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Defaults to 0.25; a negative value disables the weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
|
sigmoid_focal_loss
|
python
|
IDEA-Research/DINO
|
models/dino/utils.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/utils.py
|
Apache-2.0
|
def _get_activation_fn(activation, d_model=256, batch_dim=0):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
    raise RuntimeError(F"activation should be relu/gelu/glu/prelu/selu, not {activation}.")
|
Return an activation function given a string
|
_get_activation_fn
|
python
|
IDEA-Research/DINO
|
models/dino/utils.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/utils.py
|
Apache-2.0
|
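Quick usage note for _get_activation_fn: the functional activations ignore d_model and batch_dim, while "prelu" returns an nn.PReLU module with a learnable weight.
import torch

act = _get_activation_fn("gelu")
print(act(torch.randn(4, 256)).shape)     # torch.Size([4, 256])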
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
|
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
|
__init__
|
python
|
IDEA-Research/DINO
|
models/dino/ops/modules/ms_deform_attn.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/ops/modules/ms_deform_attn.py
|
Apache-2.0
|
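A size check for the projection layers defined above with the defaults d_model=256, n_levels=4, n_heads=8, n_points=4 (head dimension 256/8 = 32, a power of two, so the warning is not triggered); constructing the module assumes the repo's deformable-attention package, including its compiled extension, is importable.
attn = MSDeformAttn(d_model=256, n_levels=4, n_heads=8, n_points=4)
print(attn.sampling_offsets)    # Linear(256 -> 256): 8 heads * 4 levels * 4 points * 2 offsets
print(attn.attention_weights)   # Linear(256 -> 128): 8 heads * 4 levels * 4 points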
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
# for amp
if value.dtype == torch.float16:
# for mixed precision
output = MSDeformAttnFunction.apply(
value.to(torch.float32), input_spatial_shapes, input_level_start_index, sampling_locations.to(torch.float32), attention_weights, self.im2col_step)
output = output.to(torch.float16)
output = self.output_proj(output)
return output
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
output = self.output_proj(output)
return output
|
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
|
forward
|
python
|
IDEA-Research/DINO
|
models/dino/ops/modules/ms_deform_attn.py
|
https://github.com/IDEA-Research/DINO/blob/master/models/dino/ops/modules/ms_deform_attn.py
|
Apache-2.0
|
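An illustrative forward call following the shape contract in the docstring above; it assumes a CUDA build of the MSDeformAttnFunction extension from the DINO repo, and the uniformly random reference points and feature values are placeholders only.
import torch

spatial_shapes = torch.as_tensor([[64, 64], [32, 32], [16, 16], [8, 8]], dtype=torch.long).cuda()
level_start_index = torch.cat((spatial_shapes.new_zeros(1),
                               (spatial_shapes[:, 0] * spatial_shapes[:, 1]).cumsum(0)[:-1]))
N, len_q, d_model = 2, 300, 256
len_in = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())   # 5440 tokens over 4 levels

attn = MSDeformAttn(d_model=d_model, n_levels=4, n_heads=8, n_points=4).cuda()
query = torch.randn(N, len_q, d_model, device='cuda')
reference_points = torch.rand(N, len_q, 4, 2, device='cuda')        # normalized (x, y) per level
input_flatten = torch.randn(N, len_in, d_model, device='cuda')

out = attn(query, reference_points, input_flatten, spatial_shapes, level_start_index)
print(out.shape)   # torch.Size([2, 300, 256])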
def get_shape(val: object) -> typing.List[int]:
"""
Get the shapes from a jit value object.
Args:
val (torch._C.Value): jit value object.
Returns:
list(int): return a list of ints.
"""
if val.isCompleteTensor(): # pyre-ignore
r = val.type().sizes() # pyre-ignore
if not r:
r = [1]
return r
elif val.type().kind() in ("IntType", "FloatType"):
return [1]
elif val.type().kind() in ("StringType",):
return [0]
elif val.type().kind() in ("ListType",):
return [1]
elif val.type().kind() in ("BoolType", "NoneType"):
return [0]
    else:
        raise ValueError(f"Unsupported jit value type: {val.type().kind()}")
|
Get the shapes from a jit value object.
Args:
val (torch._C.Value): jit value object.
Returns:
list(int): return a list of ints.
|
get_shape
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
def addmm_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for fully connected layers with torch script.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Count flop for nn.Linear
# inputs is a list of length 3.
input_shapes = [get_shape(v) for v in inputs[1:3]]
# input_shapes[0]: [batch size, input feature dimension]
    # input_shapes[1]: [input feature dimension, output feature dimension]
assert len(input_shapes[0]) == 2
assert len(input_shapes[1]) == 2
batch_size, input_dim = input_shapes[0]
output_dim = input_shapes[1][1]
flop = batch_size * input_dim * output_dim
flop_counter = Counter({"addmm": flop})
return flop_counter
|
This method counts the flops for fully connected layers with torch script.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
|
addmm_flop_jit
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
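A back-of-the-envelope check of the formula used above, without tracing: for an nn.Linear lowered to addmm, mat1 has shape (batch, in_features) and mat2 has shape (in_features, out_features); the sizes here are illustrative.
batch_size, input_dim, output_dim = 8, 512, 1024
print(batch_size * input_dim * output_dim)   # 4194304 multiply-accumulates counted as "addmm" flops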
def conv_flop_count(
x_shape: typing.List[int],
w_shape: typing.List[int],
out_shape: typing.List[int],
) -> typing.Counter[str]:
"""
This method counts the flops for convolution. Note only multiplication is
counted. Computation for addition and bias is ignored.
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
batch_size, Cin_dim, Cout_dim = x_shape[0], w_shape[1], out_shape[1]
out_size = prod(out_shape[2:])
kernel_size = prod(w_shape[2:])
flop = batch_size * out_size * Cout_dim * Cin_dim * kernel_size
flop_counter = Counter({"conv": flop})
return flop_counter
|
This method counts the flops for convolution. Note only multiplication is
counted. Computation for addition and bias is ignored.
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
|
conv_flop_count
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
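Because conv_flop_count takes plain shape lists, it can be exercised directly; the 3x3, 64-to-128-channel convolution below is illustrative.
x_shape = [2, 64, 56, 56]       # (N, Cin, H, W)
w_shape = [128, 64, 3, 3]       # (Cout, Cin, kH, kW)
out_shape = [2, 128, 56, 56]    # same-padding output
print(conv_flop_count(x_shape, w_shape, out_shape))
# Counter({'conv': 462422016})  == 2 * (56*56) * 128 * 64 * (3*3)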
def conv_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for convolution using torch script.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before convolution.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after convolution.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs of Convolution should be a list of length 12. They represent:
# 0) input tensor, 1) convolution filter, 2) bias, 3) stride, 4) padding,
# 5) dilation, 6) transposed, 7) out_pad, 8) groups, 9) benchmark_cudnn,
# 10) deterministic_cudnn and 11) user_enabled_cudnn.
    # assert len(inputs) == 12
x, w = inputs[:2]
x_shape, w_shape, out_shape = (
get_shape(x),
get_shape(w),
get_shape(outputs[0]),
)
return conv_flop_count(x_shape, w_shape, out_shape)
|
This method counts the flops for convolution using torch script.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before convolution.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after convolution.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
|
conv_flop_jit
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
def einsum_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for the einsum operation. We currently support
two einsum operations: "nct,ncp->ntp" and "ntg,ncg->nct".
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before einsum.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after einsum.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs of einsum should be a list of length 2.
# Inputs[0] stores the equation used for einsum.
# Inputs[1] stores the list of input shapes.
assert len(inputs) == 2
equation = inputs[0].toIValue() # pyre-ignore
# Get rid of white space in the equation string.
equation = equation.replace(" ", "")
# Re-map equation so that same equation with different alphabet
# representations will look the same.
letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()
mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}
equation = equation.translate(mapping)
input_shapes_jit = inputs[1].node().inputs() # pyre-ignore
input_shapes = [get_shape(v) for v in input_shapes_jit]
if equation == "abc,abd->acd":
n, c, t = input_shapes[0]
p = input_shapes[-1][-1]
flop = n * c * t * p
flop_counter = Counter({"einsum": flop})
return flop_counter
elif equation == "abc,adc->adb":
n, t, g = input_shapes[0]
c = input_shapes[-1][1]
flop = n * t * g * c
flop_counter = Counter({"einsum": flop})
return flop_counter
else:
raise NotImplementedError("Unsupported einsum operation.")
|
This method counts the flops for the einsum operation. We currently support
two einsum operations: "nct,ncp->ntp" and "ntg,ncg->nct".
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before einsum.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after einsum.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
|
einsum_flop_jit
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
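The letter-remapping step above can be checked in isolation: any equation with the same index pattern canonicalizes to one of the two supported keys, independent of the letters used.
from collections import OrderedDict

equation = "nct,ncp->ntp".replace(" ", "")
letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()
mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}
print(equation.translate(mapping))   # abc,abd->acd  -> flop = n * c * t * p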
def matmul_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for matmul.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before matmul.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after matmul.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs contains the shapes of two matrices.
input_shapes = [get_shape(v) for v in inputs]
assert len(input_shapes) == 2
assert input_shapes[0][-1] == input_shapes[1][-2]
dim_len = len(input_shapes[1])
assert dim_len >= 2
batch = 1
for i in range(dim_len - 2):
assert input_shapes[0][i] == input_shapes[1][i]
batch *= input_shapes[0][i]
# (b,m,c) x (b,c,n), flop = bmnc
flop = batch * input_shapes[0][-2] * input_shapes[0][-1] * input_shapes[1][-1]
flop_counter = Counter({"matmul": flop})
return flop_counter
|
This method counts the flops for matmul.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before matmul.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after matmul.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
|
matmul_flop_jit
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
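Worked numbers for the (b, m, c) x (b, c, n) case handled above, e.g. decoder queries multiplied against encoder memory; the sizes are illustrative.
b, m, c, n = 2, 100, 256, 900
print(b * m * c * n)   # 46080000 "matmul" flops (one multiply-accumulate per term)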
def batchnorm_flop_jit(
inputs: typing.List[object], outputs: typing.List[object]
) -> typing.Counter[str]:
"""
This method counts the flops for batch norm.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before batch norm.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after batch norm.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
"""
# Inputs[0] contains the shape of the input.
input_shape = get_shape(inputs[0])
assert 2 <= len(input_shape) <= 5
flop = prod(input_shape) * 4
flop_counter = Counter({"batchnorm": flop})
return flop_counter
|
This method counts the flops for batch norm.
Args:
inputs (list(torch._C.Value)): The input shape in the form of a list of
jit object before batch norm.
outputs (list(torch._C.Value)): The output shape in the form of a list
of jit object after batch norm.
Returns:
Counter: A Counter dictionary that records the number of flops for each
operation.
|
batchnorm_flop_jit
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
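The batch-norm estimate above is simply four elementwise operations per input element (subtract mean, divide by std, scale, shift); a quick check with illustrative sizes:
from math import prod

input_shape = [2, 64, 56, 56]
print(prod(input_shape) * 4)   # 1605632 "batchnorm" flops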
def linear_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
"""
Count flops for the aten::linear operator.
"""
# Inputs is a list of length 3; unlike aten::addmm, it is the first
# two elements that are relevant.
input_shapes = [get_shape(v) for v in inputs[0:2]]
# input_shapes[0]: [dim0, dim1, ..., input_feature_dim]
# input_shapes[1]: [output_feature_dim, input_feature_dim]
assert input_shapes[0][-1] == input_shapes[1][-1]
flops = prod(input_shapes[0]) * input_shapes[1][0]
flop_counter = Counter({"linear": flops})
return flop_counter
|
Count flops for the aten::linear operator.
|
linear_flop_jit
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
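For aten::linear the weight is stored as (out_features, in_features), so the count is prod(input_shape) * out_features; a worked example with illustrative sizes:
from math import prod

input_shape = [2, 100, 256]    # e.g. (batch, num_queries, in_features)
weight_shape = [512, 256]      # (out_features, in_features)
print(prod(input_shape) * weight_shape[0])   # 26214400 "linear" flops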
def norm_flop_counter(affine_arg_index: int) -> Handle:
"""
Args:
affine_arg_index: index of the affine argument in inputs
"""
    def norm_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
"""
Count flops for norm layers.
"""
# Inputs[0] contains the shape of the input.
input_shape = get_shape(inputs[0])
has_affine = get_shape(inputs[affine_arg_index]) is not None
assert 2 <= len(input_shape) <= 5, input_shape
# 5 is just a rough estimate
flop = prod(input_shape) * (5 if has_affine else 4)
flop_counter = Counter({"norm": flop})
return flop_counter
return norm_flop_jit
|
Args:
affine_arg_index: index of the affine argument in inputs
|
norm_flop_counter
|
python
|
IDEA-Research/DINO
|
tools/benchmark.py
|
https://github.com/IDEA-Research/DINO/blob/master/tools/benchmark.py
|
Apache-2.0
|
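A hedged sketch of how these jit handlers are typically wired into a flop-counting driver such as tools/benchmark.py; the aten op names below are the usual ones but are an assumption, since the exact mapping depends on the torch version and the driver used.
_FLOP_HANDLES = {
    "aten::addmm": addmm_flop_jit,
    "aten::linear": linear_flop_jit,
    "aten::matmul": matmul_flop_jit,
    "aten::einsum": einsum_flop_jit,
    "aten::_convolution": conv_flop_jit,
    "aten::batch_norm": batchnorm_flop_jit,
    "aten::layer_norm": norm_flop_counter(2),   # assumed: weight is argument index 2
    "aten::group_norm": norm_flop_counter(2),
}
# A driver would trace the model, walk the graph nodes, and dispatch each
# node.kind() found in this dict to its handler, summing the returned Counters.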