Dataset schema (per-column type and value statistics):

    code        string, lengths 66 to 870k
    docstring   string, lengths 19 to 26.7k
    func_name   string, lengths 1 to 138
    language    string, 1 distinct value
    repo        string, lengths 7 to 68
    path        string, lengths 5 to 324
    url         string, lengths 46 to 389
    license     string, 7 distinct values
def tensor2im(input_image, imtype=np.uint8):
    """Converts a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array, range(0, 1)
        imtype (type)        -- the desired type of the converted numpy array
    """
    if not isinstance(input_image, np.ndarray):
        if isinstance(input_image, torch.Tensor):  # get the data from a variable
            image_tensor = input_image.data
        else:
            return input_image
        image_numpy = image_tensor.clamp(0.0, 1.0).cpu().float().numpy()  # convert it into a numpy array
        if image_numpy.shape[0] == 1:  # grayscale to RGB
            image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0  # post-processing: transpose and scaling
    else:  # if it is a numpy array, do nothing
        image_numpy = input_image
    return image_numpy.astype(imtype)
"Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array, range(0, 1) imtype (type) -- the desired type of the converted numpy array
tensor2im
python
OpenTalker/video-retalking
third_part/face3d/util/util.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
Apache-2.0
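A minimal usage sketch for tensor2im (editorial example, not part of the record; assumes numpy/torch are imported and tensor2im is in scope, with synthetic inputs):

import numpy as np
import torch

chw = torch.rand(3, 64, 64)   # CHW float tensor in [0, 1]
img = tensor2im(chw)          # -> (64, 64, 3) uint8 array in [0, 255]
assert img.shape == (64, 64, 3) and img.dtype == np.uint8

gray = torch.rand(1, 64, 64)  # single-channel input
rgb = tensor2im(gray)         # grayscale is tiled to 3 channels
assert rgb.shape == (64, 64, 3)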
def diagnose_network(net, name='network'):
    """Calculate and print the mean of the average absolute gradients

    Parameters:
        net (torch network) -- Torch network
        name (str) -- the name of the network
    """
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            mean += torch.mean(torch.abs(param.grad.data))
            count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)
Calculate and print the mean of the average absolute gradients Parameters: net (torch network) -- Torch network name (str) -- the name of the network
diagnose_network
python
OpenTalker/video-retalking
third_part/face3d/util/util.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
Apache-2.0
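A toy sketch of diagnose_network after one backward pass (assumes the function is in scope; the module and loss are made up):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)                  # hypothetical tiny module
loss = net(torch.randn(8, 4)).sum()    # dummy loss just to populate gradients
loss.backward()
diagnose_network(net, name='toy-linear')  # prints the name, then the mean |grad|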
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str) -- the path of the image
    """
    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape

    # note: PIL.Image.resize expects a (width, height) tuple
    if aspect_ratio is None:
        pass
    elif aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    elif aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)
Save a numpy image to the disk Parameters: image_numpy (numpy array) -- input numpy array image_path (str) -- the path of the image
save_image
python
OpenTalker/video-retalking
third_part/face3d/util/util.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
Apache-2.0
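A sketch chaining the two utilities above (the output path is hypothetical):

import torch

fake = torch.rand(3, 128, 128)                # e.g. a generator output in [0, 1]
save_image(tensor2im(fake), '/tmp/fake.png')  # aspect_ratio=1.0 keeps the original size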
def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        x = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array
print_numpy
python
OpenTalker/video-retalking
third_part/face3d/util/util.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
Apache-2.0
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths
mkdirs
python
OpenTalker/video-retalking
third_part/face3d/util/util.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
Apache-2.0
def draw_landmarks(img, landmark, color='r', step=2):
    """
    Return:
        img      -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255)

    Parameters:
        img      -- numpy.array, (B, H, W, 3), RGB order, range (0, 255)
        landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction
        color    -- str, 'r' or 'b' (red or blue)
    """
    if color == 'r':
        c = np.array([255., 0, 0])
    else:
        c = np.array([0, 0, 255.])
    _, H, W, _ = img.shape
    img, landmark = img.copy(), landmark.copy()
    landmark[..., 1] = H - 1 - landmark[..., 1]
    landmark = np.round(landmark).astype(np.int32)
    for i in range(landmark.shape[1]):
        x, y = landmark[:, i, 0], landmark[:, i, 1]
        for j in range(-step, step):
            for k in range(-step, step):
                u = np.clip(x + j, 0, W - 1)
                v = np.clip(y + k, 0, H - 1)
                for m in range(landmark.shape[0]):
                    img[m, v[m], u[m]] = c
    return img
Return: img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255) Parameters: img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255) landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction color -- str, 'r' or 'b' (red or blue)
draw_landmarks
python
OpenTalker/video-retalking
third_part/face3d/util/util.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
Apache-2.0
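A sketch of draw_landmarks on synthetic data (assumes the function is in scope; the frames and landmarks are random):

import numpy as np

imgs = np.zeros((2, 256, 256, 3), dtype=np.float32)  # (B, H, W, 3) blank RGB frames
lms = np.random.rand(2, 68, 2) * 255                 # (B, 68, 2) synthetic landmarks
out = draw_landmarks(imgs, lms, color='r', step=2)   # returns a painted copy
assert out.shape == imgs.shape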
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
    """Save images to the disk.

    Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
        visuals (OrderedDict)    -- an ordered dictionary that stores (name, images (either tensor or numpy)) pairs
        image_path (str)         -- the string is used to create image paths
        aspect_ratio (float)     -- the aspect ratio of saved images
        width (int)              -- the images will be resized to width x width

    This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
    """
    image_dir = webpage.get_image_dir()
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]

    webpage.add_header(name)
    ims, txts, links = [], [], []

    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = '%s/%s.png' % (label, name)
        os.makedirs(os.path.join(image_dir, label), exist_ok=True)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
        ims.append(image_name)
        txts.append(label)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=width)
Save images to the disk. Parameters: webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details) visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy)) pairs image_path (str) -- the string is used to create image paths aspect_ratio (float) -- the aspect ratio of saved images width (int) -- the images will be resized to width x width This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
save_images
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def __init__(self, opt):
    """Initialize the Visualizer class

    Parameters:
        opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
    Step 1: Cache the training/test options
    Step 2: create a tensorboard writer
    Step 3: create an HTML object for saving HTML files
    Step 4: create a logging file to store training losses
    """
    self.opt = opt  # cache the option
    self.use_html = opt.isTrain and not opt.no_html
    self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, 'logs', opt.name))
    self.win_size = opt.display_winsize
    self.name = opt.name
    self.saved = False
    if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])
    # create a logging file to store training losses
    self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
    with open(self.log_name, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Training Loss (%s) ================\n' % now)
Initialize the Visualizer class Parameters: opt -- stores all the experiment flags; needs to be a subclass of BaseOptions Step 1: Cache the training/test options Step 2: create a tensorboard writer Step 3: create an HTML object for saving HTML files Step 4: create a logging file to store training losses
__init__
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def display_current_results(self, visuals, total_iters, epoch, save_result):
    """Display current results on tensorboard; save current results to an HTML file.

    Parameters:
        visuals (OrderedDict) -- dictionary of images to display or save
        total_iters (int) -- total iterations
        epoch (int) -- the current epoch
        save_result (bool) -- if save the current results to an HTML file
    """
    for label, image in visuals.items():
        self.writer.add_image(label, util.tensor2im(image), total_iters, dataformats='HWC')

    if self.use_html and (save_result or not self.saved):  # save images to an HTML file if they haven't been saved.
        self.saved = True
        # save images to the disk
        for label, image in visuals.items():
            image_numpy = util.tensor2im(image)
            img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
            util.save_image(image_numpy, img_path)

        # update website
        webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
        for n in range(epoch, 0, -1):
            webpage.add_header('epoch [%d]' % n)
            ims, txts, links = [], [], []

            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)  # fixed: convert this label's image, not a leftover loop variable
                img_path = 'epoch%.3d_%s.png' % (n, label)
                ims.append(img_path)
                txts.append(label)
                links.append(img_path)
            webpage.add_images(ims, txts, links, width=self.win_size)
        webpage.save()
Display current results on tensorboard; save current results to an HTML file. Parameters: visuals (OrderedDict) -- dictionary of images to display or save total_iters (int) -- total iterations epoch (int) -- the current epoch save_result (bool) -- if save the current results to an HTML file
display_current_results
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
    """print current losses on console; also save the losses to the disk

    Parameters:
        epoch (int) -- current epoch
        iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
        losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
        t_comp (float) -- computational time per data point (normalized by batch_size)
        t_data (float) -- data loading time per data point (normalized by batch_size)
    """
    message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
    for k, v in losses.items():
        message += '%s: %.3f ' % (k, v)

    print(message)  # print the message
    with open(self.log_name, "a") as log_file:
        log_file.write('%s\n' % message)  # save the message
print current losses on console; also save the losses to the disk Parameters: epoch (int) -- current epoch iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) losses (OrderedDict) -- training losses stored in the format of (name, float) pairs t_comp (float) -- computational time per data point (normalized by batch_size) t_data (float) -- data loading time per data point (normalized by batch_size)
print_current_losses
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def __init__(self, opt):
    """Initialize the Visualizer class

    Parameters:
        opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
    Step 1: Cache the training/test options
    Step 2: create a tensorboard writer
    Step 3: create an HTML object for saving HTML files
    Step 4: create a logging file to store training losses
    """
    self.opt = opt  # cache the option
    self.name = opt.name
    self.img_dir = os.path.join(opt.checkpoints_dir, opt.name, 'results')

    if opt.phase != 'test':
        self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, opt.name, 'logs'))
        # create a logging file to store training losses
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)
Initialize the Visualizer class Parameters: opt -- stores all the experiment flags; needs to be a subclass of BaseOptions Step 1: Cache the training/test options Step 2: create a tensorboard writer Step 3: create an HTML object for saving HTML files Step 4: create a logging file to store training losses
__init__
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def display_current_results(self, visuals, total_iters, epoch, dataset='train',
                            save_results=False, count=0, name=None, add_image=True):
    """Display current results on tensorboard; save current results to an HTML file.

    Parameters:
        visuals (OrderedDict) -- dictionary of images to display or save
        total_iters (int) -- total iterations
        epoch (int) -- the current epoch
        dataset (str) -- 'train' or 'val' or 'test'
    """
    # if (not add_image) and (not save_results): return

    for label, image in visuals.items():
        for i in range(image.shape[0]):
            image_numpy = util.tensor2im(image[i])
            if add_image:
                self.writer.add_image(label + '%s_%02d' % (dataset, i + count),
                                      image_numpy, total_iters, dataformats='HWC')

            if save_results:
                save_path = os.path.join(self.img_dir, dataset, 'epoch_%s_%06d' % (epoch, total_iters))
                if not os.path.isdir(save_path):
                    os.makedirs(save_path)

                if name is not None:
                    img_path = os.path.join(save_path, '%s.png' % name)
                else:
                    img_path = os.path.join(save_path, '%s_%03d.png' % (label, i + count))
                util.save_image(image_numpy, img_path)
Display current results on tensorboard; save current results to an HTML file. Parameters: visuals (OrderedDict) -- dictionary of images to display or save total_iters (int) -- total iterations epoch (int) -- the current epoch dataset (str) -- 'train' or 'val' or 'test'
display_current_results
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def print_current_losses(self, epoch, iters, losses, t_comp, t_data, dataset='train'):
    """print current losses on console; also save the losses to the disk

    Parameters:
        epoch (int) -- current epoch
        iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
        losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
        t_comp (float) -- computational time per data point (normalized by batch_size)
        t_data (float) -- data loading time per data point (normalized by batch_size)
    """
    message = '(dataset: %s, epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (
        dataset, epoch, iters, t_comp, t_data)
    for k, v in losses.items():
        message += '%s: %.3f ' % (k, v)

    print(message)  # print the message
    with open(self.log_name, "a") as log_file:
        log_file.write('%s\n' % message)  # save the message
print current losses on console; also save the losses to the disk Parameters: epoch (int) -- current epoch iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) losses (OrderedDict) -- training losses stored in the format of (name, float) pairs t_comp (float) -- computational time per data point (normalized by batch_size) t_data (float) -- data loading time per data point (normalized by batch_size)
print_current_losses
python
OpenTalker/video-retalking
third_part/face3d/util/visualizer.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
Apache-2.0
def transform(point, center, scale, resolution, invert=False):
    """Generate an affine transformation matrix.

    Given a set of points, a center, a scale and a target resolution, the
    function generates an affine transformation matrix. If invert is ``True``
    it will produce the inverse transformation.

    Arguments:
        point {torch.tensor} -- the input 2D point
        center {torch.tensor or numpy.array} -- the center around which to perform the transformations
        scale {float} -- the scale of the face/object
        resolution {float} -- the output resolution

    Keyword Arguments:
        invert {bool} -- define whether the function should produce the direct or
        the inverse transformation matrix (default: {False})
    """
    _pt = torch.ones(3)
    _pt[0] = point[0]
    _pt[1] = point[1]

    h = 200.0 * scale
    t = torch.eye(3)
    t[0, 0] = resolution / h
    t[1, 1] = resolution / h
    t[0, 2] = resolution * (-center[0] / h + 0.5)
    t[1, 2] = resolution * (-center[1] / h + 0.5)

    if invert:
        t = torch.inverse(t)

    new_point = (torch.matmul(t, _pt))[0:2]

    return new_point.int()
Generate an affine transformation matrix. Given a set of points, a center, a scale and a target resolution, the function generates an affine transformation matrix. If invert is ``True`` it will produce the inverse transformation. Arguments: point {torch.tensor} -- the input 2D point center {torch.tensor or numpy.array} -- the center around which to perform the transformations scale {float} -- the scale of the face/object resolution {float} -- the output resolution Keyword Arguments: invert {bool} -- define whether the function should produce the direct or the inverse transformation matrix (default: {False})
transform
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
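A sketch of transform, checking that invert=True undoes the direct mapping up to integer rounding (center/scale values are made up):

import torch

center, scale, res = torch.tensor([128.0, 128.0]), 1.5, 256
ul = transform([1, 1], center, scale, res, invert=True)  # crop corner -> image space
back = transform(ul.float(), center, scale, res)         # image space -> crop space
print(ul, back)  # `back` is close to (1, 1), up to integer rounding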
def crop(image, center, scale, resolution=256.0):
    """Center crops an image or set of heatmaps

    Arguments:
        image {numpy.array} -- an rgb image
        center {numpy.array} -- the center of the object, usually the same as of the bounding box
        scale {float} -- scale of the face

    Keyword Arguments:
        resolution {float} -- the size of the output cropped image (default: {256.0})

    Returns:
        numpy.array -- the cropped image, resized to (resolution, resolution)
    """
    # Crop around the center point; input is expected to be an np.ndarray
    ul = transform([1, 1], center, scale, resolution, True)
    br = transform([resolution, resolution], center, scale, resolution, True)
    # pad = math.ceil(torch.norm((ul - br).float()) / 2.0 - (br[0] - ul[0]) / 2.0)
    if image.ndim > 2:
        newDim = np.array([br[1] - ul[1], br[0] - ul[0], image.shape[2]], dtype=np.int32)
        newImg = np.zeros(newDim, dtype=np.uint8)
    else:
        newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int32)  # np.int is deprecated; use a fixed-width dtype
        newImg = np.zeros(newDim, dtype=np.uint8)
    ht = image.shape[0]
    wd = image.shape[1]
    newX = np.array([max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)
    newY = np.array([max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)
    oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
    oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
    newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]
           ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]
    newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)),
                        interpolation=cv2.INTER_LINEAR)
    return newImg
Center crops an image or set of heatmaps Arguments: image {numpy.array} -- an rgb image center {numpy.array} -- the center of the object, usually the same as of the bounding box scale {float} -- scale of the face Keyword Arguments: resolution {float} -- the size of the output cropped image (default: {256.0}) Returns: numpy.array -- the cropped image, resized to (resolution, resolution)
crop
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
def get_preds_fromhm(hm, center=None, scale=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the center
    and the scale are provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        center {torch.tensor} -- the center of the bounding box (default: {None})
        scale {float} -- face scale (default: {None})
    """
    _, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)  # keep only the argmax indices
    idx += 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    preds_orig = torch.zeros(preds.size())
    if center is not None and scale is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], center, scale, hm.size(2), True)

    return preds, preds_orig
Obtain (x,y) coordinates given a set of N heatmaps. If the center and the scale are provided the function will return the points also in the original coordinate frame. Arguments: hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H] Keyword Arguments: center {torch.tensor} -- the center of the bounding box (default: {None}) scale {float} -- face scale (default: {None})
get_preds_fromhm
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
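A sketch of get_preds_fromhm on random heatmaps (assumes the function and its transform helper are in scope):

import torch

hm = torch.rand(1, 68, 64, 64)              # (B, N, 64, 64) synthetic heatmaps
preds, _ = get_preds_fromhm(hm)             # heatmap-space coords only
print(preds.shape)                          # torch.Size([1, 68, 2])
center, scale = torch.tensor([128.0, 128.0]), 1.5
_, preds_img = get_preds_fromhm(hm, center, scale)  # also mapped to image space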
def get_preds_fromhm_batch(hm, centers=None, scales=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the centers
    and the scales are provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        centers {torch.tensor} -- the centers of the bounding box (default: {None})
        scales {float} -- face scales (default: {None})
    """
    _, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)  # keep only the argmax indices
    idx += 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    preds_orig = torch.zeros(preds.size())
    if centers is not None and scales is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], centers[i], scales[i], hm.size(2), True)

    return preds, preds_orig
Obtain (x,y) coordinates given a set of N heatmaps. If the centers and the scales are provided the function will return the points also in the original coordinate frame. Arguments: hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H] Keyword Arguments: centers {torch.tensor} -- the centers of the bounding box (default: {None}) scales {float} -- face scales (default: {None})
get_preds_fromhm_batch
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
def shuffle_lr(parts, pairs=None):
    """Shuffle the points left-right according to the axis of symmetry
    of the object.

    Arguments:
        parts {torch.tensor} -- a 3D or 4D object containing the heatmaps.

    Keyword Arguments:
        pairs {list of integers} -- [order of the flipped points] (default: {None})
    """
    if pairs is None:
        pairs = [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 26, 25, 24, 23, 22, 21, 20,
                 19, 18, 17, 27, 28, 29, 30, 35, 34, 33, 32, 31, 45, 44, 43, 42, 47, 46, 39, 38, 37,
                 36, 41, 40, 54, 53, 52, 51, 50, 49, 48, 59, 58, 57, 56, 55, 64, 63, 62, 61, 60, 67,
                 66, 65]
    if parts.ndimension() == 3:
        parts = parts[pairs, ...]
    else:
        parts = parts[:, pairs, ...]

    return parts
Shuffle the points left-right according to the axis of symmetry of the object. Arguments: parts {torch.tensor} -- a 3D or 4D object containing the heatmaps. Keyword Arguments: pairs {list of integers} -- [order of the flipped points] (default: {None})
shuffle_lr
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
def flip(tensor, is_label=False):
    """Flip an image or a set of heatmaps left-right

    Arguments:
        tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]

    Keyword Arguments:
        is_label {bool} -- [denote whether the input is an image or a set of heatmaps] (default: {False})
    """
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)

    if is_label:
        tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)
    else:
        tensor = tensor.flip(tensor.ndimension() - 1)

    return tensor
Flip an image or a set of heatmaps left-right Arguments: tensor {numpy.array or torch.tensor} -- [the input image or heatmaps] Keyword Arguments: is_label {bool} -- [denote whether the input is an image or a set of heatmaps] (default: {False})
flip
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
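A sketch of flip with is_label=True, which routes through shuffle_lr before mirroring (assumes both functions are in scope):

import torch

hm = torch.rand(1, 68, 64, 64)
flipped = flip(hm, is_label=True)  # swaps symmetric landmark channels, then mirrors the W axis
assert flipped.shape == hm.shape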
def appdata_dir(appname=None, roaming=False):
    """ appdata_dir(appname=None, roaming=False)

    Get the path to the application directory, where applications are allowed
    to write user specific files (e.g. configurations). For non-user specific
    data, consider using common_appdata_dir().
    If appname is given, a subdir is appended (and created if necessary).
    If roaming is True, will prefer a roaming directory (Windows Vista/7).
    """

    # Define default user directory
    userDir = os.getenv('FACEALIGNMENT_USERDIR', None)
    if userDir is None:
        userDir = os.path.expanduser('~')
        if not os.path.isdir(userDir):  # pragma: no cover
            userDir = '/var/tmp'  # issue #54

    # Get system app data dir
    path = None
    if sys.platform.startswith('win'):
        path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
        path = (path2 or path1) if roaming else (path1 or path2)
    elif sys.platform.startswith('darwin'):
        path = os.path.join(userDir, 'Library', 'Application Support')
    # On Linux and as fallback
    if not (path and os.path.isdir(path)):
        path = userDir

    # Maybe we should store things local to the executable (in case of a
    # portable distro or a frozen application that wants to be portable)
    prefix = sys.prefix
    if getattr(sys, 'frozen', None):
        prefix = os.path.abspath(os.path.dirname(sys.executable))
    for reldir in ('settings', '../settings'):
        localpath = os.path.abspath(os.path.join(prefix, reldir))
        if os.path.isdir(localpath):  # pragma: no cover
            try:
                open(os.path.join(localpath, 'test.write'), 'wb').close()
                os.remove(os.path.join(localpath, 'test.write'))
            except IOError:
                pass  # We cannot write in this directory
            else:
                path = localpath
                break

    # Get path specific for this app
    if appname:
        if path == userDir:
            appname = '.' + appname.lstrip('.')  # Make it a hidden directory
        path = os.path.join(path, appname)
        if not os.path.isdir(path):  # pragma: no cover
            os.mkdir(path)

    # Done
    return path
appdata_dir(appname=None, roaming=False) Get the path to the application directory, where applications are allowed to write user specific files (e.g. configurations). For non-user specific data, consider using common_appdata_dir(). If appname is given, a subdir is appended (and created if necessary). If roaming is True, will prefer a roaming directory (Windows Vista/7).
appdata_dir
python
OpenTalker/video-retalking
third_part/face_detection/utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
Apache-2.0
def detect_from_directory(self, path, extensions=['.jpg', '.png'], recursive=False, show_progress_bar=True):
    """Detects faces from all the images present in a given directory.

    Arguments:
        path {string} -- a string containing a path that points to the folder containing the images

    Keyword Arguments:
        extensions {list} -- list of string containing the extensions to be
            considered in the following format: ``.extension_name``
            (default: {['.jpg', '.png']})
        recursive {bool} -- whether to scan the folder recursively (default: {False})
        show_progress_bar {bool} -- display a progressbar (default: {True})

    Example:
        >>> directory = 'data'
        ... detected_faces = detect_from_directory(directory)
        {A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}

    """
    if self.verbose:
        logger = logging.getLogger(__name__)

    if len(extensions) == 0:
        if self.verbose:
            logger.error("Expected at least one extension, but none was received.")
        raise ValueError

    if self.verbose:
        logger.info("Constructing the list of images.")
    additional_pattern = '/**/*' if recursive else '/*'
    files = []
    for extension in extensions:
        files.extend(glob.glob(path + additional_pattern + extension, recursive=recursive))

    if self.verbose:
        logger.info("Finished searching for images. %s images found", len(files))
        logger.info("Preparing to run the detection.")

    predictions = {}
    for image_path in tqdm(files, disable=not show_progress_bar):
        if self.verbose:
            logger.info("Running the face detector on image: %s", image_path)
        predictions[image_path] = self.detect_from_image(image_path)

    if self.verbose:
        logger.info("The detector was successfully run on all %s images", len(files))

    return predictions
Detects faces from all the images present in a given directory. Arguments: path {string} -- a string containing a path that points to the folder containing the images Keyword Arguments: extensions {list} -- list of string containing the extensions to be considered in the following format: ``.extension_name`` (default: {['.jpg', '.png']}) recursive {bool} -- whether to scan the folder recursively (default: {False}) show_progress_bar {bool} -- display a progressbar (default: {True}) Example: >>> directory = 'data' ... detected_faces = detect_from_directory(directory) {A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}
detect_from_directory
python
OpenTalker/video-retalking
third_part/face_detection/detection/core.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/core.py
Apache-2.0
def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):
    """Convert path (represented as a string) or torch.tensor to a numpy.ndarray

    Arguments:
        tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
    """
    if isinstance(tensor_or_path, str):
        return cv2.imread(tensor_or_path) if not rgb else cv2.imread(tensor_or_path)[..., ::-1]
    elif torch.is_tensor(tensor_or_path):
        # Call cpu in case its coming from cuda
        return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()
    elif isinstance(tensor_or_path, np.ndarray):
        return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path
    else:
        raise TypeError
Convert path (represented as a string) or torch.tensor to a numpy.ndarray Arguments: tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
tensor_or_path_to_ndarray
python
OpenTalker/video-retalking
third_part/face_detection/detection/core.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/core.py
Apache-2.0
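A sketch of the three input kinds tensor_or_path_to_ndarray accepts (the image path is hypothetical):

import numpy as np
import torch

arr = tensor_or_path_to_ndarray(np.zeros((4, 4, 3), dtype=np.uint8))  # ndarray passes through as RGB
arr = tensor_or_path_to_ndarray(torch.zeros(4, 4, 3))                 # tensor -> ndarray on CPU
# arr = tensor_or_path_to_ndarray('face.jpg')                         # path -> imread, BGR flipped to RGB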
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors, 4]
Encode the variances from the priorbox layers into the ground truth boxes we have matched (based on jaccard overlap) with the prior boxes. Args: matched: (tensor) Coords of ground truth for each prior in point-form Shape: [num_priors, 4]. priors: (tensor) Prior boxes in center-offset form Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: encoded boxes (tensor), Shape: [num_priors, 4]
encode
python
OpenTalker/video-retalking
third_part/face_detection/detection/sfd/bbox.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/sfd/bbox.py
Apache-2.0
def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors, 4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded bounding box predictions
decode
python
OpenTalker/video-retalking
third_part/face_detection/detection/sfd/bbox.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/sfd/bbox.py
Apache-2.0
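A round-trip sketch showing that decode inverts encode (assumes both functions are in scope; the boxes and variances are made up):

import torch

priors = torch.tensor([[0.5, 0.5, 0.2, 0.3]])   # center-offset form (cx, cy, w, h)
gt = torch.tensor([[0.40, 0.35, 0.60, 0.65]])   # point form (x1, y1, x2, y2)
variances = [0.1, 0.2]

loc = encode(gt, priors, variances)
recon = decode(loc, priors, variances)          # back to point form
print(torch.allclose(recon, gt, atol=1e-6))     # True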
def batch_decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors, 4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((
        priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:],
        priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2)
    boxes[:, :, :2] -= boxes[:, :, 2:] / 2
    boxes[:, :, 2:] += boxes[:, :, :2]
    return boxes
Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded bounding box predictions
batch_decode
python
OpenTalker/video-retalking
third_part/face_detection/detection/sfd/bbox.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/sfd/bbox.py
Apache-2.0
def get_norm_layer(norm_type='instance'):
    """Return a normalization layer

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
    """
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    elif norm_type == 'instance':
        # change default flags, so that instance norm behaves the same in both train and eval
        # https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/395
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    elif norm_type == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer
Return a normalization layer Parameters: norm_type (str) -- the name of the normalization layer: batch | instance | none For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
get_norm_layer
python
OpenTalker/video-retalking
third_part/ganimation_replicate/model/model_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/ganimation_replicate/model/model_utils.py
Apache-2.0
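A sketch using the returned factory inside a small block (assumes get_norm_layer is in scope):

import torch
import torch.nn as nn

norm_layer = get_norm_layer('instance')  # a functools.partial over nn.InstanceNorm2d
block = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), norm_layer(16), nn.ReLU())
y = block(torch.randn(1, 3, 32, 32))     # -> (1, 16, 32, 32)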
def forward(self,
            styles,
            conditions,
            input_is_latent=False,
            noise=None,
            randomize_noise=True,
            truncation=1,
            truncation_latent=None,
            inject_index=None,
            return_latents=False):
    """Forward function for StyleGAN2GeneratorSFT.

    Args:
        styles (list[Tensor]): Sample codes of styles.
        conditions (list[Tensor]): SFT conditions to generators.
        input_is_latent (bool): Whether input is latent style. Default: False.
        noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
        truncation (float): The truncation ratio. Default: 1.
        truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
        inject_index (int | None): The injection index for mixing noise. Default: None.
        return_latents (bool): Whether to return style latents. Default: False.
    """
    # style codes -> latents with Style MLP layer
    if not input_is_latent:
        styles = [self.style_mlp(s) for s in styles]
    # noises
    if noise is None:
        if randomize_noise:
            noise = [None] * self.num_layers  # for each style conv layer
        else:  # use the stored noise
            noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
    # style truncation
    if truncation < 1:
        style_truncation = []
        for style in styles:
            style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
        styles = style_truncation
    # get style latents with injection
    if len(styles) == 1:
        inject_index = self.num_latent

        if styles[0].ndim < 3:
            # repeat latent code for all the layers
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:  # used for encoder with different latent code for each layer
            latent = styles[0]
    elif len(styles) == 2:  # mixing noises
        if inject_index is None:
            inject_index = random.randint(1, self.num_latent - 1)
        latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
        latent = torch.cat([latent1, latent2], 1)

    # main generation
    out = self.constant_input(latent.shape[0])
    out = self.style_conv1(out, latent[:, 0], noise=noise[0])
    skip = self.to_rgb1(out, latent[:, 1])

    i = 1
    for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
                                                    noise[2::2], self.to_rgbs):
        out = conv1(out, latent[:, i], noise=noise1)

        # the conditions may have fewer levels
        if i < len(conditions):
            # SFT part to combine the conditions
            if self.sft_half:  # only apply SFT to half of the channels
                out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
                out_sft = out_sft * conditions[i - 1] + conditions[i]
                out = torch.cat([out_same, out_sft], dim=1)
            else:  # apply SFT to all the channels
                out = out * conditions[i - 1] + conditions[i]

        out = conv2(out, latent[:, i + 1], noise=noise2)
        skip = to_rgb(out, latent[:, i + 2], skip)  # feature back to the rgb space
        i += 2

    image = skip

    if return_latents:
        return image, latent
    else:
        return image, None
Forward function for StyleGAN2GeneratorSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
Apache-2.0
def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
    """Forward function for GFPGANv1.

    Args:
        x (Tensor): Input images.
        return_latents (bool): Whether to return style latents. Default: False.
        return_rgb (bool): Whether return intermediate rgb images. Default: True.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
    """
    conditions = []
    unet_skips = []
    out_rgbs = []

    # encoder
    feat = self.conv_body_first(x)
    for i in range(self.log_size - 2):
        feat = self.conv_body_down[i](feat)
        unet_skips.insert(0, feat)

    feat = self.final_conv(feat)

    # style code
    style_code = self.final_linear(feat.view(feat.size(0), -1))
    if self.different_w:
        style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)

    # decode
    for i in range(self.log_size - 2):
        # add unet skip
        feat = feat + unet_skips[i]
        # ResUpLayer
        feat = self.conv_body_up[i](feat)
        # generate scale and shift for SFT layers
        scale = self.condition_scale[i](feat)
        conditions.append(scale.clone())
        shift = self.condition_shift[i](feat)
        conditions.append(shift.clone())
        # generate rgb images
        if return_rgb:
            out_rgbs.append(self.toRGB[i](feat))

    # decoder
    image, _ = self.stylegan_decoder([style_code],
                                     conditions,
                                     return_latents=return_latents,
                                     input_is_latent=self.input_is_latent,
                                     randomize_noise=randomize_noise)

    return image, out_rgbs
Forward function for GFPGANv1. Args: x (Tensor): Input images. return_latents (bool): Whether to return style latents. Default: False. return_rgb (bool): Whether return intermediate rgb images. Default: True. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
Apache-2.0
def forward(self, x, return_feats=False):
    """Forward function for FacialComponentDiscriminator.

    Args:
        x (Tensor): Input images.
        return_feats (bool): Whether to return intermediate features. Default: False.
    """
    feat = self.conv1(x)
    feat = self.conv3(self.conv2(feat))
    rlt_feats = []
    if return_feats:
        rlt_feats.append(feat.clone())
    feat = self.conv5(self.conv4(feat))
    if return_feats:
        rlt_feats.append(feat.clone())
    out = self.final_conv(feat)

    if return_feats:
        return out, rlt_feats
    else:
        return out, None
Forward function for FacialComponentDiscriminator. Args: x (Tensor): Input images. return_feats (bool): Whether to return intermediate features. Default: False.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
Apache-2.0
def forward(self,
            styles,
            conditions,
            input_is_latent=False,
            noise=None,
            randomize_noise=True,
            truncation=1,
            truncation_latent=None,
            inject_index=None,
            return_latents=False):
    """Forward function for StyleGAN2GeneratorCSFT.

    Args:
        styles (list[Tensor]): Sample codes of styles.
        conditions (list[Tensor]): SFT conditions to generators.
        input_is_latent (bool): Whether input is latent style. Default: False.
        noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
        truncation (float): The truncation ratio. Default: 1.
        truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
        inject_index (int | None): The injection index for mixing noise. Default: None.
        return_latents (bool): Whether to return style latents. Default: False.
    """
    # style codes -> latents with Style MLP layer
    if not input_is_latent:
        styles = [self.style_mlp(s) for s in styles]
    # noises
    if noise is None:
        if randomize_noise:
            noise = [None] * self.num_layers  # for each style conv layer
        else:  # use the stored noise
            noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
    # style truncation
    if truncation < 1:
        style_truncation = []
        for style in styles:
            style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
        styles = style_truncation
    # get style latents with injection
    if len(styles) == 1:
        inject_index = self.num_latent

        if styles[0].ndim < 3:
            # repeat latent code for all the layers
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:  # used for encoder with different latent code for each layer
            latent = styles[0]
    elif len(styles) == 2:  # mixing noises
        if inject_index is None:
            inject_index = random.randint(1, self.num_latent - 1)
        latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
        latent = torch.cat([latent1, latent2], 1)

    # main generation
    out = self.constant_input(latent.shape[0])
    out = self.style_conv1(out, latent[:, 0], noise=noise[0])
    skip = self.to_rgb1(out, latent[:, 1])

    i = 1
    for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
                                                    noise[2::2], self.to_rgbs):
        out = conv1(out, latent[:, i], noise=noise1)

        # the conditions may have fewer levels
        if i < len(conditions):
            # SFT part to combine the conditions
            if self.sft_half:  # only apply SFT to half of the channels
                out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
                out_sft = out_sft * conditions[i - 1] + conditions[i]
                out = torch.cat([out_same, out_sft], dim=1)
            else:  # apply SFT to all the channels
                out = out * conditions[i - 1] + conditions[i]

        out = conv2(out, latent[:, i + 1], noise=noise2)
        skip = to_rgb(out, latent[:, i + 2], skip)  # feature back to the rgb space
        i += 2

    image = skip

    if return_latents:
        return image, latent
    else:
        return image, None
Forward function for StyleGAN2GeneratorCSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
Apache-2.0
def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
    """Forward function for GFPGANv1Clean.

    Args:
        x (Tensor): Input images.
        return_latents (bool): Whether to return style latents. Default: False.
        return_rgb (bool): Whether return intermediate rgb images. Default: True.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
    """
    conditions = []
    unet_skips = []
    out_rgbs = []

    # encoder
    feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2)
    for i in range(self.log_size - 2):
        feat = self.conv_body_down[i](feat)
        unet_skips.insert(0, feat)
    feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2)

    # style code
    style_code = self.final_linear(feat.view(feat.size(0), -1))
    if self.different_w:
        style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)

    # decode
    for i in range(self.log_size - 2):
        # add unet skip
        feat = feat + unet_skips[i]
        # ResUpLayer
        feat = self.conv_body_up[i](feat)
        # generate scale and shift for SFT layers
        scale = self.condition_scale[i](feat)
        conditions.append(scale.clone())
        shift = self.condition_shift[i](feat)
        conditions.append(shift.clone())
        # generate rgb images
        if return_rgb:
            out_rgbs.append(self.toRGB[i](feat))

    # decoder
    image, _ = self.stylegan_decoder([style_code],
                                     conditions,
                                     return_latents=return_latents,
                                     input_is_latent=self.input_is_latent,
                                     randomize_noise=randomize_noise)

    return image, out_rgbs
Forward function for GFPGANv1Clean. Args: x (Tensor): Input images. return_latents (bool): Whether to return style latents. Default: False. return_rgb (bool): Whether return intermediate rgb images. Default: True. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
Apache-2.0
def forward(self,
            styles,
            conditions,
            input_is_latent=False,
            noise=None,
            randomize_noise=True,
            truncation=1,
            truncation_latent=None,
            inject_index=None,
            return_latents=False):
    """Forward function for StyleGAN2GeneratorBilinearSFT.

    Args:
        styles (list[Tensor]): Sample codes of styles.
        conditions (list[Tensor]): SFT conditions to generators.
        input_is_latent (bool): Whether input is latent style. Default: False.
        noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
        truncation (float): The truncation ratio. Default: 1.
        truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
        inject_index (int | None): The injection index for mixing noise. Default: None.
        return_latents (bool): Whether to return style latents. Default: False.
    """
    # style codes -> latents with Style MLP layer
    if not input_is_latent:
        styles = [self.style_mlp(s) for s in styles]
    # noises
    if noise is None:
        if randomize_noise:
            noise = [None] * self.num_layers  # for each style conv layer
        else:  # use the stored noise
            noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
    # style truncation
    if truncation < 1:
        style_truncation = []
        for style in styles:
            style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
        styles = style_truncation
    # get style latents with injection
    if len(styles) == 1:
        inject_index = self.num_latent

        if styles[0].ndim < 3:
            # repeat latent code for all the layers
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:  # used for encoder with different latent code for each layer
            latent = styles[0]
    elif len(styles) == 2:  # mixing noises
        if inject_index is None:
            inject_index = random.randint(1, self.num_latent - 1)
        latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
        latent = torch.cat([latent1, latent2], 1)

    # main generation
    out = self.constant_input(latent.shape[0])
    out = self.style_conv1(out, latent[:, 0], noise=noise[0])
    skip = self.to_rgb1(out, latent[:, 1])

    i = 1
    for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
                                                    noise[2::2], self.to_rgbs):
        out = conv1(out, latent[:, i], noise=noise1)

        # the conditions may have fewer levels
        if i < len(conditions):
            # SFT part to combine the conditions
            if self.sft_half:  # only apply SFT to half of the channels
                out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
                out_sft = out_sft * conditions[i - 1] + conditions[i]
                out = torch.cat([out_same, out_sft], dim=1)
            else:  # apply SFT to all the channels
                out = out * conditions[i - 1] + conditions[i]

        out = conv2(out, latent[:, i + 1], noise=noise2)
        skip = to_rgb(out, latent[:, i + 2], skip)  # feature back to the rgb space
        i += 2

    image = skip

    if return_latents:
        return image, latent
    else:
        return image, None
Forward function for StyleGAN2GeneratorBilinearSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
Apache-2.0
def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
    """Forward function for GFPGANBilinear.

    Args:
        x (Tensor): Input images.
        return_latents (bool): Whether to return style latents. Default: False.
        return_rgb (bool): Whether return intermediate rgb images. Default: True.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
    """
    conditions = []
    unet_skips = []
    out_rgbs = []

    # encoder
    feat = self.conv_body_first(x)
    for i in range(self.log_size - 2):
        feat = self.conv_body_down[i](feat)
        unet_skips.insert(0, feat)

    feat = self.final_conv(feat)

    # style code
    style_code = self.final_linear(feat.view(feat.size(0), -1))
    if self.different_w:
        style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)

    # decode
    for i in range(self.log_size - 2):
        # add unet skip
        feat = feat + unet_skips[i]
        # ResUpLayer
        feat = self.conv_body_up[i](feat)
        # generate scale and shift for SFT layers
        scale = self.condition_scale[i](feat)
        conditions.append(scale.clone())
        shift = self.condition_shift[i](feat)
        conditions.append(shift.clone())
        # generate rgb images
        if return_rgb:
            out_rgbs.append(self.toRGB[i](feat))

    # decoder
    image, _ = self.stylegan_decoder([style_code],
                                     conditions,
                                     return_latents=return_latents,
                                     input_is_latent=self.input_is_latent,
                                     randomize_noise=randomize_noise)

    return image, out_rgbs
Forward function for GFPGANBilinear. Args: x (Tensor): Input images. return_latents (bool): Whether to return style latents. Default: False. return_rgb (bool): Whether return intermediate rgb images. Default: True. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
Apache-2.0
def forward(self, x, style):
    """Forward function.

    Args:
        x (Tensor): Tensor with shape (b, c, h, w).
        style (Tensor): Tensor with shape (b, num_style_feat).

    Returns:
        Tensor: Modulated tensor after convolution.
    """
    b, c, h, w = x.shape  # c = c_in
    # weight modulation
    style = self.modulation(style).view(b, 1, c, 1, 1)
    # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
    weight = self.scale * self.weight * style  # (b, c_out, c_in, k, k)

    if self.demodulate:
        demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
        weight = weight * demod.view(b, self.out_channels, 1, 1, 1)

    weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)

    if self.sample_mode == 'upsample':
        x = F.interpolate(x, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners)
    elif self.sample_mode == 'downsample':
        x = F.interpolate(x, scale_factor=0.5, mode=self.interpolation_mode, align_corners=self.align_corners)

    b, c, h, w = x.shape
    x = x.view(1, b * c, h, w)
    # weight: (b*c_out, c_in, k, k), groups=b
    out = F.conv2d(x, weight, padding=self.padding, groups=b)
    out = out.view(b, self.out_channels, *out.shape[2:4])

    return out
Forward function. Args: x (Tensor): Tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). Returns: Tensor: Modulated tensor after convolution.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
Apache-2.0
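A minimal sketch of the modulate/demodulate steps in the forward above, written out on plain tensors; the shapes, names, and the 1e-8 eps are illustrative assumptions, not the class API.

import torch
import torch.nn.functional as F

b, c_in, c_out, k, h, w = 2, 8, 16, 3, 32, 32
x = torch.randn(b, c_in, h, w)
weight = torch.randn(1, c_out, c_in, k, k)   # shared base weight
style = torch.randn(b, 1, c_in, 1, 1)        # per-sample channel scales

w_mod = weight * style                        # (b, c_out, c_in, k, k)
demod = torch.rsqrt(w_mod.pow(2).sum([2, 3, 4]) + 1e-8)
w_mod = w_mod * demod.view(b, c_out, 1, 1, 1)  # unit-norm per output map

# fold the batch into the channel dim so one grouped conv applies a
# different kernel to every sample
out = F.conv2d(x.view(1, b * c_in, h, w),
               w_mod.view(b * c_out, c_in, k, k),
               padding=k // 2, groups=b)
out = out.view(b, c_out, h, w)
print(out.shape)  # torch.Size([2, 16, 32, 32])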
def forward(self, x, style, skip=None): """Forward function. Args: x (Tensor): Feature tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). skip (Tensor): Base/skip tensor. Default: None. Returns: Tensor: RGB images. """ out = self.modulated_conv(x, style) out = out + self.bias if skip is not None: if self.upsample: skip = F.interpolate( skip, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners) out = out + skip return out
Forward function. Args: x (Tensor): Feature tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). skip (Tensor): Base/skip tensor. Default: None. Returns: Tensor: RGB images.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
Apache-2.0
def forward(self, styles, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False):
    """Forward function for StyleGAN2Generator.

    Args:
        styles (list[Tensor]): Sample codes of styles.
        input_is_latent (bool): Whether input is latent style. Default: False.
        noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
        truncation (float): The truncation ratio. Default: 1.
        truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
        inject_index (int | None): The injection index for mixing noise. Default: None.
        return_latents (bool): Whether to return style latents. Default: False.
    """
    # style codes -> latents with Style MLP layer
    if not input_is_latent:
        styles = [self.style_mlp(s) for s in styles]
    # noises
    if noise is None:
        if randomize_noise:
            noise = [None] * self.num_layers  # for each style conv layer
        else:  # use the stored noise
            noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
    # style truncation
    if truncation < 1:
        style_truncation = []
        for style in styles:
            style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
        styles = style_truncation
    # get style latent with injection
    if len(styles) == 1:
        inject_index = self.num_latent

        if styles[0].ndim < 3:
            # repeat latent code for all the layers
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:  # used for encoder with different latent code for each layer
            latent = styles[0]
    elif len(styles) == 2:  # mixing noises
        if inject_index is None:
            inject_index = random.randint(1, self.num_latent - 1)
        latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
        latent = torch.cat([latent1, latent2], 1)
    # main generation
    out = self.constant_input(latent.shape[0])
    out = self.style_conv1(out, latent[:, 0], noise=noise[0])
    skip = self.to_rgb1(out, latent[:, 1])

    i = 1
    for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
                                                    noise[2::2], self.to_rgbs):
        out = conv1(out, latent[:, i], noise=noise1)
        out = conv2(out, latent[:, i + 1], noise=noise2)
        skip = to_rgb(out, latent[:, i + 2], skip)
        i += 2

    image = skip

    if return_latents:
        return image, latent
    else:
        return image, None
Forward function for StyleGAN2Generator. Args: styles (list[Tensor]): Sample codes of styles. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
Apache-2.0
def forward(self, x, style): """Forward function. Args: x (Tensor): Tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). Returns: Tensor: Modulated tensor after convolution. """ b, c, h, w = x.shape # c = c_in # weight modulation style = self.modulation(style).view(b, 1, c, 1, 1) # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1) weight = self.weight * style # (b, c_out, c_in, k, k) if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps) weight = weight * demod.view(b, self.out_channels, 1, 1, 1) weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size) # upsample or downsample if necessary if self.sample_mode == 'upsample': x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False) elif self.sample_mode == 'downsample': x = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False) b, c, h, w = x.shape x = x.view(1, b * c, h, w) # weight: (b*c_out, c_in, k, k), groups=b out = F.conv2d(x, weight, padding=self.padding, groups=b) out = out.view(b, self.out_channels, *out.shape[2:4]) return out
Forward function. Args: x (Tensor): Tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). Returns: Tensor: Modulated tensor after convolution.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
Apache-2.0
def forward(self, x, style, skip=None): """Forward function. Args: x (Tensor): Feature tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). skip (Tensor): Base/skip tensor. Default: None. Returns: Tensor: RGB images. """ out = self.modulated_conv(x, style) out = out + self.bias if skip is not None: if self.upsample: skip = F.interpolate(skip, scale_factor=2, mode='bilinear', align_corners=False) out = out + skip return out
Forward function. Args: x (Tensor): Feature tensor with shape (b, c, h, w). style (Tensor): Tensor with shape (b, num_style_feat). skip (Tensor): Base/skip tensor. Default: None. Returns: Tensor: RGB images.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
Apache-2.0
def forward(self, styles, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False): """Forward function for StyleGAN2GeneratorClean. Args: styles (list[Tensor]): Sample codes of styles. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False. """ # style codes -> latents with Style MLP layer if not input_is_latent: styles = [self.style_mlp(s) for s in styles] # noises if noise is None: if randomize_noise: noise = [None] * self.num_layers # for each style conv layer else: # use the stored noise noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] # style truncation if truncation < 1: style_truncation = [] for style in styles: style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) styles = style_truncation # get style latents with injection if len(styles) == 1: inject_index = self.num_latent if styles[0].ndim < 3: # repeat latent code for all the layers latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) else: # used for encoder with different latent code for each layer latent = styles[0] elif len(styles) == 2: # mixing noises if inject_index is None: inject_index = random.randint(1, self.num_latent - 1) latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs): out = conv1(out, latent[:, i], noise=noise1) out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space i += 2 image = skip if return_latents: return image, latent else: return image, None
Forward function for StyleGAN2GeneratorClean. Args: styles (list[Tensor]): Sample codes of styles. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False.
forward
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
Apache-2.0
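A minimal sketch of the truncation and two-style mixing logic in the generator forward above, on plain tensors; num_latent, the dimensions, and the zero mean latent are illustrative assumptions.

import random
import torch

num_latent, dim = 14, 512
s1, s2 = torch.randn(1, dim), torch.randn(1, dim)
truncation, truncation_latent = 0.7, torch.zeros(1, dim)

# truncation: pull each style toward the mean latent
s1 = truncation_latent + truncation * (s1 - truncation_latent)
s2 = truncation_latent + truncation * (s2 - truncation_latent)

# mixing: early layers take style 1, later layers take style 2
inject_index = random.randint(1, num_latent - 1)
latent = torch.cat([s1.unsqueeze(1).repeat(1, inject_index, 1),
                    s2.unsqueeze(1).repeat(1, num_latent - inject_index, 1)], 1)
print(latent.shape)  # torch.Size([1, 14, 512])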
def color_jitter(img, shift): """jitter color: randomly jitter the RGB values, in numpy formats""" jitter_val = np.random.uniform(-shift, shift, 3).astype(np.float32) img = img + jitter_val img = np.clip(img, 0, 1) return img
jitter color: randomly jitter the RGB values, in numpy formats
color_jitter
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
Apache-2.0
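A minimal usage sketch for color_jitter, assuming the function above is in scope and the input is a float image in [0, 1].

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)
jittered = color_jitter(img, shift=0.05)  # each channel offset by U(-0.05, 0.05)
assert jittered.min() >= 0 and jittered.max() <= 1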
def color_jitter_pt(img, brightness, contrast, saturation, hue): """jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats""" fn_idx = torch.randperm(4) for fn_id in fn_idx: if fn_id == 0 and brightness is not None: brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item() img = adjust_brightness(img, brightness_factor) if fn_id == 1 and contrast is not None: contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item() img = adjust_contrast(img, contrast_factor) if fn_id == 2 and saturation is not None: saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item() img = adjust_saturation(img, saturation_factor) if fn_id == 3 and hue is not None: hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item() img = adjust_hue(img, hue_factor) return img
jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats
color_jitter_pt
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
Apache-2.0
def get_component_coordinates(self, index, status): """Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file""" components_bbox = self.components_list[f'{index:08d}'] if status[0]: # hflip # exchange right and left eye tmp = components_bbox['left_eye'] components_bbox['left_eye'] = components_bbox['right_eye'] components_bbox['right_eye'] = tmp # modify the width coordinate components_bbox['left_eye'][0] = self.out_size - components_bbox['left_eye'][0] components_bbox['right_eye'][0] = self.out_size - components_bbox['right_eye'][0] components_bbox['mouth'][0] = self.out_size - components_bbox['mouth'][0] # get coordinates locations = [] for part in ['left_eye', 'right_eye', 'mouth']: mean = components_bbox[part][0:2] half_len = components_bbox[part][2] if 'eye' in part: half_len *= self.eye_enlarge_ratio loc = np.hstack((mean - half_len + 1, mean + half_len)) loc = torch.from_numpy(loc).float() locations.append(loc) return locations
Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file
get_component_coordinates
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
Apache-2.0
def construct_img_pyramid(self): """Construct image pyramid for intermediate restoration loss""" pyramid_gt = [self.gt] down_img = self.gt for _ in range(0, self.log_size - 3): down_img = F.interpolate(down_img, scale_factor=0.5, mode='bilinear', align_corners=False) pyramid_gt.insert(0, down_img) return pyramid_gt
Construct image pyramid for intermediate restoration loss
construct_img_pyramid
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/models/gfpgan_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/models/gfpgan_model.py
Apache-2.0
def _gram_mat(self, x): """Calculate Gram matrix. Args: x (torch.Tensor): Tensor with shape of (n, c, h, w). Returns: torch.Tensor: Gram matrix. """ n, c, h, w = x.size() features = x.view(n, c, w * h) features_t = features.transpose(1, 2) gram = features.bmm(features_t) / (c * h * w) return gram
Calculate Gram matrix. Args: x (torch.Tensor): Tensor with shape of (n, c, h, w). Returns: torch.Tensor: Gram matrix.
_gram_mat
python
OpenTalker/video-retalking
third_part/GFPGAN/gfpgan/models/gfpgan_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/models/gfpgan_model.py
Apache-2.0
def _umeyama(src, dst, estimate_scale=True, scale=1.0):
    """Estimate N-D similarity transformation with or without scaling.

    Parameters
    ----------
    src : (M, N) array
        Source coordinates.
    dst : (M, N) array
        Destination coordinates.
    estimate_scale : bool
        Whether to estimate scaling factor.

    Returns
    -------
    T : (N + 1, N + 1)
        The homogeneous similarity transformation matrix. The matrix contains
        NaN values only if the problem is not well-conditioned.
    scale : float
        The estimated (or supplied) scale factor.

    References
    ----------
    .. [1] "Least-squares estimation of transformation parameters between two
           point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
    """
    num = src.shape[0]
    dim = src.shape[1]

    # Compute mean of src and dst.
    src_mean = src.mean(axis=0)
    dst_mean = dst.mean(axis=0)

    # Subtract mean from src and dst.
    src_demean = src - src_mean
    dst_demean = dst - dst_mean

    # Eq. (38).
    A = dst_demean.T @ src_demean / num

    # Eq. (39).
    d = np.ones((dim,), dtype=np.double)
    if np.linalg.det(A) < 0:
        d[dim - 1] = -1

    T = np.eye(dim + 1, dtype=np.double)

    U, S, V = np.linalg.svd(A)

    # Eq. (40) and (43).
    rank = np.linalg.matrix_rank(A)
    if rank == 0:
        return np.nan * T, scale
    elif rank == dim - 1:
        if np.linalg.det(U) * np.linalg.det(V) > 0:
            T[:dim, :dim] = U @ V
        else:
            s = d[dim - 1]
            d[dim - 1] = -1
            T[:dim, :dim] = U @ np.diag(d) @ V
            d[dim - 1] = s
    else:
        T[:dim, :dim] = U @ np.diag(d) @ V

    if estimate_scale:
        # Eq. (41) and (42).
        scale = 1.0 / src_demean.var(axis=0).sum() * (S @ d)

    T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)
    T[:dim, :dim] *= scale

    return T, scale
Estimate N-D similarity transformation with or without scaling. Parameters ---------- src : (M, N) array Source coordinates. dst : (M, N) array Destination coordinates. estimate_scale : bool Whether to estimate scaling factor. Returns ------- T : (N + 1, N + 1) The homogeneous similarity transformation matrix. The matrix contains NaN values only if the problem is not well-conditioned. scale : float The estimated (or supplied) scale factor. References ---------- .. [1] "Least-squares estimation of transformation parameters between two point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
_umeyama
python
OpenTalker/video-retalking
third_part/GPEN/align_faces.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/align_faces.py
Apache-2.0
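A usage sketch for _umeyama, assuming it is in scope: recover a known 2-D similarity transform (rotation, uniform scale, translation) from noiseless matched point pairs.

import numpy as np

rng = np.random.default_rng(0)
src = rng.normal(size=(10, 2))
theta, s, t = 0.3, 1.5, np.array([2.0, -1.0])
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
dst = s * src @ R.T + t

T, scale = _umeyama(src, dst, estimate_scale=True)
print(np.round(scale, 3))                                         # ~1.5
print(np.allclose(src @ T[:2, :2].T + T[:2, 2], dst, atol=1e-6))  # True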
def remove_prefix(self, state_dict, prefix):
    '''Old-style models are stored with all parameter names sharing the common prefix 'module.' '''
    f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
    return {f(key): value for key, value in state_dict.items()}
Old-style models are stored with all parameter names sharing the common prefix 'module.'
remove_prefix
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/retinaface_detection.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/retinaface_detection.py
Apache-2.0
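A standalone sketch of the same prefix-stripping idea on a toy state dict (the keys are illustrative).

state_dict = {'module.conv.weight': 1, 'module.conv.bias': 2, 'fc.weight': 3}
f = lambda x: x.split('module.', 1)[-1] if x.startswith('module.') else x
print({f(k): v for k, v in state_dict.items()})
# {'conv.weight': 1, 'conv.bias': 2, 'fc.weight': 3}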
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, np.ndarray):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)

    return (torch.stack(imgs, 0), targets)
Custom collate fn for dealing with batches of images that have a different number of associated object annotations (bounding boxes). Arguments: batch: (tuple) A tuple of tensor images and lists of annotations Return: A tuple containing: 1) (tensor) batch of images stacked on their 0 dim 2) (list of tensors) annotations for a given image are stacked on 0 dim
detection_collate
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/data/wider_face.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/data/wider_face.py
Apache-2.0
def __init__(self, cfg = None, phase = 'train'): """ :param cfg: Network related settings. :param phase: train or test. """ super(RetinaFace,self).__init__() self.phase = phase backbone = None if cfg['name'] == 'mobilenet0.25': backbone = MobileNetV1() if cfg['pretrain']: checkpoint = torch.load("./weights/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu')) from collections import OrderedDict new_state_dict = OrderedDict() for k, v in checkpoint['state_dict'].items(): name = k[7:] # remove module. new_state_dict[name] = v # load params backbone.load_state_dict(new_state_dict) elif cfg['name'] == 'Resnet50': import torchvision.models as models backbone = models.resnet50(pretrained=cfg['pretrain']) self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers']) in_channels_stage2 = cfg['in_channel'] in_channels_list = [ in_channels_stage2 * 2, in_channels_stage2 * 4, in_channels_stage2 * 8, ] out_channels = cfg['out_channel'] self.fpn = FPN(in_channels_list,out_channels) self.ssh1 = SSH(out_channels, out_channels) self.ssh2 = SSH(out_channels, out_channels) self.ssh3 = SSH(out_channels, out_channels) self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel']) self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel']) self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
:param cfg: Network related settings. :param phase: train or test.
__init__
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/facemodels/retinaface.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/facemodels/retinaface.py
Apache-2.0
def point_form(boxes): """ Convert prior_boxes to (xmin, ymin, xmax, ymax) representation for comparison to point form ground truth data. Args: boxes: (tensor) center-size default boxes from priorbox layers. Return: boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. """ return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
Convert prior_boxes to (xmin, ymin, xmax, ymax) representation for comparison to point form ground truth data. Args: boxes: (tensor) center-size default boxes from priorbox layers. Return: boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
point_form
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
def center_size(boxes):
    """ Convert prior_boxes to (cx, cy, w, h)
    representation for comparison to center-size form ground truth data.
    Args:
        boxes: (tensor) point_form boxes
    Return:
        boxes: (tensor) Converted cx, cy, w, h form of boxes.
    """
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
Convert prior_boxes to (cx, cy, w, h) representation for comparison to center-size form ground truth data. Args: boxes: (tensor) point_form boxes Return: boxes: (tensor) Converted cx, cy, w, h form of boxes.
center_size
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
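A self-contained round-trip sketch mirroring point_form and center_size above; the numbers are illustrative.

import torch

cxcywh = torch.tensor([[50., 50., 20., 10.]])
xyxy = torch.cat((cxcywh[:, :2] - cxcywh[:, 2:] / 2,
                  cxcywh[:, :2] + cxcywh[:, 2:] / 2), 1)
back = torch.cat(((xyxy[:, 2:] + xyxy[:, :2]) / 2,
                  xyxy[:, 2:] - xyxy[:, :2]), 1)
print(xyxy)  # tensor([[40., 45., 60., 55.]])
print(back)  # tensor([[50., 50., 20., 10.]])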
def intersect(box_a, box_b): """ We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [A,4]. box_b: (tensor) bounding boxes, Shape: [B,4]. Return: (tensor) intersection area, Shape: [A,B]. """ A = box_a.size(0) B = box_b.size(0) max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2)) inter = torch.clamp((max_xy - min_xy), min=0) return inter[:, :, 0] * inter[:, :, 1]
We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [A,4]. box_b: (tensor) bounding boxes, Shape: [B,4]. Return: (tensor) intersection area, Shape: [A,B].
intersect
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
def matrix_iou(a, b):
    """
    return iou of a and b, numpy version for data augmentation
    """
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    return area_i / (area_a[:, np.newaxis] + area_b - area_i)
return iou of a and b, numpy version for data augmentation
matrix_iou
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
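A minimal numeric check for matrix_iou, assuming it is in scope, with two axis-aligned boxes.

import numpy as np

a = np.array([[0., 0., 10., 10.]])
b = np.array([[5., 5., 15., 15.]])
# intersection is a 5x5 square = 25; union = 100 + 100 - 25 = 175
print(matrix_iou(a, b))  # [[0.14285714]]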
def matrix_iof(a, b):
    """
    return iof of a and b, numpy version for data augmentation
    """
    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    return area_i / np.maximum(area_a[:, np.newaxis], 1)
return iof of a and b, numpy version for data augmentation
matrix_iof
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
def encode(matched, priors, variances): """Encode the variances from the priorbox layers into the ground truth boxes we have matched (based on jaccard overlap) with the prior boxes. Args: matched: (tensor) Coords of ground truth for each prior in point-form Shape: [num_priors, 4]. priors: (tensor) Prior boxes in center-offset form Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: encoded boxes (tensor), Shape: [num_priors, 4] """ # dist b/t match center and prior's center g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2] # encode variance g_cxcy /= (variances[0] * priors[:, 2:]) # match wh / prior wh g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] g_wh = torch.log(g_wh) / variances[1] # return target for smooth_l1_loss return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
Encode the variances from the priorbox layers into the ground truth boxes we have matched (based on jaccard overlap) with the prior boxes. Args: matched: (tensor) Coords of ground truth for each prior in point-form Shape: [num_priors, 4]. priors: (tensor) Prior boxes in center-offset form Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: encoded boxes (tensor), Shape: [num_priors, 4]
encode
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
def encode_landm(matched, priors, variances): """Encode the variances from the priorbox layers into the ground truth boxes we have matched (based on jaccard overlap) with the prior boxes. Args: matched: (tensor) Coords of ground truth for each prior in point-form Shape: [num_priors, 10]. priors: (tensor) Prior boxes in center-offset form Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: encoded landm (tensor), Shape: [num_priors, 10] """ # dist b/t match center and prior's center matched = torch.reshape(matched, (matched.size(0), 5, 2)) priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2) g_cxcy = matched[:, :, :2] - priors[:, :, :2] # encode variance g_cxcy /= (variances[0] * priors[:, :, 2:]) # g_cxcy /= priors[:, :, 2:] g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1) # return target for smooth_l1_loss return g_cxcy
Encode the variances from the priorbox layers into the ground truth boxes we have matched (based on jaccard overlap) with the prior boxes. Args: matched: (tensor) Coords of ground truth for each prior in point-form Shape: [num_priors, 10]. priors: (tensor) Prior boxes in center-offset form Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: encoded landm (tensor), Shape: [num_priors, 10]
encode_landm
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
def decode(loc, priors, variances): """Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded bounding box predictions """ boxes = torch.cat(( priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) boxes[:, :2] -= boxes[:, 2:] / 2 boxes[:, 2:] += boxes[:, :2] return boxes
Decode locations from predictions using priors to undo the encoding we did for offset regression at train time. Args: loc (tensor): location predictions for loc layers, Shape: [num_priors,4] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded bounding box predictions
decode
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
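A round-trip sketch assuming encode and decode above are in scope; the prior, the ground-truth box, and the common SSD variances [0.1, 0.2] are illustrative. Encoding a box against a prior and decoding it back should recover the box.

import torch

priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])   # (cx, cy, w, h)
gt = torch.tensor([[0.40, 0.40, 0.60, 0.60]])   # (xmin, ymin, xmax, ymax)
variances = [0.1, 0.2]

loc = encode(gt, priors, variances)
boxes = decode(loc, priors, variances)
print(torch.allclose(boxes, gt, atol=1e-6))     # True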
def decode_landm(pre, priors, variances): """Decode landm from predictions using priors to undo the encoding we did for offset regression at train time. Args: pre (tensor): landm predictions for loc layers, Shape: [num_priors,10] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded landm predictions """ landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:], priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:], ), dim=1) return landms
Decode landm from predictions using priors to undo the encoding we did for offset regression at train time. Args: pre (tensor): landm predictions for loc layers, Shape: [num_priors,10] priors (tensor): Prior boxes in center-offset form. Shape: [num_priors,4]. variances: (list[float]) Variances of priorboxes Return: decoded landm predictions
decode_landm
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
def log_sum_exp(x):
    """Utility function for computing log_sum_exp in a numerically stable way.
    This will be used to determine unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    x_max = x.data.max()
    return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
Utility function for computing log_sum_exp in a numerically stable way. This will be used to determine unaveraged confidence loss across all examples in a batch. Args: x (Variable(tensor)): conf_preds from conf layers
log_sum_exp
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
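A standalone sketch of why the max is subtracted in log_sum_exp: the naive form overflows for large logits, while the shifted form stays finite.

import torch

x = torch.tensor([[1000., 1001., 1002.]])
naive = torch.log(torch.sum(torch.exp(x), 1, keepdim=True))           # inf
x_max = x.max()
stable = torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
print(naive, stable)  # tensor([[inf]]) tensor([[1002.4076]])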
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors, and the
        number of kept boxes.
    """

    keep = torch.Tensor(scores.size(0)).fill_(0).long()
    if boxes.numel() == 0:
        return keep, 0
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()

    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
Apply non-maximum suppression at test time to avoid detecting too many overlapping bounding boxes for a given object. Args: boxes: (tensor) The location preds for the img, Shape: [num_priors,4]. scores: (tensor) The class prediction scores for the img, Shape:[num_priors]. overlap: (float) The overlap thresh for suppressing unnecessary boxes. top_k: (int) The maximum number of box preds to consider. Return: The indices of the kept boxes with respect to num_priors, and the number of kept boxes.
nms
python
OpenTalker/video-retalking
third_part/GPEN/face_detect/utils/box_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
Apache-2.0
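A usage sketch for nms, assuming it is in scope: two heavily overlapping boxes plus one disjoint box, so only the higher-scoring box of the overlapping pair survives.

import torch

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep, count = nms(boxes, scores, overlap=0.5, top_k=200)
print(keep[:count])  # tensor([0, 2]) -- box 1 suppressed by box 0 (IoU ~0.68)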
def positive_cap(num):
    """ Cap a number to ensure positivity

    :param num: positive or negative number
    :returns: (capped_number, overflow)
    """
    if num < 0:
        return 0, abs(num)
    else:
        return num, 0
Cap a number to ensure positivity :param num: positive or negative number :returns: (capped_number, overflow)
positive_cap
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/aligner.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
Apache-2.0
def roi_coordinates(rect, size, scale): """ Align the rectangle into the center and return the top-left coordinates within the new size. If rect is smaller, we add borders. :param rect: (x, y, w, h) bounding rectangle of the face :param size: (width, height) are the desired dimensions :param scale: scaling factor of the rectangle to be resized :returns: 4 numbers. Top-left coordinates of the aligned ROI. (x, y, border_x, border_y). All values are > 0. """ rectx, recty, rectw, recth = rect new_height, new_width = size mid_x = int((rectx + rectw/2) * scale) mid_y = int((recty + recth/2) * scale) roi_x = mid_x - int(new_width/2) roi_y = mid_y - int(new_height/2) roi_x, border_x = positive_cap(roi_x) roi_y, border_y = positive_cap(roi_y) return roi_x, roi_y, border_x, border_y
Align the rectangle into the center and return the top-left coordinates within the new size. If rect is smaller, we add borders. :param rect: (x, y, w, h) bounding rectangle of the face :param size: (width, height) are the desired dimensions :param scale: scaling factor of the rectangle to be resized :returns: 4 numbers. Top-left coordinates of the aligned ROI. (x, y, border_x, border_y). All values are > 0.
roi_coordinates
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/aligner.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
Apache-2.0
def scaling_factor(rect, size): """ Calculate the scaling factor for the current image to be resized to the new dimensions :param rect: (x, y, w, h) bounding rectangle of the face :param size: (width, height) are the desired dimensions :returns: floating point scaling factor """ new_height, new_width = size rect_h, rect_w = rect[2:] height_ratio = rect_h / new_height width_ratio = rect_w / new_width scale = 1 if height_ratio > width_ratio: new_recth = 0.8 * new_height scale = new_recth / rect_h else: new_rectw = 0.8 * new_width scale = new_rectw / rect_w return scale
Calculate the scaling factor for the current image to be resized to the new dimensions :param rect: (x, y, w, h) bounding rectangle of the face :param size: (width, height) are the desired dimensions :returns: floating point scaling factor
scaling_factor
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/aligner.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
Apache-2.0
def resize_image(img, scale): """ Resize image with the provided scaling factor :param img: image to be resized :param scale: scaling factor for resizing the image """ cur_height, cur_width = img.shape[:2] new_scaled_height = int(scale * cur_height) new_scaled_width = int(scale * cur_width) return cv2.resize(img, (new_scaled_width, new_scaled_height))
Resize image with the provided scaling factor :param img: image to be resized :param scale: scaling factor for resizing the image
resize_image
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/aligner.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
Apache-2.0
def resize_align(img, points, size): """ Resize image and associated points, align face to the center and crop to the desired size :param img: image to be resized :param points: *m* x 2 array of points :param size: (height, width) tuple of new desired size """ new_height, new_width = size # Resize image based on bounding rectangle rect = cv2.boundingRect(np.array([points], np.int32)) scale = scaling_factor(rect, size) img = resize_image(img, scale) # Align bounding rect to center cur_height, cur_width = img.shape[:2] roi_x, roi_y, border_x, border_y = roi_coordinates(rect, size, scale) roi_h = np.min([new_height-border_y, cur_height-roi_y]) roi_w = np.min([new_width-border_x, cur_width-roi_x]) # Crop to supplied size crop = np.zeros((new_height, new_width, 3), img.dtype) crop[border_y:border_y+roi_h, border_x:border_x+roi_w] = ( img[roi_y:roi_y+roi_h, roi_x:roi_x+roi_w]) # Scale and align face points to the crop points[:, 0] = (points[:, 0] * scale) + (border_x - roi_x) points[:, 1] = (points[:, 1] * scale) + (border_y - roi_y) return (crop, points)
Resize image and associated points, align face to the center and crop to the desired size :param img: image to be resized :param points: *m* x 2 array of points :param size: (height, width) tuple of new desired size
resize_align
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/aligner.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
Apache-2.0
def mask_from_points(size, points): """ Create a mask of supplied size from supplied points :param size: tuple of output mask size :param points: array of [x, y] points :returns: mask of values 0 and 255 where 255 indicates the convex hull containing the points """ radius = 10 # kernel size kernel = np.ones((radius, radius), np.uint8) mask = np.zeros(size, np.uint8) cv2.fillConvexPoly(mask, cv2.convexHull(points), 255) mask = cv2.erode(mask, kernel) return mask
Create a mask of supplied size from supplied points :param size: tuple of output mask size :param points: array of [x, y] points :returns: mask of values 0 and 255 where 255 indicates the convex hull containing the points
mask_from_points
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/blender.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/blender.py
Apache-2.0
def overlay_image(foreground_image, mask, background_image):
    """ Overlay foreground image onto the background given a mask

    :param foreground_image: foreground image
    :param mask: [0-255] values in mask
    :param background_image: background image
    :returns: image with foreground where mask > 0 overlaid on background image
    """
    foreground_pixels = mask > 0
    background_image[..., :3][foreground_pixels] = foreground_image[..., :3][foreground_pixels]
    return background_image
Overlay foreground image onto the background given a mask :param foreground_image: foreground image :param mask: [0-255] values in mask :param background_image: background image :returns: image with foreground where mask > 0 overlaid on background image
overlay_image
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/blender.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/blender.py
Apache-2.0
def apply_mask(img, mask): """ Apply mask to supplied image :param img: max 3 channel image :param mask: [0-255] values in mask :returns: new image with mask applied """ masked_img = np.copy(img) num_channels = 3 for c in range(num_channels): masked_img[..., c] = img[..., c] * (mask / 255) return masked_img
Apply mask to supplied image :param img: max 3 channel image :param mask: [0-255] values in mask :returns: new image with mask applied
apply_mask
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/blender.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/blender.py
Apache-2.0
def boundary_points(points, width_percent=0.1, height_percent=0.1): """ Produce additional boundary points :param points: *m* x 2 array of x,y points :param width_percent: [-1, 1] percentage of width to taper inwards. Negative for opposite direction :param height_percent: [-1, 1] percentage of height to taper downwards. Negative for opposite direction :returns: 2 additional points at the top corners """ x, y, w, h = cv2.boundingRect(np.array([points], np.int32)) spacerw = int(w * width_percent) spacerh = int(h * height_percent) return [[x+spacerw, y+spacerh], [x+w-spacerw, y+spacerh]]
Produce additional boundary points :param points: *m* x 2 array of x,y points :param width_percent: [-1, 1] percentage of width to taper inwards. Negative for opposite direction :param height_percent: [-1, 1] percentage of height to taper downwards. Negative for opposite direction :returns: 2 additional points at the top corners
boundary_points
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/locator.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/locator.py
Apache-2.0
def face_points_dlib(img, add_boundary_points=True): """ Locates 68 face points using dlib (http://dlib.net) Requires shape_predictor_68_face_landmarks.dat to be in face_morpher/data Download at: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 :param img: an image array :param add_boundary_points: bool to add additional boundary points :returns: Array of x,y face points. Empty array if no face found """ try: points = [] rgbimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) rects = dlib_detector(rgbimg, 1) if rects and len(rects) > 0: # We only take the first found face shapes = dlib_predictor(rgbimg, rects[0]) points = np.array([(shapes.part(i).x, shapes.part(i).y) for i in range(68)], np.int32) if add_boundary_points: # Add more points inwards and upwards as dlib only detects up to eyebrows points = np.vstack([ points, boundary_points(points, 0.1, -0.03), boundary_points(points, 0.13, -0.05), boundary_points(points, 0.15, -0.08), boundary_points(points, 0.33, -0.12)]) return points except Exception as e: print(e) return []
Locates 68 face points using dlib (http://dlib.net) Requires shape_predictor_68_face_landmarks.dat to be in face_morpher/data Download at: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 :param img: an image array :param add_boundary_points: bool to add additional boundary points :returns: Array of x,y face points. Empty array if no face found
face_points_dlib
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/locator.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/locator.py
Apache-2.0
def weighted_average_points(start_points, end_points, percent=0.5): """ Weighted average of two sets of supplied points :param start_points: *m* x 2 array of start face points. :param end_points: *m* x 2 array of end face points. :param percent: [0, 1] percentage weight on start_points :returns: *m* x 2 array of weighted average points """ if percent <= 0: return end_points elif percent >= 1: return start_points else: return np.asarray(start_points*percent + end_points*(1-percent), np.int32)
Weighted average of two sets of supplied points :param start_points: *m* x 2 array of start face points. :param end_points: *m* x 2 array of end face points. :param percent: [0, 1] percentage weight on start_points :returns: *m* x 2 array of weighted average points
weighted_average_points
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/locator.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/locator.py
Apache-2.0
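A tiny numeric check for weighted_average_points, assuming it is in scope; percent is the weight on start_points.

import numpy as np

start = np.array([[0, 0], [10, 10]])
end = np.array([[100, 100], [110, 110]])
print(weighted_average_points(start, end, percent=0.75))
# [[25 25]
#  [35 35]]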
def morph(src_img, src_points, dest_img, dest_points, video, width=500, height=600, num_frames=20, fps=10, out_frames=None, out_video=None, plot=False, background='black'): """ Create a morph sequence from source to destination image :param src_img: ndarray source image :param src_points: source image array of x,y face points :param dest_img: ndarray destination image :param dest_points: destination image array of x,y face points :param video: facemorpher.videoer.Video object """ size = (height, width) stall_frames = np.clip(int(fps*0.15), 1, fps) # Show first & last longer plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames) num_frames -= (stall_frames * 2) # No need to process src and dest image plt.plot_one(src_img) video.write(src_img, 1) # Produce morph frames! for percent in np.linspace(1, 0, num=num_frames): points = locator.weighted_average_points(src_points, dest_points, percent) src_face = warper.warp_image(src_img, src_points, points, size) end_face = warper.warp_image(dest_img, dest_points, points, size) average_face = blender.weighted_average(src_face, end_face, percent) if background in ('transparent', 'average'): mask = blender.mask_from_points(average_face.shape[:2], points) average_face = np.dstack((average_face, mask)) if background == 'average': average_background = blender.weighted_average(src_img, dest_img, percent) average_face = blender.overlay_image(average_face, mask, average_background) plt.plot_one(average_face) plt.save(average_face) video.write(average_face) plt.plot_one(dest_img) video.write(dest_img, stall_frames) plt.show()
Create a morph sequence from source to destination image :param src_img: ndarray source image :param src_points: source image array of x,y face points :param dest_img: ndarray destination image :param dest_points: destination image array of x,y face points :param video: facemorpher.videoer.Video object
morph
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/morpher.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/morpher.py
Apache-2.0
def morpher(imgpaths, width=500, height=600, num_frames=20, fps=10, out_frames=None, out_video=None, plot=False, background='black'): """ Create a morph sequence from multiple images in imgpaths :param imgpaths: array or generator of image paths """ video = videoer.Video(out_video, fps, width, height) images_points_gen = load_valid_image_points(imgpaths, (height, width)) src_img, src_points = next(images_points_gen) for dest_img, dest_points in images_points_gen: morph(src_img, src_points, dest_img, dest_points, video, width, height, num_frames, fps, out_frames, out_video, plot, background) src_img, src_points = dest_img, dest_points video.end()
Create a morph sequence from multiple images in imgpaths :param imgpaths: array or generator of image paths
morpher
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/morpher.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/morpher.py
Apache-2.0
def bilinear_interpolate(img, coords): """ Interpolates over every image channel http://en.wikipedia.org/wiki/Bilinear_interpolation :param img: max 3 channel image :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords :returns: array of interpolated pixels with same shape as coords """ int_coords = np.int32(coords) x0, y0 = int_coords dx, dy = coords - int_coords # 4 Neighbour pixels q11 = img[y0, x0] q21 = img[y0, x0+1] q12 = img[y0+1, x0] q22 = img[y0+1, x0+1] btm = q21.T * dx + q11.T * (1 - dx) top = q22.T * dx + q12.T * (1 - dx) inter_pixel = top * dy + btm * (1 - dy) return inter_pixel.T
Interpolates over every image channel http://en.wikipedia.org/wiki/Bilinear_interpolation :param img: max 3 channel image :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords :returns: array of interpolated pixels with same shape as coords
bilinear_interpolate
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/warper.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py
Apache-2.0
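A minimal numeric check for bilinear_interpolate, assuming it is in scope: sampling the midpoint of a 2x2 single-channel ramp returns the average of the four neighbours.

import numpy as np

img = np.array([[[0.], [10.]],
                [[20.], [30.]]])          # shape (2, 2, 1)
coords = np.array([[0.5], [0.5]])         # row 0: x-coords, row 1: y-coords
print(bilinear_interpolate(img, coords))  # [[15.]]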
def grid_coordinates(points): """ x,y grid coordinates within the ROI of supplied points :param points: points to generate grid coordinates :returns: array of (x, y) coordinates """ xmin = np.min(points[:, 0]) xmax = np.max(points[:, 0]) + 1 ymin = np.min(points[:, 1]) ymax = np.max(points[:, 1]) + 1 return np.asarray([(x, y) for y in range(ymin, ymax) for x in range(xmin, xmax)], np.uint32)
x,y grid coordinates within the ROI of supplied points :param points: points to generate grid coordinates :returns: array of (x, y) coordinates
grid_coordinates
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/warper.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py
Apache-2.0
def process_warp(src_img, result_img, tri_affines, dst_points, delaunay): """ Warp each triangle from the src_image only within the ROI of the destination image (points in dst_points). """ roi_coords = grid_coordinates(dst_points) # indices to vertices. -1 if pixel is not in any triangle roi_tri_indices = delaunay.find_simplex(roi_coords) for simplex_index in range(len(delaunay.simplices)): coords = roi_coords[roi_tri_indices == simplex_index] num_coords = len(coords) out_coords = np.dot(tri_affines[simplex_index], np.vstack((coords.T, np.ones(num_coords)))) x, y = coords.T result_img[y, x] = bilinear_interpolate(src_img, out_coords) return None
Warp each triangle from the src_image only within the ROI of the destination image (points in dst_points).
process_warp
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/warper.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py
Apache-2.0
def triangular_affine_matrices(vertices, src_points, dest_points): """ Calculate the affine transformation matrix for each triangle (x,y) vertex from dest_points to src_points :param vertices: array of triplet indices to corners of triangle :param src_points: array of [x, y] points to landmarks for source image :param dest_points: array of [x, y] points to landmarks for destination image :returns: 2 x 3 affine matrix transformation for a triangle """ ones = [1, 1, 1] for tri_indices in vertices: src_tri = np.vstack((src_points[tri_indices, :].T, ones)) dst_tri = np.vstack((dest_points[tri_indices, :].T, ones)) mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :] yield mat
Calculate the affine transformation matrix for each triangle (x,y) vertex from dest_points to src_points :param vertices: array of triplet indices to corners of triangle :param src_points: array of [x, y] points to landmarks for source image :param dest_points: array of [x, y] points to landmarks for destination image :returns: 2 x 3 affine matrix transformation for a triangle
triangular_affine_matrices
python
OpenTalker/video-retalking
third_part/GPEN/face_morpher/facemorpher/warper.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py
Apache-2.0
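A numeric check for triangular_affine_matrices, assuming it is in scope, on a single triangle: the yielded matrix maps destination coordinates back onto source coordinates.

import numpy as np

src_points = np.array([[0., 0.], [10., 0.], [0., 10.]])
dest_points = np.array([[0., 0.], [5., 0.], [0., 5.]])
vertices = np.array([[0, 1, 2]])
mat = next(triangular_affine_matrices(vertices, src_points, dest_points))
pt = np.array([5., 5., 1.])   # a destination point in homogeneous coords
print(mat @ pt)               # [10. 10.] in source coordinates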
def get_landmark(filepath, predictor, detector=None, fa=None): """get landmark with dlib :return: np.array shape=(68, 2) """ if fa is not None: image = io.imread(filepath) lms, _, bboxes = fa.get_landmarks(image, return_bboxes=True) if len(lms) == 0: return None return lms[0] if detector is None: detector = dlib.get_frontal_face_detector() if isinstance(filepath, PIL.Image.Image): img = np.array(filepath) else: img = dlib.load_rgb_image(filepath) dets = detector(img) for k, d in enumerate(dets): shape = predictor(img, d) break else: return None t = list(shape.parts()) a = [] for tt in t: a.append([tt.x, tt.y]) lm = np.array(a) return lm
get landmark with dlib :return: np.array shape=(68, 2)
get_landmark
python
OpenTalker/video-retalking
utils/alignment_stit.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/alignment_stit.py
Apache-2.0
def align_face(filepath_or_image, predictor, output_size, detector=None, enable_padding=False, scale=1.0): """ :param filepath: str :return: PIL Image """ c, x, y = compute_transform(filepath_or_image, predictor, detector=detector, scale=scale) quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) img = crop_image(filepath_or_image, output_size, quad, enable_padding=enable_padding) # Return aligned image. return img
:param filepath: str :return: PIL Image
align_face
python
OpenTalker/video-retalking
utils/alignment_stit.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/alignment_stit.py
Apache-2.0
def num_frames(length, fsize, fshift): """Compute number of time frames of spectrogram """ pad = (fsize - fshift) if length % fshift == 0: M = (length + pad * 2 - fsize) // fshift + 1 else: M = (length + pad * 2 - fsize) // fshift + 2 return M
Compute number of time frames of spectrogram
num_frames
python
OpenTalker/video-retalking
utils/audio.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/audio.py
Apache-2.0
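A worked example for num_frames; the STFT settings are illustrative.

length, fsize, fshift = 16000, 800, 200
pad = fsize - fshift                           # 600
M = (length + pad * 2 - fsize) // fshift + 1   # length % fshift == 0 branch
print(M)                                       # 83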
def get_landmark(self, img_np): """get landmark with dlib :return: np.array shape=(68, 2) """ detector = dlib.get_frontal_face_detector() dets = detector(img_np, 1) if len(dets) == 0: return None d = dets[0] # Get the landmarks/parts for the face in box d. shape = self.predictor(img_np, d) t = list(shape.parts()) a = [] for tt in t: a.append([tt.x, tt.y]) lm = np.array(a) return lm
get landmark with dlib :return: np.array shape=(68, 2)
get_landmark
python
OpenTalker/video-retalking
utils/ffhq_preprocess.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/ffhq_preprocess.py
Apache-2.0
def align_face(self, img, lm, output_size=1024): """ :param filepath: str :return: PIL Image """ lm_chin = lm[0: 17] # left-right lm_eyebrow_left = lm[17: 22] # left-right lm_eyebrow_right = lm[22: 27] # left-right lm_nose = lm[27: 31] # top-down lm_nostrils = lm[31: 36] # top-down lm_eye_left = lm[36: 42] # left-clockwise lm_eye_right = lm[42: 48] # left-clockwise lm_mouth_outer = lm[48: 60] # left-clockwise lm_mouth_inner = lm[60: 68] # left-clockwise # Calculate auxiliary vectors. eye_left = np.mean(lm_eye_left, axis=0) eye_right = np.mean(lm_eye_right, axis=0) eye_avg = (eye_left + eye_right) * 0.5 eye_to_eye = eye_right - eye_left mouth_left = lm_mouth_outer[0] mouth_right = lm_mouth_outer[6] mouth_avg = (mouth_left + mouth_right) * 0.5 eye_to_mouth = mouth_avg - eye_avg # Choose oriented crop rectangle. x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] x /= np.hypot(*x) x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) y = np.flipud(x) * [-1, 1] c = eye_avg + eye_to_mouth * 0.1 quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) qsize = np.hypot(*x) * 2 # Shrink. shrink = int(np.floor(qsize / output_size * 0.5)) if shrink > 1: rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) img = img.resize(rsize, Image.ANTIALIAS) quad /= shrink qsize /= shrink # Crop. border = max(int(np.rint(qsize * 0.1)), 3) crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1])))) crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1])) if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: quad -= crop[0:2] # Transform. quad = (quad + 0.5).flatten() lx = max(min(quad[0], quad[2]), 0) ly = max(min(quad[1], quad[7]), 0) rx = min(max(quad[4], quad[6]), img.size[0]) ry = min(max(quad[3], quad[5]), img.size[0]) # Save aligned image. return crop, [lx, ly, rx, ry]
:param filepath: str :return: PIL Image
align_face
python
OpenTalker/video-retalking
utils/ffhq_preprocess.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/ffhq_preprocess.py
Apache-2.0
def convert_flow_to_deformation(flow):
    r"""convert flow fields to deformations.
    Args:
        flow (tensor): Flow field obtained by the model
    Returns:
        deformation (tensor): The deformation used for warping
    """
    b, c, h, w = flow.shape
    flow_norm = 2 * torch.cat([flow[:, :1, ...] / (w - 1), flow[:, 1:, ...] / (h - 1)], 1)
    grid = make_coordinate_grid(flow)
    deformation = grid + flow_norm.permute(0, 2, 3, 1)
    return deformation

convert flow fields to deformations.
Args:
    flow (tensor): Flow field obtained by the model
Returns:
    deformation (tensor): The deformation used for warping
convert_flow_to_deformation
python
OpenTalker/video-retalking
utils/flow_util.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/flow_util.py
Apache-2.0
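A sanity sketch of the conversion: a zero flow field should produce the identity deformation, i.e. exactly the [-1, 1] sampling grid that make_coordinate_grid builds:

import torch

flow = torch.zeros(1, 2, 4, 4)             # (B, 2, H, W), pixel-unit offsets
deformation = convert_flow_to_deformation(flow)
print(deformation.shape)                    # torch.Size([1, 4, 4, 2])
print(deformation[0, 0, 0], deformation[0, -1, -1])  # tensor([-1., -1.]) and tensor([1., 1.])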
def make_coordinate_grid(flow):
    r"""obtain coordinate grid with the same size as the flow field.
    Args:
        flow (tensor): Flow field obtained by the model
    Returns:
        grid (tensor): The grid with the same size as the input flow
    """
    b, c, h, w = flow.shape
    x = torch.arange(w).to(flow)
    y = torch.arange(h).to(flow)

    x = (2 * (x / (w - 1)) - 1)
    y = (2 * (y / (h - 1)) - 1)

    yy = y.view(-1, 1).repeat(1, w)
    xx = x.view(1, -1).repeat(h, 1)

    meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
    meshed = meshed.expand(b, -1, -1, -1)
    return meshed

obtain coordinate grid with the same size as the flow field.
Args:
    flow (tensor): Flow field obtained by the model
Returns:
    grid (tensor): The grid with the same size as the input flow
make_coordinate_grid
python
OpenTalker/video-retalking
utils/flow_util.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/flow_util.py
Apache-2.0
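The normalization maps pixel index i in [0, w-1] to 2*i/(w-1) - 1 in [-1, 1], which is the align-corners convention; a quick check for w = 5:

import torch

x = torch.arange(5).float()
print(2 * (x / (5 - 1)) - 1)  # tensor([-1.0000, -0.5000,  0.0000,  0.5000,  1.0000])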
def warp_image(source_image, deformation):
    r"""warp the input image according to the deformation
    Args:
        source_image (tensor): source images to be warped
        deformation (tensor): deformations used to warp the images; value in range (-1, 1)
    Returns:
        output (tensor): the warped images
    """
    _, h_old, w_old, _ = deformation.shape
    _, _, h, w = source_image.shape
    if h_old != h or w_old != w:
        deformation = deformation.permute(0, 3, 1, 2)
        deformation = torch.nn.functional.interpolate(deformation, size=(h, w), mode='bilinear')
        deformation = deformation.permute(0, 2, 3, 1)
    return torch.nn.functional.grid_sample(source_image, deformation)

warp the input image according to the deformation
Args:
    source_image (tensor): source images to be warped
    deformation (tensor): deformations used to warp the images; value in range (-1, 1)
Returns:
    output (tensor): the warped images
warp_image
python
OpenTalker/video-retalking
utils/flow_util.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/flow_util.py
Apache-2.0
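A usage sketch: the deformation is bilinearly resized to the source resolution when the two disagree, so a coarse flow can drive a finer image. Note that grid_sample is called with its default align_corners, so a zero flow is only approximately the identity near image borders:

import torch

src = torch.rand(1, 3, 64, 64)
deformation = convert_flow_to_deformation(torch.zeros(1, 2, 16, 16))  # coarse grid
out = warp_image(src, deformation)
print(out.shape)  # torch.Size([1, 3, 64, 64])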
def split_coeff(coeffs):
    """
    Return:
        coeffs_dict -- a dict of torch.tensors

    Parameters:
        coeffs -- torch.tensor, size (B, 257)
    """
    id_coeffs = coeffs[:, :80]         # identity, 80 dims
    exp_coeffs = coeffs[:, 80: 144]    # expression, 64 dims
    tex_coeffs = coeffs[:, 144: 224]   # texture, 80 dims
    angles = coeffs[:, 224: 227]       # rotation angles, 3 dims
    gammas = coeffs[:, 227: 254]       # SH lighting, 27 dims
    translations = coeffs[:, 254:]     # translation, 3 dims
    return {
        'id': id_coeffs,
        'exp': exp_coeffs,
        'tex': tex_coeffs,
        'angle': angles,
        'gamma': gammas,
        'trans': translations
    }

Return:
    coeffs_dict -- a dict of torch.tensors

Parameters:
    coeffs -- torch.tensor, size (B, 257)
split_coeff
python
OpenTalker/video-retalking
utils/inference_utils.py
https://github.com/OpenTalker/video-retalking/blob/master/utils/inference_utils.py
Apache-2.0
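The slice boundaries imply a 257-dimensional coefficient vector: 80 (id) + 64 (exp) + 80 (tex) + 3 (angle) + 27 (gamma) + 3 (trans) = 257. A quick check with a dummy tensor:

import torch

coeffs = torch.zeros(2, 257)
parts = split_coeff(coeffs)
print({k: v.shape[1] for k, v in parts.items()})
# {'id': 80, 'exp': 64, 'tex': 80, 'angle': 3, 'gamma': 27, 'trans': 3}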
def compute_density_for_timestep_sampling(
    weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None
):
    """Compute the density for sampling the timesteps when doing SD3 training.

    Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.

    SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
    """
    if weighting_scheme == "logit_normal":
        # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$).
        u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device="cpu")
        u = torch.nn.functional.sigmoid(u)
    elif weighting_scheme == "mode":
        u = torch.rand(size=(batch_size,), device="cpu")
        u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u)
    else:
        u = torch.rand(size=(batch_size,), device="cpu")
    return u

Compute the density for sampling the timesteps when doing SD3 training.

Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.

SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
compute_density_for_timestep_sampling
python
memoavatar/memo
finetune.py
https://github.com/memoavatar/memo/blob/master/finetune.py
Apache-2.0
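A small sketch of the logit-normal branch; the mean/std values are the paper's lognorm(0.00, 1.00) defaults. Since u is a sigmoid of a Gaussian, every sample lies strictly in (0, 1):

import torch

u = compute_density_for_timestep_sampling(
    weighting_scheme="logit_normal", batch_size=4, logit_mean=0.0, logit_std=1.0
)
print(u.shape, float(u.min()) > 0.0, float(u.max()) < 1.0)  # torch.Size([4]) True True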
def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None):
    """Computes loss weighting scheme for SD3 training.

    Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.

    SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
    """
    if weighting_scheme == "sigma_sqrt":
        weighting = (sigmas**-2.0).float()
    elif weighting_scheme == "cosmap":
        bot = 1 - 2 * sigmas + 2 * sigmas**2
        weighting = 2 / (math.pi * bot)
    else:
        weighting = torch.ones_like(sigmas)
    return weighting

Computes loss weighting scheme for SD3 training.

Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.

SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
compute_loss_weighting_for_sd3
python
memoavatar/memo
finetune.py
https://github.com/memoavatar/memo/blob/master/finetune.py
Apache-2.0
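The "cosmap" weighting is 2 / (pi * (1 - 2*sigma + 2*sigma^2)); the quadratic in the denominator bottoms out at 0.5 when sigma = 0.5, so the weight peaks there at 4/pi:

import math
import torch

sigmas = torch.tensor([0.1, 0.5, 0.9])
w = compute_loss_weighting_for_sd3("cosmap", sigmas=sigmas)
print(w)                                   # symmetric around the peak at sigma = 0.5
print(abs(float(w[1]) - 4 / math.pi) < 1e-6)  # True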
def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None:
    r"""
    Set whether to use npu flash attention from `torch_npu` or not.
    """
    if use_npu_flash_attention:
        processor = AttnProcessorNPU()
    else:
        # set attention processor
        # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
        # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
        # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
        processor = (
            AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()
        )
    self.set_processor(processor)
Set whether to use npu flash attention from `torch_npu` or not.
set_use_npu_flash_attention
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
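The fallback branch prefers the fused torch 2.x kernel whenever it exists; the feature check is just an attribute test, which you can reproduce standalone:

import torch.nn.functional as F

use_sdpa = hasattr(F, "scaled_dot_product_attention")
print("torch SDPA available:", use_sdpa)  # True on torch >= 2.0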
def set_use_memory_efficient_attention_xformers(
    self,
    use_memory_efficient_attention_xformers: bool,
    attention_op: Optional[Callable] = None,
) -> None:
    r"""
    Set whether to use memory efficient attention from `xformers` or not.

    Args:
        use_memory_efficient_attention_xformers (`bool`):
            Whether to use memory efficient attention from `xformers` or not.
        attention_op (`Callable`, *optional*):
            The attention operation to use. Defaults to `None` which uses the default attention operation from
            `xformers`.
    """
    is_custom_diffusion = hasattr(self, "processor") and isinstance(
        self.processor,
        (
            CustomDiffusionAttnProcessor,
            CustomDiffusionXFormersAttnProcessor,
            CustomDiffusionAttnProcessor2_0,
        ),
    )
    is_joint_diffusion = hasattr(self, "processor") and isinstance(
        self.processor,
        (JointAttnProcessor2_0),
    )
    is_added_kv_processor = hasattr(self, "processor") and isinstance(
        self.processor,
        (
            AttnAddedKVProcessor,
            AttnAddedKVProcessor2_0,
            SlicedAttnAddedKVProcessor,
            XFormersAttnAddedKVProcessor,
        ),
    )

    if use_memory_efficient_attention_xformers:
        if is_added_kv_processor and is_custom_diffusion:
            raise NotImplementedError(
                f"Memory efficient attention is currently not supported for custom diffusion for attention processor type {self.processor}"
            )
        if not is_xformers_available():
            raise ModuleNotFoundError(
                (
                    "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
                    " xformers"
                ),
                name="xformers",
            )
        elif not torch.cuda.is_available():
            raise ValueError(
                "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is"
                " only available for GPU "
            )
        else:
            try:
                # Make sure we can run the memory efficient attention
                _ = xformers.ops.memory_efficient_attention(
                    torch.randn((1, 2, 40), device="cuda"),
                    torch.randn((1, 2, 40), device="cuda"),
                    torch.randn((1, 2, 40), device="cuda"),
                )
            except Exception as e:
                raise e

        if is_custom_diffusion:
            processor = CustomDiffusionXFormersAttnProcessor(
                train_kv=self.processor.train_kv,
                train_q_out=self.processor.train_q_out,
                hidden_size=self.processor.hidden_size,
                cross_attention_dim=self.processor.cross_attention_dim,
                attention_op=attention_op,
            )
            processor.load_state_dict(self.processor.state_dict())
            if hasattr(self.processor, "to_k_custom_diffusion"):
                processor.to(self.processor.to_k_custom_diffusion.weight.device)
        elif is_added_kv_processor:
            # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP
            # which uses this type of cross attention ONLY because the attention mask of format
            # [0, ..., -10.000, ..., 0, ...,] is not supported
            # throw warning
            logger.info(
                "Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation."
            )
            processor = XFormersAttnAddedKVProcessor(attention_op=attention_op)
        elif is_joint_diffusion:
            processor = JointAttnProcessor2_0()
        else:
            processor = XFormersAttnProcessor(attention_op=attention_op)
    else:
        if is_custom_diffusion:
            attn_processor_class = (
                CustomDiffusionAttnProcessor2_0
                if hasattr(F, "scaled_dot_product_attention")
                else CustomDiffusionAttnProcessor
            )
            processor = attn_processor_class(
                train_kv=self.processor.train_kv,
                train_q_out=self.processor.train_q_out,
                hidden_size=self.processor.hidden_size,
                cross_attention_dim=self.processor.cross_attention_dim,
            )
            processor.load_state_dict(self.processor.state_dict())
            if hasattr(self.processor, "to_k_custom_diffusion"):
                processor.to(self.processor.to_k_custom_diffusion.weight.device)
        else:
            # set attention processor
            # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
            # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
            # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
            processor = (
                AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()
            )

    self.set_processor(processor)

Set whether to use memory efficient attention from `xformers` or not.

Args:
    use_memory_efficient_attention_xformers (`bool`):
        Whether to use memory efficient attention from `xformers` or not.
    attention_op (`Callable`, *optional*):
        The attention operation to use. Defaults to `None` which uses the default attention operation from
        `xformers`.
set_use_memory_efficient_attention_xformers
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def set_attention_slice(self, slice_size: int) -> None:
    r"""
    Set the slice size for attention computation.

    Args:
        slice_size (`int`):
            The slice size for attention computation.
    """
    if slice_size is not None and slice_size > self.sliceable_head_dim:
        raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")

    if slice_size is not None and self.added_kv_proj_dim is not None:
        processor = SlicedAttnAddedKVProcessor(slice_size)
    elif slice_size is not None:
        processor = SlicedAttnProcessor(slice_size)
    elif self.added_kv_proj_dim is not None:
        processor = AttnAddedKVProcessor()
    else:
        # set attention processor
        # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
        # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
        # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
        processor = (
            AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor()
        )

    self.set_processor(processor)

Set the slice size for attention computation.

Args:
    slice_size (`int`):
        The slice size for attention computation.
set_attention_slice
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def set_processor(self, processor: "AttnProcessor") -> None:
    r"""
    Set the attention processor to use.

    Args:
        processor (`AttnProcessor`):
            The attention processor to use.
    """
    # if current processor is in `self._modules` and if passed `processor` is not, we need to
    # pop `processor` from `self._modules`
    if (
        hasattr(self, "processor")
        and isinstance(self.processor, torch.nn.Module)
        and not isinstance(processor, torch.nn.Module)
    ):
        logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}")
        self._modules.pop("processor")

    self.processor = processor

Set the attention processor to use.

Args:
    processor (`AttnProcessor`):
        The attention processor to use.
set_processor
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def forward(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    **cross_attention_kwargs,
) -> torch.Tensor:
    r"""
    The forward method of the `Attention` class.

    Args:
        hidden_states (`torch.Tensor`):
            The hidden states of the query.
        encoder_hidden_states (`torch.Tensor`, *optional*):
            The hidden states of the encoder.
        attention_mask (`torch.Tensor`, *optional*):
            The attention mask to use. If `None`, no mask is applied.
        **cross_attention_kwargs:
            Additional keyword arguments to pass along to the cross attention.

    Returns:
        `torch.Tensor`: The output of the attention layer.
    """
    # The `Attention` class can call different attention processors / attention functions
    # here we simply pass along all tensors to the selected processor class
    # For standard processors that are defined here, `**cross_attention_kwargs` is empty

    attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
    quiet_attn_parameters = {"ip_adapter_masks"}
    unused_kwargs = [
        k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters
    ]
    if len(unused_kwargs) > 0:
        logger.warning(
            f"cross_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored."
        )
    cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters}

    return self.processor(
        self,
        hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        attention_mask=attention_mask,
        **cross_attention_kwargs,
    )

The forward method of the `Attention` class.

Args:
    hidden_states (`torch.Tensor`):
        The hidden states of the query.
    encoder_hidden_states (`torch.Tensor`, *optional*):
        The hidden states of the encoder.
    attention_mask (`torch.Tensor`, *optional*):
        The attention mask to use. If `None`, no mask is applied.
    **cross_attention_kwargs:
        Additional keyword arguments to pass along to the cross attention.

Returns:
    `torch.Tensor`: The output of the attention layer.
forward
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:
    r"""
    Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
    is the number of heads initialized while constructing the `Attention` class.

    Args:
        tensor (`torch.Tensor`): The tensor to reshape.

    Returns:
        `torch.Tensor`: The reshaped tensor.
    """
    head_size = self.heads
    batch_size, seq_len, dim = tensor.shape
    tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
    tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
    return tensor

Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads` is the number of heads initialized while constructing the `Attention` class.

Args:
    tensor (`torch.Tensor`): The tensor to reshape.

Returns:
    `torch.Tensor`: The reshaped tensor.
batch_to_head_dim
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:
    r"""
    Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is
    the number of heads initialized while constructing the `Attention` class.

    Args:
        tensor (`torch.Tensor`): The tensor to reshape.
        out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
            reshaped to `[batch_size * heads, seq_len, dim // heads]`.

    Returns:
        `torch.Tensor`: The reshaped tensor.
    """
    head_size = self.heads
    if tensor.ndim == 3:
        batch_size, seq_len, dim = tensor.shape
        extra_dim = 1
    else:
        batch_size, extra_dim, seq_len, dim = tensor.shape
    tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size)
    tensor = tensor.permute(0, 2, 1, 3)

    if out_dim == 3:
        tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size)

    return tensor

Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is the number of heads initialized while constructing the `Attention` class.

Args:
    tensor (`torch.Tensor`): The tensor to reshape.
    out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
        reshaped to `[batch_size * heads, seq_len, dim // heads]`.

Returns:
    `torch.Tensor`: The reshaped tensor.
head_to_batch_dim
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
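The two reshapes above are inverses of each other. A standalone round-trip with plain tensor ops (the head count is an assumption here; in the class it comes from self.heads):

import torch

heads, b, s, d = 8, 2, 10, 64
x = torch.randn(b, s, d)
# head_to_batch_dim: (b, s, d) -> (b*heads, s, d//heads)
split = x.reshape(b, s, heads, d // heads).permute(0, 2, 1, 3).reshape(b * heads, s, d // heads)
# batch_to_head_dim: (b*heads, s, d//heads) -> (b, s, d)
merged = split.reshape(b, heads, s, d // heads).permute(0, 2, 1, 3).reshape(b, s, d)
print(torch.equal(merged, x))  # True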
def get_attention_scores(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    attention_mask: torch.Tensor = None,
) -> torch.Tensor:
    r"""
    Compute the attention scores.

    Args:
        query (`torch.Tensor`): The query tensor.
        key (`torch.Tensor`): The key tensor.
        attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.

    Returns:
        `torch.Tensor`: The attention probabilities/scores.
    """
    dtype = query.dtype
    if self.upcast_attention:
        query = query.float()
        key = key.float()

    if attention_mask is None:
        baddbmm_input = torch.empty(
            query.shape[0],
            query.shape[1],
            key.shape[1],
            dtype=query.dtype,
            device=query.device,
        )
        beta = 0
    else:
        baddbmm_input = attention_mask
        beta = 1

    attention_scores = torch.baddbmm(
        baddbmm_input,
        query,
        key.transpose(-1, -2),
        beta=beta,
        alpha=self.scale,
    )
    del baddbmm_input

    if self.upcast_softmax:
        attention_scores = attention_scores.float()

    attention_probs = attention_scores.softmax(dim=-1)
    del attention_scores

    attention_probs = attention_probs.to(dtype)

    return attention_probs

Compute the attention scores.

Args:
    query (`torch.Tensor`): The query tensor.
    key (`torch.Tensor`): The key tensor.
    attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.

Returns:
    `torch.Tensor`: The attention probabilities/scores.
get_attention_scores
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
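With beta=0 the baddbmm input is ignored (PyTorch documents that NaN/inf in it do not propagate), so the call reduces to softmax(Q K^T * scale), with the mask folded in additively when beta=1. A standalone check of that equivalence:

import torch

b, s, d = 2, 5, 16
q, k = torch.randn(b, s, d), torch.randn(b, s, d)
scale = d ** -0.5
scores = torch.baddbmm(torch.empty(b, s, s), q, k.transpose(-1, -2), beta=0, alpha=scale)
probs = scores.softmax(dim=-1)
print(torch.allclose(probs, ((q @ k.transpose(-1, -2)) * scale).softmax(dim=-1)))  # True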
def prepare_attention_mask(
    self,
    attention_mask: torch.Tensor,
    target_length: int,
    batch_size: int,
    out_dim: int = 3,
) -> torch.Tensor:
    r"""
    Prepare the attention mask for the attention computation.

    Args:
        attention_mask (`torch.Tensor`):
            The attention mask to prepare.
        target_length (`int`):
            The target length of the attention mask. This is the length of the attention mask after padding.
        batch_size (`int`):
            The batch size, which is used to repeat the attention mask.
        out_dim (`int`, *optional*, defaults to `3`):
            The output dimension of the attention mask. Can be either `3` or `4`.

    Returns:
        `torch.Tensor`: The prepared attention mask.
    """
    head_size = self.heads
    if attention_mask is None:
        return attention_mask

    current_length: int = attention_mask.shape[-1]
    if current_length != target_length:
        if attention_mask.device.type == "mps":
            # HACK: MPS: Does not support padding by greater than dimension of input tensor.
            # Instead, we can manually construct the padding tensor.
            padding_shape = (
                attention_mask.shape[0],
                attention_mask.shape[1],
                target_length,
            )
            padding = torch.zeros(
                padding_shape,
                dtype=attention_mask.dtype,
                device=attention_mask.device,
            )
            attention_mask = torch.cat([attention_mask, padding], dim=2)
        else:
            # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
            #       we want to instead pad by (0, remaining_length), where remaining_length is:
            #       remaining_length: int = target_length - current_length
            # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
            attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)

    if out_dim == 3:
        if attention_mask.shape[0] < batch_size * head_size:
            attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
    elif out_dim == 4:
        attention_mask = attention_mask.unsqueeze(1)
        attention_mask = attention_mask.repeat_interleave(head_size, dim=1)

    return attention_mask

Prepare the attention mask for the attention computation.

Args:
    attention_mask (`torch.Tensor`):
        The attention mask to prepare.
    target_length (`int`):
        The target length of the attention mask. This is the length of the attention mask after padding.
    batch_size (`int`):
        The batch size, which is used to repeat the attention mask.
    out_dim (`int`, *optional*, defaults to `3`):
        The output dimension of the attention mask. Can be either `3` or `4`.

Returns:
    `torch.Tensor`: The prepared attention mask.
prepare_attention_mask
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
    r"""
    Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
    `Attention` class.

    Args:
        encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.

    Returns:
        `torch.Tensor`: The normalized encoder hidden states.
    """
    assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states"

    if isinstance(self.norm_cross, nn.LayerNorm):
        encoder_hidden_states = self.norm_cross(encoder_hidden_states)
    elif isinstance(self.norm_cross, nn.GroupNorm):
        # Group norm norms along the channels dimension and expects
        # input to be in the shape of (N, C, *). In this case, we want
        # to norm along the hidden dimension, so we need to move
        # (batch_size, sequence_length, hidden_size) ->
        # (batch_size, hidden_size, sequence_length)
        encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
        encoder_hidden_states = self.norm_cross(encoder_hidden_states)
        encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
    else:
        assert False

    return encoder_hidden_states

Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the `Attention` class.

Args:
    encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.

Returns:
    `torch.Tensor`: The normalized encoder hidden states.
norm_encoder_hidden_states
python
memoavatar/memo
memo/models/attention_processor.py
https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py
Apache-2.0
def attn_processors(self) -> Dict[str, AttentionProcessor]:
    r"""
    Returns:
        `dict` of attention processors: A dictionary containing all attention processors used in the model,
        indexed by their weight names.
    """
    # set recursively
    processors = {}

    def fn_recursive_add_processors(
        name: str,
        module: torch.nn.Module,
        processors: Dict[str, AttentionProcessor],
    ):
        if hasattr(module, "get_processor"):
            processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)

        for sub_name, child in module.named_children():
            fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

        return processors

    for name, module in self.named_children():
        fn_recursive_add_processors(name, module, processors)

    return processors

Returns:
    `dict` of attention processors: A dictionary containing all attention processors used in the model, indexed by their weight names.
attn_processors
python
memoavatar/memo
memo/models/unet_2d_condition.py
https://github.com/memoavatar/memo/blob/master/memo/models/unet_2d_condition.py
Apache-2.0
def set_attn_processor(
    self,
    processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]],
    _remove_lora=False,
):
    r"""
    Sets the attention processor to use to compute attention.

    Parameters:
        processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
            The instantiated processor class or a dictionary of processor classes that will be set as the processor
            for **all** `Attention` layers.

            If `processor` is a dict, the key needs to define the path to the corresponding cross attention
            processor. This is strongly recommended when setting trainable attention processors.
    """
    count = len(self.attn_processors.keys())

    if isinstance(processor, dict) and len(processor) != count:
        raise ValueError(
            f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
            f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
        )

    def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
        if hasattr(module, "set_processor"):
            if not isinstance(processor, dict):
                module.set_processor(processor, _remove_lora=_remove_lora)
            else:
                module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)

        for sub_name, child in module.named_children():
            fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

    for name, module in self.named_children():
        fn_recursive_attn_processor(name, module, processor)

Sets the attention processor to use to compute attention.

Parameters:
    processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
        The instantiated processor class or a dictionary of processor classes that will be set as the processor
        for **all** `Attention` layers.

        If `processor` is a dict, the key needs to define the path to the corresponding cross attention
        processor. This is strongly recommended when setting trainable attention processors.
set_attn_processor
python
memoavatar/memo
memo/models/unet_2d_condition.py
https://github.com/memoavatar/memo/blob/master/memo/models/unet_2d_condition.py
Apache-2.0
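A usage sketch: install one processor class on every Attention layer, or a per-layer dict keyed exactly like the attn_processors mapping above. Here `unet` is an assumed instance of this module's UNet, and the processor import path follows memo/models/attention_processor.py:

from memo.models.attention_processor import AttnProcessor2_0

unet.set_attn_processor(AttnProcessor2_0())  # same processor class for all Attention layers
# or per layer, with keys matching the attn_processors mapping:
unet.set_attn_processor({name: AttnProcessor2_0() for name in unet.attn_processors.keys()})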
def set_default_attn_processor(self):
    """
    Disables custom attention processors and sets the default attention implementation.
    """
    if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
        processor = AttnAddedKVProcessor()
    elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
        processor = AttnProcessor()
    else:
        raise ValueError(
            f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
        )

    self.set_attn_processor(processor, _remove_lora=True)
Disables custom attention processors and sets the default attention implementation.
set_default_attn_processor
python
memoavatar/memo
memo/models/unet_2d_condition.py
https://github.com/memoavatar/memo/blob/master/memo/models/unet_2d_condition.py
Apache-2.0