Ashrafb committed on
Commit
677f1c0
·
verified ·
1 Parent(s): eb9648f

Update vtoonify_model.py

Browse files
Files changed (1) hide show
  1. vtoonify_model.py +19 -23
vtoonify_model.py CHANGED
@@ -201,30 +201,26 @@ class Model():
201
 
202
  return 'input.mp4', instyle, 'Successfully rescale the video to (%d, %d)'%(bottom-top, right-left)
203
 
204
def image_toonify(self, aligned_face: np.ndarray, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[np.ndarray, str]:
    """Apply the selected artistic style to one aligned face crop.

    Returns the stylized image as a uint8 HWC array together with a
    human-readable status message for the UI.
    """
    blank = np.zeros((256, 256, 3), np.uint8)
    # Inputs come from earlier pipeline steps; bail out early if missing.
    if aligned_face is None or instyle is None:
        return blank, 'Opps, something wrong with the input. Please go to Step 2 and Rescale Image/First Frame again.'
    # Reload model weights when the requested style differs from the loaded one.
    if style_type != self.style_name:
        exstyle, _ = self.load_model(style_type)
    if exstyle is None:
        return blank, 'Opps, something wrong with the style type. Please go to Step 1 and load model again.'
    with torch.no_grad():
        if self.color_transfer:
            # Full color transfer: use the external style code as-is.
            latent = exstyle
        else:
            # Structure-only transfer: first 7 layers from the external style,
            # the rest keep the input face's own style code.
            latent = instyle.clone()
            latent[:, :7] = exstyle[:, :7]
        face_tensor = self.transform(aligned_face).unsqueeze(dim=0).to(self.device)
        # Parsing map is predicted at 2x resolution then halved back to match.
        upscaled = F.interpolate(face_tensor, scale_factor=2, mode='bilinear', align_corners=False)
        parsing = F.interpolate(self.parsingpredictor(2 * upscaled)[0],
                                scale_factor=0.5, recompute_scale_factor=False).detach()
        # Generator input: image channels concatenated with the scaled parsing map.
        net_in = torch.cat((face_tensor, parsing / 16.), dim=1)
        out = self.vtoonify(net_in, latent.repeat(net_in.size(0), 1, 1), d_s=style_degree)
        out = torch.clamp(out, -1, 1)
    print('*** Toonify %dx%d image with style of %s' % (out.shape[2], out.shape[3], style_type))
    # Map float output in [-1, 1] back to uint8 [0, 255].
    result = ((out[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
    return result, 'Successfully toonify the image with style of %s' % (self.style_name)
228
  def video_tooniy(self, aligned_video: str, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[str, str]:
229
  #print(style_type + ' ' + self.style_name)
230
  if aligned_video is None:
 
201
 
202
  return 'input.mp4', instyle, 'Successfully rescale the video to (%d, %d)'%(bottom-top, right-left)
203
 
204
def image_toonify(self, aligned_face: np.ndarray, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[np.ndarray, str]:
    """Stylize a single aligned face image with VToonify.

    Parameters
    ----------
    aligned_face : np.ndarray
        Face crop produced by the rescale step (HWC array -- exact
        dtype/color order depends on self.transform; TODO confirm).
    instyle : torch.Tensor
        Intrinsic style code of the input face.
    exstyle : torch.Tensor
        External (artistic) style code; re-derived via load_model when
        style_type does not match the currently loaded style.
    style_degree : float
        Style interpolation weight, forwarded as d_s to the generator.
    style_type : str
        Name of the style to apply.

    Returns
    -------
    tuple[np.ndarray, str]
        Stylized uint8 HWC image and a human-readable status message.
    """
    # Guard against missing inputs from earlier pipeline steps.
    if instyle is None or aligned_face is None:
        # BUGFIX: user-facing message misspelled 'Opps' -> 'Oops'.
        return np.zeros((256, 256, 3), np.uint8), 'Oops, something wrong with the input. Please go to Step 2 and Rescale Image/First Frame again.'
    # Lazily switch models when the requested style differs from the loaded one.
    if self.style_name != style_type:
        exstyle, _ = self.load_model(style_type)
    if exstyle is None:
        return np.zeros((256, 256, 3), np.uint8), 'Oops, something wrong with the style type. Please go to Step 1 and load model again.'
    with torch.no_grad():
        # Blend style codes: first 7 layers from the external style,
        # remaining layers keep the input face's own code.
        s_w = instyle.clone()
        s_w[:, :7] = exstyle[:, :7]

        x = self.transform(aligned_face).unsqueeze(dim=0).to(self.device)
        # Parsing map is predicted at 2x resolution, then halved back to match x.
        x_p = F.interpolate(self.parsingpredictor(2 * (F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0],
                            scale_factor=0.5, recompute_scale_factor=False).detach()
        # Generator input: image channels concatenated with the scaled parsing map.
        inputs = torch.cat((x, x_p / 16.), dim=1)
        y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s=style_degree)
        y_tilde = torch.clamp(y_tilde, -1, 1)
    print('*** Toonify %dx%d image with style of %s' % (y_tilde.shape[2], y_tilde.shape[3], style_type))
    # Map float output in [-1, 1] back to uint8 [0, 255].
    return ((y_tilde[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8), 'Successfully toonify the image with style of %s' % (self.style_name)
223
 
 
 
 
 
 
 
 
 
 
224
  def video_tooniy(self, aligned_video: str, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float, style_type: str) -> tuple[str, str]:
225
  #print(style_type + ' ' + self.style_name)
226
  if aligned_video is None: