Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -113,7 +113,6 @@ class SpatialAttnProcessor2_0(torch.nn.Module):
|
|
113 |
temb=None):
|
114 |
# un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
|
115 |
# un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
|
116 |
-
# 生成一个0到1之间的随机数
|
117 |
global total_count,attn_count,cur_step,mask1024,mask4096
|
118 |
global sa32, sa64
|
119 |
global write
|
@@ -124,7 +123,6 @@ class SpatialAttnProcessor2_0(torch.nn.Module):
|
|
124 |
self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]
|
125 |
else:
|
126 |
encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device),hidden_states[:1],self.id_bank[cur_step][1].to(self.device),hidden_states[1:]))
|
127 |
-
# 判断随机数是否大于0.5
|
128 |
if cur_step <=1:
|
129 |
hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)
|
130 |
else: # 256 1024 4096
|
@@ -377,12 +375,11 @@ css = '''
|
|
377 |
|
378 |
#################################################
|
379 |
title = r"""
|
380 |
-
<h1 align="center">
|
381 |
"""
|
382 |
|
383 |
description = r"""
|
384 |
-
<b>
|
385 |
-
❗️❗️❗️[<b>Important</b>] Personalization steps:<br>
|
386 |
1️⃣ Enter a Textual Description for Character; if you add the Ref-Image, make sure to <b>follow the class word</b> you want to customize with the <b>trigger word</b>: `img`, such as: `man img` or `woman img` or `girl img`.<br>
|
387 |
2️⃣ Enter the prompt array, each line corresponds to one generated image.<br>
|
388 |
3️⃣ Choose your preferred style template.<br>
|
@@ -390,32 +387,14 @@ description = r"""
|
|
390 |
"""
|
391 |
|
392 |
article = r"""
|
393 |
-
If
|
394 |
-
[](https://github.com/HVision-NKU/StoryDiffusion)
|
395 |
-
---
|
396 |
-
📝 **Citation**
|
397 |
-
<br>
|
398 |
-
If our work is useful for your research, please consider citing:
|
399 |
-
```bibtex
|
400 |
-
@article{Zhou2024storydiffusion,
|
401 |
-
title={StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation},
|
402 |
-
author={Zhou, Yupeng and Zhou, Daquan and Cheng, Ming-Ming and Feng, Jiashi and Hou, Qibin},
|
403 |
-
year={2024}
|
404 |
-
}
|
405 |
-
```
|
406 |
-
📋 **License**
|
407 |
-
<br>
|
408 |
-
The Contents you create are under Apache-2.0 LICENSE. The Code are under Attribution-NonCommercial 4.0 International.
|
409 |
-
📧 **Contact**
|
410 |
-
<br>
|
411 |
-
If you have any questions, please feel free to reach me out at <b>[email protected]</b>.
|
412 |
"""
|
413 |
version = r"""
|
414 |
-
<h3 align="center">
|
415 |
<h5 >1. Support image ref image. (Cartoon ref image is not supported now)</h5>
|
416 |
<h5 >2. Support Typesetting Style and Captioning.(By default, the prompt is used as the caption for each image. If you need to change the caption, add a # at the end of each line. Only the part after the # will be added as a caption to the image.)</h5>
|
417 |
<h5 >3. [NC] symbol (The [NC] symbol is used as a flag to indicate that no characters should be present in the generated scene images. If you want to do that, prepend "[NC]" at the beginning of the line. For example, to generate a scene of falling leaves without any character, write: "[NC] The leaves are falling."). Currently, this is supported only when using a Textual Description.</h5>
|
418 |
-
<h5>Tips: Not Ready Now! Just Test! It's better to use prompts to assist in controlling the character's attire. Depending on the limited code integration time, there might be some undiscovered bugs. If you find that a particular generation result is significantly poor, please email me (
|
419 |
"""
|
420 |
#################################################
|
421 |
global attn_count, total_count, id_length, total_length,cur_step, cur_model_type
|
|
|
113 |
temb=None):
|
114 |
# un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
|
115 |
# un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
|
|
|
116 |
global total_count,attn_count,cur_step,mask1024,mask4096
|
117 |
global sa32, sa64
|
118 |
global write
|
|
|
123 |
self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]
|
124 |
else:
|
125 |
encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device),hidden_states[:1],self.id_bank[cur_step][1].to(self.device),hidden_states[1:]))
|
|
|
126 |
if cur_step <=1:
|
127 |
hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)
|
128 |
else: # 256 1024 4096
|
|
|
375 |
|
376 |
#################################################
|
377 |
title = r"""
|
378 |
+
<h1 align="center">Ai Comic Generator</h1>
|
379 |
"""
|
380 |
|
381 |
description = r"""
|
382 |
+
<br>❗️❗️❗️[<b>Important</b>] Personalization steps:<br>
|
|
|
383 |
1️⃣ Enter a Textual Description for Character; if you add the Ref-Image, make sure to <b>follow the class word</b> you want to customize with the <b>trigger word</b>: `img`, such as: `man img` or `woman img` or `girl img`.<br>
|
384 |
2️⃣ Enter the prompt array, each line corresponds to one generated image.<br>
|
385 |
3️⃣ Choose your preferred style template.<br>
|
|
|
387 |
"""
|
388 |
|
389 |
article = r"""
|
390 |
+
<br>If you have any questions, please feel free to reach me out at <b>huzefa.ahmed.web@gmail.com</b>.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
391 |
"""
|
392 |
version = r"""
|
393 |
+
<h3 align="center">Ai Comic Generator</h3>
|
394 |
<h5 >1. Support image ref image. (Cartoon ref image is not supported now)</h5>
|
395 |
<h5 >2. Support Typesetting Style and Captioning.(By default, the prompt is used as the caption for each image. If you need to change the caption, add a # at the end of each line. Only the part after the # will be added as a caption to the image.)</h5>
|
396 |
<h5 >3. [NC] symbol (The [NC] symbol is used as a flag to indicate that no characters should be present in the generated scene images. If you want to do that, prepend "[NC]" at the beginning of the line. For example, to generate a scene of falling leaves without any character, write: "[NC] The leaves are falling."). Currently, this is supported only when using a Textual Description.</h5>
|
397 |
+
<h5>Tips: Not Ready Now! Just Test! It's better to use prompts to assist in controlling the character's attire. Due to the limited code integration time, there might be some undiscovered bugs. If you find that a particular generation result is significantly poor, please email me (huzefa.ahmed.web@gmail.com). Thank you very much.</h5>
|
398 |
"""
|
399 |
#################################################
|
400 |
global attn_count, total_count, id_length, total_length,cur_step, cur_model_type
|