hysts HF staff committed on
Commit
85f94cd
·
1 Parent(s): 941e5d8
Files changed (4) hide show
  1. .pre-commit-config.yaml +2 -2
  2. README.md +1 -1
  3. app.py +101 -107
  4. model.py +3 -2
.pre-commit-config.yaml CHANGED
@@ -21,11 +21,11 @@ repos:
21
  - id: docformatter
22
  args: ['--in-place']
23
  - repo: https://github.com/pycqa/isort
24
- rev: 5.10.1
25
  hooks:
26
  - id: isort
27
  - repo: https://github.com/pre-commit/mirrors-mypy
28
- rev: v0.812
29
  hooks:
30
  - id: mypy
31
  args: ['--ignore-missing-imports']
 
21
  - id: docformatter
22
  args: ['--in-place']
23
  - repo: https://github.com/pycqa/isort
24
+ rev: 5.12.0
25
  hooks:
26
  - id: isort
27
  - repo: https://github.com/pre-commit/mirrors-mypy
28
+ rev: v0.991
29
  hooks:
30
  - id: mypy
31
  args: ['--ignore-missing-imports']
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
4
  colorFrom: blue
5
  colorTo: pink
6
  sdk: gradio
7
- sdk_version: 3.12.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
4
  colorFrom: blue
5
  colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 3.19.1
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py CHANGED
@@ -2,14 +2,14 @@
2
 
3
  from __future__ import annotations
4
 
5
- import os
6
-
7
  import gradio as gr
8
 
9
  from model import Model
10
 
11
- TITLE = '# Multiresolution Textual Inversion'
12
- DESCRIPTION = 'An unofficial demo for [https://github.com/giannisdaras/multires_textual_inversion](https://github.com/giannisdaras/multires_textual_inversion).'
 
 
13
 
14
  DETAILS = '''
15
  - To run the Semi Resolution-Dependent sampler, use the format: `<jane(number)>`.
@@ -19,112 +19,106 @@ DETAILS = '''
19
  For this demo, only `<jane>`, `<gta5-artwork>` and `<cat-toy>` are available.
20
  Also, `number` should be an integer in [0, 9].
21
  '''
22
- FOOTER = '<img id="visitor-badge" src="https://visitor-badge.glitch.me/badge?page_id=hysts.multires-textual-inversion" alt="visitor badge" />'
23
 
24
  #CACHE_EXAMPLES = os.getenv('SYSTEM') == 'spaces'
25
  CACHE_EXAMPLES = False
26
 
27
-
28
- def main():
29
- model = Model()
30
-
31
- with gr.Blocks(css='style.css') as demo:
32
- gr.Markdown(TITLE)
33
- gr.Markdown(DESCRIPTION)
34
-
35
- with gr.Row():
36
- with gr.Group():
37
- with gr.Row():
38
- prompt = gr.Textbox(label='Prompt')
39
- with gr.Row():
40
- num_images = gr.Slider(1,
41
- 9,
42
- value=1,
43
- step=1,
44
- label='Number of images')
45
- with gr.Row():
46
- num_steps = gr.Slider(1,
47
- 50,
48
- value=10,
49
- step=1,
50
- label='Number of inference steps')
51
- with gr.Row():
52
- seed = gr.Slider(0,
53
- 100000,
54
- value=100,
55
- step=1,
56
- label='Seed')
57
- with gr.Row():
58
- run_button = gr.Button('Run')
59
-
60
- with gr.Column():
61
- result = gr.Gallery(label='Result')
62
-
63
- with gr.Row():
64
- with gr.Group():
65
- fn = lambda x: model.run(x, 2, 10, 100)
66
- with gr.Row():
67
- gr.Examples(
68
- label='Examples 1',
69
- examples=[
70
- ['an image of <gta5-artwork(0)>'],
71
- ['an image of <jane(0)>'],
72
- ['an image of <jane(3)>'],
73
- ['an image of <cat-toy(0)>'],
74
- ],
75
- inputs=[prompt],
76
- outputs=[result],
77
- fn=fn,
78
- cache_examples=CACHE_EXAMPLES,
79
- )
80
- with gr.Row():
81
- gr.Examples(
82
- label='Examples 2',
83
- examples=[
84
- [
85
- 'an image of a cat in the style of <gta5-artwork(0)>'
86
- ],
87
- ['a painting of a dog in the style of <jane(0)>'],
88
- ['a painting of a dog in the style of <jane(5)>'],
89
- [
90
- 'a painting of a <cat-toy(0)> in the style of <jane(3)>'
91
- ],
92
  ],
93
- inputs=[prompt],
94
- outputs=[result],
95
- fn=fn,
96
- cache_examples=CACHE_EXAMPLES,
97
- )
98
- with gr.Row():
99
- gr.Examples(
100
- label='Examples 3',
101
- examples=[
102
- ['an image of <jane[0]>'],
103
- ['an image of <jane|0|>'],
104
- ['an image of <jane|3|>'],
105
  ],
106
- inputs=[prompt],
107
- outputs=[result],
108
- fn=fn,
109
- cache_examples=CACHE_EXAMPLES,
110
- )
111
-
112
- prompt.submit(
113
- fn=model.run,
114
- inputs=[prompt, num_images, num_steps, seed],
115
- outputs=[result],
116
- )
117
- run_button.click(
118
- fn=model.run,
119
- inputs=[prompt, num_images, num_steps, seed],
120
- outputs=[result],
121
- )
122
-
123
- with gr.Accordion('About available prompts', open=False):
124
- gr.Markdown(DETAILS)
125
- gr.Markdown(FOOTER)
126
- demo.launch(enable_queue=True, share=False)
127
-
128
-
129
- if __name__ == '__main__':
130
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  from __future__ import annotations
4
 
 
 
5
  import gradio as gr
6
 
7
  from model import Model
8
 
9
+ DESCRIPTION = '''# Multiresolution Textual Inversion
10
+
11
+ An unofficial demo for [https://github.com/giannisdaras/multires_textual_inversion](https://github.com/giannisdaras/multires_textual_inversion).
12
+ '''
13
 
14
  DETAILS = '''
15
  - To run the Semi Resolution-Dependent sampler, use the format: `<jane(number)>`.
 
19
  For this demo, only `<jane>`, `<gta5-artwork>` and `<cat-toy>` are available.
20
  Also, `number` should be an integer in [0, 9].
21
  '''
 
22
 
23
  #CACHE_EXAMPLES = os.getenv('SYSTEM') == 'spaces'
24
  CACHE_EXAMPLES = False
25
 
26
+ model = Model()
27
+
28
+ with gr.Blocks(css='style.css') as demo:
29
+ gr.Markdown(DESCRIPTION)
30
+
31
+ with gr.Row():
32
+ with gr.Group():
33
+ with gr.Row():
34
+ prompt = gr.Textbox(label='Prompt')
35
+ with gr.Row():
36
+ num_images = gr.Slider(1,
37
+ 9,
38
+ value=1,
39
+ step=1,
40
+ label='Number of images')
41
+ with gr.Row():
42
+ num_steps = gr.Slider(1,
43
+ 50,
44
+ value=10,
45
+ step=1,
46
+ label='Number of inference steps')
47
+ with gr.Row():
48
+ seed = gr.Slider(0, 100000, value=100, step=1, label='Seed')
49
+ with gr.Row():
50
+ run_button = gr.Button('Run')
51
+
52
+ with gr.Column():
53
+ result = gr.Gallery(label='Result')
54
+
55
+ with gr.Row():
56
+ with gr.Group():
57
+ fn = lambda x: model.run(x, 2, 10, 100)
58
+ with gr.Row():
59
+ gr.Examples(
60
+ label='Examples 1',
61
+ examples=[
62
+ ['an image of <gta5-artwork(0)>'],
63
+ ['an image of <jane(0)>'],
64
+ ['an image of <jane(3)>'],
65
+ ['an image of <cat-toy(0)>'],
66
+ ],
67
+ inputs=prompt,
68
+ outputs=result,
69
+ fn=fn,
70
+ cache_examples=CACHE_EXAMPLES,
71
+ )
72
+ with gr.Row():
73
+ gr.Examples(
74
+ label='Examples 2',
75
+ examples=[
76
+ [
77
+ 'an image of a cat in the style of <gta5-artwork(0)>'
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  ],
79
+ ['a painting of a dog in the style of <jane(0)>'],
80
+ ['a painting of a dog in the style of <jane(5)>'],
81
+ [
82
+ 'a painting of a <cat-toy(0)> in the style of <jane(3)>'
 
 
 
 
 
 
 
 
83
  ],
84
+ ],
85
+ inputs=prompt,
86
+ outputs=result,
87
+ fn=fn,
88
+ cache_examples=CACHE_EXAMPLES,
89
+ )
90
+ with gr.Row():
91
+ gr.Examples(
92
+ label='Examples 3',
93
+ examples=[
94
+ ['an image of <jane[0]>'],
95
+ ['an image of <jane|0|>'],
96
+ ['an image of <jane|3|>'],
97
+ ],
98
+ inputs=prompt,
99
+ outputs=result,
100
+ fn=fn,
101
+ cache_examples=CACHE_EXAMPLES,
102
+ )
103
+
104
+ inputs = [
105
+ prompt,
106
+ num_images,
107
+ num_steps,
108
+ seed,
109
+ ]
110
+ prompt.submit(
111
+ fn=model.run,
112
+ inputs=inputs,
113
+ outputs=result,
114
+ )
115
+ run_button.click(
116
+ fn=model.run,
117
+ inputs=inputs,
118
+ outputs=result,
119
+ )
120
+
121
+ with gr.Accordion('About available prompts', open=False):
122
+ gr.Markdown(DETAILS)
123
+
124
+ demo.queue().launch(show_api=False)
model.py CHANGED
@@ -1,6 +1,7 @@
1
  from __future__ import annotations
2
 
3
  import os
 
4
  import subprocess
5
  import sys
6
 
@@ -10,7 +11,7 @@ from diffusers import DPMSolverMultistepScheduler
10
 
11
  if os.getenv('SYSTEM') == 'spaces':
12
  with open('patch') as f:
13
- subprocess.run('patch -p1'.split(),
14
  cwd='multires_textual_inversion',
15
  stdin=f)
16
 
@@ -18,7 +19,7 @@ sys.path.insert(0, 'multires_textual_inversion')
18
 
19
  from pipeline import MultiResPipeline, load_learned_concepts
20
 
21
- HF_TOKEN = os.environ.get('HF_TOKEN')
22
 
23
 
24
  class Model:
 
1
  from __future__ import annotations
2
 
3
  import os
4
+ import shlex
5
  import subprocess
6
  import sys
7
 
 
11
 
12
  if os.getenv('SYSTEM') == 'spaces':
13
  with open('patch') as f:
14
+ subprocess.run(shlex.split('patch -p1'),
15
  cwd='multires_textual_inversion',
16
  stdin=f)
17
 
 
19
 
20
  from pipeline import MultiResPipeline, load_learned_concepts
21
 
22
+ HF_TOKEN = os.getenv('HF_TOKEN')
23
 
24
 
25
  class Model: