hysts HF staff committed on
Commit
8f4ba14
·
1 Parent(s): 8557d69
.pre-commit-config.yaml CHANGED
@@ -1,7 +1,7 @@
1
  exclude: ^patch
2
  repos:
3
  - repo: https://github.com/pre-commit/pre-commit-hooks
4
- rev: v4.6.0
5
  hooks:
6
  - id: check-executables-have-shebangs
7
  - id: check-json
@@ -14,18 +14,15 @@ repos:
14
  args: ["--fix=lf"]
15
  - id: requirements-txt-fixer
16
  - id: trailing-whitespace
17
- - repo: https://github.com/myint/docformatter
18
- rev: v1.7.5
19
  hooks:
20
- - id: docformatter
21
- args: ["--in-place"]
22
- - repo: https://github.com/pycqa/isort
23
- rev: 5.13.2
24
- hooks:
25
- - id: isort
26
- args: ["--profile", "black"]
27
  - repo: https://github.com/pre-commit/mirrors-mypy
28
- rev: v1.10.0
29
  hooks:
30
  - id: mypy
31
  args: ["--ignore-missing-imports"]
@@ -36,14 +33,8 @@ repos:
36
  "types-PyYAML",
37
  "types-pytz",
38
  ]
39
- - repo: https://github.com/psf/black
40
- rev: 24.4.2
41
- hooks:
42
- - id: black
43
- language_version: python3.10
44
- args: ["--line-length", "119"]
45
  - repo: https://github.com/kynan/nbstripout
46
- rev: 0.7.1
47
  hooks:
48
  - id: nbstripout
49
  args:
@@ -52,7 +43,7 @@ repos:
52
  "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
53
  ]
54
  - repo: https://github.com/nbQA-dev/nbQA
55
- rev: 1.8.5
56
  hooks:
57
  - id: nbqa-black
58
  - id: nbqa-pyupgrade
 
1
  exclude: ^patch
2
  repos:
3
  - repo: https://github.com/pre-commit/pre-commit-hooks
4
+ rev: v5.0.0
5
  hooks:
6
  - id: check-executables-have-shebangs
7
  - id: check-json
 
14
  args: ["--fix=lf"]
15
  - id: requirements-txt-fixer
16
  - id: trailing-whitespace
17
+ - repo: https://github.com/astral-sh/ruff-pre-commit
18
+ rev: v0.8.4
19
  hooks:
20
+ - id: ruff
21
+ args: ["--fix"]
22
+ - id: ruff-format
23
+ args: ["--line-length", "119"]
 
 
 
24
  - repo: https://github.com/pre-commit/mirrors-mypy
25
+ rev: v1.14.0
26
  hooks:
27
  - id: mypy
28
  args: ["--ignore-missing-imports"]
 
33
  "types-PyYAML",
34
  "types-pytz",
35
  ]
 
 
 
 
 
 
36
  - repo: https://github.com/kynan/nbstripout
37
+ rev: 0.8.1
38
  hooks:
39
  - id: nbstripout
40
  args:
 
43
  "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
44
  ]
45
  - repo: https://github.com/nbQA-dev/nbQA
46
+ rev: 1.9.1
47
  hooks:
48
  - id: nbqa-black
49
  - id: nbqa-pyupgrade
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "recommendations": [
3
+ "ms-python.python",
4
+ "charliermarsh.ruff",
5
+ "streetsidesoftware.code-spell-checker",
6
+ "tamasfe.even-better-toml"
7
+ ]
8
+ }
.vscode/settings.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "editor.formatOnSave": true,
3
+ "files.insertFinalNewline": false,
4
+ "[python]": {
5
+ "editor.defaultFormatter": "charliermarsh.ruff",
6
+ "editor.formatOnType": true,
7
+ "editor.codeActionsOnSave": {
8
+ "source.fixAll.ruff": "explicit",
9
+ "source.organizeImports": "explicit"
10
+ }
11
+ },
12
+ "[jupyter]": {
13
+ "files.insertFinalNewline": false
14
+ },
15
+ "notebook.output.scrolling": true,
16
+ "notebook.formatOnCellExecution": true,
17
+ "notebook.formatOnSave.enabled": true,
18
+ "notebook.codeActionsOnSave": {
19
+ "source.organizeImports": "explicit"
20
+ }
21
+ }
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
4
  colorFrom: blue
5
  colorTo: pink
6
  sdk: gradio
7
- sdk_version: 4.42.0
8
  app_file: app.py
9
  pinned: false
10
  suggested_hardware: t4-small
 
4
  colorFrom: blue
5
  colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 5.9.1
8
  app_file: app.py
9
  pinned: false
10
  suggested_hardware: t4-small
app.py CHANGED
@@ -1,8 +1,7 @@
1
  #!/usr/bin/env python
2
 
3
- from __future__ import annotations
4
-
5
  import os
 
6
  import shlex
7
  import subprocess
8
  import sys
@@ -14,12 +13,12 @@ import torch
14
  from diffusers import DPMSolverMultistepScheduler
15
 
16
  if os.getenv("SYSTEM") == "spaces":
17
- with open("patch") as f:
18
- subprocess.run(shlex.split("patch -p1"), cwd="multires_textual_inversion", stdin=f)
19
 
20
  sys.path.insert(0, "multires_textual_inversion")
21
 
22
- from pipeline import MultiResPipeline, load_learned_concepts
23
 
24
  DESCRIPTION = "# [Multiresolution Textual Inversion](https://github.com/giannisdaras/multires_textual_inversion)"
25
 
@@ -34,7 +33,7 @@ Also, `number` should be an integer in [0, 9].
34
 
35
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
36
 
37
- model_id = "ashllay/stable-diffusion-v1-5-archive"
38
  if device.type == "cpu":
39
  pipe = MultiResPipeline.from_pretrained(model_id)
40
  else:
@@ -69,7 +68,14 @@ def run(prompt: str, n_images: int, n_steps: int, seed: int) -> list[PIL.Image.I
69
  )
70
 
71
 
72
- with gr.Blocks(css="style.css") as demo:
 
 
 
 
 
 
 
73
  gr.Markdown(DESCRIPTION)
74
 
75
  with gr.Row():
@@ -102,7 +108,6 @@ with gr.Blocks(css="style.css") as demo:
102
 
103
  with gr.Row():
104
  with gr.Group():
105
- fn = lambda x: run(x, 2, 10, 100)
106
  with gr.Row():
107
  gr.Examples(
108
  label="Examples 1",
@@ -114,7 +119,7 @@ with gr.Blocks(css="style.css") as demo:
114
  ],
115
  inputs=prompt,
116
  outputs=result,
117
- fn=fn,
118
  )
119
  with gr.Row():
120
  gr.Examples(
@@ -127,7 +132,7 @@ with gr.Blocks(css="style.css") as demo:
127
  ],
128
  inputs=prompt,
129
  outputs=result,
130
- fn=fn,
131
  )
132
  with gr.Row():
133
  gr.Examples(
@@ -139,7 +144,7 @@ with gr.Blocks(css="style.css") as demo:
139
  ],
140
  inputs=prompt,
141
  outputs=result,
142
- fn=fn,
143
  )
144
 
145
  inputs = [
 
1
  #!/usr/bin/env python
2
 
 
 
3
  import os
4
+ import pathlib
5
  import shlex
6
  import subprocess
7
  import sys
 
13
  from diffusers import DPMSolverMultistepScheduler
14
 
15
  if os.getenv("SYSTEM") == "spaces":
16
+ with pathlib.Path("patch").open() as f:
17
+ subprocess.run(shlex.split("patch -p1"), cwd="multires_textual_inversion", stdin=f, check=True) # noqa: S603
18
 
19
  sys.path.insert(0, "multires_textual_inversion")
20
 
21
+ from pipeline import MultiResPipeline, load_learned_concepts # type: ignore
22
 
23
  DESCRIPTION = "# [Multiresolution Textual Inversion](https://github.com/giannisdaras/multires_textual_inversion)"
24
 
 
33
 
34
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
35
 
36
+ model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
37
  if device.type == "cpu":
38
  pipe = MultiResPipeline.from_pretrained(model_id)
39
  else:
 
68
  )
69
 
70
 
71
+ def process_example(prompt: str) -> list[PIL.Image.Image]:
72
+ return run(prompt, 2, 10, 100)
73
+
74
+
75
+ process_example.zerogpu = True # type: ignore
76
+
77
+
78
+ with gr.Blocks(css_paths="style.css") as demo:
79
  gr.Markdown(DESCRIPTION)
80
 
81
  with gr.Row():
 
108
 
109
  with gr.Row():
110
  with gr.Group():
 
111
  with gr.Row():
112
  gr.Examples(
113
  label="Examples 1",
 
119
  ],
120
  inputs=prompt,
121
  outputs=result,
122
+ fn=process_example,
123
  )
124
  with gr.Row():
125
  gr.Examples(
 
132
  ],
133
  inputs=prompt,
134
  outputs=result,
135
+ fn=process_example,
136
  )
137
  with gr.Row():
138
  gr.Examples(
 
144
  ],
145
  inputs=prompt,
146
  outputs=result,
147
+ fn=process_example,
148
  )
149
 
150
  inputs = [
pyproject.toml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "multiresolution-textual-inversion"
3
+ version = "0.1.0"
4
+ description = ""
5
+ readme = "README.md"
6
+ requires-python = ">=3.10"
7
+ dependencies = [
8
+ "accelerate>=1.2.1",
9
+ "diffusers>=0.32.1",
10
+ "ftfy>=6.3.1",
11
+ "gradio>=5.9.1",
12
+ "hf-transfer>=0.1.8",
13
+ "spaces>=0.31.1",
14
+ "torch==2.4.0",
15
+ "transformers>=4.47.1",
16
+ ]
17
+
18
+ [tool.ruff]
19
+ line-length = 119
20
+
21
+ [tool.ruff.lint]
22
+ select = ["ALL"]
23
+ ignore = [
24
+ "COM812", # missing-trailing-comma
25
+ "D203", # one-blank-line-before-class
26
+ "D213", # multi-line-summary-second-line
27
+ "E501", # line-too-long
28
+ "SIM117", # multiple-with-statements
29
+ ]
30
+ extend-ignore = [
31
+ "D100", # undocumented-public-module
32
+ "D101", # undocumented-public-class
33
+ "D102", # undocumented-public-method
34
+ "D103", # undocumented-public-function
35
+ "D104", # undocumented-public-package
36
+ "D105", # undocumented-magic-method
37
+ "D107", # undocumented-public-init
38
+ "EM101", # raw-string-in-exception
39
+ "FBT001", # boolean-type-hint-positional-argument
40
+ "FBT002", # boolean-default-value-positional-argument
41
+ "PD901", # pandas-df-variable-name
42
+ "PGH003", # blanket-type-ignore
43
+ "PLR0913", # too-many-arguments
44
+ "PLR0915", # too-many-statements
45
+ "TRY003", # raise-vanilla-args
46
+ ]
47
+ unfixable = [
48
+ "F401", # unused-import
49
+ ]
50
+
51
+ [tool.ruff.format]
52
+ docstring-code-format = true
requirements.txt CHANGED
@@ -1,7 +1,259 @@
1
- accelerate==0.33.0
2
- diffusers==0.30.1
3
- ftfy==6.2.3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  numpy==1.26.4
5
- Pillow==10.4.0
6
- torch==2.0.1
7
- transformers==4.44.2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile pyproject.toml -o requirements.txt
3
+ accelerate==1.2.1
4
+ # via multiresolution-textual-inversion (pyproject.toml)
5
+ aiofiles==23.2.1
6
+ # via gradio
7
+ annotated-types==0.7.0
8
+ # via pydantic
9
+ anyio==4.7.0
10
+ # via
11
+ # gradio
12
+ # httpx
13
+ # starlette
14
+ certifi==2024.12.14
15
+ # via
16
+ # httpcore
17
+ # httpx
18
+ # requests
19
+ charset-normalizer==3.4.1
20
+ # via requests
21
+ click==8.1.8
22
+ # via
23
+ # typer
24
+ # uvicorn
25
+ diffusers==0.32.1
26
+ # via multiresolution-textual-inversion (pyproject.toml)
27
+ exceptiongroup==1.2.2
28
+ # via anyio
29
+ fastapi==0.115.6
30
+ # via gradio
31
+ ffmpy==0.5.0
32
+ # via gradio
33
+ filelock==3.16.1
34
+ # via
35
+ # diffusers
36
+ # huggingface-hub
37
+ # torch
38
+ # transformers
39
+ # triton
40
+ fsspec==2024.12.0
41
+ # via
42
+ # gradio-client
43
+ # huggingface-hub
44
+ # torch
45
+ ftfy==6.3.1
46
+ # via multiresolution-textual-inversion (pyproject.toml)
47
+ gradio==5.9.1
48
+ # via
49
+ # multiresolution-textual-inversion (pyproject.toml)
50
+ # spaces
51
+ gradio-client==1.5.2
52
+ # via gradio
53
+ h11==0.14.0
54
+ # via
55
+ # httpcore
56
+ # uvicorn
57
+ hf-transfer==0.1.8
58
+ # via multiresolution-textual-inversion (pyproject.toml)
59
+ httpcore==1.0.7
60
+ # via httpx
61
+ httpx==0.28.1
62
+ # via
63
+ # gradio
64
+ # gradio-client
65
+ # safehttpx
66
+ # spaces
67
+ huggingface-hub==0.27.0
68
+ # via
69
+ # accelerate
70
+ # diffusers
71
+ # gradio
72
+ # gradio-client
73
+ # tokenizers
74
+ # transformers
75
+ idna==3.10
76
+ # via
77
+ # anyio
78
+ # httpx
79
+ # requests
80
+ importlib-metadata==8.5.0
81
+ # via diffusers
82
+ jinja2==3.1.5
83
+ # via
84
+ # gradio
85
+ # torch
86
+ markdown-it-py==3.0.0
87
+ # via rich
88
+ markupsafe==2.1.5
89
+ # via
90
+ # gradio
91
+ # jinja2
92
+ mdurl==0.1.2
93
+ # via markdown-it-py
94
+ mpmath==1.3.0
95
+ # via sympy
96
+ networkx==3.4.2
97
+ # via torch
98
  numpy==1.26.4
99
+ # via
100
+ # accelerate
101
+ # diffusers
102
+ # gradio
103
+ # pandas
104
+ # transformers
105
+ nvidia-cublas-cu12==12.1.3.1
106
+ # via
107
+ # nvidia-cudnn-cu12
108
+ # nvidia-cusolver-cu12
109
+ # torch
110
+ nvidia-cuda-cupti-cu12==12.1.105
111
+ # via torch
112
+ nvidia-cuda-nvrtc-cu12==12.1.105
113
+ # via torch
114
+ nvidia-cuda-runtime-cu12==12.1.105
115
+ # via torch
116
+ nvidia-cudnn-cu12==9.1.0.70
117
+ # via torch
118
+ nvidia-cufft-cu12==11.0.2.54
119
+ # via torch
120
+ nvidia-curand-cu12==10.3.2.106
121
+ # via torch
122
+ nvidia-cusolver-cu12==11.4.5.107
123
+ # via torch
124
+ nvidia-cusparse-cu12==12.1.0.106
125
+ # via
126
+ # nvidia-cusolver-cu12
127
+ # torch
128
+ nvidia-nccl-cu12==2.20.5
129
+ # via torch
130
+ nvidia-nvjitlink-cu12==12.6.85
131
+ # via
132
+ # nvidia-cusolver-cu12
133
+ # nvidia-cusparse-cu12
134
+ nvidia-nvtx-cu12==12.1.105
135
+ # via torch
136
+ orjson==3.10.13
137
+ # via gradio
138
+ packaging==24.2
139
+ # via
140
+ # accelerate
141
+ # gradio
142
+ # gradio-client
143
+ # huggingface-hub
144
+ # spaces
145
+ # transformers
146
+ pandas==2.2.3
147
+ # via gradio
148
+ pillow==10.4.0
149
+ # via
150
+ # diffusers
151
+ # gradio
152
+ psutil==5.9.8
153
+ # via
154
+ # accelerate
155
+ # spaces
156
+ pydantic==2.10.4
157
+ # via
158
+ # fastapi
159
+ # gradio
160
+ # spaces
161
+ pydantic-core==2.27.2
162
+ # via pydantic
163
+ pydub==0.25.1
164
+ # via gradio
165
+ pygments==2.18.0
166
+ # via rich
167
+ python-dateutil==2.9.0.post0
168
+ # via pandas
169
+ python-multipart==0.0.20
170
+ # via gradio
171
+ pytz==2024.2
172
+ # via pandas
173
+ pyyaml==6.0.2
174
+ # via
175
+ # accelerate
176
+ # gradio
177
+ # huggingface-hub
178
+ # transformers
179
+ regex==2024.11.6
180
+ # via
181
+ # diffusers
182
+ # transformers
183
+ requests==2.32.3
184
+ # via
185
+ # diffusers
186
+ # huggingface-hub
187
+ # spaces
188
+ # transformers
189
+ rich==13.9.4
190
+ # via typer
191
+ ruff==0.8.5
192
+ # via gradio
193
+ safehttpx==0.1.6
194
+ # via gradio
195
+ safetensors==0.5.0
196
+ # via
197
+ # accelerate
198
+ # diffusers
199
+ # transformers
200
+ semantic-version==2.10.0
201
+ # via gradio
202
+ shellingham==1.5.4
203
+ # via typer
204
+ six==1.17.0
205
+ # via python-dateutil
206
+ sniffio==1.3.1
207
+ # via anyio
208
+ spaces==0.31.1
209
+ # via multiresolution-textual-inversion (pyproject.toml)
210
+ starlette==0.41.3
211
+ # via
212
+ # fastapi
213
+ # gradio
214
+ sympy==1.13.3
215
+ # via torch
216
+ tokenizers==0.21.0
217
+ # via transformers
218
+ tomlkit==0.13.2
219
+ # via gradio
220
+ torch==2.4.0
221
+ # via
222
+ # multiresolution-textual-inversion (pyproject.toml)
223
+ # accelerate
224
+ tqdm==4.67.1
225
+ # via
226
+ # huggingface-hub
227
+ # transformers
228
+ transformers==4.47.1
229
+ # via multiresolution-textual-inversion (pyproject.toml)
230
+ triton==3.0.0
231
+ # via torch
232
+ typer==0.15.1
233
+ # via gradio
234
+ typing-extensions==4.12.2
235
+ # via
236
+ # anyio
237
+ # fastapi
238
+ # gradio
239
+ # gradio-client
240
+ # huggingface-hub
241
+ # pydantic
242
+ # pydantic-core
243
+ # rich
244
+ # spaces
245
+ # torch
246
+ # typer
247
+ # uvicorn
248
+ tzdata==2024.2
249
+ # via pandas
250
+ urllib3==2.3.0
251
+ # via requests
252
+ uvicorn==0.34.0
253
+ # via gradio
254
+ wcwidth==0.2.13
255
+ # via ftfy
256
+ websockets==14.1
257
+ # via gradio-client
258
+ zipp==3.21.0
259
+ # via importlib-metadata
style.css CHANGED
@@ -1,7 +1,3 @@
1
  h1 {
2
  text-align: center;
3
  }
4
- img#visitor-badge {
5
- display: block;
6
- margin: auto;
7
- }
 
1
  h1 {
2
  text-align: center;
3
  }
 
 
 
 
uv.lock ADDED
The diff for this file is too large to render. See raw diff