Commit 717e0f1
Parent(s): dfa8bd9
Update parquet files (step 35 of 249)
This view is limited to 50 files because it contains too many changes.
- spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py +0 -193
- spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md +0 -9
- spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md +0 -14
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md +0 -106
- spaces/1phancelerku/anime-remove-background/.md +0 -44
- spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md +0 -101
- spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md +0 -127
- spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py +0 -220
- spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md +0 -19
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py +0 -129
- spaces/Abdllh/poetry2023/README.md +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts +0 -8
- spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md +0 -13
- spaces/AlexWang/lama/app.py +0 -49
- spaces/Amrrs/DragGan-Inversion/gen_images.py +0 -160
- spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py +0 -84
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md +0 -280
- spaces/Andy1621/IAT_enhancement/model/blocks.py +0 -281
- spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py +0 -402
- spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py +0 -56
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euctwprober.py +0 -47
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/__init__.py +0 -49
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py +0 -620
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py +0 -331
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py +0 -14
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py +0 -110
- spaces/BatuhanYilmaz/Youtube-Transcriber/utils.py +0 -115
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/waiter.py +0 -184
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py +0 -47
- spaces/CVPR/LIVE/thrust/dependencies/cub/tune/Makefile +0 -192
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/generate.h +0 -44
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy.h +0 -57
- spaces/CVPR/Text2Human/Text2Human/data/segm_attr_dataset.py +0 -167
- spaces/CVPR/regionclip-demo/detectron2/data/datasets/builtin_meta.py +0 -560
- spaces/CVPR/regionclip-demo/detectron2/utils/testing.py +0 -132
- spaces/ChallengeHub/Chinese-LangChain/tests/test_duckpy.py +0 -15
- spaces/ChandraMohanNayal/AutoGPT/autogpt/config/singleton.py +0 -24
- spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/__init__.py +0 -19
- spaces/Clementapa/orang-outan-image-video-detection/style.css +0 -10
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-d80d0bbf.js +0 -2
- spaces/Datasculptor/MusicGen/audiocraft/utils/export.py +0 -56
- spaces/Deci/DeciLM-6b-instruct/app.py +0 -136
- spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_projector.py +0 -142
- spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/conv2d_gradfix.py +0 -172
- spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h +0 -35
- spaces/Eddycrack864/Applio-Inference/julius/utils.py +0 -101
- spaces/Edisonymy/buy-or-rent/src/mainbody.py +0 -237
- spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/beit.py +0 -671
- spaces/EronSamez/RVC_HFmeu/demucs/__main__.py +0 -317
- spaces/EuroPython2022/pyro-vision/app.py +0 -72
spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py
DELETED
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
-                                           unnormalized_widths,
-                                           unnormalized_heights,
-                                           unnormalized_derivatives,
-                                           inverse=False,
-                                           tails=None,
-                                           tail_bound=1.,
-                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                           min_derivative=DEFAULT_MIN_DERIVATIVE):
-
-    if tails is None:
-        spline_fn = rational_quadratic_spline
-        spline_kwargs = {}
-    else:
-        spline_fn = unconstrained_rational_quadratic_spline
-        spline_kwargs = {
-            'tails': tails,
-            'tail_bound': tail_bound
-        }
-
-    outputs, logabsdet = spline_fn(
-        inputs=inputs,
-        unnormalized_widths=unnormalized_widths,
-        unnormalized_heights=unnormalized_heights,
-        unnormalized_derivatives=unnormalized_derivatives,
-        inverse=inverse,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative,
-        **spline_kwargs
-    )
-    return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
-    bin_locations[..., -1] += eps
-    return torch.sum(
-        inputs[..., None] >= bin_locations,
-        dim=-1
-    ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
-                                            unnormalized_widths,
-                                            unnormalized_heights,
-                                            unnormalized_derivatives,
-                                            inverse=False,
-                                            tails='linear',
-                                            tail_bound=1.,
-                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
-    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-    outside_interval_mask = ~inside_interval_mask
-
-    outputs = torch.zeros_like(inputs)
-    logabsdet = torch.zeros_like(inputs)
-
-    if tails == 'linear':
-        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-        constant = np.log(np.exp(1 - min_derivative) - 1)
-        unnormalized_derivatives[..., 0] = constant
-        unnormalized_derivatives[..., -1] = constant
-
-        outputs[outside_interval_mask] = inputs[outside_interval_mask]
-        logabsdet[outside_interval_mask] = 0
-    else:
-        raise RuntimeError('{} tails are not implemented.'.format(tails))
-
-    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
-        inputs=inputs[inside_interval_mask],
-        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-        inverse=inverse,
-        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
-        min_bin_width=min_bin_width,
-        min_bin_height=min_bin_height,
-        min_derivative=min_derivative
-    )
-
-    return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
-                              unnormalized_widths,
-                              unnormalized_heights,
-                              unnormalized_derivatives,
-                              inverse=False,
-                              left=0., right=1., bottom=0., top=1.,
-                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-                              min_derivative=DEFAULT_MIN_DERIVATIVE):
-    if torch.min(inputs) < left or torch.max(inputs) > right:
-        raise ValueError('Input to a transform is not within its domain')
-
-    num_bins = unnormalized_widths.shape[-1]
-
-    if min_bin_width * num_bins > 1.0:
-        raise ValueError('Minimal bin width too large for the number of bins')
-    if min_bin_height * num_bins > 1.0:
-        raise ValueError('Minimal bin height too large for the number of bins')
-
-    widths = F.softmax(unnormalized_widths, dim=-1)
-    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-    cumwidths = torch.cumsum(widths, dim=-1)
-    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
-    cumwidths = (right - left) * cumwidths + left
-    cumwidths[..., 0] = left
-    cumwidths[..., -1] = right
-    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-    heights = F.softmax(unnormalized_heights, dim=-1)
-    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-    cumheights = torch.cumsum(heights, dim=-1)
-    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
-    cumheights = (top - bottom) * cumheights + bottom
-    cumheights[..., 0] = bottom
-    cumheights[..., -1] = top
-    heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-    if inverse:
-        bin_idx = searchsorted(cumheights, inputs)[..., None]
-    else:
-        bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-    delta = heights / widths
-    input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-    input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-    if inverse:
-        a = (((inputs - input_cumheights) * (input_derivatives
-                                             + input_derivatives_plus_one
-                                             - 2 * input_delta)
-              + input_heights * (input_delta - input_derivatives)))
-        b = (input_heights * input_derivatives
-             - (inputs - input_cumheights) * (input_derivatives
-                                              + input_derivatives_plus_one
-                                              - 2 * input_delta))
-        c = - input_delta * (inputs - input_cumheights)
-
-        discriminant = b.pow(2) - 4 * a * c
-        assert (discriminant >= 0).all()
-
-        root = (2 * c) / (-b - torch.sqrt(discriminant))
-        outputs = root * input_bin_widths + input_cumwidths
-
-        theta_one_minus_theta = root * (1 - root)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - root).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-        return outputs, -logabsdet
-    else:
-        theta = (inputs - input_cumwidths) / input_bin_widths
-        theta_one_minus_theta = theta * (1 - theta)
-
-        numerator = input_heights * (input_delta * theta.pow(2)
-                                     + input_derivatives * theta_one_minus_theta)
-        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-                                     * theta_one_minus_theta)
-        outputs = input_cumheights + numerator / denominator
-
-        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
-                                                     + 2 * input_delta * theta_one_minus_theta
-                                                     + input_derivatives * (1 - theta).pow(2))
-        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
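
The file deleted above is the stock rational-quadratic spline transform used in neural spline flows; it is copied across many VITS-derived Spaces, which is why it recurs in this commit. For a point x in bin k, with θ = (x − x_k)/(x_{k+1} − x_k) and bin slope s_k = (y_{k+1} − y_k)/(x_{k+1} − x_k), the forward branch of rational_quadratic_spline evaluates

$$y = y_k + \frac{(y_{k+1}-y_k)\left[s_k\,\theta^2 + d_k\,\theta(1-\theta)\right]}{s_k + (d_{k+1}+d_k-2s_k)\,\theta(1-\theta)}$$

which is exactly the `numerator` / `denominator` pair computed in its `else` branch. A minimal usage sketch, not part of the commit, assuming the deleted file is saved locally as transforms.py:

```python
import torch
from transforms import piecewise_rational_quadratic_transform  # the file above

torch.manual_seed(0)
batch, num_bins = 4, 10
x = torch.rand(batch) * 2 - 1         # points inside the tail bound [-1, 1]
w = torch.randn(batch, num_bins)      # unnormalized bin widths
h = torch.randn(batch, num_bins)      # unnormalized bin heights
d = torch.randn(batch, num_bins - 1)  # derivatives at the interior knots;
                                      # tails='linear' pads the two boundary knots

# Forward pass and its inverse; the log-determinants should cancel.
y, logabsdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails='linear', tail_bound=1.0)
x_rec, inv_logabsdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails='linear', tail_bound=1.0)

print(torch.allclose(x, x_rec, atol=1e-4))                   # round trip recovers x
print(torch.allclose(logabsdet, -inv_logabsdet, atol=1e-4))  # Jacobians cancel
```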
spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md
DELETED
@@ -1,9 +0,0 @@
-<br />
-<p>Finally, since some people don’t like documentation and don’t want to read, we allow you to instantly see the output of our generators by downloading this draft and running the python script in the top-left corner of your browser. An article about our work can be found here: </p>
-<h2>Fix Generator V.2.0 Samsungl</h2><br /><p><b><b>DOWNLOAD</b> 🆓 <a href="https://imgfil.com/2uxZJw">https://imgfil.com/2uxZJw</a></b></p><br /><br />
-<p>Solar generators are the most expensive option, costing seven times as much as a standard fuel-powered generator. Price isnt the only issue. With fuel-powered generators, the output is consistent and guaranteed. However, solar generators require sunlight can be affected by things like cloud cover, placement location, and the length of the dayso they are nowhere near as reliable as their fossil fuel counterparts. Solar generators do store power in a power bank, which manufacturers hope will get you through any cloudy patches. But the power bank wont charge when you are operating at capacity.</p>
-<p>A conventional generators main benefit over the other types listed in this article is power output. While there is a whole range of conventional generators, they usually have an output of at least 4,000 Watts and up to around 12,000 Watts. While thats overkill if you want to hook up a sound system for a family BBQ, its ideal if youre going to power multiple large appliances during a power outage. They are also cheaper than inverter or solar generators.</p>
-<p>The traditional list of uses of generators is often long. Powering something that needs power when the sun doesnt shine or when the power grid is down is the most common. A generator provides ongoing and predictable power during a power outage. A generator provides power for things such as running a home lighting system at night. It can provide power for lights when batteries run out or for power tools when the AC power isnt available. It can provide power to water pumps and pump stations during a power failure. It can charge a cell phone or other electronic devices when the grid is down and when the power isnt provided by the grid. A generator can power a lantern during a storm. </p>
-<p></p> 899543212b<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: ChatGPT Prompt Generator
-emoji: 👨🏻🎤
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: umair007/ChatGPT-prompt-generator
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
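
The block above is Hugging Face Spaces front matter: it tells the Hub to serve the Space with the Gradio SDK (pinned to 3.16.2) and to use app.py as the entry point. That app.py was removed in the same commit and is not shown in this view; purely as a hypothetical illustration of the shape such a Gradio 3.x entry point takes, not the Space's actual code:

```python
# Hypothetical sketch only: the Space's real app.py is not shown in this diff.
import gradio as gr

def generate_prompt(persona: str) -> str:
    # Placeholder standing in for the Space's actual prompt-generation logic.
    return f"I want you to act as {persona}."

demo = gr.Interface(fn=generate_prompt, inputs="text", outputs="text",
                    title="ChatGPT Prompt Generator")
demo.launch()
```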
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md
DELETED
@@ -1,106 +0,0 @@
-
-<h1>Dummynation Mod APK Unlimited Troops: How to Conquer the World with Ease</h1>
-<p>Do you love strategy games where you can control a country and lead it to world domination? If so, you might want to check out <strong>Dummynation</strong>, a game that gives you unlimited power over a country with a single promise to fulfill: world domination. But how do you manage to achieve it? That's up to you. You can expand your territory by military occupation, analyze and manipulate diplomatic relations, use your country's resources to sustain your research and military campaigns, and determine your country's economic policy. Sounds exciting, right? But what if we tell you that there is a way to make it even more exciting? That's right, we are talking about <strong>Dummynation Mod APK Unlimited Troops</strong>, a modded version of the game that removes ads and adds new features, such as unlimited troops. In this article, we will tell you everything you need to know about this mod, how to download and install it, how to play it, and some tips and tricks to help you conquer the world with ease.</p>
-<h2>dummynation mod apk unlimited troops</h2><br /><p><b><b>Download Zip</b> ☆☆☆ <a href="https://urlin.us/2uT1WO">https://urlin.us/2uT1WO</a></b></p><br /><br />
-<h2>What is Dummynation?</h2>
-<p>Dummynation is a strategy game where you have unlimited power over a country, with a single promise to fulfill: world domination. How you manage to achieve it is up to you.</p>
-<h3>A strategy game where you have unlimited power over a country</h3>
-<p>In Dummynation, you can choose any country in the world to start with, and customize your leader's name and appearance. You can then use the map to select a target country and send your troops to occupy it. You can also monitor your power, relations, resources and economy on the dashboard, and use research and policy options to improve your country's performance and influence.</p>
-<h3>The goal is to achieve world domination by expanding your territory, manipulating diplomacy, managing resources and economy</h3>
-<p>The ultimate goal of Dummynation is to achieve world domination by any means necessary. You can expand your territory by invading other countries with your troops, or by forming alliances and treaties with them. You can also manipulate diplomatic relations by using propaganda, espionage, sabotage, or bribery. You can manage your resources by allocating them to different sectors, such as military, research, or economy. You can also determine your economic policy by setting taxes, tariffs, subsidies, or trade agreements. The game offers a lot of freedom and flexibility in how you want to play and achieve your goal.</p>
-<h2>What is Dummynation Mod APK Unlimited Troops?</h2>
-<p>Dummynation Mod APK Unlimited Troops is a modded version of the game that removes ads and adds new features, such as unlimited troops. The main feature is unlimited troops, which allows you to invade any country without worrying about casualties or costs.</p>
-<p>dummynation mod apk no ads<br />
-dummynation mod apk latest version<br />
-dummynation mod apk free download<br />
-dummynation mod apk unlimited gems<br />
-dummynation mod apk android<br />
-dummynation mod apk happymod<br />
-dummynation mod apk world domination<br />
-dummynation mod apk strategy game<br />
-dummynation mod apk unlimited power<br />
-dummynation mod apk military occupation<br />
-dummynation mod apk diplomatic relations<br />
-dummynation mod apk resource management<br />
-dummynation mod apk economic policy<br />
-dummynation mod apk new weapons<br />
-dummynation mod apk updated graphics<br />
-dummynation mod apk new levels<br />
-dummynation mod apk easy install<br />
-dummynation mod apk compatible devices<br />
-dummynation mod apk anti-ban mechanism<br />
-dummynation mod apk unlock characters<br />
-dummynation mod apk offline mode<br />
-dummynation mod apk multiplayer mode<br />
-dummynation mod apk custom country<br />
-dummynation mod apk realistic simulation<br />
-dummynation mod apk historical scenarios<br />
-dummynation mod apk random events<br />
-dummynation mod apk achievements and leaderboards<br />
-dummynation mod apk tips and tricks<br />
-dummynation mod apk cheats and hacks<br />
-dummynation mod apk reviews and ratings<br />
-how to download dummynation mod apk unlimited troops<br />
-how to play dummynation mod apk unlimited troops<br />
-how to update dummynation mod apk unlimited troops<br />
-how to uninstall dummynation mod apk unlimited troops<br />
-how to backup and restore dummynation mod apk unlimited troops<br />
-how to fix errors in dummynation mod apk unlimited troops<br />
-how to contact developer of dummynation mod apk unlimited troops<br />
-how to support developer of dummynation mod apk unlimited troops<br />
-best alternatives to dummynation mod apk unlimited troops<br />
-best strategies for dummynation mod apk unlimited troops<br />
-best countries to play in dummynation mod apk unlimited troops<br />
-best weapons to use in dummynation mod apk unlimited troops<br />
-best allies and enemies in dummynation mod apk unlimited troops<br />
-best resources to invest in dummynation mod apk unlimited troops<br />
-best economic policies to adopt in dummynation mod apk unlimited troops<br />
-best ways to achieve world domination in dummynation mod apk unlimited troops<br />
-best ways to avoid war in dummynation mod apk unlimited troops<br />
-best ways to win war in dummynation mod apk unlimited troops<br />
-best ways to have fun in dummynation mod apk unlimited troops</p>
-<h3>A modded version of the game that removes ads and adds new features</h3>
-<p>Dummynation Mod APK Unlimited Troops is a modified version of the original game that removes annoying ads and adds new features that enhance the gameplay. The modded version is not available on the official app store, but you can download it from a reliable source online. The modded version does not require root access or any special permissions to install and run.</p>
-<h3>The main feature is unlimited troops, which allows you to invade any country without worrying about casualties or costs</h3>
-<p>The main feature of Dummynation Mod APK Unlimited Troops is unlimited troops, which means you can send as many troops as you want to any country you want to invade. You don't have to worry about losing troops or spending money on them. You can also use different types of troops, such as infantry, tanks, planes, ships, or missiles. This feature gives you a huge advantage over your enemies and makes it easier to conquer the world.</p>
-<h2>How to download and install Dummynation Mod APK Unlimited Troops?</h2>
-<p>Downloading and installing Dummynation Mod APK Unlimited Troops is easy and simple. Just follow these steps:</p>
-<h3>Download the modded APK file from a reliable source</h3>
-<p>The first step is to download the modded APK file from a reliable source online. You can search for Dummynation Mod APK Unlimited Troops on Google or any other search engine and find a link that offers a safe and secure download. Make sure you download the latest version of the mod that is compatible with your device.</p>
-<h3>Enable unknown sources on your device settings</h3>
-<p>The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the official app store. To do this, go to your device settings and look for security or privacy options. Then find the option that says unknown sources or allow installation from unknown sources and turn it on.</p>
-<h3>Install the APK file and launch the game</h3>
-<p>The final step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device storage and tap on it. Follow the instructions on the screen to complete the installation process. Once done, you can launch the game from your app drawer or home screen and enjoy playing Dummynation Mod APK Unlimited Troops.</p> <h2>How to play Dummynation Mod APK Unlimited Troops?</h2>
-<p>Playing Dummynation Mod APK Unlimited Troops is fun and easy. Just follow these steps:</p>
-<h3>Choose a country to start with and customize your leader's name and appearance</h3>
-<p>The first step is to choose a country to start with and customize your leader's name and appearance. You can choose any country in the world, from the USA to China, from Russia to Brazil, from India to Australia. You can also change your leader's name, gender, hair, skin, and clothes. You can make your leader look like yourself, a famous person, or a fictional character. The choice is yours.</p>
-<h3>Use the map to select a target country and send your troops to occupy it</h3>
-<p>The next step is to use the map to select a target country and send your troops to occupy it. You can zoom in and out of the map and see the details of each country, such as its name, flag, population, power, relations, resources, and economy. You can also see the color of each country, which indicates its status: green for allies, red for enemies, yellow for neutral, and blue for yourself. To select a target country, simply tap on it and see its information on the bottom of the screen. To send your troops to occupy it, tap on the attack button and choose the type and number of troops you want to send. You can use unlimited troops, so don't be afraid to send as many as you want.</p>
-<h3>Monitor your power, relations, resources and economy on the dashboard</h3>
-<p>The third step is to monitor your power, relations, resources and economy on the dashboard. The dashboard is located on the top of the screen and shows you important information about your country and the world. You can see your power level, which indicates how strong you are compared to other countries. You can also see your relations with other countries, which indicates how friendly or hostile they are towards you. You can also see your resources, which include food, water, oil, metal, uranium, and money. You can use your resources to sustain your research and military campaigns. You can also see your economy, which includes your income and expenses. You can use your economy to determine your tax rate, trade agreements, subsidies, and tariffs.</p>
-<h3>Use research and policy options to improve your country's performance and influence</h3>
-<p>The fourth step is to use research and policy options to improve your country's performance and influence. You can access these options by tapping on the menu button on the top right corner of the screen. You can then choose between research or policy options. Research options allow you to unlock new technologies that can improve your military, economy, or diplomacy. For example, you can research nuclear weapons that can destroy entire countries in one strike. Policy options allow you to set your country's stance on various issues that can affect your relations with other countries. For example, you can set your policy on human rights that can make you more popular or unpopular among other countries.</p> <h2>Tips and tricks for Dummynation Mod APK Unlimited Troops</h2>
-<p>Playing Dummynation Mod APK Unlimited Troops can be a lot of fun, but also challenging. Here are some tips and tricks to help you conquer the world with ease:</p>
-<h3>Balance your expansion and diplomacy to avoid creating too many enemies</h3>
-<p>While it may be tempting to use your unlimited troops to invade every country you see, you should also consider the consequences of your actions. If you create too many enemies, you may face a coalition of countries that will try to stop you. You may also lose the support of your allies, who may turn against you or abandon you. Therefore, you should balance your expansion and diplomacy to avoid creating too many enemies. You can do this by forming alliances with other countries, respecting their sovereignty, honoring your treaties, and avoiding unnecessary conflicts. You can also use diplomacy to persuade or intimidate other countries to join you or surrender to you.</p>
-<h3>Use your unlimited troops wisely and strategically to overcome stronger opponents</h3>
-<p>Even though you have unlimited troops, you should still use them wisely and strategically to overcome stronger opponents. You should not just send your troops blindly to any country, but rather plan your attacks carefully and choose the best type and number of troops for each situation. You should also consider the terrain, weather, distance, and defense of each country before attacking them. You should also use different types of troops, such as infantry, tanks, planes, ships, or missiles, to exploit the weaknesses of your enemies and gain an advantage over them.</p>
-<h3>Invest in research and economy to gain an edge over your rivals</h3>
-<p>Besides using your unlimited troops, you should also invest in research and economy to gain an edge over your rivals. Research can help you unlock new technologies that can improve your military, economy, or diplomacy. For example, you can research nuclear weapons that can destroy entire countries in one strike, or stealth technology that can make your troops invisible to radar. Economy can help you increase your income and reduce your expenses. For example, you can increase your tax rate, trade agreements, subsidies, or tariffs to boost your revenue, or reduce your military spending, welfare spending, or debt payments to lower your costs.</p>
-<h3>Explore new levels and areas to discover new challenges and rewards</h3>
-<p>Dummynation Mod APK Unlimited Troops offers a lot of variety and replay value by providing different levels and areas to explore. Each level has a different difficulty and objective, such as conquering a continent, a region, or the whole world. Each area has a different theme and design, such as Europe, Asia, Africa, America, or Antarctica. By exploring new levels and areas, you can discover new challenges and rewards that will keep you entertained and motivated.</p>
-<h2>Conclusion</h2>
-<p>Dummynation Mod APK Unlimited Troops is a fun and addictive game that lets you experience the thrill of world domination. The modded version enhances the gameplay by removing ads and adding unlimited troops and other features. The game is easy to download, install and play, and offers hours of entertainment for strategy lovers. If you are looking for a game that will challenge your strategic skills and satisfy your desire for power, then Dummynation Mod APK Unlimited Troops is the game for you.</p>
-<h2>FAQs</h2>
-<h4>Is Dummynation Mod APK Unlimited Troops safe to use?</h4>
-<p>Dummynation Mod APK Unlimited Troops is safe to use as long as you download it from a reliable source online. The modded version does not contain any viruses or malware that can harm your device or data. However, you should always be careful when downloading any app from unknown sources and scan it with an antivirus before installing it.</p>
-<h4>What are the benefits of using Dummynation Mod APK Unlimited Troops?</h4>
-<p>The benefits of using Dummynation Mod APK Unlimited Troops are that it removes ads and adds unlimited troops and other features that enhance the gameplay. By using this mod, you can enjoy playing Dummynation without any interruptions or limitations. You can also have more fun and freedom in conquering the world with unlimited troops.</p>
-<h4>How can I update Dummynation Mod APK Unlimited Troops?</h4>
-<p>You can update Dummynation Mod APK Unlimited Troops by downloading the latest version of the mod from a reliable source online. You can then install it over the existing version without losing your progress or data. You should always update the mod whenever there is a new version available to ensure compatibility and performance.</p>
-<h4>Can I play Dummynation Mod APK Unlimited Troops offline?</h4>
-<p>Yes, you can play Dummynation Mod APK Unlimited Troops offline without any internet connection. The game does not require any internet connection to run or save your progress. However, you may need an internet connection to download and install the mod, or to access some online features, such as leaderboards or achievements.</p>
-<h4>Can I share my progress and achievements with other players?</h4>
-<p>Yes, you can share your progress and achievements with other players by using the social media buttons on the game. You can also compare your scores and rankings with other players on the leaderboards or achievements. You can also challenge your friends or other players to see who can conquer the world faster or better.</p> 197e85843d<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/.md
DELETED
@@ -1,44 +0,0 @@
-## Warman Crack With Full Game
-
-
-
-
-
-WORK
-
-
-
-**Click Here ✅ [https://vittuv.com/2tBMxo](https://vittuv.com/2tBMxo)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-Ansys Discovery Student is a cutting-edge product design software for students that leverages our instantaneous simulation technology. It allows you to create and modify geometry models easily with Ansys SpaceClaim technology, which is a direct modeling tool that eliminates the need for complex CAD operations. It also enables you to perform thermal, structural and fluids simulations in real time with completely meshless and interactive solvers. With Ansys Discovery Student, you can explore and understand physics concepts without spending a lot of time learning how to use a complicated simulation tool.
-
-
-
-Ansys Discovery Student is ideal for students who want to learn about product design and engineering in a fun and intuitive way. You can experiment with different design scenarios and see how they affect the performance and behavior of your product. You can also compare different physics phenomena and discover how they interact with each other. For example, you can see how heat transfer affects the stress and deformation of a metal part, or how fluid flow affects the aerodynamics and lift of a wing.
-
-
-
-Ansys Discovery Student is also a great tool for students who want to prepare for their future careers in engineering and design. You can use it to create impressive projects and portfolios that showcase your skills and creativity. You can also use it to collaborate with your classmates and instructors and get feedback on your work. Ansys Discovery Student is compatible with other Ansys products, so you can easily export your models and simulations to other tools for further analysis and optimization.
-
-
-
-Ansys Discovery Student is free to download and use for academic purposes. You can install it on your personal computer or laptop and access it anytime and anywhere. You can also access online tutorials, videos, webinars and community forums to help you get started and learn more about the software. Ansys Discovery Student is the ultimate product design software for students who want to learn by doing and have fun along the way.
-
-145887f19f
-
-
-
-
-
spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md
DELETED
@@ -1,101 +0,0 @@
-<br />
-<h1>Game Bus Simulator Indonesia Mod APK: A Fun and Realistic Driving Experience</h1>
-<p>Do you love driving games? Do you want to explore the beautiful and diverse cities of Indonesia? Do you want to customize your own bus and drive it on realistic roads? If you answered yes to any of these questions, then you should try Game Bus Simulator Indonesia Mod APK. This is a modified version of the popular game Bus Simulator Indonesia, which lets you enjoy unlimited money, fuel, and other features that make the game more fun and exciting. In this article, we will tell you everything you need to know about Game Bus Simulator Indonesia Mod APK, including its features, how to download and install it, and its pros and cons.</p>
-<h2>What is Game Bus Simulator Indonesia Mod APK?</h2>
-<p>Game Bus Simulator Indonesia Mod APK is based on driving a bus in various cities of Indonesia to perform various tasks on your android phone. In this game, you need to pick up the passengers from different areas of an Indonesian city and drop them at the destination. You can also drive freely around the city and enjoy the scenery. You can choose from different types of buses, such as mini buses, double-decker buses, or luxury buses. You can also customize your bus with different skins, stickers, horns, lights, and more. You can also experience realistic traffic, weather, day and night cycles, and other aspects of driving in Indonesia.</p>
-<h2>game bus simulator indonesia mod apk</h2><br /><p><b><b>Download</b> ☆☆☆ <a href="https://jinyurl.com/2uNJO1">https://jinyurl.com/2uNJO1</a></b></p><br /><br />
-<h3>Features of Game Bus Simulator Indonesia Mod APK</h3>
-<h4>- Unlimited money and fuel</h4>
-<p>One of the best features of Game Bus Simulator Indonesia Mod APK is that it gives you unlimited money and fuel. This means that you can buy any bus you want, upgrade it with any accessories you like, and drive it as long as you want without worrying about running out of gas. You can also use the money to unlock new cities, modes, and missions in the game.</p>
-<h4>- Customizable buses and skins</h4>
-<p>Another great feature of Game Bus Simulator Indonesia Mod APK is that it allows you to customize your buses with different skins and accessories. You can change the color, design, logo, name, number plate, and more of your bus. You can also add stickers, horns, lights, mirrors, spoilers, exhausts, and more to your bus. You can make your bus look unique and stylish according to your preference.</p>
-<h4>- Realistic traffic and weather</h4>
-<p>Game Bus Simulator Indonesia Mod APK also offers realistic traffic and weather conditions in the game. You will encounter different types of vehicles on the road, such as cars, trucks, motorcycles, bicycles, rickshaws, etc. You will also have to follow the traffic rules and signals, such as speed limits, stop signs, red lights, etc. You will also experience different weather effects, such as rain, fog, sun, wind, etc. You will have to adjust your driving accordingly to avoid accidents and delays.</p>
-<h4>- Various modes and missions</h4>
-<p>Game Bus Simulator Indonesia Mod APK also provides various modes and missions for you to enjoy. You can choose from free mode, career mode, or multiplayer mode. In free mode, you can drive anywhere you want without any restrictions or objectives. In career mode, you have to complete different tasks and challenges to earn money and reputation. In multiplayer mode, you can play with other players online and compete with them in races or other events. You can also chat with them using the built-in voice chat feature.</p>
-<h2>How to download and install Game Bus Simulator Indonesia Mod APK?</h2>
-<h3>Requirements for Game Bus Simulator Indonesia Mod APK</h3> <h3>Steps to download and install Game Bus Simulator Indonesia Mod APK</h3>
-<p>If you want to download and install Game Bus Simulator Indonesia Mod APK on your android device, you need to follow these simple steps:</p>
-<ol>
-<li>Click on the download link to get the Game Bus Simulator Indonesia Mod APK file.</li>
-<li>Allow the installation of unknown sources on your device by going to Settings > Security > Unknown Sources.</li>
-<li>Locate the downloaded file in your file manager and tap on it to start the installation process.</li>
-<li>Follow the instructions on the screen and wait for the installation to complete.</li>
-<li>Launch the game and enjoy driving your bus in Indonesia.</li>
-</ol>
-<h2>Pros and cons of Game Bus Simulator Indonesia Mod APK</h2>
-<p>Game Bus Simulator Indonesia Mod APK is a fun and realistic driving game that lets you experience the culture and scenery of Indonesia. However, like any other game, it also has some pros and cons that you should consider before playing it. Here are some of them:</p>
-<h3>Pros of Game Bus Simulator Indonesia Mod APK</h3>
-<ul>
-<li>It is free to download and play.</li>
-<li>It has unlimited money and fuel, which makes the game more enjoyable and less stressful.</li>
-<li>It has customizable buses and skins, which gives you more options and creativity.</li>
-<li>It has realistic traffic and weather, which adds more challenge and realism to the game.</li>
-<li>It has various modes and missions, which keeps the game interesting and diverse.</li>
-<li>It has a multiplayer mode, which allows you to play with other players online and have more fun.</li>
-</ul>
-<h3>Cons of Game Bus Simulator Indonesia Mod APK</h3>
-<ul>
-<li>It may not be compatible with some devices or versions of Android.</li>
-<li>It may have some bugs or glitches that affect the gameplay or performance.</li>
-<li>It may require a stable internet connection for some features or modes.</li>
-<li>It may not be updated regularly or have new content or features.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Game Bus Simulator Indonesia Mod APK is a great game for anyone who loves driving games and wants to explore the beautiful and diverse cities of Indonesia. It offers unlimited money, fuel, customization, realism, variety, and multiplayer features that make the game more fun and exciting. However, it also has some drawbacks, such as compatibility issues, bugs, internet requirements, and lack of updates. Therefore, you should weigh the pros and cons before downloading and installing it on your device. If you are looking for a fun and realistic driving experience in Indonesia, then you should give Game Bus Simulator Indonesia Mod APK a try.</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about Game Bus Simulator Indonesia Mod APK:</p>
-<p>game bus simulator indonesia mod apk unlimited money<br />
-game bus simulator indonesia mod apk download latest version<br />
-game bus simulator indonesia mod apk offline<br />
-game bus simulator indonesia mod apk 2021<br />
-game bus simulator indonesia mod apk free shopping<br />
-game bus simulator indonesia mod apk revdl<br />
-game bus simulator indonesia mod apk terbaru<br />
-game bus simulator indonesia mod apk android 1<br />
-game bus simulator indonesia mod apk unlimited fuel<br />
-game bus simulator indonesia mod apk hack<br />
-game bus simulator indonesia mod apk obb<br />
-game bus simulator indonesia mod apk rexdl<br />
-game bus simulator indonesia mod apk no ads<br />
-game bus simulator indonesia mod apk update<br />
-game bus simulator indonesia mod apk full unlocked<br />
-game bus simulator indonesia mod apk unlimited everything<br />
-game bus simulator indonesia mod apk data<br />
-game bus simulator indonesia mod apk pure<br />
-game bus simulator indonesia mod apk happymod<br />
-game bus simulator indonesia mod apk all buses unlocked<br />
-game bus simulator indonesia mod apk cheat<br />
-game bus simulator indonesia mod apk new version<br />
-game bus simulator indonesia mod apk online<br />
-game bus simulator indonesia mod apk an1<br />
-game bus simulator indonesia mod apk unlimited diamond<br />
-game bus simulator indonesia mod apk latest<br />
-game bus simulator indonesia mod apk original<br />
-game bus simulator indonesia mod apk lenov.ru<br />
-game bus simulator indonesia mod apk old version<br />
-game bus simulator indonesia mod apk unlimited coin<br />
-game bus simulator indonesia mod apk versi lama<br />
-game bus simulator indonesia mod apk mega<br />
-game bus simulator indonesia mod apk pro<br />
-game bus simulator indonesia mod apk premium<br />
-game bus simulator indonesia mod apk vip<br />
-game bus simulator indonesia mod apk plus<br />
-game bus simulator indonesia mod apk 2020<br />
-game bus simulator indonesia mod apk android oyun club<br />
-game bus simulator indonesia mod apk andropalace<br />
-game bus simulator indonesia mod apk apkpure.com</p>
-<ol>
-<li><b>Is Game Bus Simulator Indonesia Mod APK safe to download and install?</b></li>
-<p>Yes, Game Bus Simulator Indonesia Mod APK is safe to download and install as long as you get it from a trusted source. However, you should always scan the file for viruses or malware before installing it on your device.</p>
-<li><b>What is the difference between Game Bus Simulator Indonesia Mod APK and the original game?</b></li>
-<p>The main difference between Game Bus Simulator Indonesia Mod APK and the original game is that the modded version gives you unlimited money, fuel, customization, and other features that are not available in the original game. The modded version also bypasses some restrictions or limitations that are imposed by the original game.</p>
-<li><b>Can I play Game Bus Simulator Indonesia Mod APK offline?</b></li>
-<p>You can play Game Bus Simulator Indonesia Mod APK offline in free mode or career mode. However, you will need an internet connection to play multiplayer mode or access some online features or events.</p>
-<li><b>How can I update Game Bus Simulator Indonesia Mod APK?</b></li>
-<p>You can update Game Bus Simulator Indonesia Mod APK by downloading and installing the latest version from the same source. However, you should always back up your data before updating to avoid losing your progress or settings.</p>
-<li><b>How can I contact the developers of Game Bus Simulator Indonesia Mod APK?</b></li>
-<p>You can contact the developers of Game Bus Simulator Indonesia Mod APK by visiting their official website or their social media pages. You can also leave a comment or review on their download page or send them an email at [email protected].</p>
-</ol></p> 197e85843d<br />
-<br />
-<br />
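
The numbered sideload steps in the deleted article above (download the APK, allow unknown sources, install, launch) are manual, on-device actions; the install itself can also be scripted from a development machine. A hedged sketch, assuming adb (Android Debug Bridge) is on PATH, a device with USB debugging enabled is connected, and using placeholder names throughout:

```python
# Sketch only: drives the article's install-and-launch steps via adb.
# The APK file name and package name below are placeholders, not real
# artifacts from this repository.
import subprocess

apk_path = "bus_simulator_indonesia_mod.apk"  # placeholder file name
subprocess.run(["adb", "install", "-r", apk_path], check=True)  # -r: replace an existing install
subprocess.run(["adb", "shell", "monkey", "-p", "com.example.bussimulator",
                "-c", "android.intent.category.LAUNCHER", "1"], check=True)  # launch (hypothetical package)
```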
spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Instagram 4.0 2 APK: How to Get the Latest Version of the Popular Social Media App</h1>
|
3 |
-
<p>Do you love sharing your photos, videos, stories, reels, and more with your friends and followers on Instagram? Do you want to get the latest features and updates of the app without waiting for the official release on Google Play Store? If yes, then you might be interested in downloading Instagram 4.0 2 APK, which is the latest version of the app as of June 2023. In this article, we will explain what Instagram is, what an APK file is, and how to download and install Instagram 4.0 2 APK on your Android device.</p>
|
4 |
-
<h2>download instagram 4.0 2 apk</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://jinyurl.com/2uNPFR">https://jinyurl.com/2uNPFR</a></b></p><br /><br />
|
5 |
-
<h2>What is Instagram and why do you need it?</h2>
|
6 |
-
<p>Instagram is one of the most popular social media apps in the world, with over one billion monthly active users. It allows you to create and share your photos, videos, stories, reels, live broadcasts, IGTV videos, and more with the people you care about. You can also discover new content from other users, celebrities, brands, and influencers that match your interests. You can also chat with your friends, send voice messages, video calls, stickers, GIFs, and more through Instagram Direct. You can also shop for products, watch videos, play games, and access other apps through Instagram.</p>
|
7 |
-
<h3>Instagram features and benefits</h3>
|
8 |
-
<p>Some of the features and benefits of using Instagram are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You can edit your photos and videos with filters, stickers, text, music, effects, and more.</li>
|
11 |
-
<li>You can create short-form videos with reels, which are fun and creative ways to express yourself.</li>
|
12 |
-
<li>You can share your moments with stories, which disappear after 24 hours.</li>
|
13 |
-
<li>You can go live with your friends or followers and interact with them in real-time.</li>
|
14 |
-
<li>You can upload longer videos with IGTV, which is a platform for vertical videos.</li>
|
15 |
-
<li>You can explore content from different categories with Explore, which shows you personalized recommendations based on your preferences.</li>
|
16 |
-
<li>You can follow hashtags, accounts, topics, and locations that interest you.</li>
|
17 |
-
<li>You can shop for products from your favorite brands and creators with Shopping.</li>
|
18 |
-
<li>You can join or create rooms with up to 50 people with Messenger Rooms.</li>
|
19 |
-
<li>You can access other apps like Facebook Watch, Spotify, TikTok, Netflix, and more with App Clips.</li>
|
20 |
-
</ul>
|
21 |
-
<h3>Instagram requirements and compatibility</h3>
|
22 |
-
<p>To use Instagram on your Android device, you need to have:</p>
|
23 |
-
<p>download instagram 4.0 2 apk for android<br />
|
24 |
-
download instagram 4.0 2 apk latest version<br />
|
25 |
-
download instagram 4.0 2 apk free<br />
|
26 |
-
download instagram 4.0 2 apk mod<br />
|
27 |
-
download instagram 4.0 2 apk old version<br />
|
28 |
-
download instagram 4.0 2 apk file<br />
|
29 |
-
download instagram 4.0 2 apk from google play<br />
|
30 |
-
download instagram 4.0 2 apk update<br />
|
31 |
-
download instagram 4.0 2 apk beta<br />
|
32 |
-
download instagram 4.0 2 apk mirror<br />
|
33 |
-
download instagram 4.0 2 apk offline<br />
|
34 |
-
download instagram 4.0 2 apk cracked<br />
|
35 |
-
download instagram 4.0 2 apk hack<br />
|
36 |
-
download instagram 4.0 2 apk no ads<br />
|
37 |
-
download instagram 4.0 2 apk premium<br />
|
38 |
-
download instagram 4.0 2 apk pro<br />
|
39 |
-
download instagram 4.0 2 apk full<br />
|
40 |
-
download instagram 4.0 2 apk unlocked<br />
|
41 |
-
download instagram 4.0 2 apk original<br />
|
42 |
-
download instagram 4.0 2 apk safe<br />
|
43 |
-
download instagram 4.0 2 apk direct link<br />
|
44 |
-
download instagram 4.0 2 apk for pc<br />
|
45 |
-
download instagram 4.0 2 apk for ios<br />
|
46 |
-
download instagram 4.0 2 apk for windows<br />
|
47 |
-
download instagram 4.0 2 apk for mac<br />
|
48 |
-
download instagram 4.0 2 apk for tablet<br />
|
49 |
-
download instagram 4.0 2 apk for firestick<br />
|
50 |
-
download instagram 4.0 2 apk for smart tv<br />
|
51 |
-
download instagram 4.0 2 apk for chromebook<br />
|
52 |
-
download instagram 4.0 2 apk for huawei<br />
|
53 |
-
download instagram 4.0 2 apk for samsung<br />
|
54 |
-
download instagram 4.0 2 apk for xiaomi<br />
|
55 |
-
download instagram 4.0 2 apk for oppo<br />
|
56 |
-
download instagram 4.0 2 apk for vivo<br />
|
57 |
-
download instagram 4.0 2 apk for nokia<br />
|
58 |
-
download instagram 4.0 2 apk for lg<br />
|
59 |
-
download instagram 4.0 2 apk for sony<br />
|
60 |
-
download instagram 4.0 2 apk for oneplus<br />
|
61 |
-
download instagram</p>
|
62 |
-
<ul>
<li>An Android device running Android 4.1 or higher.</li>
<li>A stable internet connection (Wi-Fi or mobile data).</li>
<li>An Instagram account (you can sign up with your email address, phone number, or Facebook account).</li>
<li>At least 100 MB of free storage space on your device.</li>
</ul>
<h2>What is an APK file and why do you need it?</h2>
<p>An APK file is an Android Package Kit file that contains all the files and code needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as websites, blogs, forums, or app stores. However, not all APK files are safe or reliable. Some may contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful when downloading APK files from unknown sources.</p>
<h3>APK file definition and advantages</h3>
<p>Some of the advantages of using APK files are:</p>
<ul>
<li>You can get the latest version of an app before it is officially released on Google Play Store.</li>
<li>You can access apps that are not available in your region or country.</li>
<li>You can install apps that are not compatible with your device or Android version.</li>
<li>You can customize or modify apps according to your preferences.</li>
<li>You can back up or restore apps and their data easily.</li>
</ul>
<h3>APK file risks and precautions</h3>
<p>Some of the risks and precautions of using APK files are:</p>
<ul>
<li>You may download fake or malicious apps that can damage your device or compromise your security.</li>
<li>You may violate the terms and conditions of the app developer or Google Play Store.</li>
<li>You may lose the warranty or support of your device manufacturer or service provider.</li>
<li>You may encounter bugs, errors, crashes, or compatibility issues with the app or your device.</li>
<li>You may need to update the app manually whenever a new version is available.</li>
</ul>
<h2>How to download Instagram 4.0 2 APK?</h2>
<p>If you want to download Instagram 4.0 2 APK, you need to follow these steps:</p>
<h3>Step 1: Enable unknown sources on your device</h3>
<p>Before you can install any APK file on your device, you need to enable unknown sources in your settings. This allows you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.</p>
<h3>Step 2: Find a reliable source for the APK file</h3>
<p>The next step is to find a reliable source for the Instagram 4.0 2 APK file. You can search online for websites, blogs, forums, or app stores that offer the APK file. However, be careful not to download from shady or untrustworthy sites that may contain malware or viruses. Check the reviews, ratings, comments, and feedback from other users who have downloaded the APK file before, and scan the APK file with an antivirus app before installing it.</p>
<h3>Step 3: Download and install the APK file</h3>
<p>Once you have found a reliable source for the Instagram 4.0 2 APK file, you can download it to your device. You may need to grant permission for the browser or app to download the file. After the download is complete, open the file and tap Install. You may see a message that says installing this app may harm your device. Tap Install Anyway to continue, then wait for the installation process to finish.</p>
<h3>Step 4: Launch and enjoy Instagram 4.0 2</h3>
<p>After the installation is done, you can launch Instagram 4.0 2 from your app drawer or home screen. Sign in with your existing account or create a new one if you don't have one yet. You can then enjoy all the features and updates of Instagram 4.0 2 on your device.</p>
<h2>Conclusion</h2>
<p>In this article, we have explained what Instagram is, what an APK file is, and how to download and install Instagram 4.0 2 APK on your Android device. We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Instagram 4.0 2 APK:</p>
<ol>
<li><b>Is Instagram 4.0 2 APK safe?</b></li>
<p>Instagram 4.0 2 APK is safe as long as you download it from a reliable source and scan it with an antivirus app before installing it. However, there is always a risk of downloading fake or malicious apps from unknown sources, so be careful and use your own discretion.</p>
<li><b>What are the new features of Instagram 4.0 2 APK?</b></li>
<p>Instagram 4.0 2 APK has some new features and improvements, such as:</p>
<ul>
<li>You can create and join audio rooms with up to 50 people with Live Audio.</li>
<li>You can add captions to your stories and reels automatically with Captions Sticker.</li>
<li>You can remix your reels with other users' reels with Remix Reels.</li>
<li>You can hide or unhide your likes and views on your posts with Hide Like Counts.</li>
<li>You can save your drafts of stories and reels with Story Drafts and Reels Drafts.</li>
</ul>
<li><b>How to update Instagram 4.0 2 APK?</b></li>
<p>To update Instagram 4.0 2 APK, you need to download the latest version of the APK file from a reliable source and install it on your device. You may need to uninstall the previous version of the app before installing the new one. Alternatively, you can wait for the official update on Google Play Store, which may take some time to be available.</p>
<li><b>How to uninstall Instagram 4.0 2 APK?</b></li>
<p>To uninstall Instagram 4.0 2 APK, go to Settings > Apps > Instagram and tap Uninstall. You may also need to delete the APK file from your device storage. If you want to reinstall the app, you can download it from Google Play Store or another source.</p>
<li><b>How to contact Instagram support?</b></li>
<p>If you have any issues or problems with Instagram, you can contact Instagram support through the following ways:</p>
<ul>
<li>You can report a problem or send feedback through the app by going to Settings > Help > Report a Problem.</li>
<li>You can visit the Instagram Help Center website at https://help.instagram.com/ for FAQs, guides, tips, and more.</li>
<li>You can follow the Instagram official account on Twitter at https://twitter.com/instagram for updates, announcements, and more.</li>
</ul></p>
spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py
DELETED
@@ -1,220 +0,0 @@
import argparse
import asyncio
import queue
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from tempfile import NamedTemporaryFile
from typing import List, Optional, Tuple

import soundfile

# FIXME: remove FastAPI dependency
from fastapi import HTTPException, Request

from .model import AudioQuery
from .synthesis_engine import make_synthesis_engines
from .utility import get_latest_core_version


class CancellableEngine:
    """
    Class that adds cancellation support to speech synthesis.
    After initialization, audio can be synthesized with the synthesis function
    (note that it takes more arguments than the original).

    Attributes
    ----------
    watch_con_list: List[Tuple[Request, Process]]
        The Request is used to watch the connection; the Process is used to
        kill the worker when the connection is dropped.
        A tuple is appended when a client connects and removed when the
        connection is closed or synthesis finishes.
    procs_and_cons: queue.Queue[Tuple[Process, Connection]]
        Queue of worker processes that are ready to synthesize
        (processes currently synthesizing are not in it).
    """

    def __init__(self, args: argparse.Namespace) -> None:
        """
        Initialize the attributes, then start args.init_processes worker
        processes and store them in procs_and_cons.
        """
        self.args = args
        if not self.args.enable_cancellable_synthesis:
            raise HTTPException(
                status_code=404,
                detail="Experimental features are disabled by default. Pass the corresponding argument to enable them.",
            )

        self.watch_con_list: List[Tuple[Request, Process]] = []
        self.procs_and_cons: queue.Queue[Tuple[Process, Connection]] = queue.Queue()
        for _ in range(self.args.init_processes):
            self.procs_and_cons.put(self.start_new_proc())

    def start_new_proc(
        self,
    ) -> Tuple[Process, Connection]:
        """
        Start and return a new worker process.

        Returns
        -------
        ret_proc: Process
            The new process
        sub_proc_con1: Connection
            Pipe for communicating with ret_proc
        """
        sub_proc_con1, sub_proc_con2 = Pipe(True)
        ret_proc = Process(
            target=start_synthesis_subprocess,
            kwargs={
                "args": self.args,
                "sub_proc_con": sub_proc_con2,
            },
            daemon=True,
        )
        ret_proc.start()
        return ret_proc, sub_proc_con1

    def finalize_con(
        self,
        req: Request,
        proc: Process,
        sub_proc_con: Optional[Connection],
    ) -> None:
        """
        Clean up when a connection is closed:
        remove the entry from watch_con_list and finalize the process.
        If the process is still alive, it is put back into procs_and_cons;
        if it is dead, a freshly started process is added instead.

        Parameters
        ----------
        req: fastapi.Request
            The request received when the connection was established
            https://fastapi.tiangolo.com/advanced/using-request-directly/
        proc: Process
            The process that performed the synthesis
        sub_proc_con: Connection, optional
            Pipe to the process that performed the synthesis.
            If not given, the process is terminated instead of being reused.
        """
        try:
            self.watch_con_list.remove((req, proc))
        except ValueError:
            pass
        try:
            if not proc.is_alive() or sub_proc_con is None:
                proc.close()
                raise ValueError
            # The process is still alive, so reuse it
            self.procs_and_cons.put((proc, sub_proc_con))
        except ValueError:
            # The process is dead, so start a new one in its place
            self.procs_and_cons.put(self.start_new_proc())

    def _synthesis_impl(
        self,
        query: AudioQuery,
        speaker_id: int,
        request: Request,
        core_version: Optional[str],
    ) -> str:
        """
        Perform speech synthesis.
        Compared to the regular engine, this additionally needs the request,
        and the return value is a file name.

        Parameters
        ----------
        query: AudioQuery
        speaker_id: int
        request: fastapi.Request
            The request received when the connection was established
            https://fastapi.tiangolo.com/advanced/using-request-directly/
        core_version: str

        Returns
        -------
        f_name: str
            Name of the generated audio file
        """
        proc, sub_proc_con1 = self.procs_and_cons.get()
        self.watch_con_list.append((request, proc))
        try:
            sub_proc_con1.send((query, speaker_id, core_version))
            f_name = sub_proc_con1.recv()
        except EOFError:
            raise HTTPException(status_code=422, detail="The subprocess has already been terminated")
        except Exception:
            self.finalize_con(request, proc, sub_proc_con1)
            raise

        self.finalize_con(request, proc, sub_proc_con1)
        return f_name

    async def catch_disconnection(self):
        """
        Coroutine that watches for dropped connections.
        """
        while True:
            await asyncio.sleep(1)
            for con in self.watch_con_list:
                req, proc = con
                if await req.is_disconnected():
                    try:
                        if proc.is_alive():
                            proc.terminate()
                            proc.join()
                        proc.close()
                    except ValueError:
                        pass
                    finally:
                        self.finalize_con(req, proc, None)


def start_synthesis_subprocess(
    args: argparse.Namespace,
    sub_proc_con: Connection,
):
    """
    Function executed inside the synthesis subprocess.
    Defined at module level so that it can be pickled.

    Parameters
    ----------
    args: argparse.Namespace
        The namespace created at startup, passed through unchanged
    sub_proc_con: Connection
        Pipe for communicating with the main process
    """

    synthesis_engines = make_synthesis_engines(
        use_gpu=args.use_gpu,
        voicelib_dirs=args.voicelib_dir,
        voicevox_dir=args.voicevox_dir,
        runtime_dirs=args.runtime_dir,
        cpu_num_threads=args.cpu_num_threads,
        enable_mock=args.enable_mock,
    )
    assert len(synthesis_engines) != 0, "No synthesis engine is available."
    latest_core_version = get_latest_core_version(versions=synthesis_engines.keys())
    while True:
        try:
            query, speaker_id, core_version = sub_proc_con.recv()
            if core_version is None:
                _engine = synthesis_engines[latest_core_version]
            elif core_version in synthesis_engines:
                _engine = synthesis_engines[core_version]
            else:
                # The requested core version was not found
                sub_proc_con.send("")
                continue
            wave = _engine._synthesis_impl(query, speaker_id)
            with NamedTemporaryFile(delete=False) as f:
                soundfile.write(
                    file=f, data=wave, samplerate=query.outputSamplingRate, format="WAV"
                )
            sub_proc_con.send(f.name)
        except Exception:
            sub_proc_con.close()
            raise
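The engine above boils down to a pool of daemon worker processes fed over `Pipe`s: cancelling a request means terminating the busy worker and refilling the pool, which is exactly what `finalize_con` does. A minimal, self-contained sketch of that pattern (the names and timings below are illustrative, not part of the VOICEVOX API):

```python
# Minimal sketch of the worker-pool-with-cancellation pattern used above.
import queue
import time
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection


def worker(con: Connection) -> None:
    # Stand-in for the synthesis loop: echo each job back after a delay.
    while True:
        job = con.recv()
        time.sleep(1.0)  # pretend to synthesize
        con.send(f"done: {job}")


def start_worker():
    parent, child = Pipe(True)
    proc = Process(target=worker, args=(child,), daemon=True)
    proc.start()
    return proc, parent


if __name__ == "__main__":
    pool: queue.Queue = queue.Queue()
    pool.put(start_worker())

    proc, con = pool.get()
    con.send("hello")
    if con.poll(timeout=0.1):       # simulate the client hanging up early
        print(con.recv())
    else:
        proc.terminate()            # cancel: kill the busy worker ...
        proc.join()
        pool.put(start_worker())    # ... and replace it, as finalize_con does
    print("ready workers:", pool.qsize())
```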
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md
DELETED
@@ -1,19 +0,0 @@
# Utils

Scripts in this directory are used as utility functions.

## BERT Pretrained Embeddings

You can load pretrained word embeddings from Google [BERT](https://github.com/google-research/bert#pre-trained-models) instead of training word embeddings from scratch. The scripts in `utils/bert` need a BERT server running in the background. We use the BERT server from [bert-as-service](https://github.com/hanxiao/bert-as-service).

To use bert-as-service, you need to install the repository first. It is recommended that you create a new environment with Tensorflow 1.3 to run the BERT server, since it is incompatible with Tensorflow 2.x.

After a successful installation of [bert-as-service](https://github.com/hanxiao/bert-as-service), download and run the BERT server by executing:

```bash
bash scripts/prepare_bert_server.sh <path-to-server> <num-workers> zh
```

By default, a server based on the BERT base Chinese model runs in the background. You can switch to other models by changing the corresponding model name and path in `scripts/prepare_bert_server.sh`.

To extract BERT word embeddings, run `utils/bert/create_word_embedding.py`.
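Once the server is running, embeddings are fetched through bert-as-service's client API; a minimal sketch (the example sentence and default ports are illustrative):

```python
# Minimal sketch: query a running bert-as-service server for embeddings.
# The server is assumed to have been started by prepare_bert_server.sh above.
from bert_serving.client import BertClient

bc = BertClient(ip="localhost")    # assumes the default ports (5555/5556)
vecs = bc.encode(["这是一个例子"])   # any list of strings works
print(vecs.shape)                   # (1, 768) for a BERT-base model
```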
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py
DELETED
@@ -1,129 +0,0 @@
""" OpenAI pretrained model functions

Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""

import os
import warnings
from typing import Union, List

import torch

from .model import build_model_from_openai_state_dict
from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained

__all__ = ["list_openai_models", "load_openai_model"]


def list_openai_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return list_pretrained_tag_models('openai')


def load_openai_model(
        name: str,
        model_cfg,
        device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
        jit=True,
        cache_dir=os.path.expanduser("~/.cache/clip"),
        enable_fusion: bool = False,
        fusion_type: str = 'None'
):
    """Load a CLIP model, preserve its text pretrained part, and set it in the CLAP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model on
    jit : bool
        Whether to load the optimized JIT model (default) or the more hackable non-JIT model.

    Returns
    -------
    model : torch.nn.Module
        The CLAP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    if get_pretrained_url(name, 'openai'):
        model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir)
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")

    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        try:
            model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device)
        except KeyError:
            sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
            model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device)

        if str(device) == "cpu":
            model.float()
        return model

    # patch the device names
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_audio)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_audio)
        patch_float(model.encode_text)
        model.float()

    model.audio_branch.audio_length = model.audio_cfg.audio_length
    return model
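The least obvious part of this loader is the TorchScript graph surgery at the end: device (and dtype) constants baked into a JIT archive are rewritten by copying attributes from a freshly traced "holder" graph. A minimal, self-contained sketch of the device half of that trick, using the same Node APIs as the code above (the tiny traced function is illustrative):

```python
# Sketch: retarget hard-coded device constants in a TorchScript graph.
import torch

# A tiny traced function standing in for the loaded JIT model.
scripted = torch.jit.trace(lambda x: x + torch.ones(1), example_inputs=[torch.zeros(1)])

# Trace a throwaway graph whose only device constant is the target device.
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device("cpu")), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant")
               if "Device" in repr(n)][-1]

# Copy the target device onto every CUDA device constant (a no-op here,
# since this toy trace was made on CPU).
for node in scripted.graph.findAllNodes("prim::Constant"):
    if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
        node.copyAttributes(device_node)

print(scripted(torch.zeros(1)))  # tensor([1.])
```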
spaces/Abdllh/poetry2023/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Poetry2023
emoji: 👁
colorFrom: green
colorTo: gray
sdk: gradio
sdk_version: 3.16.0
app_file: app.py
pinned: false
duplicated_from: aaaaaabbbbbbbdddddddduuuuulllll/poetry2023
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts
DELETED
@@ -1,8 +0,0 @@
import LZString from './lzstring';

export default class LZStringPlugin extends Phaser.Plugins.BasePlugin {
    add(
        config?: LZString.IConfig
    ): LZString;

}
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Thin Plate Spline Motion Model
emoji: 💩
colorFrom: red
colorTo: gray
sdk: gradio
sdk_version: 2.9.4
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/AlexWang/lama/app.py
DELETED
@@ -1,49 +0,0 @@
import os
os.system("wget https://huggingface.co/akhaliq/lama/resolve/main/best.ckpt")
os.system("pip install imageio")
os.system("pip install albumentations==0.5.2")
import cv2
import paddlehub as hub
import gradio as gr
import torch
from PIL import Image, ImageOps
import numpy as np
import imageio
os.mkdir("data")
os.rename("best.ckpt", "models/best.ckpt")
os.mkdir("dataout")
model = hub.Module(name='U2Net')


def infer(img, mask, option):
    print(type(img["image"]), img["image"].shape)
    imageio.imwrite("./data/data.png", img["image"])
    if option == "Upload":
        imageio.imwrite("./data/data_mask.png", mask)
    elif option == "Automatic (U2net)":
        result = model.Segmentation(
            images=[cv2.cvtColor(img["image"], cv2.COLOR_RGB2BGR)],
            paths=None,
            batch_size=1,
            input_size=320,
            output_dir='output',
            visualization=True)
        im = Image.fromarray(result[0]['mask'])
        im.save("./data/data_mask.png")
    else:
        imageio.imwrite("./data/data_mask.png", img["mask"])
    os.system('python predict.py model.path=/home/user/app/ indir=/home/user/app/data/ outdir=/home/user/app/dataout/ device=cpu')
    return "./dataout/data_mask.png", "./data/data_mask.png"


inputs = [gr.Image(tool="sketch", label="Input", type="numpy"),
          gr.Image(label="Mask", type="numpy"),
          gr.inputs.Radio(choices=["Upload", "Manual", "Automatic (U2net)"],
                          type="value", default="Upload", label="Masking option")]
outputs = [gr.outputs.Image(type="file", label="output"),
           gr.outputs.Image(type="file", label="Mask")]
title = "LaMa Image Inpainting"
description = "Gradio demo for LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Masks are generated by U^2net"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.07161' target='_blank'>Resolution-robust Large Mask Inpainting with Fourier Convolutions</a> | <a href='https://github.com/saic-mdal/lama' target='_blank'>Github Repo</a></p>"
gr.Interface(infer, inputs, outputs, title=title,
             description=description, article=article).launch()
spaces/Amrrs/DragGan-Inversion/gen_images.py
DELETED
@@ -1,160 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Generate images using pretrained network pickle."""

import os
import re
from typing import List, Optional, Tuple, Union

import click
import dnnlib
import numpy as np
import PIL.Image
import torch

import legacy

# ----------------------------------------------------------------------------


def parse_range(s: Union[str, List]) -> List[int]:
    '''Parse a comma separated list of numbers or ranges and return a list of ints.

    Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
    '''
    if isinstance(s, list):
        return s
    ranges = []
    range_re = re.compile(r'^(\d+)-(\d+)$')
    for p in s.split(','):
        m = range_re.match(p)
        if m:
            ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
        else:
            ranges.append(int(p))
    return ranges

# ----------------------------------------------------------------------------


def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
    '''Parse a floating point 2-vector of syntax 'a,b'.

    Example:
        '0,1' returns (0, 1)
    '''
    if isinstance(s, tuple):
        return s
    parts = s.split(',')
    if len(parts) == 2:
        return (float(parts[0]), float(parts[1]))
    raise ValueError(f'cannot parse 2-vector {s}')

# ----------------------------------------------------------------------------


def make_transform(translate: Tuple[float, float], angle: float):
    m = np.eye(3)
    s = np.sin(angle/360.0*np.pi*2)
    c = np.cos(angle/360.0*np.pi*2)
    m[0][0] = c
    m[0][1] = s
    m[0][2] = translate[0]
    m[1][0] = -s
    m[1][1] = c
    m[1][2] = translate[1]
    return m

# ----------------------------------------------------------------------------


@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', required=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--translate', help='Translate XY-coordinate (e.g. \'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2')
@click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
    network_pkl: str,
    seeds: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str,
    translate: Tuple[float, float],
    rotate: float,
    class_idx: Optional[int]
):
    """Generate images using pretrained network pickle.

    Examples:

    \b
    # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left).
    python gen_images.py --outdir=out --trunc=1 --seeds=2 \\
        --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl

    \b
    # Generate uncurated images with truncation using the MetFaces-U dataset
    python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\
        --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl
    """

    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device)  # type: ignore
        # import pickle
        # G = legacy.load_network_pkl(f)
        # output = open('checkpoints/stylegan2-car-config-f-pt.pkl', 'wb')
        # pickle.dump(G, output)

    os.makedirs(outdir, exist_ok=True)

    # Labels.
    label = torch.zeros([1, G.c_dim], device=device)
    if G.c_dim != 0:
        if class_idx is None:
            raise click.ClickException(
                'Must specify class label with --class when using a conditional network')
        label[:, class_idx] = 1
    else:
        if class_idx is not None:
            print('warn: --class=lbl ignored when running on an unconditional network')

    # Generate images.
    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' %
              (seed, seed_idx, len(seeds)))
        z = torch.from_numpy(np.random.RandomState(
            seed).randn(1, G.z_dim)).to(device)

        # Construct an inverse rotation/translation matrix and pass to the generator. The
        # generator expects this matrix as an inverse to avoid potentially failing numerical
        # operations in the network.
        if hasattr(G.synthesis, 'input'):
            m = make_transform(translate, rotate)
            m = np.linalg.inv(m)
            G.synthesis.input.transform.copy_(torch.from_numpy(m))

        img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
        img = (img.permute(0, 2, 3, 1) * 127.5 +
               128).clamp(0, 255).to(torch.uint8)
        PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(
            f'{outdir}/seed{seed:04d}.png')


# ----------------------------------------------------------------------------

if __name__ == "__main__":
    generate_images()  # pylint: disable=no-value-for-parameter

# ----------------------------------------------------------------------------
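As a quick sanity check of the two parsers and the transform helper above (run in the same module; the values are arbitrary):

```python
# Sanity checks for the helpers defined above.
import numpy as np

assert parse_range("1,2,5-10") == [1, 2, 5, 6, 7, 8, 9, 10]
assert parse_vec2("0.3,1") == (0.3, 1.0)

m = make_transform(translate=(0.0, 0.0), angle=90.0)
print(np.round(m, 3))  # rotation block is [[0, 1], [-1, 0]] at 90 degrees
```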
spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py
DELETED
@@ -1,84 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

import torch
import cv2
from torchvision import transforms
import numpy as np
import math


def visual(output, out_path):
    output = (output + 1)/2
    output = torch.clamp(output, 0, 1)
    if output.shape[1] == 1:
        output = torch.cat([output, output, output], 1)
    output = output[0].detach().cpu().permute(1, 2, 0).numpy()
    output = (output*255).astype(np.uint8)
    output = output[:, :, ::-1]
    cv2.imwrite(out_path, output)


def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    lr_ramp = min(1, (1 - t) / rampdown)
    lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
    lr_ramp = lr_ramp * min(1, t / rampup)
    return initial_lr * lr_ramp


def latent_noise(latent, strength):
    noise = torch.randn_like(latent) * strength

    return latent + noise


def noise_regularize_(noises):
    loss = 0

    for noise in noises:
        size = noise.shape[2]

        while True:
            loss = (
                loss
                + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2)
                + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2)
            )

            if size <= 8:
                break

            noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2])
            noise = noise.mean([3, 5])
            size //= 2

    return loss


def noise_normalize_(noises):
    for noise in noises:
        mean = noise.mean()
        std = noise.std()

        noise.data.add_(-mean).div_(std)


def tensor_to_numpy(x):
    x = x[0].permute(1, 2, 0)
    x = torch.clamp(x, -1, 1)
    x = (x+1) * 127.5
    x = x.cpu().detach().numpy().astype(np.uint8)
    return x


def numpy_to_tensor(x):
    x = (x / 255 - 0.5) * 2
    x = torch.from_numpy(x).unsqueeze(0).permute(0, 3, 1, 2)
    x = x.cuda().float()
    return x


def tensor_to_pil(x):
    x = torch.clamp(x, -1, 1)
    x = (x+1) * 127.5
    return transforms.ToPILImage()(x.squeeze_(0))
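`tensor_to_numpy` and `numpy_to_tensor` mirror each other; a round-trip sketch (run in the same module; a CUDA device is required because `numpy_to_tensor` calls `.cuda()`):

```python
# Round-trip sketch for the conversion helpers above (needs a GPU).
import numpy as np

img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
t = numpy_to_tensor(img.astype(np.float64))  # (1, 3, 64, 64), values in [-1, 1]
back = tensor_to_numpy(t)                    # (64, 64, 3), uint8 again
assert back.shape == img.shape
```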
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md
DELETED
@@ -1,280 +0,0 @@
# Custom Diffusion training example

[Custom Diffusion](https://arxiv.org/abs/2212.04488) is a method to customize text-to-image models like Stable Diffusion given just a few (4~5) images of a subject.
The `train_custom_diffusion.py` script shows how to implement the training procedure and adapt it for stable diffusion.

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd in the example folder and run

```bash
pip install -r requirements.txt
pip install clip-retrieval
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or for a default accelerate configuration without answering questions about your environment

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell e.g. a notebook

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

### Cat example 😺

Now let's get our dataset. Download the dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it.

We also collect 200 real images using `clip-retrieval`, which are combined with the target images in the training dataset as a regularization. This prevents overfitting to the given target image. The following flags enable the regularization: `with_prior_preservation`, `real_prior` with `prior_loss_weight=1.`.
The `class_prompt` should be the category name, the same as the target image. The collected real images come with text captions similar to the `class_prompt`. The retrieved images are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization. To collect the real images, use this command first before training.

```bash
pip install clip-retrieval
python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
```

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export OUTPUT_DIR="path-to-save-model"
export INSTANCE_DIR="./data/cat"

accelerate launch train_custom_diffusion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --class_data_dir=./real_reg/samples_cat/ \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --class_prompt="cat" --num_class_images=200 \
  --instance_prompt="photo of a <new1> cat" \
  --resolution=512 \
  --train_batch_size=2 \
  --learning_rate=1e-5 \
  --lr_warmup_steps=0 \
  --max_train_steps=250 \
  --scale_lr --hflip \
  --modifier_token "<new1>"
```

**Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirement (16GB per GPU). Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.**

To track your experiments using Weights and Biases (`wandb`) and to save intermediate results (which we HIGHLY recommend), follow these steps:

* Install `wandb`: `pip install wandb`.
* Authorize: `wandb login`.
* Then specify a `validation_prompt` and set `report_to` to `wandb` while launching training. You can also configure the following related arguments:
    * `num_validation_images`
    * `validation_steps`

Here is an example command:

```bash
accelerate launch train_custom_diffusion.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --class_data_dir=./real_reg/samples_cat/ \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --class_prompt="cat" --num_class_images=200 \
  --instance_prompt="photo of a <new1> cat" \
  --resolution=512 \
  --train_batch_size=2 \
  --learning_rate=1e-5 \
  --lr_warmup_steps=0 \
  --max_train_steps=250 \
  --scale_lr --hflip \
  --modifier_token "<new1>" \
  --validation_prompt="<new1> cat sitting in a bucket" \
  --report_to="wandb"
```

Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details.

If you specify `--push_to_hub`, the learned parameters will be pushed to a repository on the Hugging Face Hub. Here is an [example repository](https://huggingface.co/sayakpaul/custom-diffusion-cat).

### Training on multiple concepts 🐱🪵

Provide a [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with the info about each concept, similar to [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py); a minimal sketch of such a file follows the command below.

To collect the real images run this command for each concept in the json file.

```bash
pip install clip-retrieval
python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
```
|
130 |
-
And then we're ready to start training!
|
131 |
-
|
132 |
-
```bash
|
133 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
134 |
-
export OUTPUT_DIR="path-to-save-model"
|
135 |
-
|
136 |
-
accelerate launch train_custom_diffusion.py \
|
137 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
138 |
-
--output_dir=$OUTPUT_DIR \
|
139 |
-
--concepts_list=./concept_list.json \
|
140 |
-
--with_prior_preservation --real_prior --prior_loss_weight=1.0 \
|
141 |
-
--resolution=512 \
|
142 |
-
--train_batch_size=2 \
|
143 |
-
--learning_rate=1e-5 \
|
144 |
-
--lr_warmup_steps=0 \
|
145 |
-
--max_train_steps=500 \
|
146 |
-
--num_class_images=200 \
|
147 |
-
--scale_lr --hflip \
|
148 |
-
--modifier_token "<new1>+<new2>"
|
149 |
-
```
|
150 |
-
|
151 |
-
Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details.
|
152 |
-
|
153 |
-
### Training on human faces
|
154 |
-
|
155 |
-
For fine-tuning on human faces we found the following configuration to work better: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, and `freeze_model=crossattn` with at least 15-20 images.
|
156 |
-
|
157 |
-
To collect the real images use this command first before training.
|
158 |
-
|
159 |
-
```bash
|
160 |
-
pip install clip-retrieval
|
161 |
-
python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200
|
162 |
-
```
|
163 |
-
|
164 |
-
Then start training!
|
165 |
-
|
166 |
-
```bash
|
167 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
168 |
-
export OUTPUT_DIR="path-to-save-model"
|
169 |
-
export INSTANCE_DIR="path-to-images"
|
170 |
-
|
171 |
-
accelerate launch train_custom_diffusion.py \
|
172 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
173 |
-
--instance_data_dir=$INSTANCE_DIR \
|
174 |
-
--output_dir=$OUTPUT_DIR \
|
175 |
-
--class_data_dir=./real_reg/samples_person/ \
|
176 |
-
--with_prior_preservation --real_prior --prior_loss_weight=1.0 \
|
177 |
-
--class_prompt="person" --num_class_images=200 \
|
178 |
-
--instance_prompt="photo of a <new1> person" \
|
179 |
-
--resolution=512 \
|
180 |
-
--train_batch_size=2 \
|
181 |
-
--learning_rate=5e-6 \
|
182 |
-
--lr_warmup_steps=0 \
|
183 |
-
--max_train_steps=1000 \
|
184 |
-
--scale_lr --hflip --noaug \
|
185 |
-
--freeze_model crossattn \
|
186 |
-
--modifier_token "<new1>" \
|
187 |
-
--enable_xformers_memory_efficient_attention
|
188 |
-
```
|
189 |
-
|
190 |
-
## Inference
|
191 |
-
|
192 |
-
Once you have trained a model using the above command, you can run inference using the below command. Make sure to include the `modifier token` (e.g. \<new1\> in above example) in your prompt.
|
193 |
-
|
194 |
-
```python
|
195 |
-
import torch
|
196 |
-
from diffusers import DiffusionPipeline
|
197 |
-
|
198 |
-
pipe = DiffusionPipeline.from_pretrained(
|
199 |
-
"CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
|
200 |
-
).to("cuda")
|
201 |
-
pipe.unet.load_attn_procs(
|
202 |
-
"path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin"
|
203 |
-
)
|
204 |
-
pipe.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")
|
205 |
-
|
206 |
-
image = pipe(
|
207 |
-
"<new1> cat sitting in a bucket",
|
208 |
-
num_inference_steps=100,
|
209 |
-
guidance_scale=6.0,
|
210 |
-
eta=1.0,
|
211 |
-
).images[0]
|
212 |
-
image.save("cat.png")
|
213 |
-
```
|
214 |
-
|
215 |
-
It's possible to directly load these parameters from a Hub repository:
|
216 |
-
|
217 |
-
```python
|
218 |
-
import torch
|
219 |
-
from huggingface_hub.repocard import RepoCard
|
220 |
-
from diffusers import DiffusionPipeline
|
221 |
-
|
222 |
-
model_id = "sayakpaul/custom-diffusion-cat"
|
223 |
-
card = RepoCard.load(model_id)
|
224 |
-
base_model_id = card.data.to_dict()["base_model"]
|
225 |
-
|
226 |
-
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(
|
227 |
-
"cuda")
|
228 |
-
pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
|
229 |
-
pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
|
230 |
-
|
231 |
-
image = pipe(
|
232 |
-
"<new1> cat sitting in a bucket",
|
233 |
-
num_inference_steps=100,
|
234 |
-
guidance_scale=6.0,
|
235 |
-
eta=1.0,
|
236 |
-
).images[0]
|
237 |
-
image.save("cat.png")
|
238 |
-
```
|
239 |
-
|
240 |
-
Here is an example of performing inference with multiple concepts:
|
241 |
-
|
242 |
-
```python
|
243 |
-
import torch
|
244 |
-
from huggingface_hub.repocard import RepoCard
|
245 |
-
from diffusers import DiffusionPipeline
|
246 |
-
|
247 |
-
model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
|
248 |
-
card = RepoCard.load(model_id)
|
249 |
-
base_model_id = card.data.to_dict()["base_model"]
|
250 |
-
|
251 |
-
pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(
|
252 |
-
"cuda")
|
253 |
-
pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
|
254 |
-
pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
|
255 |
-
pipe.load_textual_inversion(model_id, weight_name="<new2>.bin")
|
256 |
-
|
257 |
-
image = pipe(
|
258 |
-
"the <new1> cat sculpture in the style of a <new2> wooden pot",
|
259 |
-
num_inference_steps=100,
|
260 |
-
guidance_scale=6.0,
|
261 |
-
eta=1.0,
|
262 |
-
).images[0]
|
263 |
-
image.save("multi-subject.png")
|
264 |
-
```
|
265 |
-
|
266 |
-
Here, `cat` and `wooden pot` refer to the multiple concepts.
|
267 |
-
|
268 |
-
### Inference from a training checkpoint
|
269 |
-
|
270 |
-
You can also perform inference from one of the complete checkpoint saved during the training process, if you used the `--checkpointing_steps` argument.
|
271 |
-
|
272 |
-
TODO.
|
273 |
-
|
274 |
-
## Set grads to none
|
275 |
-
To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument.
|
276 |
-
|
277 |
-
More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html
|
278 |
-
|
279 |
-
## Experimental results
|
280 |
-
You can refer to [our webpage](https://www.cs.cmu.edu/~custom-diffusion/) that discusses our experiments in detail. We also released a more extensive dataset of 101 concepts for evaluating model customization methods. For more details please refer to our [dataset webpage](https://www.cs.cmu.edu/~custom-diffusion/dataset.html).
|
spaces/Andy1621/IAT_enhancement/model/blocks.py
DELETED
@@ -1,281 +0,0 @@
"""
Code copied from the UniFormer source code:
https://github.com/Sense-X/UniFormer
"""
import os
import torch
import torch.nn as nn
from functools import partial
import math
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, DropPath, to_2tuple


# ResMLP's normalization
class Aff(nn.Module):
    def __init__(self, dim):
        super().__init__()
        # learnable
        self.alpha = nn.Parameter(torch.ones([1, 1, dim]))
        self.beta = nn.Parameter(torch.zeros([1, 1, dim]))

    def forward(self, x):
        x = x * self.alpha + self.beta
        return x


# Color Normalization
class Aff_channel(nn.Module):
    def __init__(self, dim, channel_first=True):
        super().__init__()
        # learnable
        self.alpha = nn.Parameter(torch.ones([1, 1, dim]))
        self.beta = nn.Parameter(torch.zeros([1, 1, dim]))
        self.color = nn.Parameter(torch.eye(dim))
        self.channel_first = channel_first

    def forward(self, x):
        if self.channel_first:
            x1 = torch.tensordot(x, self.color, dims=[[-1], [-1]])
            x2 = x1 * self.alpha + self.beta
        else:
            x1 = x * self.alpha + self.beta
            x2 = torch.tensordot(x1, self.color, dims=[[-1], [-1]])
        return x2


class Mlp(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class CMlp(nn.Module):
    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class CBlock_ln(nn.Module):
    def __init__(self, dim, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=Aff_channel, init_values=1e-4):
        super().__init__()
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = norm_layer(dim)
        self.conv1 = nn.Conv2d(dim, dim, 1)
        self.conv2 = nn.Conv2d(dim, dim, 1)
        self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.gamma_1 = nn.Parameter(init_values * torch.ones((1, dim, 1, 1)), requires_grad=True)
        self.gamma_2 = nn.Parameter(init_values * torch.ones((1, dim, 1, 1)), requires_grad=True)
        self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        B, C, H, W = x.shape
        norm_x = x.flatten(2).transpose(1, 2)
        norm_x = self.norm1(norm_x)
        norm_x = norm_x.view(B, H, W, C).permute(0, 3, 1, 2)

        x = x + self.drop_path(self.gamma_1 * self.conv2(self.attn(self.conv1(norm_x))))
        norm_x = x.flatten(2).transpose(1, 2)
        norm_x = self.norm2(norm_x)
        norm_x = norm_x.view(B, H, W, C).permute(0, 3, 1, 2)
        x = x + self.drop_path(self.gamma_2 * self.mlp(norm_x))
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        attn = self.softmax(attn)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


## Layer_norm, Aff_norm, Aff_channel_norm
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, num_heads=2, window_size=8, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=Aff_channel):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio

        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        B, C, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows)  # nW*B, window_size*window_size, C
|
259 |
-
|
260 |
-
# merge windows
|
261 |
-
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
|
262 |
-
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
|
263 |
-
|
264 |
-
x = shifted_x
|
265 |
-
x = x.view(B, H * W, C)
|
266 |
-
|
267 |
-
# FFN
|
268 |
-
x = shortcut + self.drop_path(x)
|
269 |
-
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
270 |
-
x = x.transpose(1, 2).reshape(B, C, H, W)
|
271 |
-
|
272 |
-
return x
|
273 |
-
|
274 |
-
|
275 |
-
if __name__ == "__main__":
|
276 |
-
os.environ['CUDA_VISIBLE_DEVICES']='1'
|
277 |
-
cb_blovk = CBlock_ln(dim = 16)
|
278 |
-
x = torch.Tensor(1, 16, 400, 600)
|
279 |
-
swin = SwinTransformerBlock(dim=16, num_heads=4)
|
280 |
-
x = cb_blovk(x)
|
281 |
-
print(x.shape)
|
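As an aside, `window_partition` and `window_reverse` are pure reshape/permute operations, so partitioning followed by reversing should reconstruct the input exactly. A minimal sanity-check sketch, assuming the definitions above are in scope:

    import torch

    x = torch.randn(2, 32, 32, 16)                # (B, H, W, C); H and W divisible by window_size
    windows = window_partition(x, 8)              # -> (num_windows * B, 8, 8, C)
    restored = window_reverse(windows, 8, 32, 32) # -> (B, H, W, C)
    assert torch.equal(x, restored)               # the round trip is lossless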
spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py
DELETED
@@ -1,402 +0,0 @@
kinetics_classnames = {
    "0": "riding a bike",
    "1": "marching",
    "2": "dodgeball",
    "3": "playing cymbals",
    "4": "checking tires",
    "5": "roller skating",
    "6": "tasting beer",
    "7": "clapping",
    "8": "drawing",
    "9": "juggling fire",
    "10": "bobsledding",
    "11": "petting animal (not cat)",
    "12": "spray painting",
    "13": "training dog",
    "14": "eating watermelon",
    "15": "building cabinet",
    "16": "applauding",
    "17": "playing harp",
    "18": "balloon blowing",
    "19": "sled dog racing",
    "20": "wrestling",
    "21": "pole vault",
    "22": "hurling (sport)",
    "23": "riding scooter",
    "24": "shearing sheep",
    "25": "sweeping floor",
    "26": "eating carrots",
    "27": "skateboarding",
    "28": "dunking basketball",
    "29": "disc golfing",
    "30": "eating spaghetti",
    "31": "playing flute",
    "32": "riding mechanical bull",
    "33": "making sushi",
    "34": "trapezing",
    "35": "picking fruit",
    "36": "stretching leg",
    "37": "playing ukulele",
    "38": "tying tie",
    "39": "skydiving",
    "40": "playing cello",
    "41": "jumping into pool",
    "42": "shooting goal (soccer)",
    "43": "trimming trees",
    "44": "bookbinding",
    "45": "ski jumping",
    "46": "walking the dog",
    "47": "riding unicycle",
    "48": "shaving head",
    "49": "hopscotch",
    "50": "playing piano",
    "51": "parasailing",
    "52": "bartending",
    "53": "kicking field goal",
    "54": "finger snapping",
    "55": "dining",
    "56": "yawning",
    "57": "peeling potatoes",
    "58": "canoeing or kayaking",
    "59": "front raises",
    "60": "laughing",
    "61": "dancing macarena",
    "62": "digging",
    "63": "reading newspaper",
    "64": "hitting baseball",
    "65": "clay pottery making",
    "66": "exercising with an exercise ball",
    "67": "playing saxophone",
    "68": "shooting basketball",
    "69": "washing hair",
    "70": "lunge",
    "71": "brushing hair",
    "72": "curling hair",
    "73": "kitesurfing",
    "74": "tapping guitar",
    "75": "bending back",
    "76": "skipping rope",
    "77": "situp",
    "78": "folding paper",
    "79": "cracking neck",
    "80": "assembling computer",
    "81": "cleaning gutters",
    "82": "blowing out candles",
    "83": "shaking hands",
    "84": "dancing gangnam style",
    "85": "windsurfing",
    "86": "tap dancing",
    "87": "skiing (not slalom or crosscountry)",
    "88": "bandaging",
    "89": "push up",
    "90": "doing nails",
    "91": "punching person (boxing)",
    "92": "bouncing on trampoline",
    "93": "scrambling eggs",
    "94": "singing",
    "95": "cleaning floor",
    "96": "krumping",
    "97": "drumming fingers",
    "98": "snowmobiling",
    "99": "gymnastics tumbling",
    "100": "headbanging",
    "101": "catching or throwing frisbee",
    "102": "riding elephant",
    "103": "bee keeping",
    "104": "feeding birds",
    "105": "snatch weight lifting",
    "106": "mowing lawn",
    "107": "fixing hair",
    "108": "playing trumpet",
    "109": "flying kite",
    "110": "crossing river",
    "111": "swinging legs",
    "112": "sanding floor",
    "113": "belly dancing",
    "114": "sneezing",
    "115": "clean and jerk",
    "116": "side kick",
    "117": "filling eyebrows",
    "118": "shuffling cards",
    "119": "recording music",
    "120": "cartwheeling",
    "121": "feeding fish",
    "122": "folding clothes",
    "123": "water skiing",
    "124": "tobogganing",
    "125": "blowing leaves",
    "126": "smoking",
    "127": "unboxing",
    "128": "tai chi",
    "129": "waxing legs",
    "130": "riding camel",
    "131": "slapping",
    "132": "tossing salad",
    "133": "capoeira",
    "134": "playing cards",
    "135": "playing organ",
    "136": "playing violin",
    "137": "playing drums",
    "138": "tapping pen",
    "139": "vault",
    "140": "shoveling snow",
    "141": "playing tennis",
    "142": "getting a tattoo",
    "143": "making a sandwich",
    "144": "making tea",
    "145": "grinding meat",
    "146": "squat",
    "147": "eating doughnuts",
    "148": "ice fishing",
    "149": "snowkiting",
    "150": "kicking soccer ball",
    "151": "playing controller",
    "152": "giving or receiving award",
    "153": "welding",
    "154": "throwing discus",
    "155": "throwing axe",
    "156": "ripping paper",
    "157": "swimming butterfly stroke",
    "158": "air drumming",
    "159": "blowing nose",
    "160": "hockey stop",
    "161": "taking a shower",
    "162": "bench pressing",
    "163": "planting trees",
    "164": "pumping fist",
    "165": "climbing tree",
    "166": "tickling",
    "167": "high kick",
    "168": "waiting in line",
    "169": "slacklining",
    "170": "tango dancing",
    "171": "hurdling",
    "172": "carrying baby",
    "173": "celebrating",
    "174": "sharpening knives",
    "175": "passing American football (in game)",
    "176": "headbutting",
    "177": "playing recorder",
    "178": "brush painting",
    "179": "garbage collecting",
    "180": "robot dancing",
    "181": "shredding paper",
    "182": "pumping gas",
    "183": "rock climbing",
    "184": "hula hooping",
    "185": "braiding hair",
    "186": "opening present",
    "187": "texting",
    "188": "decorating the christmas tree",
    "189": "answering questions",
    "190": "playing keyboard",
    "191": "writing",
    "192": "bungee jumping",
    "193": "sniffing",
    "194": "eating burger",
    "195": "playing accordion",
    "196": "making pizza",
    "197": "playing volleyball",
    "198": "tasting food",
    "199": "pushing cart",
    "200": "spinning poi",
    "201": "cleaning windows",
    "202": "arm wrestling",
    "203": "changing oil",
    "204": "swimming breast stroke",
    "205": "tossing coin",
    "206": "deadlifting",
    "207": "hoverboarding",
    "208": "cutting watermelon",
    "209": "cheerleading",
    "210": "snorkeling",
    "211": "washing hands",
    "212": "eating cake",
    "213": "pull ups",
    "214": "surfing water",
    "215": "eating hotdog",
    "216": "holding snake",
    "217": "playing harmonica",
    "218": "ironing",
    "219": "cutting nails",
    "220": "golf chipping",
    "221": "shot put",
    "222": "hugging",
    "223": "playing clarinet",
    "224": "faceplanting",
    "225": "trimming or shaving beard",
    "226": "drinking shots",
    "227": "riding mountain bike",
    "228": "tying bow tie",
    "229": "swinging on something",
    "230": "skiing crosscountry",
    "231": "unloading truck",
    "232": "cleaning pool",
    "233": "jogging",
    "234": "ice climbing",
    "235": "mopping floor",
    "236": "making bed",
    "237": "diving cliff",
    "238": "washing dishes",
    "239": "grooming dog",
    "240": "weaving basket",
    "241": "frying vegetables",
    "242": "stomping grapes",
    "243": "moving furniture",
    "244": "cooking sausages",
    "245": "doing laundry",
    "246": "dying hair",
    "247": "knitting",
    "248": "reading book",
    "249": "baby waking up",
    "250": "punching bag",
    "251": "surfing crowd",
    "252": "cooking chicken",
    "253": "pushing car",
    "254": "springboard diving",
    "255": "swing dancing",
    "256": "massaging legs",
    "257": "beatboxing",
    "258": "breading or breadcrumbing",
    "259": "somersaulting",
    "260": "brushing teeth",
    "261": "stretching arm",
    "262": "juggling balls",
    "263": "massaging person's head",
    "264": "eating ice cream",
    "265": "extinguishing fire",
    "266": "hammer throw",
    "267": "whistling",
    "268": "crawling baby",
    "269": "using remote controller (not gaming)",
    "270": "playing cricket",
    "271": "opening bottle",
    "272": "playing xylophone",
    "273": "motorcycling",
    "274": "driving car",
    "275": "exercising arm",
    "276": "passing American football (not in game)",
    "277": "playing kickball",
    "278": "sticking tongue out",
    "279": "flipping pancake",
    "280": "catching fish",
    "281": "eating chips",
    "282": "shaking head",
    "283": "sword fighting",
    "284": "playing poker",
    "285": "cooking on campfire",
    "286": "doing aerobics",
    "287": "paragliding",
    "288": "using segway",
    "289": "folding napkins",
    "290": "playing bagpipes",
    "291": "gargling",
    "292": "skiing slalom",
    "293": "strumming guitar",
    "294": "javelin throw",
    "295": "waxing back",
    "296": "riding or walking with horse",
    "297": "plastering",
    "298": "long jump",
    "299": "parkour",
    "300": "wrapping present",
    "301": "egg hunting",
    "302": "archery",
    "303": "cleaning toilet",
    "304": "swimming backstroke",
    "305": "snowboarding",
    "306": "catching or throwing baseball",
    "307": "massaging back",
    "308": "blowing glass",
    "309": "playing guitar",
    "310": "playing chess",
    "311": "golf driving",
    "312": "presenting weather forecast",
    "313": "rock scissors paper",
    "314": "high jump",
    "315": "baking cookies",
    "316": "using computer",
    "317": "washing feet",
    "318": "arranging flowers",
    "319": "playing bass guitar",
    "320": "spraying",
    "321": "cutting pineapple",
    "322": "waxing chest",
    "323": "auctioning",
    "324": "jetskiing",
    "325": "drinking",
    "326": "busking",
    "327": "playing monopoly",
    "328": "salsa dancing",
    "329": "waxing eyebrows",
    "330": "watering plants",
    "331": "zumba",
    "332": "chopping wood",
    "333": "pushing wheelchair",
    "334": "carving pumpkin",
    "335": "building shed",
    "336": "making jewelry",
    "337": "catching or throwing softball",
    "338": "bending metal",
    "339": "ice skating",
    "340": "dancing charleston",
    "341": "abseiling",
    "342": "climbing a rope",
    "343": "crying",
    "344": "cleaning shoes",
    "345": "dancing ballet",
    "346": "driving tractor",
    "347": "triple jump",
    "348": "throwing ball",
    "349": "getting a haircut",
    "350": "running on treadmill",
    "351": "climbing ladder",
    "352": "blasting sand",
    "353": "playing trombone",
    "354": "drop kicking",
    "355": "country line dancing",
    "356": "changing wheel",
    "357": "feeding goats",
    "358": "tying knot (not on a tie)",
    "359": "setting table",
    "360": "shaving legs",
    "361": "kissing",
    "362": "riding mule",
    "363": "counting money",
    "364": "laying bricks",
    "365": "barbequing",
    "366": "news anchoring",
    "367": "smoking hookah",
    "368": "cooking egg",
    "369": "peeling apples",
    "370": "yoga",
    "371": "sharpening pencil",
    "372": "dribbling basketball",
    "373": "petting cat",
    "374": "playing ice hockey",
    "375": "milking cow",
    "376": "shining shoes",
    "377": "juggling soccer ball",
    "378": "scuba diving",
    "379": "playing squash or racquetball",
    "380": "drinking beer",
    "381": "sign language interpreting",
    "382": "playing basketball",
    "383": "breakdancing",
    "384": "testifying",
    "385": "making snowman",
    "386": "golf putting",
    "387": "playing didgeridoo",
    "388": "biking through snow",
    "389": "sailing",
    "390": "jumpstyle dancing",
    "391": "water sliding",
    "392": "grooming horse",
    "393": "massaging feet",
    "394": "playing paintball",
    "395": "making a cake",
    "396": "bowling",
    "397": "contact juggling",
    "398": "applying cream",
    "399": "playing badminton"
}
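Note that the keys are strings of the integer class index, so a model's argmax has to be converted before lookup. A minimal sketch (the logits tensor is hypothetical):

    import torch

    logits = torch.randn(400)                   # hypothetical per-class scores from a video model
    pred_idx = int(logits.argmax())
    label = kinetics_classnames[str(pred_idx)]  # keys are strings, not ints
    print(pred_idx, label)                      # e.g. 30 -> "eating spaghetti"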
spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py
DELETED
@@ -1,56 +0,0 @@
from __future__ import annotations
import logging

from llama_index import Prompt
from typing import List, Tuple
import mdtex2html

from modules.presets import *
from modules.llama_func import *


def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
    logging.debug("Compacting text chunks...🚀🚀🚀")
    combined_str = [c.strip() for c in text_chunks if c.strip()]
    combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
    combined_str = "\n\n".join(combined_str)
    # resplit based on self.max_chunk_overlap
    text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
    return text_splitter.split_text(combined_str)


def postprocess(
    self, y: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
    """
    Parameters:
        y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
    Returns:
        List of tuples representing the message and response. Each message and response will be a string of HTML.
    """
    if y is None or y == []:
        return []
    user, bot = y[-1]
    if not detect_converted_mark(user):
        user = convert_asis(user)
    if not detect_converted_mark(bot):
        bot = convert_mdtext(bot)
    y[-1] = (user, bot)
    return y

with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
    customJS = f.read()
    kelpyCodos = f2.read()

def reload_javascript():
    print("Reloading javascript...")
    js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
    def template_response(*args, **kwargs):
        res = GradioTemplateResponseOriginal(*args, **kwargs)
        res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response

GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
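These functions take a `self` parameter because they are written as unbound methods, meant to be monkey-patched onto library classes by the importing app. A minimal sketch of the pattern; the exact patch targets below are hypothetical, since the real patch site lives elsewhere in the app:

    import gradio as gr

    # Hypothetical patch site: swap in the postprocess override defined above,
    # then inject the custom JS into every rendered page.
    gr.Chatbot.postprocess = postprocess
    reload_javascript()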
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euctwprober.py
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
######################## BEGIN LICENSE BLOCK ########################
|
2 |
-
# The Original Code is mozilla.org code.
|
3 |
-
#
|
4 |
-
# The Initial Developer of the Original Code is
|
5 |
-
# Netscape Communications Corporation.
|
6 |
-
# Portions created by the Initial Developer are Copyright (C) 1998
|
7 |
-
# the Initial Developer. All Rights Reserved.
|
8 |
-
#
|
9 |
-
# Contributor(s):
|
10 |
-
# Mark Pilgrim - port to Python
|
11 |
-
#
|
12 |
-
# This library is free software; you can redistribute it and/or
|
13 |
-
# modify it under the terms of the GNU Lesser General Public
|
14 |
-
# License as published by the Free Software Foundation; either
|
15 |
-
# version 2.1 of the License, or (at your option) any later version.
|
16 |
-
#
|
17 |
-
# This library is distributed in the hope that it will be useful,
|
18 |
-
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
19 |
-
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
20 |
-
# Lesser General Public License for more details.
|
21 |
-
#
|
22 |
-
# You should have received a copy of the GNU Lesser General Public
|
23 |
-
# License along with this library; if not, write to the Free Software
|
24 |
-
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
25 |
-
# 02110-1301 USA
|
26 |
-
######################### END LICENSE BLOCK #########################
|
27 |
-
|
28 |
-
from .chardistribution import EUCTWDistributionAnalysis
|
29 |
-
from .codingstatemachine import CodingStateMachine
|
30 |
-
from .mbcharsetprober import MultiByteCharSetProber
|
31 |
-
from .mbcssm import EUCTW_SM_MODEL
|
32 |
-
|
33 |
-
|
34 |
-
class EUCTWProber(MultiByteCharSetProber):
|
35 |
-
def __init__(self) -> None:
|
36 |
-
super().__init__()
|
37 |
-
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
|
38 |
-
self.distribution_analyzer = EUCTWDistributionAnalysis()
|
39 |
-
self.reset()
|
40 |
-
|
41 |
-
@property
|
42 |
-
def charset_name(self) -> str:
|
43 |
-
return "EUC-TW"
|
44 |
-
|
45 |
-
@property
|
46 |
-
def language(self) -> str:
|
47 |
-
return "Taiwan"
|
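A minimal usage sketch, assuming the standard chardet prober interface (this file is pip's vendored copy; in normal use you would import from the standalone chardet package). The prober is fed raw bytes and reports a confidence that they are EUC-TW encoded:

    from chardet.euctwprober import EUCTWProber

    prober = EUCTWProber()
    prober.feed(b"plain ASCII is also valid EUC-TW")  # feed the raw bytes under test
    print(prober.charset_name)                        # "EUC-TW"
    print(prober.get_confidence())                    # confidence in [0, 1]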
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/__init__.py
DELETED
@@ -1,49 +0,0 @@
from __future__ import absolute_import

# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
from .response import is_fp_closed
from .retry import Retry
from .ssl_ import (
    ALPN_PROTOCOLS,
    HAS_SNI,
    IS_PYOPENSSL,
    IS_SECURETRANSPORT,
    PROTOCOL_TLS,
    SSLContext,
    assert_fingerprint,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .timeout import Timeout, current_time
from .url import Url, get_host, parse_url, split_first
from .wait import wait_for_read, wait_for_write

__all__ = (
    "HAS_SNI",
    "IS_PYOPENSSL",
    "IS_SECURETRANSPORT",
    "SSLContext",
    "PROTOCOL_TLS",
    "ALPN_PROTOCOLS",
    "Retry",
    "Timeout",
    "Url",
    "assert_fingerprint",
    "current_time",
    "is_connection_dropped",
    "is_fp_closed",
    "get_host",
    "parse_url",
    "make_headers",
    "resolve_cert_reqs",
    "resolve_ssl_version",
    "split_first",
    "ssl_wrap_socket",
    "wait_for_read",
    "wait_for_write",
    "SKIP_HEADER",
    "SKIPPABLE_HEADERS",
)
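These re-exports are convenience aliases for code that imported from `urllib3.util` directly. For example, `parse_url` splits a URL into a `Url` tuple of its components (a small sketch using the public urllib3 API):

    from urllib3.util import parse_url

    url = parse_url("https://example.com:8042/over/there?name=ferret")
    print(url.scheme, url.host, url.port, url.path, url.query)
    # https example.com 8042 /over/there name=ferret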
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py
DELETED
@@ -1,620 +0,0 @@
"""Imported from the recipes section of the itertools documentation.

All functions taken from the recipes section of the itertools library docs
[1]_.
Some backward-compatible usability improvements have been made.

.. [1] http://docs.python.org/library/itertools.html#recipes

"""
import warnings
from collections import deque
from itertools import (
    chain,
    combinations,
    count,
    cycle,
    groupby,
    islice,
    repeat,
    starmap,
    tee,
    zip_longest,
)
import operator
from random import randrange, sample, choice

__all__ = [
    'all_equal',
    'consume',
    'convolve',
    'dotproduct',
    'first_true',
    'flatten',
    'grouper',
    'iter_except',
    'ncycles',
    'nth',
    'nth_combination',
    'padnone',
    'pad_none',
    'pairwise',
    'partition',
    'powerset',
    'prepend',
    'quantify',
    'random_combination_with_replacement',
    'random_combination',
    'random_permutation',
    'random_product',
    'repeatfunc',
    'roundrobin',
    'tabulate',
    'tail',
    'take',
    'unique_everseen',
    'unique_justseen',
]


def take(n, iterable):
    """Return first *n* items of the iterable as a list.

        >>> take(3, range(10))
        [0, 1, 2]

    If there are fewer than *n* items in the iterable, all of them are
    returned.

        >>> take(10, range(3))
        [0, 1, 2]

    """
    return list(islice(iterable, n))


def tabulate(function, start=0):
    """Return an iterator over the results of ``func(start)``,
    ``func(start + 1)``, ``func(start + 2)``...

    *func* should be a function that accepts one integer argument.

    If *start* is not specified it defaults to 0. It will be incremented each
    time the iterator is advanced.

        >>> square = lambda x: x ** 2
        >>> iterator = tabulate(square, -3)
        >>> take(4, iterator)
        [9, 4, 1, 0]

    """
    return map(function, count(start))


def tail(n, iterable):
    """Return an iterator over the last *n* items of *iterable*.

    >>> t = tail(3, 'ABCDEFG')
    >>> list(t)
    ['E', 'F', 'G']

    """
    return iter(deque(iterable, maxlen=n))


def consume(iterator, n=None):
    """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
    entirely.

    Efficiently exhausts an iterator without returning values. Defaults to
    consuming the whole iterator, but an optional second argument may be
    provided to limit consumption.

        >>> i = (x for x in range(10))
        >>> next(i)
        0
        >>> consume(i, 3)
        >>> next(i)
        4
        >>> consume(i)
        >>> next(i)
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        StopIteration

    If the iterator has fewer items remaining than the provided limit, the
    whole iterator will be consumed.

        >>> i = (x for x in range(3))
        >>> consume(i, 5)
        >>> next(i)
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        StopIteration

    """
    # Use functions that consume iterators at C speed.
    if n is None:
        # feed the entire iterator into a zero-length deque
        deque(iterator, maxlen=0)
    else:
        # advance to the empty slice starting at position n
        next(islice(iterator, n, n), None)


def nth(iterable, n, default=None):
    """Returns the nth item or a default value.

    >>> l = range(10)
    >>> nth(l, 3)
    3
    >>> nth(l, 20, "zebra")
    'zebra'

    """
    return next(islice(iterable, n, None), default)


def all_equal(iterable):
    """
    Returns ``True`` if all the elements are equal to each other.

        >>> all_equal('aaaa')
        True
        >>> all_equal('aaab')
        False

    """
    g = groupby(iterable)
    return next(g, True) and not next(g, False)


def quantify(iterable, pred=bool):
    """Return how many times the predicate is true.

    >>> quantify([True, False, True])
    2

    """
    return sum(map(pred, iterable))


def pad_none(iterable):
    """Returns the sequence of elements and then returns ``None`` indefinitely.

        >>> take(5, pad_none(range(3)))
        [0, 1, 2, None, None]

    Useful for emulating the behavior of the built-in :func:`map` function.

    See also :func:`padded`.

    """
    return chain(iterable, repeat(None))


padnone = pad_none


def ncycles(iterable, n):
    """Returns the sequence elements *n* times

    >>> list(ncycles(["a", "b"], 3))
    ['a', 'b', 'a', 'b', 'a', 'b']

    """
    return chain.from_iterable(repeat(tuple(iterable), n))


def dotproduct(vec1, vec2):
    """Returns the dot product of the two iterables.

    >>> dotproduct([10, 10], [20, 20])
    400

    """
    return sum(map(operator.mul, vec1, vec2))


def flatten(listOfLists):
    """Return an iterator flattening one level of nesting in a list of lists.

        >>> list(flatten([[0, 1], [2, 3]]))
        [0, 1, 2, 3]

    See also :func:`collapse`, which can flatten multiple levels of nesting.

    """
    return chain.from_iterable(listOfLists)


def repeatfunc(func, times=None, *args):
    """Call *func* with *args* repeatedly, returning an iterable over the
    results.

    If *times* is specified, the iterable will terminate after that many
    repetitions:

        >>> from operator import add
        >>> times = 4
        >>> args = 3, 5
        >>> list(repeatfunc(add, times, *args))
        [8, 8, 8, 8]

    If *times* is ``None`` the iterable will not terminate:

        >>> from random import randrange
        >>> times = None
        >>> args = 1, 11
        >>> take(6, repeatfunc(randrange, times, *args))  # doctest:+SKIP
        [2, 4, 8, 1, 8, 4]

    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))


def _pairwise(iterable):
    """Returns an iterator of paired items, overlapping, from the original

    >>> take(4, pairwise(count()))
    [(0, 1), (1, 2), (2, 3), (3, 4)]

    On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.

    """
    a, b = tee(iterable)
    next(b, None)
    yield from zip(a, b)


try:
    from itertools import pairwise as itertools_pairwise
except ImportError:
    pairwise = _pairwise
else:

    def pairwise(iterable):
        yield from itertools_pairwise(iterable)

    pairwise.__doc__ = _pairwise.__doc__


def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    >>> list(grouper('ABCDEFG', 3, 'x'))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]

    """
    if isinstance(iterable, int):
        warnings.warn(
            "grouper expects iterable as first parameter", DeprecationWarning
        )
        n, iterable = iterable, n
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)


def roundrobin(*iterables):
    """Yields an item from each iterable, alternating between them.

        >>> list(roundrobin('ABC', 'D', 'EF'))
        ['A', 'D', 'E', 'B', 'F', 'C']

    This function produces the same output as :func:`interleave_longest`, but
    may perform better for some inputs (in particular when the number of
    iterables is small).

    """
    # Recipe credited to George Sakkis
    pending = len(iterables)
    nexts = cycle(iter(it).__next__ for it in iterables)
    while pending:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            pending -= 1
            nexts = cycle(islice(nexts, pending))


def partition(pred, iterable):
    """
    Returns a 2-tuple of iterables derived from the input iterable.
    The first yields the items that have ``pred(item) == False``.
    The second yields the items that have ``pred(item) == True``.

        >>> is_odd = lambda x: x % 2 != 0
        >>> iterable = range(10)
        >>> even_items, odd_items = partition(is_odd, iterable)
        >>> list(even_items), list(odd_items)
        ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])

    If *pred* is None, :func:`bool` is used.

        >>> iterable = [0, 1, False, True, '', ' ']
        >>> false_items, true_items = partition(None, iterable)
        >>> list(false_items), list(true_items)
        ([0, False, ''], [1, True, ' '])

    """
    if pred is None:
        pred = bool

    evaluations = ((pred(x), x) for x in iterable)
    t1, t2 = tee(evaluations)
    return (
        (x for (cond, x) in t1 if not cond),
        (x for (cond, x) in t2 if cond),
    )


def powerset(iterable):
    """Yields all possible subsets of the iterable.

        >>> list(powerset([1, 2, 3]))
        [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]

    :func:`powerset` will operate on iterables that aren't :class:`set`
    instances, so repeated elements in the input will produce repeated elements
    in the output. Use :func:`unique_everseen` on the input to avoid generating
    duplicates:

        >>> seq = [1, 1, 0]
        >>> list(powerset(seq))
        [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
        >>> from more_itertools import unique_everseen
        >>> list(powerset(unique_everseen(seq)))
        [(), (1,), (0,), (1, 0)]

    """
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))


def unique_everseen(iterable, key=None):
    """
    Yield unique elements, preserving order.

        >>> list(unique_everseen('AAAABBBCCDAABBB'))
        ['A', 'B', 'C', 'D']
        >>> list(unique_everseen('ABBCcAD', str.lower))
        ['A', 'B', 'C', 'D']

    Sequences with a mix of hashable and unhashable items can be used.
    The function will be slower (i.e., `O(n^2)`) for unhashable items.

    Remember that ``list`` objects are unhashable - you can use the *key*
    parameter to transform the list to a tuple (which is hashable) to
    avoid a slowdown.

        >>> iterable = ([1, 2], [2, 3], [1, 2])
        >>> list(unique_everseen(iterable))  # Slow
        [[1, 2], [2, 3]]
        >>> list(unique_everseen(iterable, key=tuple))  # Faster
        [[1, 2], [2, 3]]

    Similarly, you may want to convert unhashable ``set`` objects with
    ``key=frozenset``. For ``dict`` objects,
    ``key=lambda x: frozenset(x.items())`` can be used.

    """
    seenset = set()
    seenset_add = seenset.add
    seenlist = []
    seenlist_add = seenlist.append
    use_key = key is not None

    for element in iterable:
        k = key(element) if use_key else element
        try:
            if k not in seenset:
                seenset_add(k)
                yield element
        except TypeError:
            if k not in seenlist:
                seenlist_add(k)
                yield element


def unique_justseen(iterable, key=None):
    """Yields elements in order, ignoring serial duplicates

    >>> list(unique_justseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> list(unique_justseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'A', 'D']

    """
    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))


def iter_except(func, exception, first=None):
    """Yields results from a function repeatedly until an exception is raised.

    Converts a call-until-exception interface to an iterator interface.
    Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
    to end the loop.

        >>> l = [0, 1, 2]
        >>> list(iter_except(l.pop, IndexError))
        [2, 1, 0]

    """
    try:
        if first is not None:
            yield first()
        while 1:
            yield func()
    except exception:
        pass


def first_true(iterable, default=None, pred=None):
    """
    Returns the first true value in the iterable.

    If no true value is found, returns *default*

    If *pred* is not None, returns the first item for which
    ``pred(item) == True`` .

        >>> first_true(range(10))
        1
        >>> first_true(range(10), pred=lambda x: x > 5)
        6
        >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
        'missing'

    """
    return next(filter(pred, iterable), default)


def random_product(*args, repeat=1):
    """Draw an item at random from each of the input iterables.

        >>> random_product('abc', range(4), 'XYZ')  # doctest:+SKIP
        ('c', 3, 'Z')

    If *repeat* is provided as a keyword argument, that many items will be
    drawn from each iterable.

        >>> random_product('abcd', range(4), repeat=2)  # doctest:+SKIP
        ('a', 2, 'd', 3)

    This is equivalent to taking a random selection from
    ``itertools.product(*args, **kwarg)``.

    """
    pools = [tuple(pool) for pool in args] * repeat
    return tuple(choice(pool) for pool in pools)


def random_permutation(iterable, r=None):
    """Return a random *r* length permutation of the elements in *iterable*.

    If *r* is not specified or is ``None``, then *r* defaults to the length of
    *iterable*.

        >>> random_permutation(range(5))  # doctest:+SKIP
        (3, 4, 0, 1, 2)

    This is equivalent to taking a random selection from
    ``itertools.permutations(iterable, r)``.

    """
    pool = tuple(iterable)
    r = len(pool) if r is None else r
    return tuple(sample(pool, r))


def random_combination(iterable, r):
    """Return a random *r* length subsequence of the elements in *iterable*.

        >>> random_combination(range(5), 3)  # doctest:+SKIP
        (2, 3, 4)

    This is equivalent to taking a random selection from
    ``itertools.combinations(iterable, r)``.

    """
    pool = tuple(iterable)
    n = len(pool)
    indices = sorted(sample(range(n), r))
    return tuple(pool[i] for i in indices)


def random_combination_with_replacement(iterable, r):
    """Return a random *r* length subsequence of elements in *iterable*,
    allowing individual elements to be repeated.

        >>> random_combination_with_replacement(range(3), 5)  # doctest:+SKIP
        (0, 0, 1, 2, 2)

    This is equivalent to taking a random selection from
    ``itertools.combinations_with_replacement(iterable, r)``.

    """
    pool = tuple(iterable)
    n = len(pool)
    indices = sorted(randrange(n) for i in range(r))
    return tuple(pool[i] for i in indices)


def nth_combination(iterable, r, index):
    """Equivalent to ``list(combinations(iterable, r))[index]``.

    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`nth_combination` computes the subsequence at
    sort position *index* directly, without computing the previous
    subsequences.

        >>> nth_combination(range(5), 3, 5)
        (0, 3, 4)

    ``ValueError`` will be raised if *r* is negative or greater than the length
    of *iterable*.
    ``IndexError`` will be raised if the given *index* is invalid.
    """
    pool = tuple(iterable)
    n = len(pool)
    if (r < 0) or (r > n):
        raise ValueError

    c = 1
    k = min(r, n - r)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i

    if index < 0:
        index += c

    if (index < 0) or (index >= c):
        raise IndexError

    result = []
    while r:
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])

    return tuple(result)


def prepend(value, iterator):
    """Yield *value*, followed by the elements in *iterator*.

        >>> value = '0'
        >>> iterator = ['1', '2', '3']
        >>> list(prepend(value, iterator))
        ['0', '1', '2', '3']

    To prepend multiple values, see :func:`itertools.chain`
    or :func:`value_chain`.

    """
    return chain([value], iterator)


def convolve(signal, kernel):
    """Convolve the iterable *signal* with the iterable *kernel*.

        >>> signal = (1, 2, 3, 4, 5)
        >>> kernel = [3, 2, 1]
        >>> list(convolve(signal, kernel))
        [3, 8, 14, 20, 26, 14, 5]

    Note: the input arguments are not interchangeable, as the *kernel*
    is immediately consumed and stored.

    """
    kernel = tuple(kernel)[::-1]
    n = len(kernel)
    window = deque([0], maxlen=n) * n
    for x in chain(signal, repeat(0, n - 1)):
        window.append(x)
        yield sum(map(operator.mul, kernel, window))
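`convolve` streams the signal through a fixed kernel, which makes it handy for running statistics. For instance, a three-point moving average is just a kernel whose weights sum to 1 (a small sketch using the recipe above):

    signal = [1, 2, 3, 4, 5, 6]
    kernel = [1 / 3] * 3
    print(list(convolve(signal, kernel)))
    # The output has len(signal) + len(kernel) - 1 values and includes edge
    # effects at both ends, e.g. the first value is 1/3 (window padded with zeros).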
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py
DELETED
@@ -1,331 +0,0 @@
# testing.py

from contextlib import contextmanager
import typing

from .core import (
    ParserElement,
    ParseException,
    Keyword,
    __diag__,
    __compat__,
)


class pyparsing_test:
    """
    namespace class for classes useful in writing unit tests
    """

    class reset_pyparsing_context:
        """
        Context manager to be used when writing unit tests that modify pyparsing config values:
        - packrat parsing
        - bounded recursion parsing
        - default whitespace characters.
        - default keyword characters
        - literal string auto-conversion class
        - __diag__ settings

        Example::

            with reset_pyparsing_context():
                # test that literals used to construct a grammar are automatically suppressed
                ParserElement.inlineLiteralsUsing(Suppress)

                term = Word(alphas) | Word(nums)
                group = Group('(' + term[...] + ')')

                # assert that the '()' characters are not included in the parsed tokens
                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])

            # after exiting context manager, literals are converted to Literal expressions again
        """

        def __init__(self):
            self._save_context = {}

        def save(self):
            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS

            self._save_context[
                "literal_string_class"
            ] = ParserElement._literalStringClass

            self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace

            self._save_context["packrat_enabled"] = ParserElement._packratEnabled
            if ParserElement._packratEnabled:
                self._save_context[
                    "packrat_cache_size"
                ] = ParserElement.packrat_cache.size
            else:
                self._save_context["packrat_cache_size"] = None
            self._save_context["packrat_parse"] = ParserElement._parse
            self._save_context[
                "recursion_enabled"
            ] = ParserElement._left_recursion_enabled

            self._save_context["__diag__"] = {
                name: getattr(__diag__, name) for name in __diag__._all_names
            }

            self._save_context["__compat__"] = {
                "collect_all_And_tokens": __compat__.collect_all_And_tokens
            }

            return self

        def restore(self):
            # reset pyparsing global state
            if (
                ParserElement.DEFAULT_WHITE_CHARS
                != self._save_context["default_whitespace"]
            ):
                ParserElement.set_default_whitespace_chars(
                    self._save_context["default_whitespace"]
                )

            ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]

            Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
            ParserElement.inlineLiteralsUsing(
                self._save_context["literal_string_class"]
            )

            for name, value in self._save_context["__diag__"].items():
                (__diag__.enable if value else __diag__.disable)(name)

            ParserElement._packratEnabled = False
            if self._save_context["packrat_enabled"]:
                ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
            else:
                ParserElement._parse = self._save_context["packrat_parse"]
            ParserElement._left_recursion_enabled = self._save_context[
                "recursion_enabled"
            ]

            __compat__.collect_all_And_tokens = self._save_context["__compat__"]

            return self

        def copy(self):
            ret = type(self)()
            ret._save_context.update(self._save_context)
            return ret

        def __enter__(self):
            return self.save()

        def __exit__(self, *args):
            self.restore()

    class TestParseResultsAsserts:
        """
        A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
        """

        def assertParseResultsEquals(
            self, result, expected_list=None, expected_dict=None, msg=None
        ):
            """
            Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
            and compare any defined results names with an optional ``expected_dict``.
            """
            if expected_list is not None:
                self.assertEqual(expected_list, result.as_list(), msg=msg)
            if expected_dict is not None:
                self.assertEqual(expected_dict, result.as_dict(), msg=msg)

        def assertParseAndCheckList(
            self, expr, test_string, expected_list, msg=None, verbose=True
        ):
            """
            Convenience wrapper assert to test a parser element and input string, and assert that
            the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
            """
            result = expr.parse_string(test_string, parse_all=True)
            if verbose:
                print(result.dump())
            else:
                print(result.as_list())
            self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)

        def assertParseAndCheckDict(
            self, expr, test_string, expected_dict, msg=None, verbose=True
        ):
            """
            Convenience wrapper assert to test a parser element and input string, and assert that
            the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
            """
            result = expr.parse_string(test_string, parseAll=True)
            if verbose:
                print(result.dump())
            else:
                print(result.as_list())
            self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)

        def assertRunTestResults(
            self, run_tests_report, expected_parse_results=None, msg=None
        ):
            """
            Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
            list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
            with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
            Finally, asserts that the overall ``runTests()`` success value is ``True``.

            :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
            :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
            """
            run_test_success, run_test_results = run_tests_report

            if expected_parse_results is not None:
                merged = [
                    (*rpt, expected)
                    for rpt, expected in zip(run_test_results, expected_parse_results)
                ]
                for test_string, result, expected in merged:
                    # expected should be a tuple containing a list and/or a dict or an exception,
                    # and optional failure message string
                    # an empty tuple will skip any result validation
                    fail_msg = next(
                        (exp for exp in expected if isinstance(exp, str)), None
                    )
                    expected_exception = next(
                        (
                            exp
                            for exp in expected
                            if isinstance(exp, type) and issubclass(exp, Exception)
                        ),
                        None,
                    )
                    if expected_exception is not None:
                        with self.assertRaises(
                            expected_exception=expected_exception, msg=fail_msg or msg
                        ):
                            if isinstance(result, Exception):
                                raise result
                    else:
                        expected_list = next(
                            (exp for exp in expected if isinstance(exp, list)), None
                        )
                        expected_dict = next(
                            (exp for exp in expected if isinstance(exp, dict)), None
                        )
                        if (expected_list, expected_dict) != (None, None):
                            self.assertParseResultsEquals(
                                result,
                                expected_list=expected_list,
                                expected_dict=expected_dict,
                                msg=fail_msg or msg,
                            )
                        else:
                            # warning here maybe?
                            print("no validation for {!r}".format(test_string))

            # do this last, in case some specific test results can be reported instead
            self.assertTrue(
                run_test_success, msg=msg if msg is not None else "failed runTests"
            )

        @contextmanager
        def assertRaisesParseException(self, exc_type=ParseException, msg=None):
            with self.assertRaises(exc_type, msg=msg):
                yield

        @staticmethod
        def with_line_numbers(
            s: str,
            start_line: typing.Optional[int] = None,
            end_line: typing.Optional[int] = None,
            expand_tabs: bool = True,
            eol_mark: str = "|",
            mark_spaces: typing.Optional[str] = None,
            mark_control: typing.Optional[str] = None,
        ) -> str:
            """
            Helpful method for debugging a parser - prints a string with line and column numbers.
            (Line and column numbers are 1-based.)

            :param s: str - string to be printed with line and column numbers
            :param start_line: int - (optional) starting line number in s to print (default=1)
            :param end_line: int - (optional) ending line number in s to print (default=len(s))
            :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
            :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
:param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
|
256 |
-
:param mark_spaces: str - (optional) special character to display in place of spaces
|
257 |
-
:param mark_control: str - (optional) convert non-printing control characters to a placeholding
|
258 |
-
character; valid values:
|
259 |
-
- "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
|
260 |
-
- any single character string - replace control characters with given string
|
261 |
-
- None (default) - string is displayed as-is
|
262 |
-
|
263 |
-
:return: str - input string with leading line numbers and column number headers
|
264 |
-
"""
|
265 |
-
if expand_tabs:
|
266 |
-
s = s.expandtabs()
|
267 |
-
if mark_control is not None:
|
268 |
-
if mark_control == "unicode":
|
269 |
-
tbl = str.maketrans(
|
270 |
-
{c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
|
271 |
-
| {127: 0x2421}
|
272 |
-
)
|
273 |
-
eol_mark = ""
|
274 |
-
else:
|
275 |
-
tbl = str.maketrans(
|
276 |
-
{c: mark_control for c in list(range(0, 32)) + [127]}
|
277 |
-
)
|
278 |
-
s = s.translate(tbl)
|
279 |
-
if mark_spaces is not None and mark_spaces != " ":
|
280 |
-
if mark_spaces == "unicode":
|
281 |
-
tbl = str.maketrans({9: 0x2409, 32: 0x2423})
|
282 |
-
s = s.translate(tbl)
|
283 |
-
else:
|
284 |
-
s = s.replace(" ", mark_spaces)
|
285 |
-
if start_line is None:
|
286 |
-
start_line = 1
|
287 |
-
if end_line is None:
|
288 |
-
end_line = len(s)
|
289 |
-
end_line = min(end_line, len(s))
|
290 |
-
start_line = min(max(1, start_line), end_line)
|
291 |
-
|
292 |
-
if mark_control != "unicode":
|
293 |
-
s_lines = s.splitlines()[start_line - 1 : end_line]
|
294 |
-
else:
|
295 |
-
s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
|
296 |
-
if not s_lines:
|
297 |
-
return ""
|
298 |
-
|
299 |
-
lineno_width = len(str(end_line))
|
300 |
-
max_line_len = max(len(line) for line in s_lines)
|
301 |
-
lead = " " * (lineno_width + 1)
|
302 |
-
if max_line_len >= 99:
|
303 |
-
header0 = (
|
304 |
-
lead
|
305 |
-
+ "".join(
|
306 |
-
"{}{}".format(" " * 99, (i + 1) % 100)
|
307 |
-
for i in range(max(max_line_len // 100, 1))
|
308 |
-
)
|
309 |
-
+ "\n"
|
310 |
-
)
|
311 |
-
else:
|
312 |
-
header0 = ""
|
313 |
-
header1 = (
|
314 |
-
header0
|
315 |
-
+ lead
|
316 |
-
+ "".join(
|
317 |
-
" {}".format((i + 1) % 10)
|
318 |
-
for i in range(-(-max_line_len // 10))
|
319 |
-
)
|
320 |
-
+ "\n"
|
321 |
-
)
|
322 |
-
header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
|
323 |
-
return (
|
324 |
-
header1
|
325 |
-
+ header2
|
326 |
-
+ "\n".join(
|
327 |
-
"{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
|
328 |
-
for i, line in enumerate(s_lines, start=start_line)
|
329 |
-
)
|
330 |
-
+ "\n"
|
331 |
-
)
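
For reference, a minimal usage sketch of the test helpers deleted above, assuming pyparsing 3.x is installed; the grammar and test-case names are invented for illustration:

import unittest

import pyparsing as pp
from pyparsing import pyparsing_test as ppt


class TestIntegerList(ppt.TestParseResultsAsserts, unittest.TestCase):
    def test_parse_list(self):
        # hypothetical grammar, defined only for this example
        integer_list = pp.delimited_list(pp.common.integer)
        # asserts that ParseResults.as_list() equals the expected list
        self.assertParseAndCheckList(integer_list, "1, 2, 3", [1, 2, 3])


# with_line_numbers() renders a string with 1-based line/column headers,
# which is handy when locating a failed parse:
print(ppt.with_line_numbers("abc\ndef"))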
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py
DELETED
@@ -1,14 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from fvcore.transforms.transform import Transform, TransformList  # order them first
-from fvcore.transforms.transform import *
-from .transform import *
-from .augmentation import *
-from .augmentation_impl import *
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
-
-
-from detectron2.utils.env import fixup_module_metadata
-
-fixup_module_metadata(__name__, globals(), __all__)
-del fixup_module_metadata
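
Since the stub above only re-exported names, here is a short sketch of the import pattern it enabled; the augmentation classes come from detectron2's public API rather than from this file:

# both fvcore transforms and detectron2 augmentations were reachable
# through the single detectron2.data.transforms namespace
from detectron2.data import transforms as T

augs = T.AugmentationList([
    T.ResizeShortestEdge(short_edge_length=800, max_size=1333),
    T.RandomFlip(prob=0.5),
])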
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py
DELETED
@@ -1,110 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from __future__ import division
-from typing import Any, List, Tuple
-import torch
-from torch import device
-from torch.nn import functional as F
-
-from detectron2.layers.wrappers import shapes_to_tensor
-
-
-class ImageList(object):
-    """
-    Structure that holds a list of images (of possibly
-    varying sizes) as a single tensor.
-    This works by padding the images to the same size.
-    The original sizes of each image is stored in `image_sizes`.
-
-    Attributes:
-        image_sizes (list[tuple[int, int]]): each tuple is (h, w).
-            During tracing, it becomes list[Tensor] instead.
-    """
-
-    def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
-        """
-        Arguments:
-            tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
-            image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
-                be smaller than (H, W) due to padding.
-        """
-        self.tensor = tensor
-        self.image_sizes = image_sizes
-
-    def __len__(self) -> int:
-        return len(self.image_sizes)
-
-    def __getitem__(self, idx) -> torch.Tensor:
-        """
-        Access the individual image in its original size.
-
-        Args:
-            idx: int or slice
-
-        Returns:
-            Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
-        """
-        size = self.image_sizes[idx]
-        return self.tensor[idx, ..., : size[0], : size[1]]
-
-    @torch.jit.unused
-    def to(self, *args: Any, **kwargs: Any) -> "ImageList":
-        cast_tensor = self.tensor.to(*args, **kwargs)
-        return ImageList(cast_tensor, self.image_sizes)
-
-    @property
-    def device(self) -> device:
-        return self.tensor.device
-
-    @staticmethod
-    def from_tensors(
-        tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
-    ) -> "ImageList":
-        """
-        Args:
-            tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
-                (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
-                to the same shape with `pad_value`.
-            size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
-                the common height and width is divisible by `size_divisibility`.
-                This depends on the model and many models need a divisibility of 32.
-            pad_value (float): value to pad
-
-        Returns:
-            an `ImageList`.
-        """
-        assert len(tensors) > 0
-        assert isinstance(tensors, (tuple, list))
-        for t in tensors:
-            assert isinstance(t, torch.Tensor), type(t)
-            assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
-
-        image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
-        image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]
-        max_size = torch.stack(image_sizes_tensor).max(0).values
-
-        if size_divisibility > 1:
-            stride = size_divisibility
-            # the last two dims are H,W, both subject to divisibility requirement
-            max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride
-
-        # handle weirdness of scripting and tracing ...
-        if torch.jit.is_scripting():
-            max_size: List[int] = max_size.to(dtype=torch.long).tolist()
-        else:
-            if torch.jit.is_tracing():
-                image_sizes = image_sizes_tensor
-
-        if len(tensors) == 1:
-            # This seems slightly (2%) faster.
-            # TODO: check whether it's faster for multiple images as well
-            image_size = image_sizes[0]
-            padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
-            batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
-        else:
-            # max_size can be a tensor in tracing mode, therefore convert to list
-            batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
-            batched_imgs = tensors[0].new_full(batch_shape, pad_value)
-            for img, pad_img in zip(tensors, batched_imgs):
-                pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)
-
-        return ImageList(batched_imgs.contiguous(), image_sizes)
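
A minimal sketch of how the class above batches variable-sized images; the tensor shapes are arbitrary examples:

import torch
from detectron2.structures import ImageList

imgs = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
batch = ImageList.from_tensors(imgs, size_divisibility=32, pad_value=0.0)

print(batch.tensor.shape)  # common padded shape, H and W divisible by 32
print(batch.image_sizes)   # original (h, w) per image: [(480, 640), (512, 512)]
print(batch[0].shape)      # first image cropped back to (3, 480, 640)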
spaces/BatuhanYilmaz/Youtube-Transcriber/utils.py
DELETED
@@ -1,115 +0,0 @@
-import textwrap
-import unicodedata
-import re
-
-import zlib
-from typing import Iterator, TextIO
-
-
-def exact_div(x, y):
-    assert x % y == 0
-    return x // y
-
-
-def str2bool(string):
-    str2val = {"True": True, "False": False}
-    if string in str2val:
-        return str2val[string]
-    else:
-        raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
-
-
-def optional_int(string):
-    return None if string == "None" else int(string)
-
-
-def optional_float(string):
-    return None if string == "None" else float(string)
-
-
-def compression_ratio(text) -> float:
-    return len(text) / len(zlib.compress(text.encode("utf-8")))
-
-
-def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'):
-    assert seconds >= 0, "non-negative timestamp expected"
-    milliseconds = round(seconds * 1000.0)
-
-    hours = milliseconds // 3_600_000
-    milliseconds -= hours * 3_600_000
-
-    minutes = milliseconds // 60_000
-    milliseconds -= minutes * 60_000
-
-    seconds = milliseconds // 1_000
-    milliseconds -= seconds * 1_000
-
-    hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
-    return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}"
-
-
-def write_txt(transcript: Iterator[dict], file: TextIO):
-    for segment in transcript:
-        print(segment['text'].strip(), file=file, flush=True)
-
-
-def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
-    print("WEBVTT\n", file=file)
-    for segment in transcript:
-        text = processText(segment['text'], maxLineWidth).replace('-->', '->')
-
-        print(
-            f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
-            f"{text}\n",
-            file=file,
-            flush=True,
-        )
-
-
-def write_srt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
-    """
-    Write a transcript to a file in SRT format.
-    Example usage:
-        from pathlib import Path
-        from whisper.utils import write_srt
-        result = transcribe(model, audio_path, temperature=temperature, **args)
-        # save SRT
-        audio_basename = Path(audio_path).stem
-        with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
-            write_srt(result["segments"], file=srt)
-    """
-    for i, segment in enumerate(transcript, start=1):
-        text = processText(segment['text'].strip(), maxLineWidth).replace('-->', '->')
-
-        # write srt lines
-        print(
-            f"{i}\n"
-            f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> "
-            f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n"
-            f"{text}\n",
-            file=file,
-            flush=True,
-        )
-
-def processText(text: str, maxLineWidth=None):
-    if (maxLineWidth is None or maxLineWidth < 0):
-        return text
-
-    lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4)
-    return '\n'.join(lines)
-
-def slugify(value, allow_unicode=False):
-    """
-    Taken from https://github.com/django/django/blob/master/django/utils/text.py
-    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
-    dashes to single dashes. Remove characters that aren't alphanumerics,
-    underscores, or hyphens. Convert to lowercase. Also strip leading and
-    trailing whitespace, dashes, and underscores.
-    """
-    value = str(value)
-    if allow_unicode:
-        value = unicodedata.normalize('NFKC', value)
-    else:
-        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
-    value = re.sub(r'[^\w\s-]', '', value.lower())
-    return re.sub(r'[-\s]+', '-', value).strip('-_')
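
A minimal sketch of the subtitle helpers above in action; the segment values are made up, since real segments come from a Whisper transcription result, and the functions are assumed to be in scope from the module above:

segments = [
    {'start': 0.0, 'end': 2.5, 'text': ' Hello there.'},
    {'start': 2.5, 'end': 5.0, 'text': ' This is a test.'},
]

with open('example.srt', 'w', encoding='utf-8') as f:
    write_srt(segments, file=f, maxLineWidth=42)

print(format_timestamp(3725.5, always_include_hours=True))  # 01:02:05.500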
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/waiter.py
DELETED
@@ -1,184 +0,0 @@
-# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-import os
-
-from botocore import xform_name
-from botocore.compat import OrderedDict
-from botocore.docs.bcdoc.restdoc import DocumentStructure
-from botocore.docs.method import document_model_driven_method
-from botocore.docs.utils import DocumentedShape
-from botocore.utils import get_service_module_name
-
-
-class WaiterDocumenter:
-    def __init__(self, client, service_waiter_model, root_docs_path):
-        self._client = client
-        self._client_class_name = self._client.__class__.__name__
-        self._service_name = self._client.meta.service_model.service_name
-        self._service_waiter_model = service_waiter_model
-        self._root_docs_path = root_docs_path
-        self._USER_GUIDE_LINK = (
-            'https://boto3.amazonaws.com/'
-            'v1/documentation/api/latest/guide/clients.html#waiters'
-        )
-
-    def document_waiters(self, section):
-        """Documents the various waiters for a service.
-
-        :param section: The section to write to.
-        """
-        section.style.h2('Waiters')
-        self._add_overview(section)
-        section.style.new_line()
-        section.writeln('The available waiters are:')
-        section.style.toctree()
-        for waiter_name in self._service_waiter_model.waiter_names:
-            section.style.tocitem(f'{self._service_name}/waiter/{waiter_name}')
-            # Create a new DocumentStructure for each waiter and add contents.
-            waiter_doc_structure = DocumentStructure(
-                waiter_name, target='html'
-            )
-            self._add_single_waiter(waiter_doc_structure, waiter_name)
-            # Write waiters in individual/nested files.
-            # Path: <root>/reference/services/<service>/waiter/<waiter_name>.rst
-            waiter_dir_path = os.path.join(
-                self._root_docs_path, self._service_name, 'waiter'
-            )
-            waiter_doc_structure.write_to_file(waiter_dir_path, waiter_name)
-
-    def _add_single_waiter(self, section, waiter_name):
-        breadcrumb_section = section.add_new_section('breadcrumb')
-        breadcrumb_section.style.ref(
-            self._client_class_name, f'../../{self._service_name}'
-        )
-        breadcrumb_section.write(f' / Waiter / {waiter_name}')
-        section.add_title_section(waiter_name)
-        waiter_section = section.add_new_section(waiter_name)
-        waiter_section.style.start_sphinx_py_class(
-            class_name=f"{self._client_class_name}.Waiter.{waiter_name}"
-        )
-
-        # Add example on how to instantiate waiter.
-        waiter_section.style.start_codeblock()
-        waiter_section.style.new_line()
-        waiter_section.write(
-            'waiter = client.get_waiter(\'%s\')' % xform_name(waiter_name)
-        )
-        waiter_section.style.end_codeblock()
-
-        # Add information on the wait() method
-        waiter_section.style.new_line()
-        document_wait_method(
-            section=waiter_section,
-            waiter_name=waiter_name,
-            event_emitter=self._client.meta.events,
-            service_model=self._client.meta.service_model,
-            service_waiter_model=self._service_waiter_model,
-        )
-
-    def _add_overview(self, section):
-        section.style.new_line()
-        section.write(
-            'Waiters are available on a client instance '
-            'via the ``get_waiter`` method. For more detailed instructions '
-            'and examples on the usage or waiters, see the '
-            'waiters '
-        )
-        section.style.external_link(
-            title='user guide',
-            link=self._USER_GUIDE_LINK,
-        )
-        section.write('.')
-        section.style.new_line()
-
-
-def document_wait_method(
-    section,
-    waiter_name,
-    event_emitter,
-    service_model,
-    service_waiter_model,
-    include_signature=True,
-):
-    """Documents a the wait method of a waiter
-
-    :param section: The section to write to
-
-    :param waiter_name: The name of the waiter
-
-    :param event_emitter: The event emitter to use to emit events
-
-    :param service_model: The service model
-
-    :param service_waiter_model: The waiter model associated to the service
-
-    :param include_signature: Whether or not to include the signature.
-        It is useful for generating docstrings.
-    """
-    waiter_model = service_waiter_model.get_waiter(waiter_name)
-    operation_model = service_model.operation_model(waiter_model.operation)
-
-    waiter_config_members = OrderedDict()
-
-    waiter_config_members['Delay'] = DocumentedShape(
-        name='Delay',
-        type_name='integer',
-        documentation=(
-            '<p>The amount of time in seconds to wait between '
-            'attempts. Default: {}</p>'.format(waiter_model.delay)
-        ),
-    )
-
-    waiter_config_members['MaxAttempts'] = DocumentedShape(
-        name='MaxAttempts',
-        type_name='integer',
-        documentation=(
-            '<p>The maximum number of attempts to be made. '
-            'Default: {}</p>'.format(waiter_model.max_attempts)
-        ),
-    )
-
-    botocore_waiter_params = [
-        DocumentedShape(
-            name='WaiterConfig',
-            type_name='structure',
-            documentation=(
-                '<p>A dictionary that provides parameters to control '
-                'waiting behavior.</p>'
-            ),
-            members=waiter_config_members,
-        )
-    ]
-
-    wait_description = (
-        'Polls :py:meth:`{}.Client.{}` every {} '
-        'seconds until a successful state is reached. An error is '
-        'returned after {} failed checks.'.format(
-            get_service_module_name(service_model),
-            xform_name(waiter_model.operation),
-            waiter_model.delay,
-            waiter_model.max_attempts,
-        )
-    )
-
-    document_model_driven_method(
-        section,
-        'wait',
-        operation_model,
-        event_emitter=event_emitter,
-        method_description=wait_description,
-        example_prefix='waiter.wait',
-        include_input=botocore_waiter_params,
-        document_output=False,
-        include_signature=include_signature,
-    )
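
The module above only generates documentation; the runtime pattern it documents looks like the following sketch, where the bucket name and region are placeholders:

import boto3

s3 = boto3.client('s3', region_name='us-east-1')
waiter = s3.get_waiter('bucket_exists')
# polls HeadBucket until the bucket exists, honoring the WaiterConfig knobs
waiter.wait(
    Bucket='example-bucket',
    WaiterConfig={'Delay': 5, 'MaxAttempts': 20},
)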
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py
DELETED
@@ -1,47 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# The Original Code is mozilla.org code.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Mark Pilgrim - port to Python
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-
-from .chardistribution import EUCKRDistributionAnalysis
-from .codingstatemachine import CodingStateMachine
-from .mbcharsetprober import MultiByteCharSetProber
-from .mbcssm import EUCKR_SM_MODEL
-
-
-class EUCKRProber(MultiByteCharSetProber):
-    def __init__(self) -> None:
-        super().__init__()
-        self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
-        self.distribution_analyzer = EUCKRDistributionAnalysis()
-        self.reset()
-
-    @property
-    def charset_name(self) -> str:
-        return "EUC-KR"
-
-    @property
-    def language(self) -> str:
-        return "Korean"
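
Probers like the one above are normally driven through chardet's high-level detector rather than instantiated directly; a minimal sketch, where the sample text is arbitrary and the reported encoding and confidence depend on the input:

from chardet.universaldetector import UniversalDetector

detector = UniversalDetector()
detector.feed("안녕하세요, 반갑습니다.".encode('euc-kr'))
detector.close()
print(detector.result)  # e.g. {'encoding': 'EUC-KR', 'confidence': ..., 'language': 'Korean'}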
spaces/CVPR/LIVE/thrust/dependencies/cub/tune/Makefile
DELETED
@@ -1,192 +0,0 @@
-#/******************************************************************************
-# * Copyright (c) 2011, Duane Merrill.  All rights reserved.
-# * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
-# *
-# * Redistribution and use in source and binary forms, with or without
-# * modification, are permitted provided that the following conditions are met:
-# *     * Redistributions of source code must retain the above copyright
-# *       notice, this list of conditions and the following disclaimer.
-# *     * Redistributions in binary form must reproduce the above copyright
-# *       notice, this list of conditions and the following disclaimer in the
-# *       documentation and/or other materials provided with the distribution.
-# *     * Neither the name of the NVIDIA CORPORATION nor the
-# *       names of its contributors may be used to endorse or promote products
-# *       derived from this software without specific prior written permission.
-# *
-# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# *
-#******************************************************************************/
-
-#-------------------------------------------------------------------------------
-# Build script for project
-#-------------------------------------------------------------------------------
-
-NVCC = "$(shell which nvcc)"
-NVCC_VERSION = $(strip $(shell nvcc --version | grep release | sed 's/.*release //' | sed 's/,.*//'))
-
-# detect OS
-OSUPPER = $(shell uname -s 2>/dev/null | tr [:lower:] [:upper:])
-
-#-------------------------------------------------------------------------------
-# Libs
-#-------------------------------------------------------------------------------
-
-
-#-------------------------------------------------------------------------------
-# Includes
-#-------------------------------------------------------------------------------
-
-INC = -I. -I.. -I../test
-
-#-------------------------------------------------------------------------------
-# Libs
-#-------------------------------------------------------------------------------
-
-LIBS += -lcudart
-
-#-------------------------------------------------------------------------------
-# Defines
-#-------------------------------------------------------------------------------
-
-DEFINES =
-
-#-------------------------------------------------------------------------------
-# SM Arch
-#-------------------------------------------------------------------------------
-
-ifdef sm
-    SM_ARCH = $(sm)
-else
-    SM_ARCH = 200
-endif
-
-# Only one arch per tuning binary
-ifeq (350, $(findstring 350, $(SM_ARCH)))
-    SM_TARGETS = -arch=sm_35
-    SM_ARCH = 350
-endif
-ifeq (300, $(findstring 300, $(SM_ARCH)))
-    SM_TARGETS = -arch=sm_30
-    SM_ARCH = 300
-endif
-ifeq (200, $(findstring 200, $(SM_ARCH)))
-    SM_TARGETS = -arch=sm_20
-    SM_ARCH = 200
-endif
-ifeq (130, $(findstring 130, $(SM_ARCH)))
-    SM_TARGETS = -arch=sm_13
-    SM_ARCH = 130
-endif
-ifeq (110, $(findstring 110, $(SM_ARCH)))
-    SM_TARGETS = -arch=sm_11
-    SM_ARCH = 110
-endif
-ifeq (100, $(findstring 100, $(SM_ARCH)))
-    SM_TARGETS = -arch=sm_10
-    SM_ARCH = 100
-endif
-
-
-#-------------------------------------------------------------------------------
-# Compiler Flags
-#-------------------------------------------------------------------------------
-
-NVCCFLAGS = -Xptxas -v -Xcudafe -\#
-
-# Help the compiler/linker work with huge numbers of kernels on Windows
-ifeq (WIN_NT, $(findstring WIN_NT, $(OSUPPER)))
-    NVCCFLAGS += -Xcompiler /bigobj -Xcompiler /Zm500
-endif
-
-# 32/64-bit (32-bit device pointers by default)
-ifeq ($(force32), 1)
-    CPU_ARCH = -m32
-    CPU_ARCH_SUFFIX = i386
-else
-    CPU_ARCH = -m64
-    CPU_ARCH_SUFFIX = x86_64
-endif
-
-# CUDA ABI enable/disable (enabled by default)
-ifneq ($(abi), 0)
-    ABI_SUFFIX = abi
-else
-    NVCCFLAGS += -Xptxas -abi=no
-    ABI_SUFFIX = noabi
-endif
-
-# NVVM/Open64 middle-end compiler (nvvm by default)
-ifeq ($(open64), 1)
-    NVCCFLAGS += -open64
-    PTX_SUFFIX = open64
-else
-    PTX_SUFFIX = nvvm
-endif
-
-# Verbose toolchain output from nvcc
-ifeq ($(verbose), 1)
-    NVCCFLAGS += -v
-endif
-
-# Keep intermediate compilation artifacts
-ifeq ($(keep), 1)
-    NVCCFLAGS += -keep
-endif
-
-# Data type size to compile a schmoo binary for
-ifdef tunesize
-    TUNE_SIZE = $(tunesize)
-else
-    TUNE_SIZE = 4
-endif
-
-
-SUFFIX = $(TUNE_SIZE)B_sm$(SM_ARCH)_$(PTX_SUFFIX)_$(NVCC_VERSION)_$(ABI_SUFFIX)_$(CPU_ARCH_SUFFIX)
-
-#-------------------------------------------------------------------------------
-# Dependency Lists
-#-------------------------------------------------------------------------------
-
-rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
-
-DEPS = ./Makefile \
-    ../test/test_util.h \
-    $(call rwildcard,../cub/,*.cuh)
-
-
-#-------------------------------------------------------------------------------
-# make default
-#-------------------------------------------------------------------------------
-
-default:
-
-
-#-------------------------------------------------------------------------------
-# make clean
-#-------------------------------------------------------------------------------
-
-clean :
-	rm -f bin/*$(CPU_ARCH_SUFFIX)*
-	rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o
-
-
-
-#-------------------------------------------------------------------------------
-# make tune_device_reduce
-#-------------------------------------------------------------------------------
-
-tune_device_reduce: bin/tune_device_reduce_$(SUFFIX)
-
-bin/tune_device_reduce_$(SUFFIX) : tune_device_reduce.cu $(DEPS)
-	mkdir -p bin
-	$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/tune_device_reduce_$(SUFFIX) tune_device_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 -DTUNE_ARCH=$(SM_ARCH) -DTUNE_SIZE=$(TUNE_SIZE)
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/generate.h
DELETED
@@ -1,44 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a fill of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// the purpose of this header is to #include the generate.h header
-// of the sequential, host, and device systems. It should be #included in any
-// code which uses adl to dispatch generate
-
-#include <thrust/system/detail/sequential/generate.h>
-
-// SCons can't see through the #defines below to figure out what this header
-// includes, so we fake it out by specifying all possible files we might end up
-// including inside an #if 0.
-#if 0
-#include <thrust/system/cpp/detail/generate.h>
-#include <thrust/system/cuda/detail/generate.h>
-#include <thrust/system/omp/detail/generate.h>
-#include <thrust/system/tbb/detail/generate.h>
-#endif
-
-#define __THRUST_HOST_SYSTEM_GENERATE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/generate.h>
-#include __THRUST_HOST_SYSTEM_GENERATE_HEADER
-#undef __THRUST_HOST_SYSTEM_GENERATE_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_GENERATE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/generate.h>
-#include __THRUST_DEVICE_SYSTEM_GENERATE_HEADER
-#undef __THRUST_DEVICE_SYSTEM_GENERATE_HEADER
-
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy.h
DELETED
@@ -1,57 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/tbb/detail/execution_policy.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace tbb
-{
-namespace detail
-{
-
-
-template<typename DerivedPolicy,
-         typename InputIterator,
-         typename OutputIterator>
-  OutputIterator copy(execution_policy<DerivedPolicy> &exec,
-                      InputIterator first,
-                      InputIterator last,
-                      OutputIterator result);
-
-
-template<typename DerivedPolicy,
-         typename InputIterator,
-         typename Size,
-         typename OutputIterator>
-  OutputIterator copy_n(execution_policy<DerivedPolicy> &exec,
-                        InputIterator first,
-                        Size n,
-                        OutputIterator result);
-
-
-} // end namespace detail
-} // end namespace tbb
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/tbb/detail/copy.inl>
-
spaces/CVPR/Text2Human/Text2Human/data/segm_attr_dataset.py
DELETED
@@ -1,167 +0,0 @@
-import os
-import os.path
-import random
-
-import numpy as np
-import torch
-import torch.utils.data as data
-from PIL import Image
-
-
-class DeepFashionAttrSegmDataset(data.Dataset):
-
-    def __init__(self,
-                 img_dir,
-                 segm_dir,
-                 pose_dir,
-                 ann_dir,
-                 downsample_factor=2,
-                 xflip=False):
-        self._img_path = img_dir
-        self._densepose_path = pose_dir
-        self._segm_path = segm_dir
-        self._image_fnames = []
-        self.upper_fused_attrs = []
-        self.lower_fused_attrs = []
-        self.outer_fused_attrs = []
-
-        self.downsample_factor = downsample_factor
-        self.xflip = xflip
-
-        # load attributes
-        assert os.path.exists(f'{ann_dir}/upper_fused.txt')
-        for idx, row in enumerate(
-                open(os.path.join(f'{ann_dir}/upper_fused.txt'), 'r')):
-            annotations = row.split()
-            self._image_fnames.append(annotations[0])
-            # assert self._image_fnames[idx] == annotations[0]
-            self.upper_fused_attrs.append(int(annotations[1]))
-
-        assert len(self._image_fnames) == len(self.upper_fused_attrs)
-
-        assert os.path.exists(f'{ann_dir}/lower_fused.txt')
-        for idx, row in enumerate(
-                open(os.path.join(f'{ann_dir}/lower_fused.txt'), 'r')):
-            annotations = row.split()
-            assert self._image_fnames[idx] == annotations[0]
-            self.lower_fused_attrs.append(int(annotations[1]))
-
-        assert len(self._image_fnames) == len(self.lower_fused_attrs)
-
-        assert os.path.exists(f'{ann_dir}/outer_fused.txt')
-        for idx, row in enumerate(
-                open(os.path.join(f'{ann_dir}/outer_fused.txt'), 'r')):
-            annotations = row.split()
-            assert self._image_fnames[idx] == annotations[0]
-            self.outer_fused_attrs.append(int(annotations[1]))
-
-        assert len(self._image_fnames) == len(self.outer_fused_attrs)
-
-        # remove the overlapping item between upper cls and lower cls
-        # cls 21 can appear with upper clothes
-        # cls 4 can appear with lower clothes
-        self.upper_cls = [1., 4.]
-        self.lower_cls = [3., 5., 21.]
-        self.outer_cls = [2.]
-        self.other_cls = [
-            11., 18., 7., 8., 9., 10., 12., 16., 17., 19., 20., 22., 23., 15.,
-            14., 13., 0., 6.
-        ]
-
-    def _open_file(self, path_prefix, fname):
-        return open(os.path.join(path_prefix, fname), 'rb')
-
-    def _load_raw_image(self, raw_idx):
-        fname = self._image_fnames[raw_idx]
-        with self._open_file(self._img_path, fname) as f:
-            image = Image.open(f)
-            if self.downsample_factor != 1:
-                width, height = image.size
-                width = width // self.downsample_factor
-                height = height // self.downsample_factor
-                image = image.resize(
-                    size=(width, height), resample=Image.LANCZOS)
-            image = np.array(image)
-        if image.ndim == 2:
-            image = image[:, :, np.newaxis]  # HW => HWC
-        image = image.transpose(2, 0, 1)  # HWC => CHW
-        return image
-
-    def _load_densepose(self, raw_idx):
-        fname = self._image_fnames[raw_idx]
-        fname = f'{fname[:-4]}_densepose.png'
-        with self._open_file(self._densepose_path, fname) as f:
-            densepose = Image.open(f)
-            if self.downsample_factor != 1:
-                width, height = densepose.size
-                width = width // self.downsample_factor
-                height = height // self.downsample_factor
-                densepose = densepose.resize(
-                    size=(width, height), resample=Image.NEAREST)
-            # channel-wise IUV order, [3, H, W]
-            densepose = np.array(densepose)[:, :, 2:].transpose(2, 0, 1)
-        return densepose.astype(np.float32)
-
-    def _load_segm(self, raw_idx):
-        fname = self._image_fnames[raw_idx]
-        fname = f'{fname[:-4]}_segm.png'
-        with self._open_file(self._segm_path, fname) as f:
-            segm = Image.open(f)
-            if self.downsample_factor != 1:
-                width, height = segm.size
-                width = width // self.downsample_factor
-                height = height // self.downsample_factor
-                segm = segm.resize(
-                    size=(width, height), resample=Image.NEAREST)
-            segm = np.array(segm)
-        segm = segm[:, :, np.newaxis].transpose(2, 0, 1)
-        return segm.astype(np.float32)
-
-    def __getitem__(self, index):
-        image = self._load_raw_image(index)
-        pose = self._load_densepose(index)
-        segm = self._load_segm(index)
-
-        if self.xflip and random.random() > 0.5:
-            assert image.ndim == 3  # CHW
-            image = image[:, :, ::-1].copy()
-            pose = pose[:, :, ::-1].copy()
-            segm = segm[:, :, ::-1].copy()
-
-        image = torch.from_numpy(image)
-        segm = torch.from_numpy(segm)
-
-        upper_fused_attr = self.upper_fused_attrs[index]
-        lower_fused_attr = self.lower_fused_attrs[index]
-        outer_fused_attr = self.outer_fused_attrs[index]
-
-        # mask 0: denotes the common codebook,
-        # mask (attr + 1): denotes the texture-specific codebook
-        mask = torch.zeros_like(segm)
-        if upper_fused_attr != 17:
-            for cls in self.upper_cls:
-                mask[segm == cls] = upper_fused_attr + 1
-
-        if lower_fused_attr != 17:
-            for cls in self.lower_cls:
-                mask[segm == cls] = lower_fused_attr + 1
-
-        if outer_fused_attr != 17:
-            for cls in self.outer_cls:
-                mask[segm == cls] = outer_fused_attr + 1
-
-        pose = pose / 12. - 1
-        image = image / 127.5 - 1
-
-        return_dict = {
-            'image': image,
-            'densepose': pose,
-            'segm': segm,
-            'texture_mask': mask,
-            'img_name': self._image_fnames[index]
-        }
-
-        return return_dict
-
-    def __len__(self):
-        return len(self._image_fnames)
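
A minimal sketch of consuming the dataset above with a standard PyTorch DataLoader; the directory layout is a placeholder inferred from the constructor arguments, not taken from the repository:

from torch.utils.data import DataLoader

dataset = DeepFashionAttrSegmDataset(
    img_dir='datasets/images',
    segm_dir='datasets/segm',
    pose_dir='datasets/densepose',
    ann_dir='datasets/texture_ann/train',
    downsample_factor=2,
    xflip=True,
)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

batch = next(iter(loader))
print(batch['image'].shape, batch['segm'].shape, batch['texture_mask'].shape)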
spaces/CVPR/regionclip-demo/detectron2/data/datasets/builtin_meta.py
DELETED
@@ -1,560 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-"""
-Note:
-For your custom dataset, there is no need to hard-code metadata anywhere in the code.
-For example, for COCO-format dataset, metadata will be obtained automatically
-when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways
-during loading.
-
-However, we hard-coded metadata for a few common dataset here.
-The only goal is to allow users who don't have these dataset to use pre-trained models.
-Users don't have to download a COCO json (which contains metadata), in order to visualize a
-COCO model (with correct class names and colors).
-"""
-# meta data for 65-48-17 zeroshot split of COCO
-COCO_OVD_CATEGORIES = {
-    'target': [
-        {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
-        {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
-        {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
-        {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
-        {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
-        {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
-        {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
-        {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
-        {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
-        {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
-        {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
-        {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
-        {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
-        {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
-        {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
-        {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
-        {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
-    ],
-    'base': [
-        {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
-        {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
-        {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
-        {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
-        {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
-        {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
-        {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
-        {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
-        {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
-        {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
-        {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
-        {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
-        {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
-        {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
-        {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
-        {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
-        {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
-        {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
-        {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
-        {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
-        {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
-        {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
-        {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
-        {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
-        {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
-        {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
-        {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
-        {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
-        {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
-        {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
-        {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
-        {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
-        {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
-        {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
-        {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
-        {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
-        {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
-        {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
-        {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
-        {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
-        {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
-        {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
-        {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
-        {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
-        {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
-        {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
-        {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
-        {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
-    ],
-    'all': [
-        {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
-        {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
-        {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
-        {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
-        {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
-        {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
-        {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
-        {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
-        {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
-        {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
-        {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
-        {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
-        {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
-        {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
-        {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
-        {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
-        {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
-        {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
-        {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
-        {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
-        {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
-        {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
-        {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
-        {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
-        {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
-        {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
-        {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
-        {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
-        {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
-        {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
-        {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
-        {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
-        {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
-        {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
-        {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
-        {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
-        {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
-        {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
-        {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
-        {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
-        {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
-        {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
-        {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
-        {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
-        {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
-        {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
-        {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
-        {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
-        {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
-        {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
-        {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
-        {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
-        {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
-        {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
-        {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
-        {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
-        {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
-        {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
-        {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
-        {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
-        {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
-        {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
-        {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
-        {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
-        {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
-    ],
-}
-
-# Classes not used in COCO_OVD_CATEGORIES
-NOT_USED = [
-    {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
-    {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
-    {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
|
161 |
-
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
|
162 |
-
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
|
163 |
-
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
|
164 |
-
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
|
165 |
-
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
|
166 |
-
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
|
167 |
-
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
|
168 |
-
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
|
169 |
-
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
|
170 |
-
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
|
171 |
-
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
|
172 |
-
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
|
173 |
-
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
|
174 |
-
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
|
175 |
-
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
|
176 |
-
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
|
177 |
-
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
|
178 |
-
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
|
179 |
-
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
|
180 |
-
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
|
181 |
-
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
|
182 |
-
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
|
183 |
-
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
|
184 |
-
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
|
185 |
-
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
|
186 |
-
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
|
187 |
-
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
|
188 |
-
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
|
189 |
-
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
|
190 |
-
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
|
191 |
-
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
|
192 |
-
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
|
193 |
-
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
|
194 |
-
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
|
195 |
-
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
|
196 |
-
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
|
197 |
-
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
|
198 |
-
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
|
199 |
-
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
|
200 |
-
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
|
201 |
-
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
|
202 |
-
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
|
203 |
-
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
|
204 |
-
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
|
205 |
-
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
|
206 |
-
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
|
207 |
-
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
|
208 |
-
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
|
209 |
-
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
|
210 |
-
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
|
211 |
-
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
|
212 |
-
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
|
213 |
-
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
|
214 |
-
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
|
215 |
-
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
|
216 |
-
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
|
217 |
-
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
|
218 |
-
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
|
219 |
-
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
|
220 |
-
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
|
221 |
-
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
|
222 |
-
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
|
223 |
-
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
|
224 |
-
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
|
225 |
-
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
|
226 |
-
]
|
227 |
-
|
228 |
-
# All coco categories, together with their nice-looking visualization colors
|
229 |
-
# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
|
230 |
-
COCO_CATEGORIES = [
|
231 |
-
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
|
232 |
-
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
|
233 |
-
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
|
234 |
-
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
|
235 |
-
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
|
236 |
-
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
|
237 |
-
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
|
238 |
-
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
|
239 |
-
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
|
240 |
-
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
|
241 |
-
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
|
242 |
-
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
|
243 |
-
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
|
244 |
-
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
|
245 |
-
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
|
246 |
-
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
|
247 |
-
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
|
248 |
-
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
|
249 |
-
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
|
250 |
-
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
|
251 |
-
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
|
252 |
-
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
|
253 |
-
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
|
254 |
-
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
|
255 |
-
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
|
256 |
-
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
|
257 |
-
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
|
258 |
-
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
|
259 |
-
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
|
260 |
-
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
|
261 |
-
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
|
262 |
-
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
|
263 |
-
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
|
264 |
-
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
|
265 |
-
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
|
266 |
-
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
|
267 |
-
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
|
268 |
-
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
|
269 |
-
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
|
270 |
-
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
|
271 |
-
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
|
272 |
-
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
|
273 |
-
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
|
274 |
-
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
|
275 |
-
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
|
276 |
-
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
|
277 |
-
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
|
278 |
-
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
|
279 |
-
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
|
280 |
-
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
|
281 |
-
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
|
282 |
-
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
|
283 |
-
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
|
284 |
-
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
|
285 |
-
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
|
286 |
-
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
|
287 |
-
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
|
288 |
-
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
|
289 |
-
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
|
290 |
-
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
|
291 |
-
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
|
292 |
-
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
|
293 |
-
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
|
294 |
-
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
|
295 |
-
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
|
296 |
-
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
|
297 |
-
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
|
298 |
-
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
|
299 |
-
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
|
300 |
-
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
|
301 |
-
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
|
302 |
-
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
|
303 |
-
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
|
304 |
-
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
|
305 |
-
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
|
306 |
-
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
|
307 |
-
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
|
308 |
-
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
|
309 |
-
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
|
310 |
-
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
|
311 |
-
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
|
312 |
-
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
|
313 |
-
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
|
314 |
-
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
|
315 |
-
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
|
316 |
-
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
|
317 |
-
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
|
318 |
-
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
|
319 |
-
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
|
320 |
-
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
|
321 |
-
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
|
322 |
-
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
|
323 |
-
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
|
324 |
-
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
|
325 |
-
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
|
326 |
-
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
|
327 |
-
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
|
328 |
-
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
|
329 |
-
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
|
330 |
-
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
|
331 |
-
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
|
332 |
-
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
|
333 |
-
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
|
334 |
-
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
|
335 |
-
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
|
336 |
-
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
|
337 |
-
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
|
338 |
-
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
|
339 |
-
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
|
340 |
-
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
|
341 |
-
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
|
342 |
-
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
|
343 |
-
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
|
344 |
-
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
|
345 |
-
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
|
346 |
-
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
|
347 |
-
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
|
348 |
-
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
|
349 |
-
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
|
350 |
-
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
|
351 |
-
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
|
352 |
-
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
|
353 |
-
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
|
354 |
-
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
|
355 |
-
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
|
356 |
-
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
|
357 |
-
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
|
358 |
-
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
|
359 |
-
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
|
360 |
-
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
|
361 |
-
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
|
362 |
-
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
|
363 |
-
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
|
364 |
-
]
|
365 |
-
|
366 |
-
# fmt: off
|
367 |
-
COCO_PERSON_KEYPOINT_NAMES = (
|
368 |
-
"nose",
|
369 |
-
"left_eye", "right_eye",
|
370 |
-
"left_ear", "right_ear",
|
371 |
-
"left_shoulder", "right_shoulder",
|
372 |
-
"left_elbow", "right_elbow",
|
373 |
-
"left_wrist", "right_wrist",
|
374 |
-
"left_hip", "right_hip",
|
375 |
-
"left_knee", "right_knee",
|
376 |
-
"left_ankle", "right_ankle",
|
377 |
-
)
|
378 |
-
# fmt: on
|
379 |
-
|
380 |
-
# Pairs of keypoints that should be exchanged under horizontal flipping
|
381 |
-
COCO_PERSON_KEYPOINT_FLIP_MAP = (
|
382 |
-
("left_eye", "right_eye"),
|
383 |
-
("left_ear", "right_ear"),
|
384 |
-
("left_shoulder", "right_shoulder"),
|
385 |
-
("left_elbow", "right_elbow"),
|
386 |
-
("left_wrist", "right_wrist"),
|
387 |
-
("left_hip", "right_hip"),
|
388 |
-
("left_knee", "right_knee"),
|
389 |
-
("left_ankle", "right_ankle"),
|
390 |
-
)
|
391 |
-
|
392 |
-
# rules for pairs of keypoints to draw a line between, and the line color to use.
|
393 |
-
KEYPOINT_CONNECTION_RULES = [
|
394 |
-
# face
|
395 |
-
("left_ear", "left_eye", (102, 204, 255)),
|
396 |
-
("right_ear", "right_eye", (51, 153, 255)),
|
397 |
-
("left_eye", "nose", (102, 0, 204)),
|
398 |
-
("nose", "right_eye", (51, 102, 255)),
|
399 |
-
# upper-body
|
400 |
-
("left_shoulder", "right_shoulder", (255, 128, 0)),
|
401 |
-
("left_shoulder", "left_elbow", (153, 255, 204)),
|
402 |
-
("right_shoulder", "right_elbow", (128, 229, 255)),
|
403 |
-
("left_elbow", "left_wrist", (153, 255, 153)),
|
404 |
-
("right_elbow", "right_wrist", (102, 255, 224)),
|
405 |
-
# lower-body
|
406 |
-
("left_hip", "right_hip", (255, 102, 0)),
|
407 |
-
("left_hip", "left_knee", (255, 255, 77)),
|
408 |
-
("right_hip", "right_knee", (153, 255, 204)),
|
409 |
-
("left_knee", "left_ankle", (191, 255, 128)),
|
410 |
-
("right_knee", "right_ankle", (255, 195, 77)),
|
411 |
-
]
|
412 |
-
|
413 |
-
# All Cityscapes categories, together with their nice-looking visualization colors
|
414 |
-
# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
|
415 |
-
CITYSCAPES_CATEGORIES = [
|
416 |
-
{"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
|
417 |
-
{"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
|
418 |
-
{"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
|
419 |
-
{"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
|
420 |
-
{"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
|
421 |
-
{"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
|
422 |
-
{"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
|
423 |
-
{"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
|
424 |
-
{"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
|
425 |
-
{"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
|
426 |
-
{"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
|
427 |
-
{"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
|
428 |
-
{"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
|
429 |
-
{"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
|
430 |
-
{"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
|
431 |
-
{"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
|
432 |
-
{"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
|
433 |
-
{"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
|
434 |
-
{"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
|
435 |
-
]
|
436 |
-
|
437 |
-
# fmt: off
|
438 |
-
ADE20K_SEM_SEG_CATEGORIES = [
|
439 |
-
"wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
|
440 |
-
]
|
441 |
-
# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
|
442 |
-
# fmt: on
|
443 |
-
|
444 |
-
|

def _get_coco_instances_meta():
    thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
    thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
    assert len(thing_ids) == 80, len(thing_ids)
    # Mapping from the non-contiguous COCO category id to an id in [0, 79]
    thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
    thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
    ret = {
        "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
        "thing_classes": thing_classes,
        "thing_colors": thing_colors,
    }
    return ret


def _get_coco_panoptic_separated_meta():
    """
    Returns metadata for the "separated" version of the panoptic segmentation dataset.
    """
    stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
    assert len(stuff_ids) == 53, len(stuff_ids)

    # For semantic segmentation, this mapping maps from contiguous stuff id
    # (in [0, 53], used in models) to ids in the dataset (used for processing results)
    # The id 0 is mapped to an extra category "thing".
    stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
    # When converting COCO panoptic annotations to semantic annotations,
    # we label the "thing" category as 0.
    stuff_dataset_id_to_contiguous_id[0] = 0

    # 54 names for COCO stuff categories (including "things")
    stuff_classes = ["things"] + [
        k["name"].replace("-other", "").replace("-merged", "")
        for k in COCO_CATEGORIES
        if k["isthing"] == 0
    ]

    # NOTE: I randomly picked a color for things
    stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
    ret = {
        "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
        "stuff_classes": stuff_classes,
        "stuff_colors": stuff_colors,
    }
    ret.update(_get_coco_instances_meta())
    return ret


def _get_builtin_metadata(dataset_name):
    if dataset_name == "coco":
        return _get_coco_instances_meta()
    if dataset_name == "coco_panoptic_separated":
        return _get_coco_panoptic_separated_meta()
    elif dataset_name == "coco_panoptic_standard":
        meta = {}
        # The following metadata maps contiguous ids from [0, #thing categories +
        # #stuff categories) to their names and colors. We keep a replica of the
        # same name and color under both "thing_*" and "stuff_*" because the current
        # visualization function in D2 handles thing and stuff classes differently
        # due to some heuristic used in Panoptic FPN. We keep the same naming to
        # enable reusing existing visualization functions.
        thing_classes = [k["name"] for k in COCO_CATEGORIES]
        thing_colors = [k["color"] for k in COCO_CATEGORIES]
        stuff_classes = [k["name"] for k in COCO_CATEGORIES]
        stuff_colors = [k["color"] for k in COCO_CATEGORIES]

        meta["thing_classes"] = thing_classes
        meta["thing_colors"] = thing_colors
        meta["stuff_classes"] = stuff_classes
        meta["stuff_colors"] = stuff_colors

        # Convert category id for training:
        # category id: like semantic segmentation, it is the class id for each
        # pixel. Since there are some classes not used in evaluation, the category
        # id is not always contiguous and thus we have two sets of category ids:
        #   - original category id: category id in the original dataset, mainly
        #     used for evaluation.
        #   - contiguous category id: [0, #classes), in order to train the linear
        #     softmax classifier.
        thing_dataset_id_to_contiguous_id = {}
        stuff_dataset_id_to_contiguous_id = {}

        for i, cat in enumerate(COCO_CATEGORIES):
            if cat["isthing"]:
                thing_dataset_id_to_contiguous_id[cat["id"]] = i
            else:
                stuff_dataset_id_to_contiguous_id[cat["id"]] = i

        meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
        meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id

        return meta
    elif dataset_name == "coco_person":
        return {
            "thing_classes": ["person"],
            "keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
            "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
            "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
        }
    elif dataset_name == "cityscapes":
        # fmt: off
        CITYSCAPES_THING_CLASSES = [
            "person", "rider", "car", "truck",
            "bus", "train", "motorcycle", "bicycle",
        ]
        CITYSCAPES_STUFF_CLASSES = [
            "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
            "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
            "truck", "bus", "train", "motorcycle", "bicycle",
        ]
        # fmt: on
        return {
            "thing_classes": CITYSCAPES_THING_CLASSES,
            "stuff_classes": CITYSCAPES_STUFF_CLASSES,
        }
    raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
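
For readers skimming this deleted file: the key trick above is closing the gaps in COCO's non-contiguous category ids. A minimal, self-contained sketch of that mapping follows; the three-entry category list is hypothetical (the real `COCO_CATEGORIES` has 133 entries, and ids such as 12, 26, and 29 are skipped).

```python
# Hypothetical miniature category list, mimicking the COCO_CATEGORIES schema above.
CATS = [
    {"isthing": 1, "id": 1, "name": "person"},
    {"isthing": 1, "id": 3, "name": "car"},      # id 2 deliberately missing
    {"isthing": 0, "id": 92, "name": "banner"},  # a stuff class
]

# Same expression as in _get_coco_instances_meta(): enumerate the thing ids
# in order so the gaps in the dataset ids are closed for training.
thing_ids = [c["id"] for c in CATS if c["isthing"] == 1]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
print(thing_dataset_id_to_contiguous_id)  # {1: 0, 3: 1}
```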
spaces/CVPR/regionclip-demo/detectron2/utils/testing.py
DELETED
@@ -1,132 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import numpy as np
import torch

from detectron2 import model_zoo
from detectron2.data import DatasetCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.structures import Boxes, Instances, ROIMasks
from detectron2.utils.file_io import PathManager


"""
Internal utilities for tests. Don't use except for writing tests.
"""


def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    cfg = model_zoo.get_config(config_path)
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
    return build_model(cfg)


def random_boxes(num_boxes, max_coord=100, device="cpu"):
    """
    Create a random Nx4 boxes tensor, with coordinates < max_coord.
    """
    boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5)
    boxes.clamp_(min=1.0)  # tiny boxes cause numerical instability in box regression
    # Note: the implementation of this function in torchvision is:
    # boxes[:, 2:] += torch.rand(N, 2) * 100
    # but it does not guarantee the non-negative width/height constraints:
    # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]:
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def get_sample_coco_image(tensor=True):
    """
    Args:
        tensor (bool): if True, returns a 3xHxW tensor.
            else, returns an HxWx3 numpy array.

    Returns:
        an image, in BGR color.
    """
    try:
        file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"]
        if not PathManager.exists(file_name):
            raise FileNotFoundError()
    except IOError:
        # for public CI to run
        file_name = "http://images.cocodataset.org/train2017/000000000009.jpg"
    ret = read_image(file_name, format="BGR")
    if tensor:
        ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1)))
    return ret


def convert_scripted_instances(instances):
    """
    Convert a scripted Instances object to a regular :class:`Instances` object
    """
    ret = Instances(instances.image_size)
    for name in instances._field_names:
        val = getattr(instances, "_" + name, None)
        if val is not None:
            ret.set(name, val)
    return ret


def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False):
    """
    Args:
        input, other (Instances):
        size_as_tensor: compare image_size of the Instances as tensors (instead of tuples).
            Useful for comparing outputs of tracing.
    """
    if not isinstance(input, Instances):
        input = convert_scripted_instances(input)
    if not isinstance(other, Instances):
        other = convert_scripted_instances(other)

    if not msg:
        msg = "Two Instances are different! "
    else:
        msg = msg.rstrip() + " "

    size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!"
    if size_as_tensor:
        assert torch.equal(
            torch.tensor(input.image_size), torch.tensor(other.image_size)
        ), size_error_msg
    else:
        assert input.image_size == other.image_size, size_error_msg
    fields = sorted(input.get_fields().keys())
    fields_other = sorted(other.get_fields().keys())
    assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!"

    for f in fields:
        val1, val2 = input.get(f), other.get(f)
        if isinstance(val1, (Boxes, ROIMasks)):
            # boxes are in the range of O(100), so they can have a larger tolerance
            assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), (
                msg + f"Field {f} differs too much!"
            )
        elif isinstance(val1, torch.Tensor):
            if val1.dtype.is_floating_point:
                mag = torch.abs(val1).max().cpu().item()
                assert torch.allclose(val1, val2, atol=mag * rtol), (
                    msg + f"Field {f} differs too much!"
                )
            else:
                assert torch.equal(val1, val2), msg + f"Field {f} is different!"
        else:
            raise ValueError(f"Don't know how to compare type {type(val1)}")


def reload_script_model(module):
    """
    Save a jit module and load it back.
    Similar to the `getExportImportCopy` function in torch/testing/
    """
    buffer = io.BytesIO()
    torch.jit.save(module, buffer)
    buffer.seek(0)
    return torch.jit.load(buffer)
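
A short, hedged sketch of how these test helpers are typically used (assuming detectron2 is installed so the import path below resolves):

```python
import torch
from detectron2.structures import Boxes, Instances
from detectron2.utils.testing import assert_instances_allclose, random_boxes

boxes = random_boxes(8, max_coord=64)
# By construction x2 >= x1 and y2 >= y1, unlike the torchvision variant noted above:
assert (boxes[:, 2] >= boxes[:, 0]).all() and (boxes[:, 3] >= boxes[:, 1]).all()

inst = Instances((64, 64))          # image_size as (H, W)
inst.pred_boxes = Boxes(boxes)      # fields are set by attribute assignment
inst.scores = torch.rand(8)
assert_instances_allclose(inst, inst)  # an Instances object always matches itself
```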
spaces/ChallengeHub/Chinese-LangChain/tests/test_duckpy.py
DELETED
@@ -1,15 +0,0 @@
from duckpy import Client

client = Client()

results = client.search("Python Wikipedia")

# Prints first result title
print(results[0].title)

# Prints first result URL
print(results[0].url)

# Prints first result description
print(results[0].description)
# https://github.com/AmanoTeam/duckpy
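
For context, a small hedged variant of the same test script, wrapping the calls in a reusable helper. It assumes only what the script above already shows: that `Client().search()` returns result objects with `.title` and `.url` attributes.

```python
from duckpy import Client


def top_results(query: str, n: int = 3):
    """Return (title, url) pairs for the first n DuckDuckGo results."""
    results = Client().search(query)
    return [(r.title, r.url) for r in results[:n]]


for title, url in top_results("Python Wikipedia"):
    print(f"{title} -> {url}")
```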
spaces/ChandraMohanNayal/AutoGPT/autogpt/config/singleton.py
DELETED
@@ -1,24 +0,0 @@
"""The singleton metaclass for ensuring only one instance of a class."""
import abc


class Singleton(abc.ABCMeta, type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Call method for the singleton metaclass."""
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class AbstractSingleton(abc.ABC, metaclass=Singleton):
    """
    Abstract singleton class for ensuring only one instance of a class.
    """

    pass
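
A minimal usage sketch of the metaclass above (the `Config` class is hypothetical). Note that because `__call__` short-circuits on the cached instance, `__init__` runs only on the first construction:

```python
class Config(metaclass=Singleton):
    def __init__(self):
        self.debug = False


a = Config()
b = Config()
assert a is b            # the second call returned the cached instance
a.debug = True
assert b.debug is True   # one shared object, so state is shared too
```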
spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/__init__.py
DELETED
@@ -1,19 +0,0 @@
from numpy import zeros, int32, float32
from torch import from_numpy

from .core import maximum_path_jit


def maximum_path(neg_cent, mask):
    """ numba optimized version.
    neg_cent: [b, t_t, t_s]
    mask: [b, t_t, t_s]
    """
    device = neg_cent.device
    dtype = neg_cent.dtype
    neg_cent = neg_cent.data.cpu().numpy().astype(float32)
    path = zeros(neg_cent.shape, dtype=int32)

    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
    maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
    return from_numpy(path).to(device=device, dtype=dtype)
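
A hedged usage sketch for the function above. It assumes the package's compiled `.core` module (providing `maximum_path_jit`) is built and importable; the tensor shapes follow the docstring.

```python
import torch
from monotonic_align import maximum_path  # requires the compiled .core module

b, t_t, t_s = 1, 6, 4
neg_cent = torch.randn(b, t_t, t_s)  # alignment scores between frames and tokens
mask = torch.ones(b, t_t, t_s)       # all positions valid for this toy example

path = maximum_path(neg_cent, mask)  # hard monotonic alignment over the score grid
print(path.shape)                    # torch.Size([1, 6, 4]), same dtype/device as neg_cent
```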
spaces/Clementapa/orang-outan-image-video-detection/style.css
DELETED
@@ -1,10 +0,0 @@
#disp_image {
    text-align: center;
    /* Horizontally center the content */
}

#duplicate-button {
    margin-left: auto;
    color: #fff;
    background: #1565c0;
}
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-d80d0bbf.js
DELETED
@@ -1,2 +0,0 @@
import{S as N,e as O,s as P,N as G,O as H,k as Q,K as r,p as j,o as R,Q as K,z as D,v as I,A as q,x as T,a1 as J,B as V,a9 as L,ab as M,ac as Y,ad as Z,h as x,a4 as p,at as $,au as ee,P as le,R as ie,a7 as ne,F as te}from"./index-1d65707a.js";import{a as ae}from"./Button-f155035a.js";import{b as se}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import{X as fe}from"./Blocks-c9e1499d.js";function ue(l){let e;const i=l[17].default,n=L(i,l,l[19],null);return{c(){n&&n.c()},m(s,u){n&&n.m(s,u),e=!0},p(s,u){n&&n.p&&(!e||u&524288)&&M(n,i,s,s[19],e?Z(i,s[19],u,null):Y(s[19]),null)},i(s){e||(D(n,s),e=!0)},o(s){I(n,s),e=!1},d(s){n&&n.d(s)}}}function _e(l){let e,i,n,s,u,h,c,m,d,g;return c=new ae({props:{size:l[4],variant:l[8],elem_id:l[0],elem_classes:l[1],visible:l[2],scale:l[5],min_width:l[6],disabled:l[7]==="static",$$slots:{default:[ue]},$$scope:{ctx:l}}}),c.$on("click",l[12]),{c(){e=G("input"),h=H(),Q(c.$$.fragment),r(e,"class","hide svelte-ydeks8"),r(e,"accept",l[11]),r(e,"type","file"),e.multiple=i=l[3]==="multiple"||void 0,r(e,"webkitdirectory",n=l[3]==="directory"||void 0),r(e,"mozdirectory",s=l[3]==="directory"||void 0),r(e,"data-testid",u=l[9]+"-upload-button")},m(f,_){j(f,e,_),l[18](e),j(f,h,_),R(c,f,_),m=!0,d||(g=[K(e,"change",l[13]),K(e,"click",l[14])],d=!0)},p(f,[_]){(!m||_&2048)&&r(e,"accept",f[11]),(!m||_&8&&i!==(i=f[3]==="multiple"||void 0))&&(e.multiple=i),(!m||_&8&&n!==(n=f[3]==="directory"||void 0))&&r(e,"webkitdirectory",n),(!m||_&8&&s!==(s=f[3]==="directory"||void 0))&&r(e,"mozdirectory",s),(!m||_&512&&u!==(u=f[9]+"-upload-button"))&&r(e,"data-testid",u);const o={};_&16&&(o.size=f[4]),_&256&&(o.variant=f[8]),_&1&&(o.elem_id=f[0]),_&2&&(o.elem_classes=f[1]),_&4&&(o.visible=f[2]),_&32&&(o.scale=f[5]),_&64&&(o.min_width=f[6]),_&128&&(o.disabled=f[7]==="static"),_&524288&&(o.$$scope={dirty:_,ctx:f}),c.$set(o)},i(f){m||(D(c.$$.fragment,f),m=!0)},o(f){I(c.$$.fragment,f),m=!1},d(f){f&&(q(e),q(h)),l[18](null),T(c,f),d=!1,J(g)}}}function me(l,e,i){let{$$slots:n={},$$scope:s}=e,{elem_id:u=""}=e,{elem_classes:h=[]}=e,{visible:c=!0}=e,{file_count:m}=e,{file_types:d=[]}=e,{include_file_metadata:g=!0}=e,{size:f="lg"}=e,{scale:_=null}=e,{min_width:o=void 0}=e,{mode:k="dynamic"}=e,{variant:A="secondary"}=e,{label:B}=e,y;const E=V();let v;d==null?v=null:(d=d.map(t=>t.startsWith(".")?t:t+"/*"),v=d.join(", "));const C=()=>{y.click()},a=t=>{let w=Array.from(t);if(t.length){m==="single"&&(w=[t[0]]);var U=[];w.forEach((F,W)=>{U[W]=g?{name:F.name,size:F.size,data:"",blob:F}:F,U.filter(X=>X!==void 0).length===t.length&&E("load",m=="single"?U[0]:U)})}},S=t=>{const w=t.target;w.files&&a(w.files)},z=t=>{const w=t.target;w.value&&(w.value="")};function b(t){x[t?"unshift":"push"](()=>{y=t,i(10,y)})}return l.$$set=t=>{"elem_id"in t&&i(0,u=t.elem_id),"elem_classes"in t&&i(1,h=t.elem_classes),"visible"in t&&i(2,c=t.visible),"file_count"in t&&i(3,m=t.file_count),"file_types"in t&&i(15,d=t.file_types),"include_file_metadata"in t&&i(16,g=t.include_file_metadata),"size"in t&&i(4,f=t.size),"scale"in t&&i(5,_=t.scale),"min_width"in t&&i(6,o=t.min_width),"mode"in t&&i(7,k=t.mode),"variant"in t&&i(8,A=t.variant),"label"in t&&i(9,B=t.label),"$$scope"in t&&i(19,s=t.$$scope)},[u,h,c,m,f,_,o,k,A,B,y,v,C,S,z,d,g,n,b,s]}class oe extends N{constructor(e){super(),O(this,e,me,_e,P,{elem_id:0,elem_classes:1,visible:2,file_count:3,file_types:15,include_file_metadata:16,size:4,scale:5,min_width:6,mode:7,variant:8,label:9})}}function ce(l){let 
e=l[11](l[3])+"",i;return{c(){i=le(e)},m(n,s){j(n,i,s)},p(n,s){s&2056&&e!==(e=n[11](n[3])+"")&&ie(i,e)},d(n){n&&q(i)}}}function de(l){let e,i;return e=new oe({props:{elem_id:l[0],elem_classes:l[1],visible:l[2],file_count:l[4],file_types:l[5],size:l[6],scale:l[7],min_width:l[8],mode:l[9],variant:l[10],label:l[3],$$slots:{default:[ce]},$$scope:{ctx:l}}}),e.$on("click",l[15]),e.$on("load",l[12]),{c(){Q(e.$$.fragment)},m(n,s){R(e,n,s),i=!0},p(n,[s]){const u={};s&1&&(u.elem_id=n[0]),s&2&&(u.elem_classes=n[1]),s&4&&(u.visible=n[2]),s&16&&(u.file_count=n[4]),s&32&&(u.file_types=n[5]),s&64&&(u.size=n[6]),s&128&&(u.scale=n[7]),s&256&&(u.min_width=n[8]),s&512&&(u.mode=n[9]),s&1024&&(u.variant=n[10]),s&8&&(u.label=n[3]),s&264200&&(u.$$scope={dirty:s,ctx:n}),e.$set(u)},i(n){i||(D(e.$$.fragment,n),i=!0)},o(n){I(e.$$.fragment,n),i=!1},d(n){T(e,n)}}}function be(l,e,i){let n;p(l,fe,a=>i(11,n=a));let{elem_id:s=""}=e,{elem_classes:u=[]}=e,{visible:h=!0}=e,{label:c}=e,{value:m}=e,{file_count:d}=e,{file_types:g=[]}=e,{root:f}=e,{size:_="lg"}=e,{scale:o=null}=e,{min_width:k=void 0}=e,{mode:A="dynamic"}=e,{variant:B="secondary"}=e;const y=$("upload_files")??ee;async function E({detail:a}){i(13,m=a),await ne();let S=(Array.isArray(a)?a:[a]).map(z=>z.blob);y(f,S).then(async z=>{z.error?(Array.isArray(a)?a:[a]).forEach(async(b,t)=>{b.data=await se(b.blob),b.blob=void 0}):(Array.isArray(a)?a:[a]).forEach((b,t)=>{z.files&&(b.orig_name=b.name,b.name=z.files[t],b.is_file=!0,b.blob=void 0)}),v("change",m),v("upload",a)})}const v=V();function C(a){te.call(this,l,a)}return l.$$set=a=>{"elem_id"in a&&i(0,s=a.elem_id),"elem_classes"in a&&i(1,u=a.elem_classes),"visible"in a&&i(2,h=a.visible),"label"in a&&i(3,c=a.label),"value"in a&&i(13,m=a.value),"file_count"in a&&i(4,d=a.file_count),"file_types"in a&&i(5,g=a.file_types),"root"in a&&i(14,f=a.root),"size"in a&&i(6,_=a.size),"scale"in a&&i(7,o=a.scale),"min_width"in a&&i(8,k=a.min_width),"mode"in a&&i(9,A=a.mode),"variant"in a&&i(10,B=a.variant)},[s,u,h,c,d,g,_,o,k,A,B,n,E,m,f,C]}class re extends N{constructor(e){super(),O(this,e,be,de,P,{elem_id:0,elem_classes:1,visible:2,label:3,value:13,file_count:4,file_types:5,root:14,size:6,scale:7,min_width:8,mode:9,variant:10})}}const ye=re,ve=["static","dynamic"];export{ye as Component,ve as modes};
//# sourceMappingURL=index-d80d0bbf.js.map
spaces/Datasculptor/MusicGen/audiocraft/utils/export.py
DELETED
@@ -1,56 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Utility to export a training checkpoint to a lightweight release checkpoint.
"""

from pathlib import Path
import typing as tp

from omegaconf import OmegaConf, DictConfig
import torch


def _clean_lm_cfg(cfg: DictConfig):
    OmegaConf.set_struct(cfg, False)
    # This used to be set automatically in the LM solver, need a more robust solution
    # for the future.
    cfg['transformer_lm']['card'] = 2048
    cfg['transformer_lm']['n_q'] = 4
    # Experimental params no longer supported.
    bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters',
                  'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop']
    for name in bad_params:
        del cfg['transformer_lm'][name]
    OmegaConf.set_struct(cfg, True)
    return cfg


def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]):
    sig = Path(checkpoint_path).parent.name
    assert len(sig) == 8, "Not a valid Dora signature"
    pkg = torch.load(checkpoint_path, 'cpu')
    new_pkg = {
        'best_state': pkg['ema']['state']['model'],
        'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']),
    }
    out_file = Path(out_folder) / f'{sig}.th'
    torch.save(new_pkg, out_file)
    return out_file


def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]):
    sig = Path(checkpoint_path).parent.name
    assert len(sig) == 8, "Not a valid Dora signature"
    pkg = torch.load(checkpoint_path, 'cpu')
    new_pkg = {
        'best_state': pkg['fsdp_best_state']['model'],
        'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg']))
    }
    out_file = Path(out_folder) / f'{sig}.th'
    torch.save(new_pkg, out_file)
    return out_file
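
A hedged usage sketch for `export_lm` above. The paths are hypothetical; what matters is that the checkpoint sits inside a Dora experiment folder whose name is the 8-character signature, and that the output folder already exists.

```python
from pathlib import Path

from audiocraft.utils.export import export_lm

# Hypothetical layout: xps/1a2b3c4d/checkpoint.th, where '1a2b3c4d' is the
# Dora signature the function asserts on. 'release/' must exist beforehand.
out = export_lm(Path('xps/1a2b3c4d/checkpoint.th'), Path('release'))
print(out)  # release/1a2b3c4d.th, containing only 'best_state' and 'xp.cfg'
```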
spaces/Deci/DeciLM-6b-instruct/app.py
DELETED
@@ -1,136 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import gradio as gr
|
3 |
-
import torch
|
4 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
|
5 |
-
|
6 |
-
token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
|
7 |
-
|
8 |
-
model_id = 'Deci/DeciLM-6b-instruct'
|
9 |
-
|
10 |
-
SYSTEM_PROMPT_TEMPLATE = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
|
11 |
-
|
12 |
-
### Instruction:
|
13 |
-
|
14 |
-
{instruction}
|
15 |
-
|
16 |
-
### Response:
|
17 |
-
"""
|
18 |
-
|
19 |
-
DESCRIPTION = """
|
20 |
-
# <p style="text-align: center; color: #292b47;"> 🤖 <span style='color: #3264ff;'>DeciLM-6B-Instruct:</span> A Fast Instruction-Tuned Model💨 </p>
|
21 |
-
<span style='color: #292b47;'>Welcome to <a href="https://huggingface.co/Deci/DeciLM-6b-instruct" style="color: #3264ff;">DeciLM-6B-Instruct</a>! DeciLM-6B-Instruct is a 6B parameter instruction-tuned language model and released under the Llama license. It's an instruction-tuned model, not a chat-tuned model; you should prompt the model with an instruction that describes a task, and the model will respond appropriately to complete the task.</span>
|
22 |
-
<p><span style='color: #292b47;'>Learn more about the base model <a href="https://deci.ai/blog/decilm-15-times-faster-than-llama2-nas-generated-llm-with-variable-gqa/" style="color: #3264ff;">DeciLM-6B.</a></span></p>
|
23 |
-
"""
|
24 |
-
|
25 |
-
if not torch.cuda.is_available():
|
26 |
-
DESCRIPTION += 'You need a GPU for this example. Try using colab: https://bit.ly/decilm-instruct-nb'
|
27 |
-
|
28 |
-
if torch.cuda.is_available():
|
29 |
-
model = AutoModelForCausalLM.from_pretrained(
|
30 |
-
model_id,
|
31 |
-
torch_dtype=torch.float16,
|
32 |
-
device_map='auto',
|
33 |
-
trust_remote_code=True,
|
34 |
-
use_auth_token=token
|
35 |
-
)
|
36 |
-
else:
|
37 |
-
model = None
|
38 |
-
|
39 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
|
40 |
-
tokenizer.pad_token = tokenizer.eos_token
|
41 |
-
|
42 |
-
# Function to construct the prompt using the new system prompt template
|
43 |
-
def get_prompt_with_template(message: str) -> str:
|
44 |
-
return SYSTEM_PROMPT_TEMPLATE.format(instruction=message)
|
45 |
-
|
46 |
-
# Function to generate the model's response
|
47 |
-
def generate_model_response(message: str) -> str:
|
48 |
-
prompt = get_prompt_with_template(message)
|
49 |
-
inputs = tokenizer(prompt, return_tensors='pt')
|
50 |
-
if torch.cuda.is_available():
|
51 |
-
inputs = inputs.to('cuda')
|
52 |
-
# Include **generate_kwargs to include the user-defined options
|
53 |
-
output = model.generate(**inputs,
|
54 |
-
max_new_tokens=3000,
|
55 |
-
num_beams=2,
|
56 |
-
no_repeat_ngram_size=4,
|
57 |
-
early_stopping=True,
|
58 |
-
do_sample=True
|
59 |
-
)
|
60 |
-
return tokenizer.decode(output[0], skip_special_tokens=True)
|
61 |
-
|
62 |
-
# Function to extract the content after "### Response:"
|
63 |
-
def extract_response_content(full_response: str, ) -> str:
|
64 |
-
response_start_index = full_response.find("### Response:")
|
65 |
-
if response_start_index != -1:
|
66 |
-
return full_response[response_start_index + len("### Response:"):].strip()
|
67 |
-
else:
|
68 |
-
return full_response
|
69 |
-
|
70 |
-
# The main function that uses the dynamic generate_kwargs
|
71 |
-
def get_response_with_template(message: str) -> str:
|
72 |
-
full_response = generate_model_response(message)
|
73 |
-
return extract_response_content(full_response)
|
74 |
-
|
75 |
-
with gr.Blocks(css="style.css") as demo:
|
76 |
-
gr.Markdown(DESCRIPTION)
|
77 |
-
gr.DuplicateButton(value='Duplicate Space for private use',
|
78 |
-
elem_id='duplicate-button')
|
79 |
-
with gr.Group():
|
80 |
-
chatbot = gr.Textbox(label='DeciLM-6B-Instruct Output:')
|
81 |
-
with gr.Row():
|
82 |
-
textbox = gr.Textbox(
|
83 |
-
container=False,
|
84 |
-
show_label=False,
|
85 |
-
placeholder='Type an instruction...',
|
86 |
-
scale=10,
|
87 |
-
elem_id="textbox"
|
88 |
-
)
|
89 |
-
submit_button = gr.Button(
|
90 |
-
'💬 Submit',
|
91 |
-
variant='primary',
|
92 |
-
scale=1,
|
93 |
-
min_width=0,
|
94 |
-
elem_id="submit_button"
|
95 |
-
)
|
96 |
-
|
97 |
-
# Clear button to clear the chat history
|
98 |
-
clear_button = gr.Button(
|
99 |
-
'🗑️ Clear',
|
100 |
-
variant='secondary',
|
101 |
-
)
|
102 |
-
|
103 |
-
clear_button.click(
|
104 |
-
fn=lambda: ('',''),
|
105 |
-
outputs=[textbox, chatbot],
|
106 |
-
queue=False,
|
107 |
-
api_name=False,
|
108 |
-
)
|
109 |
-
|
110 |
-
submit_button.click(
|
111 |
-
fn=get_response_with_template,
|
112 |
-
inputs=textbox,
|
113 |
-
outputs= chatbot,
|
114 |
-
queue=False,
|
115 |
-
api_name=False,
|
116 |
-
)
|
117 |
-
|
118 |
-
gr.Examples(
|
119 |
-
examples=[
|
120 |
-
'Write detailed instructions for making chocolate chip pancakes.',
|
121 |
-
'Write a 250-word article about your love of pancakes.',
|
122 |
-
'Explain the plot of Back to the Future in three sentences.',
|
123 |
-
'How do I make a trap beat?',
|
124 |
-
'A step-by-step guide to learning Python in one month.',
|
125 |
-
],
|
126 |
-
inputs=textbox,
|
127 |
-
outputs=chatbot,
|
128 |
-
fn=get_response_with_template,
|
129 |
-
cache_examples=True,
|
130 |
-
elem_id="examples"
|
131 |
-
)
|
132 |
-
|
133 |
-
|
134 |
-
gr.HTML(label="Keep in touch", value="<img src='https://huggingface.co/spaces/Deci/DeciLM-6b-instruct/resolve/main/deci-coder-banner.png' alt='Keep in touch' style='display: block; color: #292b47; margin: auto; max-width: 800px;'>")
|
135 |
-
|
136 |
-
demo.launch()
spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_projector.py
DELETED
@@ -1,142 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Project given image to the latent space of pretrained network pickle."""

import copy
import wandb
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from PTI.configs import global_config, hyperparameters
from PTI.utils import log_utils
import dnnlib


def project(
        G,
        target: torch.Tensor,  # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
        *,
        num_steps=1000,
        w_avg_samples=10000,
        initial_learning_rate=0.01,
        initial_noise_factor=0.05,
        lr_rampdown_length=0.25,
        lr_rampup_length=0.05,
        noise_ramp_length=0.75,
        regularize_noise_weight=1e5,
        verbose=False,
        device: torch.device,
        use_wandb=False,
        initial_w=None,
        image_log_step=global_config.image_rec_result_log_snapshot,
        w_name: str
):
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution), \
        f'target shape {target.shape} does not match G output resolution {G.img_resolution}'

    def logprint(*args):
        if verbose:
            print(*args)

    G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float()  # type: ignore

    # Compute w stats.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None)  # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32)  # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True)  # [1, 1, C]
    w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device)
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5

    start_w = initial_w if initial_w is not None else w_avg

    # Setup noise inputs.
    noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name}

    # Load VGG16 feature detector.
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)

    # Features for target image.
    target_images = target.unsqueeze(0).to(device).to(torch.float32)
    if target_images.shape[2] > 256:
        target_images = F.interpolate(target_images, size=(256, 256), mode='area')
    target_features = vgg16(target_images, resize_images=False, return_lpips=True)

    w_opt = torch.tensor(start_w, dtype=torch.float32, device=device,
                         requires_grad=True)  # pylint: disable=not-callable
    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999),
                                 lr=hyperparameters.first_inv_lr)

    # Init noise.
    for buf in noise_bufs.values():
        buf[:] = torch.randn_like(buf)
        buf.requires_grad = True

    for step in tqdm(range(num_steps)):

        # Learning rate schedule.
        t = step / num_steps
        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Synth images from opt_w.
        w_noise = torch.randn_like(w_opt) * w_noise_scale
        ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
        synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True)

        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
        synth_images = (synth_images + 1) * (255 / 2)
        if synth_images.shape[2] > 256:
            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')

        # Features for synth images.
        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
        dist = (target_features - synth_features).square().sum()

        # Noise regularization.
        reg_loss = 0.0
        for v in noise_bufs.values():
            noise = v[None, None, :, :]  # must be [1,1,H,W] for F.avg_pool2d()
            while True:
                reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2
                reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2
                if noise.shape[2] <= 8:
                    break
                noise = F.avg_pool2d(noise, kernel_size=2)
        loss = dist + reg_loss * regularize_noise_weight

        if step % image_log_step == 0:
            with torch.no_grad():
                if use_wandb:
                    global_config.training_step += 1
                    wandb.log({f'first projection _{w_name}': loss.detach().cpu()}, step=global_config.training_step)
                    log_utils.log_image_from_w(w_opt.repeat([1, G.mapping.num_ws, 1]), G, w_name)

        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')

    # Normalize noise.
    with torch.no_grad():
        for buf in noise_bufs.values():
            buf -= buf.mean()
            buf *= buf.square().mean().rsqrt()

    del G
    return w_opt.repeat([1, 18, 1])
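A minimal usage sketch, assuming a StyleGAN2-ADA generator loaded from a pickle via the `legacy` loader used in that codebase and a target image already resized to G.img_resolution; the file names here are illustrative assumptions:

# Hedged usage sketch; `legacy.load_network_pkl` is the loader from the
# stylegan2-ada-pytorch codebase, and 'network.pkl'/'target.png' are placeholders.
import numpy as np
import PIL.Image
import torch
import dnnlib
import legacy

device = torch.device('cuda')
with dnnlib.util.open_url('network.pkl') as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(device)

img = PIL.Image.open('target.png').convert('RGB')
img = img.resize((G.img_resolution, G.img_resolution))
target = torch.tensor(np.array(img).transpose(2, 0, 1), device=device)  # [C,H,W], range [0,255]

w = project(G, target, num_steps=500, device=device, w_name='example')  # [1, 18, C] W+ code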
spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/conv2d_gradfix.py
DELETED
@@ -1,172 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""

import warnings
import contextlib
import torch

# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access

#----------------------------------------------------------------------------

enabled = False  # Enable the custom op by setting this to true.
weight_gradients_disabled = False  # Forcefully disable computation of gradients with respect to the weights.

@contextlib.contextmanager
def no_weight_gradients():
    global weight_gradients_disabled
    old = weight_gradients_disabled
    weight_gradients_disabled = True
    yield
    weight_gradients_disabled = old

#----------------------------------------------------------------------------

def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    if _should_use_custom_op(input):
        return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
    return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)

def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    if _should_use_custom_op(input):
        return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias)
    return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)

#----------------------------------------------------------------------------

def _should_use_custom_op(input):
    assert isinstance(input, torch.Tensor)
    if (not enabled) or (not torch.backends.cudnn.enabled):
        return False
    if input.device.type != 'cuda':
        return False
    if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
        return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False

def _tuple_of_ints(xs, ndim):
    xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
    assert len(xs) == ndim
    assert all(isinstance(x, int) for x in xs)
    return xs

#----------------------------------------------------------------------------

_conv2d_gradfix_cache = dict()

def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    # Parse arguments.
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)

    # Lookup from cache.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in _conv2d_gradfix_cache:
        return _conv2d_gradfix_cache[key]

    # Validate arguments.
    assert groups >= 1
    assert len(weight_shape) == ndim + 2
    assert all(stride[i] >= 1 for i in range(ndim))
    assert all(padding[i] >= 0 for i in range(ndim))
    assert all(dilation[i] >= 0 for i in range(ndim))
    if not transpose:
        assert all(output_padding[i] == 0 for i in range(ndim))
    else:  # transpose
        assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))

    # Helpers.
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        if transpose:
            return [0, 0]
        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]

    # Forward & backward.
    class Conv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert weight.shape == weight_shape
            if not transpose:
                output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else:  # transpose
                output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            grad_input = None
            grad_weight = None
            grad_bias = None

            if ctx.needs_input_grad[0]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
                assert grad_input.shape == input.shape

            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
                assert grad_weight.shape == weight_shape

            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum([0, 2, 3])

            return grad_input, grad_weight, grad_bias

    # Gradient with respect to the weights.
    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input):
            op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight')
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            assert grad_weight.shape == weight_shape
            ctx.save_for_backward(grad_output, input)
            return grad_weight

        @staticmethod
        def backward(ctx, grad2_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad2_grad_output = None
            grad2_input = None

            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert grad2_grad_output.shape == grad_output.shape

            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None)
                assert grad2_input.shape == input.shape

            return grad2_grad_output, grad2_input

    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d

#----------------------------------------------------------------------------
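A short usage sketch: once `enabled` is set, the module is a drop-in for the functional convolutions, and `no_weight_gradients()` lets a caller (e.g. a regularization pass) skip gradient computation with respect to the weights. Tensor shapes below are illustrative; the import path follows the directory shown above:

# Hedged sketch: drop-in use of the gradfix ops (shapes illustrative; requires CUDA).
import torch
from torch_utils.ops import conv2d_gradfix

conv2d_gradfix.enabled = True  # opt in to the custom autograd op on supported PyTorch versions
x = torch.randn(4, 3, 64, 64, device='cuda', requires_grad=True)
w = torch.randn(8, 3, 3, 3, device='cuda', requires_grad=True)

y = conv2d_gradfix.conv2d(x, w, padding=1)
with conv2d_gradfix.no_weight_gradients():
    # Inside this block, backward passes skip the grad-with-respect-to-weight computation.
    y.sum().backward()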
spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h
DELETED
@@ -1,35 +0,0 @@
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/

/*!
* Copyright (c) Facebook, Inc. and its affiliates.
* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
*/

#pragma once
#include <torch/extension.h>

at::Tensor ms_deform_attn_cuda_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step);

std::vector<at::Tensor> ms_deform_attn_cuda_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step);
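For context, headers like this are typically compiled into a Python extension with torch.utils.cpp_extension. A minimal, hedged JIT-build sketch follows; the source file list and extension name are assumptions based on the surrounding repo layout, not confirmed by this diff:

# Hedged sketch: JIT-compiling a CUDA extension that exposes the declarations above.
from torch.utils.cpp_extension import load

ms_deform_attn = load(
    name='MultiScaleDeformableAttention',
    sources=[
        'src/vision.cpp',                   # assumed pybind11 module definition
        'src/cpu/ms_deform_attn_cpu.cpp',   # assumed CPU fallback
        'src/cuda/ms_deform_attn_cuda.cu',  # implements the forward/backward declared in the header
    ],
    extra_cuda_cflags=['-DCUDA_HAS_FP16=1'],
)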
spaces/Eddycrack864/Applio-Inference/julius/utils.py
DELETED
@@ -1,101 +0,0 @@
# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
# Author: adefossez, 2020
"""
Non signal processing related utilities.
"""

import inspect
import typing as tp
import sys
import time


def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
                overrides: dict = {}):
    """
    Return a simple representation string for `obj`.
    If `attrs` is not None, it should be a list of attributes to include.
    """
    params = inspect.signature(obj.__class__).parameters
    attrs_repr = []
    if attrs is None:
        attrs = list(params.keys())
    for attr in attrs:
        display = False
        if attr in overrides:
            value = overrides[attr]
        elif hasattr(obj, attr):
            value = getattr(obj, attr)
        else:
            continue
        if attr in params:
            param = params[attr]
            if param.default is inspect._empty or value != param.default:  # type: ignore
                display = True
        else:
            display = True

        if display:
            attrs_repr.append(f"{attr}={value}")
    return f"{obj.__class__.__name__}({','.join(attrs_repr)})"


class MarkdownTable:
    """
    Simple MarkdownTable generator. The column titles should be large enough
    for the lines content. This will right align everything.

    >>> import io  # we use io purely for test purposes, default is sys.stdout.
    >>> file = io.StringIO()
    >>> table = MarkdownTable(["Item Name", "Price"], file=file)
    >>> table.header(); table.line(["Honey", "5"]); table.line(["Car", "5,000"])
    >>> print(file.getvalue().strip())  # Strip for test purposes
    | Item Name | Price |
    |-----------|-------|
    |     Honey |     5 |
    |       Car | 5,000 |
    """
    def __init__(self, columns, file=sys.stdout):
        self.columns = columns
        self.file = file

    def _writeln(self, line):
        self.file.write("|" + "|".join(line) + "|\n")

    def header(self):
        self._writeln(f" {col} " for col in self.columns)
        self._writeln("-" * (len(col) + 2) for col in self.columns)

    def line(self, line):
        out = []
        for val, col in zip(line, self.columns):
            val = format(val, '>' + str(len(col)))
            out.append(" " + val + " ")
        self._writeln(out)


class Chrono:
    """
    Measures elapsed time, calling `torch.cuda.synchronize` if necessary.
    `Chrono` instances can be used as context managers (e.g. with `with`).
    Upon exit of the block, you can access the duration of the block in seconds
    with the `duration` attribute.

    >>> with Chrono() as chrono:
    ...     _ = sum(range(10_000))
    ...
    >>> print(chrono.duration < 10)  # Should be true unless on a really slow computer.
    True
    """
    def __init__(self):
        self.duration = None

    def __enter__(self):
        self._begin = time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_tracebck):
        import torch
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        self.duration = time.time() - self._begin
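`simple_repr` has no doctest above, so here is a small hedged illustration of its default-suppression behavior; the `Resample` class is made up for this example and is not part of julius:

# Hedged illustration of simple_repr with a made-up class: attributes equal to
# their constructor defaults are omitted from the representation.
class Resample:
    def __init__(self, old_sr: int, new_sr: int, zeros: int = 24):
        self.old_sr = old_sr
        self.new_sr = new_sr
        self.zeros = zeros

    def __repr__(self):
        return simple_repr(self)

print(Resample(44100, 16000))      # Resample(old_sr=44100,new_sr=16000)
print(Resample(44100, 16000, 32))  # Resample(old_sr=44100,new_sr=16000,zeros=32)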
spaces/Edisonymy/buy-or-rent/src/mainbody.py
DELETED
@@ -1,237 +0,0 @@
import streamlit as st
import pandas as pd
import numpy as np
from buy_or_rent import Buy_or_Rent_Model
from scipy.stats import norm, skew
from utils.general import calculate_percentiles
from utils.streamlit_utils import sticky_bottom_bar
from plot import plot_hist_from_list
import hydralit_components as hc
import warnings

warnings.simplefilter(action="ignore", category=FutureWarning)


def generate_main_body(
    model: Buy_or_Rent_Model,
    mortgage_interest_annual_list=np.array([0.05]),
    property_price_growth_annual_list=np.array([0.026]),
    rent_increase_list=np.array([0.01325]),
    investment_return_annual_list=np.array([0.06]),
    years_until_sell_list=np.array([20]),
):

    adjust_for_inflation_bool = st.sidebar.toggle("Adjust for inflation (2% a year)")
    # use_present_value = st.toggle('Use present value instead of future value')
    # define what option labels and icons to display
    option_data = [
        {"icon": "bi bi-calculator", "label": "Typical Outcome"},
        {"icon": "bi bi-bar-chart-line", "label": "Simulation Results"},
    ]

    # override the theme, else it will use the Streamlit applied theme
    over_theme = {
        "txc_inactive": "black",
        "menu_background": "#b8d7ed",
        "txc_active": "black",
        "option_active": "white",
    }
    font_fmt = {"font-class": "h2", "font-size": "100%"}

    # display a horizontal version of the option bar
    op = hc.option_bar(
        option_definition=option_data,
        key="PrimaryOption",
        override_theme=over_theme,
        font_styling=font_fmt,
        horizontal_orientation=True,
    )
    n_samples_simulation = 1000

    if op == "Simulation Results":
        n_samples_simulation = st.slider(
            "Number of Simulation Samples:",
            min_value=500,
            max_value=5000,
            value=1000,
            step=100,
        )

    model.samples_rent_increase = np.random.choice(rent_increase_list, n_samples_simulation)
    model.samples_property_price_growth_annual = np.random.choice(property_price_growth_annual_list, n_samples_simulation)
    model.samples_mortgage_interest_annual = np.random.choice(mortgage_interest_annual_list, n_samples_simulation)
    model.samples_investment_return_annual = np.random.choice(investment_return_annual_list, n_samples_simulation)
    model.samples_years_until_sell = np.random.choice(years_until_sell_list, n_samples_simulation)

    model.run_calculations(adjust_for_inflation_bool=adjust_for_inflation_bool)
    # save simulation results
    buying_npv_list = model.buying_npv
    buying_fv_list = model.buying_fv
    renting_fv_list = model.renting_fv
    mortgage_interest_annual_list_chosen = model.samples_mortgage_interest_annual
    property_price_growth_annual_list_chosen = model.samples_property_price_growth_annual
    rent_increase_list_chosen = model.samples_rent_increase
    investment_return_annual_list_chosen = model.samples_investment_return_annual
    years_until_sell_list_chosen = model.samples_years_until_sell

    # typical scenario
    model.samples_rent_increase = np.median(rent_increase_list)
    model.samples_property_price_growth_annual = np.median(property_price_growth_annual_list)
    model.samples_mortgage_interest_annual = np.median(mortgage_interest_annual_list)
    model.samples_investment_return_annual = np.median(investment_return_annual_list)
    model.samples_years_until_sell = int(np.median(years_until_sell_list))
    model.run_calculations(adjust_for_inflation_bool=adjust_for_inflation_bool)

    if model.buying_fv > model.renting_fv:
        text = "Return is typically higher if you <strong>buy</strong>."
        if np.std(buying_fv_list) > np.std(renting_fv_list):
            text += " However, it is less risky if you <strong>rent</strong>."
        else:
            text += " It is also less risky if you <strong>buy</strong>."
    else:
        text = "Return is typically higher if you <strong>rent and invest the deposit</strong>."
        if np.std(buying_fv_list) > np.std(renting_fv_list):
            text += " It is also less risky if you <strong>rent</strong>."
        else:
            text += " However, it is less risky if you <strong>buy</strong>."

    sticky_bottom_bar(text)


    if op == "Typical Outcome":
        left_column, right_column = st.columns(2)
        with left_column:
            st.write(
                f"### Buy - Asset future value after {model.samples_years_until_sell} years"
            )
            st.markdown(
                f"**Typical Total Asset Value: £{model.buying_fv:,.0f}**",
                help="All components are converted to future value at the time of sale.",
            )
            st.markdown(f"***Breakdown:***")
            st.markdown(f" - Capital Invested (deposit): £{model.DEPOSIT:,.0f}")
            st.markdown(
                f" - Capital Invested (buying cost + stamp duty, if any): £{model.BUYING_COST_FLAT + model.STAMP_DUTY:,.0f}"
            )
            st.markdown(
                f" - Property Price at Sale: :green[£{model.future_house_price:,.0f}]",
                help="Calculated using the property price growth rate set in the left sidebar.",
            )
            st.markdown(
                f" - Selling cost (including Capital Gains Tax): :red[ -£{model.SELLING_COST:,.0f}]",
                help="Total expenses incurred when selling a property. These costs typically include real estate agent commissions, legal fees, advertising expenses, and any necessary repairs or renovations to prepare the property for sale.",
            )
            st.markdown(
                f" - Total maintenance and service costs: :red[ -£{model.fv_ongoing_cost:,.0f}]",
                help="Future value at the time of sale for the total cost associated with maintaining and servicing a property, including expenses such as property management fees, maintenance fees, and other related charges. Assumed to grow at inflation rate. Future value is determined by the discount rate, which is assumed to be equal to the investment return.",
            )
            if model.COUNTRY == "US":
                st.markdown(
                    f" - Total property tax: :red[ -£{model.fv_property_tax:,.0f}]",
                    help="Future value at the time of sale for the total property tax paid",
                )
            st.markdown(
                f" - Total Mortgage Payments: :red[ -£{model.fv_mortgage_payments:,.0f}]",
                help="This is higher than the sum of all mortgage payments since the payments are converted to their future value at the time of sale. Future value is determined by the discount rate, which is assumed to be equal to the investment return.",
            )
            st.markdown(
                f" - Total Rent Saved (future value at time of sale): :green[£{model.rent_fv:,.0f}]",
                help="This is higher than the sum of all rent payments that would have been paid since the payments are converted to their future value at the time of sale. Future value is determined by the discount rate, which is assumed to be equal to the investment return.",
            )

        with right_column:
            st.write(
                f"### Rent and invest - Asset future value after {model.samples_years_until_sell} years"
            )
            st.markdown(
                f"**Typical Total Asset Value: £{model.renting_fv:,.0f}**",
                help="All components are converted to future value at the time of sale.",
            )
            st.markdown(f"***Breakdown:***")
            st.markdown(f" - Capital Invested (deposit): £{model.DEPOSIT:,.0f}")
            st.markdown(
                f" - Capital Invested (buying cost + stamp duty, if any): £{model.BUYING_COST_FLAT + model.STAMP_DUTY:,.0f}"
            )
            st.markdown(
                f" - Capital Gains Tax: :red[-£{model.cgt_investment:,.0f}]",
                help="Your tax rate is determined by the annual salary set in the left sidebar.",
            )
            if (
                model.renting_fv
                - (model.DEPOSIT + model.BUYING_COST_FLAT + model.STAMP_DUTY)
                >= 0
            ):
                st.markdown(
                    f" - Assumed Typical Capital Growth: :green[£{model.renting_fv - (model.DEPOSIT + model.BUYING_COST_FLAT + model.STAMP_DUTY):,.0f}]",
                    help="Calculated with the investment return rate provided in the left sidebar.",
                )
            else:
                st.markdown(
                    f" - Assumed Typical Capital Growth: :red[£{model.renting_fv - (model.DEPOSIT + model.BUYING_COST_FLAT + model.STAMP_DUTY):,.0f}]"
                )

    if op == "Simulation Results":

        plot_hist_from_list(
            [buying_fv_list, renting_fv_list],
            st,
            figsize=(7, 2),
            legends=["Buying", "Renting"],
            main_colors=["orange", "blue"],
            title="Future Asset Value - Simulation Results",
            xlabel="Asset Value",
        )
        st.markdown(
            "<span style='font-size: 14px; font-style: italic;'>Simulation results for future asset value. Using future value at 'years until sell mean' in your assumptions.</span>",
            unsafe_allow_html=True,
        )
        plot_hist_from_list(
            [buying_npv_list],
            st,
            plot_below_zero=True,
            clip=(0, None),
            main_colors=["blue"],
            secondary_color="orange",
            title="Net Present Value of Buying - Simulation Results",
            xlabel="Net Present Value of Buying",
        )
        st.markdown(
            "<span style='font-size: 14px; font-style: italic;'>Negative = Renting is better; Positive = Buying is better.</span>",
            unsafe_allow_html=True,
        )
        st.markdown(
            "<span style='font-size: 14px; font-style: italic;'>Net Present Value represents the net gain/loss, in present value, that results from purchasing the property. It is calculated as (PV of future house sale price + PV of rent saved - PV of mortgage payments - PV of ongoing costs - deposit - buying costs - stamp duty - PV of selling costs). If it is positive, then it is financially better to buy a property. Present value is calculated using a future discount rate equal to your assumed investment return. This is equivalent to assuming that any amount you save on rent or mortgage will be invested. </span>",
            unsafe_allow_html=True,
        )

        results_dict = {
            "buying_npv": buying_npv_list,
            "mortgage_interest_annual": mortgage_interest_annual_list_chosen,
            "property_price_growth_annual": property_price_growth_annual_list_chosen,
            "rent_increase": rent_increase_list_chosen,
            "investment_return_annual": investment_return_annual_list_chosen,
            "years_until_sell": years_until_sell_list_chosen,
        }
        results_df = pd.DataFrame(results_dict)
        percentiles_df = calculate_percentiles(buying_npv_list, model.DEPOSIT)
        with st.expander("### Net Present Value Statistics", expanded=False):
            st.write(
                f'- Buying is better {100-percentiles_df.loc[5,"Percentile"]:.0f}% of the time'
            )
            st.write(f"- Mean: £{np.mean(buying_npv_list):,.0f}")
            st.write(
                f"- Mean (as % of deposit): {np.mean(buying_npv_list)/model.DEPOSIT*100:.0f}%"
            )
            st.write(f"- Standard Deviation: £{np.std(buying_npv_list):,.0f}")
            st.write(
                f"- Standard Deviation (as % of deposit): {np.std(buying_npv_list)/model.DEPOSIT*100:.0f}%"
            )
            st.write(f"- Skew: {skew(buying_npv_list):.2f}")
        with st.expander(
            "Correlations Between Parameters and Buying NPV", expanded=False
        ):
            st.write(results_df.corr().iloc[0, 1:])
        # return percentiles_df, results_df
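The Monte-Carlo step above is simply independent resampling from user-supplied scenario lists, one draw per simulated run and per parameter. A self-contained sketch of that pattern (names and values are illustrative, not the app's API):

# Hedged, self-contained sketch of the scenario-sampling pattern used above.
import numpy as np

rng = np.random.default_rng(0)
n = 1000
scenarios = {
    "mortgage_interest_annual": np.array([0.04, 0.05, 0.06]),
    "investment_return_annual": np.array([0.04, 0.06, 0.08]),
}
# One independent draw per simulated run, per parameter.
samples = {k: rng.choice(v, size=n) for k, v in scenarios.items()}
print({k: v[:5] for k, v in samples.items()})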
spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/beit.py
DELETED
@@ -1,671 +0,0 @@
""" Vision Transformer (ViT) in PyTorch

A PyTorch implementation of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929

The official jax code is released and available at https://github.com/google-research/vision_transformer

Status/TODO:
* Models updated to be compatible with official impl. Args added to support backward compat for old PyTorch weights.
* Weights ported from official jax impl for 384x384 base and small models, 16x16 and 32x32 patches.
* Trained (supervised on ImageNet-1k) my custom 'small' patch model to 77.9, 'base' to 79.4 top-1 with this code.
* Hopefully find time and GPUs for SSL or unsupervised pretraining on OpenImages w/ ImageNet fine-tune in future.

Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert

Hacked together by / Copyright 2020 Ross Wightman
"""
import warnings
import math
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import drop_path, to_2tuple, trunc_normal_


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic',
        'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
        **kwargs
    }


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        # x = self.drop(x)
        # (first dropout commented out to match the original BERT implementation)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., window_size=None, attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        if window_size:
            self.window_size = window_size
            self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
            # cls to token & token 2 cls & cls to cls

            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(window_size[0])
            coords_w = torch.arange(window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * window_size[1] - 1
            relative_position_index = \
                torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            relative_position_index[0, 0:] = self.num_relative_distance - 3
            relative_position_index[0:, 0] = self.num_relative_distance - 2
            relative_position_index[0, 0] = self.num_relative_distance - 1

            self.register_buffer("relative_position_index", relative_position_index)

            # trunc_normal_(self.relative_position_bias_table, std=.0)
        else:
            self.window_size = None
            self.relative_position_bias_table = None
            self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, rel_pos_bias=None, training_window_size=None):
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
        # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        if self.relative_position_bias_table is not None:
            if training_window_size == self.window_size:
                relative_position_bias = \
                    self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                        self.window_size[0] * self.window_size[1] + 1,
                        self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
                relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
                attn = attn + relative_position_bias.unsqueeze(0)
            else:
                training_window_size = tuple(training_window_size.tolist())
                new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3
                # new_num_relative_distance covers every possible relative position, including cls-to-cls, token-to-cls and cls-to-token
                new_relative_position_bias_table = F.interpolate(
                    self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads,
                                                                                 2 * self.window_size[0] - 1,
                                                                                 2 * self.window_size[1] - 1),
                    size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic',
                    align_corners=False)
                new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads,
                                                                                         new_num_relative_distance - 3).permute(
                    1, 0)
                new_relative_position_bias_table = torch.cat(
                    [new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0)

                # get pair-wise relative position index for each token inside the window
                coords_h = torch.arange(training_window_size[0])
                coords_w = torch.arange(training_window_size[1])
                coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
                coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
                relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
                relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
                relative_coords[:, :, 0] += training_window_size[0] - 1  # shift to start from 0
                relative_coords[:, :, 1] += training_window_size[1] - 1
                relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1
                relative_position_index = \
                    torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2,
                                dtype=relative_coords.dtype)
                relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
                relative_position_index[0, 0:] = new_num_relative_distance - 3
                relative_position_index[0:, 0] = new_num_relative_distance - 2
                relative_position_index[0, 0] = new_num_relative_distance - 1

                relative_position_bias = \
                    new_relative_position_bias_table[relative_position_index.view(-1)].view(
                        training_window_size[0] * training_window_size[1] + 1,
                        training_window_size[0] * training_window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
                relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
                attn = attn + relative_position_bias.unsqueeze(0)

        if rel_pos_bias is not None:
            attn = attn + rel_pos_bias

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 window_size=None, attn_head_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if init_values is not None:
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x, rel_pos_bias=None, training_window_size=None):
        if self.gamma_1 is None:
            x = x + self.drop_path(
                self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, training_window_size=training_window_size))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias,
                                                            training_window_size=training_window_size))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """

    def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches_w = self.patch_shape[0]
        self.num_patches_h = self.patch_shape[1]
        # the so-called patch_shape is the patch shape during pre-training
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, position_embedding=None, **kwargs):
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        Hp, Wp = x.shape[2], x.shape[3]

        if position_embedding is not None:
            # interpolate the position embedding to the corresponding size
            position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3,
                                                                                                                  1, 2)
            position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic')
            x = x + position_embedding

        x = x.flatten(2).transpose(1, 2)
        return x, (Hp, Wp)


class HybridEmbed(nn.Module):
    """ CNN Feature Map Embedding
    Extract feature map from CNN, flatten, project to embedding dim.
    """

    def __init__(self, backbone, img_size=[224, 224], feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        self.img_size = img_size
        self.backbone = backbone
        if feature_size is None:
            with torch.no_grad():
                # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
                # map for all networks, the feature metadata has reliable channel and stride info, but using
                # stride to calc feature dim requires info about padding of each stage that isn't captured.
                training = backbone.training
                if training:
                    backbone.eval()
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
                feature_size = o.shape[-2:]
                feature_dim = o.shape[1]
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            feature_dim = self.backbone.feature_info.channels()[-1]
        self.num_patches = feature_size[0] * feature_size[1]
        self.proj = nn.Linear(feature_dim, embed_dim)

    def forward(self, x):
        x = self.backbone(x)[-1]
        x = x.flatten(2).transpose(1, 2)
        x = self.proj(x)
        return x


class RelativePositionBias(nn.Module):

    def __init__(self, window_size, num_heads):
        super().__init__()
        self.window_size = window_size
        self.num_heads = num_heads
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = \
            torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index)

        # trunc_normal_(self.relative_position_bias_table, std=.02)

    def forward(self, training_window_size):
        if training_window_size == self.window_size:
            relative_position_bias = \
                self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                    self.window_size[0] * self.window_size[1] + 1,
                    self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
            relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        else:
            training_window_size = tuple(training_window_size.tolist())
            new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3
            # new_num_relative_distance covers every possible relative position, including cls-to-cls, token-to-cls and cls-to-token
            new_relative_position_bias_table = F.interpolate(
                self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads,
                                                                             2 * self.window_size[0] - 1,
                                                                             2 * self.window_size[1] - 1),
                size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic',
                align_corners=False)
            new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads,
                                                                                     new_num_relative_distance - 3).permute(
                1, 0)
            new_relative_position_bias_table = torch.cat(
                [new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0)

            # get pair-wise relative position index for each token inside the window
            coords_h = torch.arange(training_window_size[0])
            coords_w = torch.arange(training_window_size[1])
            coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
            coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
            relative_coords[:, :, 0] += training_window_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += training_window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1
            relative_position_index = \
                torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2,
                            dtype=relative_coords.dtype)
            relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
            relative_position_index[0, 0:] = new_num_relative_distance - 3
            relative_position_index[0:, 0] = new_num_relative_distance - 2
            relative_position_index[0, 0] = new_num_relative_distance - 1

            relative_position_bias = \
                new_relative_position_bias_table[relative_position_index.view(-1)].view(
                    training_window_size[0] * training_window_size[1] + 1,
                    training_window_size[0] * training_window_size[1] + 1, -1)  # Wh*Ww,Wh*Ww,nH
            relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww

        return relative_position_bias


class BEiT(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """

    def __init__(self,
                 img_size=[224, 224],
                 patch_size=16,
                 in_chans=3,
                 num_classes=80,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 hybrid_backbone=None,
                 norm_layer=None,
                 init_values=None,
                 use_abs_pos_emb=False,
                 use_rel_pos_bias=False,
                 use_shared_rel_pos_bias=False,
                 use_checkpoint=True,
                 pretrained=None,
                 out_features=None,
                 ):

        super(BEiT, self).__init__()

        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.use_checkpoint = use_checkpoint

        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(
                hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
        else:
            self.patch_embed = PatchEmbed(
                img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.out_features = out_features
        self.out_indices = [int(name[5:]) for name in out_features]

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)

        self.use_shared_rel_pos_bias = use_shared_rel_pos_bias
        if use_shared_rel_pos_bias:
            self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
        else:
            self.rel_pos_bias = None

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
            for i in range(depth)])

        # trunc_normal_(self.mask_token, std=.02)

        if patch_size == 16:
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
                # nn.SyncBatchNorm(embed_dim),
                nn.BatchNorm2d(embed_dim),
                nn.GELU(),
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )

            self.fpn2 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )

            self.fpn3 = nn.Identity()

            self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
        elif patch_size == 8:
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
            )

            self.fpn2 = nn.Identity()

            self.fpn3 = nn.Sequential(
                nn.MaxPool2d(kernel_size=2, stride=2),
            )

            self.fpn4 = nn.Sequential(
                nn.MaxPool2d(kernel_size=4, stride=4),
            )

        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
|
504 |
-
self.fix_init_weight()
|
505 |
-
|
506 |
-
def fix_init_weight(self):
|
507 |
-
def rescale(param, layer_id):
|
508 |
-
param.div_(math.sqrt(2.0 * layer_id))
|
509 |
-
|
510 |
-
for layer_id, layer in enumerate(self.blocks):
|
511 |
-
rescale(layer.attn.proj.weight.data, layer_id + 1)
|
512 |
-
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
|
513 |
-
|
514 |
-
def _init_weights(self, m):
|
515 |
-
if isinstance(m, nn.Linear):
|
516 |
-
trunc_normal_(m.weight, std=.02)
|
517 |
-
if isinstance(m, nn.Linear) and m.bias is not None:
|
518 |
-
nn.init.constant_(m.bias, 0)
|
519 |
-
elif isinstance(m, nn.LayerNorm):
|
520 |
-
nn.init.constant_(m.bias, 0)
|
521 |
-
nn.init.constant_(m.weight, 1.0)
|
522 |
-
|
523 |
-
'''
|
524 |
-
def init_weights(self):
|
525 |
-
"""Initialize the weights in backbone.
|
526 |
-
|
527 |
-
Args:
|
528 |
-
pretrained (str, optional): Path to pre-trained weights.
|
529 |
-
Defaults to None.
|
530 |
-
"""
|
531 |
-
logger = get_root_logger()
|
532 |
-
|
533 |
-
if self.pos_embed is not None:
|
534 |
-
trunc_normal_(self.pos_embed, std=.02)
|
535 |
-
trunc_normal_(self.cls_token, std=.02)
|
536 |
-
self.apply(self._init_weights)
|
537 |
-
self.fix_init_weight()
|
538 |
-
|
539 |
-
if self.init_cfg is None:
|
540 |
-
logger.warn(f'No pre-trained weights for '
|
541 |
-
f'{self.__class__.__name__}, '
|
542 |
-
f'training start from scratch')
|
543 |
-
else:
|
544 |
-
assert 'checkpoint' in self.init_cfg, f'Only support ' \
|
545 |
-
f'specify `Pretrained` in ' \
|
546 |
-
f'`init_cfg` in ' \
|
547 |
-
f'{self.__class__.__name__} '
|
548 |
-
logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}")
|
549 |
-
load_checkpoint(self,
|
550 |
-
filename=self.init_cfg['checkpoint'],
|
551 |
-
strict=False,
|
552 |
-
logger=logger,
|
553 |
-
beit_spec_expand_rel_pos = self.use_rel_pos_bias,
|
554 |
-
)
|
555 |
-
'''
|
556 |
-
|
557 |
-
def get_num_layers(self):
|
558 |
-
return len(self.blocks)
|
559 |
-
|
560 |
-
@torch.jit.ignore
|
561 |
-
def no_weight_decay(self):
|
562 |
-
return {'pos_embed', 'cls_token'}
|
563 |
-
|
564 |
-
def forward_features(self, x):
|
565 |
-
B, C, H, W = x.shape
|
566 |
-
x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None)
|
567 |
-
# Hp, Wp are HW for patches
|
568 |
-
batch_size, seq_len, _ = x.size()
|
569 |
-
|
570 |
-
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
|
571 |
-
if self.pos_embed is not None:
|
572 |
-
cls_tokens = cls_tokens + self.pos_embed[:, :1, :]
|
573 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
574 |
-
x = self.pos_drop(x)
|
575 |
-
|
576 |
-
features = []
|
577 |
-
training_window_size = torch.tensor([Hp, Wp])
|
578 |
-
|
579 |
-
rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None
|
580 |
-
|
581 |
-
for i, blk in enumerate(self.blocks):
|
582 |
-
if self.use_checkpoint:
|
583 |
-
x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size)
|
584 |
-
else:
|
585 |
-
x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size)
|
586 |
-
if i in self.out_indices:
|
587 |
-
xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
|
588 |
-
features.append(xp.contiguous())
|
589 |
-
|
590 |
-
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
|
591 |
-
for i in range(len(features)):
|
592 |
-
features[i] = ops[i](features[i])
|
593 |
-
|
594 |
-
feat_out = {}
|
595 |
-
|
596 |
-
for name, value in zip(self.out_features, features):
|
597 |
-
feat_out[name] = value
|
598 |
-
|
599 |
-
return feat_out
|
600 |
-
|
601 |
-
def forward(self, x):
|
602 |
-
x = self.forward_features(x)
|
603 |
-
return x
|
604 |
-
|
605 |
-
|
606 |
-
def beit_base_patch16(pretrained=False, **kwargs):
|
607 |
-
model = BEiT(
|
608 |
-
patch_size=16,
|
609 |
-
embed_dim=768,
|
610 |
-
depth=12,
|
611 |
-
num_heads=12,
|
612 |
-
mlp_ratio=4,
|
613 |
-
qkv_bias=True,
|
614 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
615 |
-
init_values=None,
|
616 |
-
**kwargs)
|
617 |
-
model.default_cfg = _cfg()
|
618 |
-
return model
|
619 |
-
|
620 |
-
def beit_large_patch16(pretrained=False, **kwargs):
|
621 |
-
model = BEiT(
|
622 |
-
patch_size=16,
|
623 |
-
embed_dim=1024,
|
624 |
-
depth=24,
|
625 |
-
num_heads=16,
|
626 |
-
mlp_ratio=4,
|
627 |
-
qkv_bias=True,
|
628 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
629 |
-
init_values=None,
|
630 |
-
**kwargs)
|
631 |
-
model.default_cfg = _cfg()
|
632 |
-
return model
|
633 |
-
|
634 |
-
def dit_base_patch16(pretrained=False, **kwargs):
|
635 |
-
model = BEiT(
|
636 |
-
patch_size=16,
|
637 |
-
embed_dim=768,
|
638 |
-
depth=12,
|
639 |
-
num_heads=12,
|
640 |
-
mlp_ratio=4,
|
641 |
-
qkv_bias=True,
|
642 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
643 |
-
init_values=0.1,
|
644 |
-
**kwargs)
|
645 |
-
model.default_cfg = _cfg()
|
646 |
-
return model
|
647 |
-
|
648 |
-
def dit_large_patch16(pretrained=False, **kwargs):
|
649 |
-
model = BEiT(
|
650 |
-
patch_size=16,
|
651 |
-
embed_dim=1024,
|
652 |
-
depth=24,
|
653 |
-
num_heads=16,
|
654 |
-
mlp_ratio=4,
|
655 |
-
qkv_bias=True,
|
656 |
-
norm_layer=partial(nn.LayerNorm, eps=1e-6),
|
657 |
-
init_values=1e-5,
|
658 |
-
**kwargs)
|
659 |
-
model.default_cfg = _cfg()
|
660 |
-
return model
|
661 |
-
|
662 |
-
if __name__ == '__main__':
|
663 |
-
model = BEiT(use_checkpoint=True, use_shared_rel_pos_bias=True)
|
664 |
-
model = model.to("cuda:0")
|
665 |
-
input1 = torch.rand(2, 3, 512, 762).to("cuda:0")
|
666 |
-
input2 = torch.rand(2, 3, 800, 1200).to("cuda:0")
|
667 |
-
input3 = torch.rand(2, 3, 720, 1000).to("cuda:0")
|
668 |
-
output1 = model(input1)
|
669 |
-
output2 = model(input2)
|
670 |
-
output3 = model(input3)
|
671 |
-
print("all done")
spaces/EronSamez/RVC_HFmeu/demucs/__main__.py
DELETED
@@ -1,317 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import json
import math
import os
import sys
import time
from dataclasses import dataclass, field

import torch as th
from torch import distributed, nn
from torch.nn.parallel.distributed import DistributedDataParallel

from .augment import FlipChannels, FlipSign, Remix, Scale, Shift
from .compressed import get_compressed_datasets
from .model import Demucs
from .parser import get_name, get_parser
from .raw import Rawset
from .repitch import RepitchedWrapper
from .pretrained import load_pretrained, SOURCES
from .tasnet import ConvTasNet
from .test import evaluate
from .train import train_model, validate_model
from .utils import (human_seconds, load_model, save_model, get_state,
                    save_state, sizeof_fmt, get_quantizer)
from .wav import get_wav_datasets, get_musdb_wav_datasets


@dataclass
class SavedState:
    metrics: list = field(default_factory=list)
    last_state: dict = None
    best_state: dict = None
    optimizer: dict = None


def main():
    parser = get_parser()
    args = parser.parse_args()
    name = get_name(parser, args)
    print(f"Experiment {name}")

    if args.musdb is None and args.rank == 0:
        print(
            "You must provide the path to the MusDB dataset with the --musdb flag. "
            "To download the MusDB dataset, see https://sigsep.github.io/datasets/musdb.html.",
            file=sys.stderr)
        sys.exit(1)

    eval_folder = args.evals / name
    eval_folder.mkdir(exist_ok=True, parents=True)
    args.logs.mkdir(exist_ok=True)
    metrics_path = args.logs / f"{name}.json"
    args.checkpoints.mkdir(exist_ok=True, parents=True)
    args.models.mkdir(exist_ok=True, parents=True)

    if args.device is None:
        device = "cpu"
        if th.cuda.is_available():
            device = "cuda"
    else:
        device = args.device

    th.manual_seed(args.seed)
    # Prevents too many threads from being started when running `museval`,
    # as that can be quite inefficient on NUMA architectures.
    os.environ["OMP_NUM_THREADS"] = "1"
    os.environ["MKL_NUM_THREADS"] = "1"

    if args.world_size > 1:
        if device != "cuda" and args.rank == 0:
            print("Error: distributed training is only available with cuda device", file=sys.stderr)
            sys.exit(1)
        th.cuda.set_device(args.rank % th.cuda.device_count())
        distributed.init_process_group(backend="nccl",
                                       init_method="tcp://" + args.master,
                                       rank=args.rank,
                                       world_size=args.world_size)

    checkpoint = args.checkpoints / f"{name}.th"
    checkpoint_tmp = args.checkpoints / f"{name}.th.tmp"
    if args.restart and checkpoint.exists() and args.rank == 0:
        checkpoint.unlink()

    if args.test or args.test_pretrained:
        args.epochs = 1
        args.repeat = 0
        if args.test:
            model = load_model(args.models / args.test)
        else:
            model = load_pretrained(args.test_pretrained)
    elif args.tasnet:
        model = ConvTasNet(audio_channels=args.audio_channels,
                           samplerate=args.samplerate, X=args.X,
                           segment_length=4 * args.samples,
                           sources=SOURCES)
    else:
        model = Demucs(
            audio_channels=args.audio_channels,
            channels=args.channels,
            context=args.context,
            depth=args.depth,
            glu=args.glu,
            growth=args.growth,
            kernel_size=args.kernel_size,
            lstm_layers=args.lstm_layers,
            rescale=args.rescale,
            rewrite=args.rewrite,
            stride=args.conv_stride,
            resample=args.resample,
            normalize=args.normalize,
            samplerate=args.samplerate,
            segment_length=4 * args.samples,
            sources=SOURCES,
        )
    model.to(device)
    if args.init:
        model.load_state_dict(load_pretrained(args.init).state_dict())

    if args.show:
        print(model)
        size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters()))
        print(f"Model size {size}")
        return

    try:
        saved = th.load(checkpoint, map_location='cpu')
    except IOError:
        saved = SavedState()

    optimizer = th.optim.Adam(model.parameters(), lr=args.lr)

    quantizer = get_quantizer(model, args, optimizer)

    if saved.last_state is not None:
        model.load_state_dict(saved.last_state, strict=False)
    if saved.optimizer is not None:
        optimizer.load_state_dict(saved.optimizer)

    model_name = f"{name}.th"
    if args.save_model:
        if args.rank == 0:
            model.to("cpu")
            model.load_state_dict(saved.best_state)
            save_model(model, quantizer, args, args.models / model_name)
        return
    elif args.save_state:
        model_name = f"{args.save_state}.th"
        if args.rank == 0:
            model.to("cpu")
            model.load_state_dict(saved.best_state)
            state = get_state(model, quantizer)
            save_state(state, args.models / model_name)
        return

    if args.rank == 0:
        done = args.logs / f"{name}.done"
        if done.exists():
            done.unlink()

    augment = [Shift(args.data_stride)]
    if args.augment:
        augment += [FlipSign(), FlipChannels(), Scale(),
                    Remix(group_size=args.remix_group_size)]
    augment = nn.Sequential(*augment).to(device)
    print("Augmentation pipeline:", augment)

    if args.mse:
        criterion = nn.MSELoss()
    else:
        criterion = nn.L1Loss()

    # Set the number of samples so that all convolution windows are full.
    # Prevents hard-to-debug mistakes with the prediction being shifted
    # compared to the input mixture.
    samples = model.valid_length(args.samples)
    print(f"Number of training samples adjusted to {samples}")
    samples = samples + args.data_stride
    if args.repitch:
        # We need a bit more audio samples, to account for potential
        # tempo change.
        samples = math.ceil(samples / (1 - 0.01 * args.max_tempo))

    args.metadata.mkdir(exist_ok=True, parents=True)
    if args.raw:
        train_set = Rawset(args.raw / "train",
                           samples=samples,
                           channels=args.audio_channels,
                           streams=range(1, len(model.sources) + 1),
                           stride=args.data_stride)

        valid_set = Rawset(args.raw / "valid", channels=args.audio_channels)
    elif args.wav:
        train_set, valid_set = get_wav_datasets(args, samples, model.sources)
    elif args.is_wav:
        train_set, valid_set = get_musdb_wav_datasets(args, samples, model.sources)
    else:
        train_set, valid_set = get_compressed_datasets(args, samples)

    if args.repitch:
        train_set = RepitchedWrapper(
            train_set,
            proba=args.repitch,
            max_tempo=args.max_tempo)

    best_loss = float("inf")
    for epoch, metrics in enumerate(saved.metrics):
        print(f"Epoch {epoch:03d}: "
              f"train={metrics['train']:.8f} "
              f"valid={metrics['valid']:.8f} "
              f"best={metrics['best']:.4f} "
              f"ms={metrics.get('true_model_size', 0):.2f}MB "
              f"cms={metrics.get('compressed_model_size', 0):.2f}MB "
              f"duration={human_seconds(metrics['duration'])}")
        best_loss = metrics['best']

    if args.world_size > 1:
        dmodel = DistributedDataParallel(model,
                                         device_ids=[th.cuda.current_device()],
                                         output_device=th.cuda.current_device())
    else:
        dmodel = model

    for epoch in range(len(saved.metrics), args.epochs):
        begin = time.time()
        model.train()
        train_loss, model_size = train_model(
            epoch, train_set, dmodel, criterion, optimizer, augment,
            quantizer=quantizer,
            batch_size=args.batch_size,
            device=device,
            repeat=args.repeat,
            seed=args.seed,
            diffq=args.diffq,
            workers=args.workers,
            world_size=args.world_size)
        model.eval()
        valid_loss = validate_model(
            epoch, valid_set, model, criterion,
            device=device,
            rank=args.rank,
            split=args.split_valid,
            overlap=args.overlap,
            world_size=args.world_size)

        ms = 0
        cms = 0
        if quantizer and args.rank == 0:
            ms = quantizer.true_model_size()
            cms = quantizer.compressed_model_size(num_workers=min(40, args.world_size * 10))

        duration = time.time() - begin
        if valid_loss < best_loss and ms <= args.ms_target:
            best_loss = valid_loss
            saved.best_state = {
                key: value.to("cpu").clone()
                for key, value in model.state_dict().items()
            }

        saved.metrics.append({
            "train": train_loss,
            "valid": valid_loss,
            "best": best_loss,
            "duration": duration,
            "model_size": model_size,
            "true_model_size": ms,
            "compressed_model_size": cms,
        })
        if args.rank == 0:
            json.dump(saved.metrics, open(metrics_path, "w"))

        saved.last_state = model.state_dict()
        saved.optimizer = optimizer.state_dict()
        if args.rank == 0 and not args.test:
            th.save(saved, checkpoint_tmp)
            checkpoint_tmp.rename(checkpoint)

        print(f"Epoch {epoch:03d}: "
              f"train={train_loss:.8f} valid={valid_loss:.8f} best={best_loss:.4f} ms={ms:.2f}MB "
              f"cms={cms:.2f}MB "
              f"duration={human_seconds(duration)}")

    if args.world_size > 1:
        distributed.barrier()

    del dmodel
    model.load_state_dict(saved.best_state)
    if args.eval_cpu:
        device = "cpu"
        model.to(device)
    model.eval()
    evaluate(model, args.musdb, eval_folder,
             is_wav=args.is_wav,
             rank=args.rank,
             world_size=args.world_size,
             device=device,
             save=args.save,
             split=args.split_valid,
             shifts=args.shifts,
             overlap=args.overlap,
             workers=args.eval_workers)
    model.to("cpu")
    if args.rank == 0:
        if not (args.test or args.test_pretrained):
            save_model(model, quantizer, args, args.models / model_name)
        print("done")
        done.write_text("done")


if __name__ == "__main__":
    main()
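
One detail of this training loop worth calling out is the two-step checkpoint write: the state is saved to a `.th.tmp` sibling first and then renamed over the real checkpoint, so an interruption mid-save can never leave a truncated `.th` file behind. A self-contained sketch of the same pattern follows; the helper name and payload are illustrative, not part of the script above.

from pathlib import Path

import torch as th


def save_checkpoint_atomically(state: dict, checkpoint: Path) -> None:
    """Write to a temporary file first, then rename it over the target.

    On POSIX filesystems the rename is atomic, so readers either see the
    old checkpoint or the complete new one, never a half-written file.
    """
    tmp = checkpoint.with_suffix(checkpoint.suffix + ".tmp")
    th.save(state, tmp)
    tmp.rename(checkpoint)


save_checkpoint_atomically({"step": 0}, Path("model.th"))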
spaces/EuroPython2022/pyro-vision/app.py
DELETED
@@ -1,72 +0,0 @@
# Copyright (C) 2022, Pyronear.

# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0> for full license details.

import argparse
import json

import gradio as gr
import numpy as np
import onnxruntime
from huggingface_hub import hf_hub_download
from PIL import Image

REPO = "pyronear/rexnet1_0x"


# Download model config & checkpoint
with open(hf_hub_download(REPO, filename="config.json"), "rb") as f:
    cfg = json.load(f)

ort_session = onnxruntime.InferenceSession(hf_hub_download(REPO, filename="model.onnx"))


def preprocess_image(pil_img: Image.Image) -> np.ndarray:
    """Preprocess an image for inference

    Args:
        pil_img: a valid pillow image

    Returns:
        the resized and normalized image of shape (1, C, H, W)
    """

    # Resizing (PIL takes (W, H) order for resizing)
    img = pil_img.resize(cfg["input_shape"][-2:][::-1], Image.BILINEAR)
    # (H, W, C) --> (C, H, W)
    img = np.asarray(img).transpose((2, 0, 1)).astype(np.float32) / 255
    # Normalization
    img -= np.array(cfg["mean"])[:, None, None]
    img /= np.array(cfg["std"])[:, None, None]

    return img[None, ...]


def predict(image):
    # Preprocessing
    np_img = preprocess_image(image)
    ort_input = {ort_session.get_inputs()[0].name: np_img}

    # Inference
    ort_out = ort_session.run(None, ort_input)
    # Post-processing
    probs = 1 / (1 + np.exp(-ort_out[0][0]))

    return {class_name: float(conf) for class_name, conf in zip(cfg["classes"], probs)}


img = gr.inputs.Image(type="pil")
outputs = gr.outputs.Label(num_top_classes=1)


gr.Interface(
    fn=predict,
    inputs=[img],
    outputs=outputs,
    title="PyroVision: image classification demo",
    article=(
        "<p style='text-align: center'><a href='https://github.com/pyronear/pyro-vision'>"
        "Github Repo</a> | "
        "<a href='https://pyronear.org/pyro-vision/'>Documentation</a></p>"
    ),
    live=True,
).launch()
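
`preprocess_image` above leans on NumPy broadcasting for the per-channel normalization: the `(C,)` mean and std vectors are reshaped to `(C, 1, 1)` so a single subtraction and division cover every pixel of the `(C, H, W)` array. A quick self-contained check of that broadcast; the mean/std values here are the common ImageNet statistics, used only as stand-ins for whatever this model's config.json actually ships.

import numpy as np

img = np.random.rand(3, 224, 224).astype(np.float32)   # (C, H, W) image in [0, 1]
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

# (3,) -> (3, 1, 1) so each channel gets its own mean/std across all pixels
normed = (img - mean[:, None, None]) / std[:, None, None]
print(normed.shape, normed.dtype)  # (3, 224, 224) float32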