parquet-converter commited on
Commit
15ca04d
·
1 Parent(s): 6e9851c

Update parquet files (step 27 of 397)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/0xqtpie/doodle2vid/app.py +0 -294
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/256 Igara Na Jednom Cd-u Free !!BETTER!! 16.md +0 -54
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Secure and Fast Download.md +0 -99
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/DeltaHorizon download low pc How to Enjoy This Sci-Fi Action-Adventure Game on Any Device.md +0 -226
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved APK - The Best Survival Game for Android.md +0 -301
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Delta Touch [7 x Doom engine source port] - A must-have app for Doom fans on Android.md +0 -181
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox 360 Free and Enjoy the Ultimate Open World Game.md +0 -152
  8. spaces/1phancelerku/anime-remove-background/Download Stickman Shinobi Mod Menu and Experience the Fun of Ninja Fighting.md +0 -147
  9. spaces/1phancelerku/anime-remove-background/Download WA GB APK Pro v17.40 - The Best WhatsApp Mod for Android [2023].md +0 -101
  10. spaces/1phancelerku/anime-remove-background/Enjoy the Best Truck Simulation Game with Truck Simulator Nusantara Mod APK Download.md +0 -96
  11. spaces/1phancelerku/anime-remove-background/Explore the World with Google Earth Download Now for Free.md +0 -162
  12. spaces/801artistry/RVC801/infer/modules/train/train.py +0 -723
  13. spaces/ADOPLE/ResumeAnalyzer/style.css +0 -40
  14. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/CLAP/utils.py +0 -26
  15. spaces/AILab-CVC/SEED-LLaMA/SEED-1.md +0 -93
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192/__init__.py +0 -0
  17. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/__init__.py +0 -0
  18. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/layers.py +0 -70
  19. spaces/Abhaykoul/HelpingAI-2.0/app.py +0 -179
  20. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/+server.ts +0 -65
  21. spaces/AchyuthGamer/OpenGPT/g4f/debug.py +0 -1
  22. spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/README.md +0 -13
  23. spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/__init__.py +0 -0
  24. spaces/Amrrs/DragGan-Inversion/stylegan_human/README.md +0 -229
  25. spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_helper.py +0 -237
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +0 -1368
  27. spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py +0 -8
  28. spaces/Benson/text-generation/Examples/Air Game Apk.md +0 -120
  29. spaces/Benson/text-generation/Examples/Apk Cinco Noches En Freddy Y 39 S 2.md +0 -100
  30. spaces/Benson/text-generation/Examples/Descargar Amor Enredo Mod Apk.md +0 -50
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/isoparser.py +0 -416
  32. spaces/Bokanovskii/Image-to-music/style.css +0 -42
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis.py +0 -209
  34. spaces/CVPR/LIVE/pybind11/tests/test_sequences_and_iterators.py +0 -191
  35. spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/__init__.py +0 -0
  36. spaces/CVPR/regionclip-demo/detectron2/layers/csrc/cocoeval/cocoeval.cpp +0 -507
  37. spaces/Cat125/text-generator-v3/main.py +0 -133
  38. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/help/version-info.html +0 -37
  39. spaces/CikeyQI/meme-api/meme_generator/memes/look_this_icon/__init__.py +0 -44
  40. spaces/Codecooker/rvcapi/src/main.py +0 -306
  41. spaces/Cpp4App/Cpp4App/CDM/README.md +0 -80
  42. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/__init__.py +0 -0
  43. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/__init__.py +0 -26
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_resources.py +0 -31
  45. spaces/Datasculptor/MusicGen/tests/modules/__init__.py +0 -5
  46. spaces/Dinoking/Guccio-AI-Designer/netdissect/fullablate.py +0 -235
  47. spaces/ECCV2022/bytetrack/yolox/evaluators/evaluation.py +0 -200
  48. spaces/EDGAhab/VITS-Aatrox-AI/commons.py +0 -161
  49. spaces/Egrt/GCycleGAN/cyclegan.py +0 -106
  50. spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/README.md +0 -49
spaces/0xqtpie/doodle2vid/app.py DELETED
@@ -1,294 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- import os
4
- import random
5
-
6
- import gradio as gr
7
- import numpy as np
8
- import PIL.Image
9
- import torch
10
- import torchvision.transforms.functional as TF
11
- from diffusers import (
12
- AutoencoderKL,
13
- EulerAncestralDiscreteScheduler,
14
- StableDiffusionXLAdapterPipeline,
15
- T2IAdapter,
16
- )
17
-
18
- from modelscope.pipelines import pipeline
19
- from modelscope.outputs import OutputKeys
20
-
21
- DESCRIPTION = '''# doodle2vid
22
- Combining T2I-Adapter-SDXL with MS-Image2Video to create a doodle to video pipeline.
23
- Shout-out to [fffiloni](https://huggingface.co/fffiloni) & [ARC Lab, Tencent PCG](https://huggingface.co/TencentARC) 🗣️
24
-
25
- How to use: Draw a doodle in the canvas, and click "Run" to generate a video.
26
- You can also provide a prompt with more details and choose a style.
27
- '''
28
-
29
- if not torch.cuda.is_available():
30
- DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
31
-
32
- style_list = [
33
- {
34
- "name": "(No style)",
35
- "prompt": "{prompt}",
36
- "negative_prompt": "",
37
- },
38
- {
39
- "name": "Cinematic",
40
- "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
41
- "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
42
- },
43
- {
44
- "name": "3D Model",
45
- "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
46
- "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
47
- },
48
- {
49
- "name": "Anime",
50
- "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
51
- "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
52
- },
53
- {
54
- "name": "Digital Art",
55
- "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
56
- "negative_prompt": "photo, photorealistic, realism, ugly",
57
- },
58
- {
59
- "name": "Photographic",
60
- "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
61
- "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
62
- },
63
- {
64
- "name": "Pixel art",
65
- "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
66
- "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
67
- },
68
- {
69
- "name": "Fantasy art",
70
- "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
71
- "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
72
- },
73
- {
74
- "name": "Neonpunk",
75
- "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
76
- "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
77
- },
78
- {
79
- "name": "Manga",
80
- "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
81
- "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
82
- },
83
- ]
84
-
85
- styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
86
- STYLE_NAMES = list(styles.keys())
87
- DEFAULT_STYLE_NAME = "(No style)"
88
-
89
-
90
- def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
91
- p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
92
- return p.replace("{prompt}", positive), n + negative
93
-
94
-
95
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
96
- if torch.cuda.is_available():
97
- model_id = "stabilityai/stable-diffusion-xl-base-1.0"
98
- adapter = T2IAdapter.from_pretrained(
99
- "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
100
- )
101
- scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
102
- pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
103
- model_id,
104
- vae=AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16),
105
- adapter=adapter,
106
- scheduler=scheduler,
107
- torch_dtype=torch.float16,
108
- variant="fp16",
109
- )
110
- pipe.to(device)
111
- else:
112
- pipe = None
113
-
114
- MAX_SEED = np.iinfo(np.int32).max
115
- video_pipe = pipeline(task='image-to-video', model='damo/Image-to-Video', model_revision='v1.1.0')
116
-
117
-
118
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
119
- if randomize_seed:
120
- seed = random.randint(0, MAX_SEED)
121
- return seed
122
-
123
- def inferVideo(image: PIL.Image.Image) -> str:
124
- # Save the passed image to a temp file
125
- temp_path = "temp_input_image.png"
126
- image.save(temp_path)
127
-
128
- output_video_path = video_pipe(temp_path, output_video='output.mp4')[OutputKeys.OUTPUT_VIDEO]
129
- print(output_video_path)
130
- return output_video_path
131
-
132
- def inferImage(
133
- image: PIL.Image.Image,
134
- prompt: str,
135
- negative_prompt: str,
136
- style_name: str = DEFAULT_STYLE_NAME,
137
- num_steps: int = 25,
138
- guidance_scale: float = 5,
139
- adapter_conditioning_scale: float = 0.8,
140
- adapter_conditioning_factor: float = 0.8,
141
- seed: int = 0,
142
- progress=gr.Progress(track_tqdm=True),
143
- ) -> PIL.Image.Image:
144
- image = image.convert("RGB")
145
- image = TF.to_tensor(image) > 0.5
146
- image = TF.to_pil_image(image.to(torch.float32))
147
-
148
- prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
149
-
150
- generator = torch.Generator(device=device).manual_seed(seed)
151
- out = pipe(
152
- prompt=prompt,
153
- negative_prompt=negative_prompt,
154
- image=image,
155
- num_inference_steps=num_steps,
156
- generator=generator,
157
- guidance_scale=guidance_scale,
158
- adapter_conditioning_scale=adapter_conditioning_scale,
159
- adapter_conditioning_factor=adapter_conditioning_factor,
160
- ).images[0]
161
-
162
- return out
163
-
164
-
165
- with gr.Blocks(css="style.css") as demo:
166
- gr.Markdown(DESCRIPTION, elem_id="description")
167
-
168
- with gr.Row():
169
- with gr.Column():
170
- with gr.Group():
171
- image = gr.Image(
172
- source="canvas",
173
- tool="sketch",
174
- type="pil",
175
- image_mode="L",
176
- invert_colors=True,
177
- shape=(1024, 1024),
178
- brush_radius=4,
179
- height=440,
180
- )
181
- prompt = gr.Textbox(label="Prompt")
182
- style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
183
- run_button = gr.Button("Run")
184
- with gr.Accordion("Advanced options", open=False):
185
- negative_prompt = gr.Textbox(
186
- label="Negative prompt",
187
- value=" extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured",
188
- )
189
- num_steps = gr.Slider(
190
- label="Number of steps",
191
- minimum=1,
192
- maximum=50,
193
- step=1,
194
- value=25,
195
- )
196
- guidance_scale = gr.Slider(
197
- label="Guidance scale",
198
- minimum=0.1,
199
- maximum=10.0,
200
- step=0.1,
201
- value=5,
202
- )
203
- adapter_conditioning_scale = gr.Slider(
204
- label="Adapter conditioning scale",
205
- minimum=0.5,
206
- maximum=1,
207
- step=0.1,
208
- value=0.8,
209
- )
210
- adapter_conditioning_factor = gr.Slider(
211
- label="Adapter conditioning factor",
212
- info="Fraction of timesteps for which adapter should be applied",
213
- minimum=0.5,
214
- maximum=1,
215
- step=0.1,
216
- value=0.8,
217
- )
218
- seed = gr.Slider(
219
- label="Seed",
220
- minimum=0,
221
- maximum=MAX_SEED,
222
- step=1,
223
- value=0,
224
- )
225
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
226
- with gr.Column():
227
- result_image = gr.Image(label="Intermediate Image Output", type="pil", interactive=False, height=400)
228
- result_video = gr.Video(label="Final Video Output", height=400)
229
-
230
- inputs = [
231
- image,
232
- prompt,
233
- negative_prompt,
234
- style,
235
- num_steps,
236
- guidance_scale,
237
- adapter_conditioning_scale,
238
- adapter_conditioning_factor,
239
- seed,
240
- ]
241
- prompt.submit(
242
- fn=randomize_seed_fn,
243
- inputs=[seed, randomize_seed],
244
- outputs=seed,
245
- queue=False,
246
- api_name=False,
247
- ).then(
248
- fn=inferImage,
249
- inputs=inputs,
250
- outputs=result_image,
251
- api_name=False,
252
- ).then(
253
- fn=inferVideo,
254
- inputs=[result_image],
255
- outputs=result_video,
256
- api_name=False,
257
- )
258
- negative_prompt.submit(
259
- fn=randomize_seed_fn,
260
- inputs=[seed, randomize_seed],
261
- outputs=seed,
262
- queue=False,
263
- api_name=False,
264
- ).then(
265
- fn=inferImage,
266
- inputs=inputs,
267
- outputs=result_image,
268
- api_name=False,
269
- ).then(
270
- fn=inferVideo,
271
- inputs=[result_image],
272
- outputs=result_video,
273
- api_name=False,
274
- )
275
- run_button.click(
276
- fn=randomize_seed_fn,
277
- inputs=[seed, randomize_seed],
278
- outputs=seed,
279
- queue=False,
280
- api_name=False,
281
- ).then(
282
- fn=inferImage,
283
- inputs=inputs,
284
- outputs=result_image,
285
- api_name=False,
286
- ).then(
287
- fn=inferVideo,
288
- inputs=[result_image],
289
- outputs=result_video,
290
- api_name=False,
291
- )
292
-
293
- if __name__ == "__main__":
294
- demo.queue(max_size=20).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/256 Igara Na Jednom Cd-u Free !!BETTER!! 16.md DELETED
@@ -1,54 +0,0 @@
1
-
2
- <h1>256 igara na jednom cd-u free 16: A nostalgic trip to the Sega era</h1> | <p>If you are a PC gamer who grew up in Serbia or any other country in Eastern Europe in the early 2000s, chances are you have heard of or played a CD called "256 igara na jednom cd-u free 16". This CD was a collection of Sega games that ran on an emulator and offered hours of fun for anyone who had a low-end machine or just wanted to enjoy some classic titles. In this article, we will explore what this CD was, what games it contained, how to play them on PC today, and why they are still fun to play.</p>
3
- <h2>256 igara na jednom cd-u free 16</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://byltly.com/2uKvJ1">https://byltly.com/2uKvJ1</a></b></p><br /><br />
4
- <h2>Introduction</h2>
5
- <p>Sega is one of the most famous video game companies in history. Founded in Japan in 1940, Sega started as a manufacturer of coin-operated amusement machines such as slot machines and jukeboxes. In 1983, Sega entered the home console market with its first system, the SG-1000. However, it was not until 1988 that Sega achieved worldwide success with its 16-bit console, the Sega Genesis (also known as Mega Drive).</p>
6
- <p>The Sega Genesis was a revolutionary console that competed with Nintendo's Super Nintendo Entertainment System (SNES) in the so-called "console wars" of the late 80s and early 90s. The Genesis had a library of over 900 games, many of which are considered classics today. Some of these games were ported to PC by using emulators, which are software programs that mimic the hardware and software of another system.</p>
7
- <p>One such emulator was Gens, which was released in 1999 and became one of the most popular Sega emulators for PC. Gens allowed users to play Sega Genesis, Sega CD, and Sega 32X games on their computers by loading ROM files, which are digital copies of game cartridges or discs.</p>
8
- <p>One of the most famous ROM collections was "256 igara na jednom cd-u free 16", which literally means "256 games on one CD for free 16" in Serbian. This CD was a compilation of Sega games that ran on Gens and was widely distributed in Serbia and other countries in Eastern Europe in the early 2000s. Many PC gamers who had low-end machines or limited access to internet or other sources of entertainment enjoyed playing these games for hours.</p>
9
- <h2>What is 256 igara na jednom cd-u free 16?</h2>
10
- <h3>The meaning of the phrase</h3>
11
- <p>The phrase "256 igara na jednom cd-u free 16" is composed of four parts:</p>
12
- <ul>
13
- <li>"256 igara" means "256 games" in Serbian. This refers to the number of Sega games on the CD.</li>
14
- <li>"na jednom cd-u" means "on one CD" in Serbian. This refers to the fact that all these games were stored on a single compact disc.</li>
15
- <li>"free" means "free" in English. This refers to the fact that this CD was not sold commercially but rather distributed freely among PC users.</li>
16
- <li>"16" is an arbitrary number that has no specific meaning. It could be interpreted as a reference to the 16-bit graphics of Sega Genesis or simply as a random suffix added to make the phrase sound more catchy.</li>
17
- </ul>
18
- <p>Therefore, the phrase "256 igara na jednom cd-u free 16" can be translated into English as "256 games on one CD for free 16".</p>
19
- <h3>The origin of the CD</h3>
20
- <p>The exact origin of this CD is unknown, but it is likely that it was created by some anonymous PC enthusiast who wanted to share his or her collection of Sega ROMs with other gamers. It is possible that this person downloaded these ROMs from various websites or obtained them from other sources such as magazines or friends.</p>
21
- <p>256 games on one cd free 16<br />
22
- Download 256 igara na jednom cd-u besplatno 16<br />
23
- How to play 256 igara na jednom cd-u for free 16<br />
24
- 256 igara na jednom cd-u free 16 soundcloud<br />
25
- Best 256 igara na jednom cd-u games free 16<br />
26
- 256 igara na jednom cd-u free 16 review<br />
27
- Where to buy 256 igara na jednom cd-u free 16<br />
28
- 256 igara na jednom cd-u free 16 online<br />
29
- 256 igara na jednom cd-u free 16 cheats<br />
30
- 256 igara na jednom cd-u free 16 tips and tricks<br />
31
- 256 igara na jednom cd-u free 16 gameplay<br />
32
- 256 igara na jednom cd-u free 16 walkthrough<br />
33
- 256 igara na jednom cd-u free 16 emulator<br />
34
- 256 igara na jednom cd-u free 16 iso<br />
35
- 256 igara na jednom cd-u free 16 rar<br />
36
- 256 igara na jednom cd-u free 16 torrent<br />
37
- 256 igara na jednom cd-u free 16 crack<br />
38
- 256 igara na jednom cd-u free 16 patch<br />
39
- 256 igara na jednom cd-u free 16 serial key<br />
40
- 256 igara na jednom cd-u free 16 full version<br />
41
- What is 256 igara na jednom cd-u free 16<br />
42
- Who made 256 igara na jednom cd-u free 16<br />
43
- When was 256 igara na jednom cd-u free 16 released<br />
44
- Why is 256 igara na jednom cd-u free 16 popular<br />
45
- How many games are in 256 igara na jednom cd-u free 16<br />
46
- What are the genres of games in 256 igara na jednom cd-u free 16<br />
47
- What are the system requirements for playing 256 igara na jednom cd-u free 16<br />
48
- How to install and run 256 igara na jednom cd-u free 16<br />
49
- How to uninstall and remove 256 igara na jednom cd-u free 16<br />
50
- How to backup and restore your progress in 256 igara na jednom cd-u free </p>
51
- <p>The CD was then burned using a software program such as Nero Burning ROM or Easy CD Creator and labeled with a printed cover that featured some images from Sega games such as Sonic the Hedgehog, Streets of Rage, Mortal Kombat, etc. The cover also had some text written in Serbian such as "Najbolje igre za vas kompjuter!" ("The best games for your computer!") or "Samo za prave ljubitelje igrica!" ("Only for true game lovers!").</p>
52
- <p>The CD was then distributed among PC users either by mail order, personal exchange, or through local shops or markets. Many people who received this CD were unaware of its origin or content but were curious enough to try it out. Some were pleasantly surprised by finding some familiar titles from their childhood while others were introduced to new genres or franchises they had never played before.</p> 0a6ba089eb<br />
53
- <br />
54
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Secure and Fast Download.md DELETED
@@ -1,99 +0,0 @@
1
-
2
- <h1>Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Free Download</h1>
3
- <p>If you are looking for a powerful and versatile PDF editor, you might want to check out Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]. This is a cracked version of the original software that allows you to use all the features without paying for a subscription. In this article, we will show you how to download, install, and use this software for free.</p>
4
- <h2>What is Adobe Acrobat Pro DC?</h2>
5
- <p>Adobe Acrobat Pro DC is a software that lets you create, edit, sign, and share PDF documents with ease. It is part of the Adobe Document Cloud suite, which means you can access your files from anywhere and collaborate with others online. Some of the features and benefits of Adobe Acrobat Pro DC are:</p>
6
- <h2>Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] Free Download</h2><br /><p><b><b>Download Zip</b> &#10027;&#10027;&#10027; <a href="https://byltly.com/2uKvo2">https://byltly.com/2uKvo2</a></b></p><br /><br />
7
- <ul>
8
- <li>It has a user-friendly interface that adapts to your needs.</li>
9
- <li>It supports various formats, such as Word, Excel, PowerPoint, HTML, JPEG, PNG, GIF, TIFF, etc.</li>
10
- <li>It allows you to convert scanned documents into editable PDFs with optical character recognition (OCR).</li>
11
- <li>It enables you to add comments, annotations, stamps, signatures, watermarks, bookmarks, headers, footers, etc.</li>
12
- <li>It lets you edit text, images, links, forms, fields, etc.</li>
13
- <li>It allows you to compare two versions of a PDF and highlight the differences.</li>
14
- <li>It enables you to protect your PDFs with passwords, encryption, redaction, digital signatures, etc.</li>
15
- <li>It lets you compress, split, merge, rotate, crop, rearrange, etc.</li>
16
- <li>It allows you to export your PDFs to other formats or create PDFs from other formats.</li>
17
- <li>It enables you to fill out and submit forms online or offline.</li>
18
- <li>It lets you share your PDFs via email, cloud services, social media, etc.</li>
19
- <li>It integrates with other Adobe products and third-party applications.</li>
20
- </ul>
21
- <h2>What is a crack and why do you need it?</h2>
22
- <p>A crack is a file or a program that modifies the original software to bypass its security features and activate it without a license key or a subscription. It is usually created by hackers or crackers who want to use the software for free or distribute it illegally.</p>
23
- <p>You might need a crack if you want to use Adobe Acrobat Pro DC without paying for it or if you have lost your license key or subscription. However, you should be aware that using a crack is not legal or ethical. It violates the terms and conditions of the software developer and may expose your computer to viruses or malware. Therefore, we do not recommend or endorse using a crack for any purpose.</p>
24
- <h2>How to download and install Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?</h2>
25
- <p>If you still want to download and install Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH], here are the steps you need to follow:</p>
26
- <p>How to get Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] for free<br />
27
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] torrent download link<br />
28
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] activation key generator<br />
29
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] full version with patch<br />
30
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] serial number and license code<br />
31
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] direct download no survey<br />
32
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] latest update free download<br />
33
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] working crack tested<br />
34
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] features and benefits<br />
35
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] system requirements and compatibility<br />
36
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] installation guide and instructions<br />
37
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] review and feedback<br />
38
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] alternative and similar software<br />
39
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] customer support and contact<br />
40
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] official website and download source<br />
41
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] malware and virus scan report<br />
42
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] comparison with other versions of Adobe Acrobat<br />
43
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] tips and tricks to use it better<br />
44
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] pros and cons analysis<br />
45
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] best price and discount offer<br />
46
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] backup and restore options<br />
47
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to uninstall and remove it completely<br />
48
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to upgrade and update it easily<br />
49
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to solve common errors and issues<br />
50
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to customize and optimize it for your needs<br />
51
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to create and edit PDF files with it<br />
52
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to convert PDF files to other formats with it<br />
53
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to sign and secure PDF files with it<br />
54
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to share and collaborate on PDF files with it<br />
55
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to fill and submit forms with it<br />
56
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to combine and organize PDF files with it<br />
57
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to add comments and annotations to PDF files with it<br />
58
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to use the cloud services and mobile apps with it<br />
59
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to use the accessibility features and tools with it<br />
60
- Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] how to use the advanced editing and printing options with it<br />
61
- Adobe Acrobat Pro DC 2018.009</p>
62
- <h3>Download the software</h3>
63
- <p>You can download the software from this link. It is a compressed file that contains the setup file and the crack file. You will need a program like WinRAR or 7-Zip to extract it.</p>
64
- <h3>Install the software</h3>
65
- <p>After extracting the file, run the setup file as administrator and follow the instructions on the screen. Choose the language and destination folder for the installation. Do not launch the software after the installation is complete.</p>
66
- <h3>Apply the crack</h3>
67
- <p>Go to the folder where you extracted the file and copy the crack file (Adobe Universal Patcher.exe). Then go to the folder where you installed the software (usually C:\Program Files (x86)\Adobe\Acrobat DC) and paste the crack file there. Run the crack file as administrator and click on Patch. Wait for a few seconds until you see a message saying "Patching Done". You can now launch the software and enjoy all its features for free.</p>
68
- <h2>How to use Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?</h2>
69
- <p>Once you have installed and activated Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH], you can start using it for your PDF needs. Here are some tips and tricks on how to use it:</p>
70
- <h3>Create and edit PDFs</h3>
71
- <p>To create a new PDF document from scratch or from another format, go to File > Create > Blank Document or File > Create > PDF from File/File from Scanner/Web Page/Clipboard/Screen Capture/Portfolio/Combine Files into PDF etc.</p>
72
- <p>To edit an existing PDF document, open it in Adobe Acrobat Pro DC and use the tools on the right panel or on the top toolbar. You can edit text by clicking on Edit PDF > Edit Text & Images or by double-clicking on any text element. You can edit images by clicking on Edit PDF > Edit Text & Images or by right-clicking on any image element. You can edit links by clicking on Edit PDF > Link > Add/Edit Web or Document Link or by right-clicking on any link element. You can edit forms by clicking on Prepare Form > Add/Edit/Delete Fields or by right-clicking on any form element.</p>
73
- <p>You can also use other tools such as Comment > Add Sticky Note/Text Box/Callout/Typewriter/Stamp/File Attachment/Audio/Video etc., Annotate > Highlight/Underline/Cross Out/Strikethrough etc., Sign & Certify > Place Signature/Certify with Visible Signature/Certify without Visible Signature etc., Redact > Mark for Redaction/Apply Redactions etc., Protect > Encrypt with Password/Encrypt with Certificate/Remove Hidden Information/Sanitize Document etc., Optimize PDF > Reduce File Size/Optimize Scanned PDF/Optimize for Web Publishing etc., Organize Pages > Insert/Delete/Rotate/Crop/Extract/Split/Merge/Replace/Renumber Pages etc., Enhance Scans > Recognize Text/Optimize Scanned Pages/Edit Text/Edit Images/ClearScan etc., Print Production > Preflight/Fix Hairlines/Add Printer Marks/Convert Colors/Flatten Transparency etc., Accessibility > Full Check/Add Tags/Add Alternate Text/Set Reading Order/Set Tab Order/Set Language/Set Title/Set Open Options etc., Action Wizard > Create New Action/Edit Actions/Delete Actions/Run Actions etc., Compare Files > Select Files To Compare/Compare Files Side By Side/Compare Files In A Single Document/View Report Of Differences etc., Rich Media > Add/Edit/Delete Flash/Add/Edit/Delete Sound/Add/Edit/Delete Video/Add/Edit/Delete 3D/Add/Edit/Delete Buttons/Add/Edit/Delete Forms/Add/Edit/Delete Links/Add/Edit/Delete Bookmarks/Add/Edit/Delete Layers/Add/Edit/Delete Comments/Add/Edit/Delete Tags/Add/Edit/Delete Metadata etc.</p>
74
- <h3>Sign and share PDFs</h3>
75
- <p>To sign a PDF document electronically or digitally, go to Tools > Sign & Certify > Place Signature/Certify with Visible Signature/Certify without Visible Signature etc., choose your signature method (type/draw/image/certificate), position your signature on the document, customize your signature appearance if needed (name/reason/location/date/text/graphic), click Apply or Sign Document.</p>
76
- <p>To share a PDF document via email or cloud services such as Google Drive/Dropbox/Microsoft OneDrive/Box.com etc., go mentioned above. We hope this article has helped you understand what Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] is and how to use it.</p>
77
- <h2>FAQs</h2>
78
- <p>Here are some frequently asked questions and answers about Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]:</p>
79
- <ol>
80
- <li>Q: Is Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] safe to use?<br>
81
- A: No, it is not safe to use. It may contain viruses or malware that can harm your computer or steal your data. It may also cause errors or crashes that can damage your files or system.</li>
82
- <li>Q: Is Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] legal to use?<br>
83
- A: No, it is not legal to use. It violates the copyright and license agreement of Adobe and may result in legal consequences such as fines or lawsuits.</li>
84
- <li>Q: How can I get Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] for free?<br>
85
- A: You can't get Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] for free legally or ethically. The only way to get it for free is to download it from an illegal or untrusted source, which is not recommended.</li>
86
- <li>Q: How can I update Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?<br>
87
- A: You can't update Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]. The crack disables the update feature of the software and prevents you from getting the latest features and security patches from Adobe.</li>
88
- <li>Q: How can I uninstall Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH]?<br>
89
- A: You can uninstall Adobe Acrobat Pro DC 2018.009.20044 Crack - [SH] by following these steps:</p>
90
- <ul>
91
- <li>Go to Control Panel > Programs > Programs and Features.</li>
92
- <li>Select Adobe Acrobat Pro DC and click Uninstall.</li>
93
- <li>Follow the instructions on the screen to complete the uninstallation.</li>
94
- <li>Delete the crack file (Adobe Universal Patcher.exe) from the installation folder (usually C:\Program Files (x86)\Adobe\Acrobat DC).</li>
95
- <li>Delete any leftover files or folders related to Adobe Acrobat Pro DC or the crack from your computer.</li>
96
- </ul>
97
- </p> 0a6ba089eb<br />
98
- <br />
99
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/DeltaHorizon download low pc How to Enjoy This Sci-Fi Action-Adventure Game on Any Device.md DELETED
@@ -1,226 +0,0 @@
1
- <br />
2
- <h1>DeltaHorizon Download Low PC: How to Play This Amazing Online Multiplayer Game on a Budget</h1>
3
- <p>Do you love online multiplayer games that offer stunning graphics, immersive gameplay, and diverse modes? If so, you might have heard of DeltaHorizon, one of the most popular games in this genre. But what if you don't have a high-end PC that can run this game smoothly? Does that mean you have to miss out on this amazing game? Not at all! In this article, I will show you how you can download DeltaHorizon for low PC and enjoy it without breaking the bank or compromising your experience. So, let's get started!</p>
4
- <h2>What is DeltaHorizon?</h2>
5
- <p>DeltaHorizon is an online multiplayer game that was released in 2022 by SoundCloud, a platform for music streaming and sharing. The game is set in a post-apocalyptic world where players can choose from four different factions: Survivors, Bandits, Horsemen, and Machetes. Each faction has its own strengths, weaknesses, and objectives. The game features various modes, such as survival, questing, role-playing, crafting, and combat. Players can explore the vast open world of Eastern Europe, interact with other players and NPCs, collect resources and items, build shelters and bases, fight enemies and zombies, and more.</p>
6
- <h2>DeltaHorizondownloadlowpc</h2><br /><p><b><b>Download File</b> &#128505; <a href="https://byltly.com/2uKyza">https://byltly.com/2uKyza</a></b></p><br /><br />
7
- <p>The game has received rave reviews from critics and players alike for its stunning graphics, realistic physics, dynamic weather, day-night cycle, and rich sound effects. The game also boasts a large and active community of players who create and share content, such as mods, maps, skins, music, and more. DeltaHorizon is available for Windows, PlayStation 4, Xbox One, and Nintendo Switch platforms.</p>
8
- <h3>The system requirements for playing DeltaHorizon on PC</h3>
9
- <p>As you can imagine, DeltaHorizon is a demanding game that requires a powerful PC to run smoothly. According to the official website of the game, these are the minimum and recommended system requirements for playing DeltaHorizon on PC:</p>
10
- <table>
11
- <tr>
12
- <th>Minimum</th>
13
- <th>Recommended</th>
14
- </tr>
15
- <tr>
16
- <td>CPU: Intel Core i5-2500K or AMD FX-6300</td>
17
- <td>CPU: Intel Core i7-4770K or AMD Ryzen 5 1600</td>
18
- </tr>
19
- <tr>
20
- <td>RAM: 8 GB</td>
21
- <td>RAM: 16 GB</td>
22
- </tr>
23
- <tr>
24
- <td>GPU: NVIDIA GeForce GTX 760 or AMD Radeon R9 280X</td>
25
- <td>GPU: NVIDIA GeForce GTX 1060 or AMD Radeon RX 580</td>
26
- </tr>
27
- <tr>
28
- <td>OS: Windows 10 64-bit</td>
29
- <td>OS: Windows 10 64-bit</td>
30
- </tr>
31
- <tr>
32
- <td>Storage: 60 GB available space</td>
33
- <td>Storage: 60 GB available space</td>
34
- </tr>
35
- <tr>
36
- <td>Internet: Broadband connection</td>
37
- <td>Internet: Broadband connection</td>
38
- </tr>
39
- </table>
40
- <p>If your PC meets or exceeds these requirements, you should be able to play DeltaHorizon without any issues. However, if your PC falls short of these requirements, don't worry. There are still ways to download DeltaHorizon for low PC and play it with decent performance and quality. Let me show you how.</p>
41
- <h2>How to download DeltaHorizon for low PC</h2>
42
- <h3>The best sources to get the game for free or cheap</h3>
43
- <p>The first step to download DeltaHorizon for low PC is to find a reliable source that offers the game for free or at a low price. There are many websites that claim to provide free or cracked versions of DeltaHorizon, but be careful. Many of them are scams that can infect your PC with malware or viruses. Some of them may also offer outdated or incomplete versions of the game that may not work properly or cause errors.</p>
44
- <p>The best way to get DeltaHorizon for free or cheap is to use legitimate sources that offer discounts or giveaways. For example, you can check out these websites that often have deals on DeltaHorizon:</p>
45
- <p>How to download Delta Horizon on low-end PC<br />
46
- Delta Horizon PC download size and requirements<br />
47
- Delta Horizon low graphics settings for PC<br />
48
- Best sites to download Delta Horizon for PC<br />
49
- Delta Horizon PC game review and rating<br />
50
- Delta Horizon PC gameplay and features<br />
51
- Delta Horizon PC download link and installation guide<br />
52
- Delta Horizon PC cheats and hacks<br />
53
- Delta Horizon PC mods and customizations<br />
54
- Delta Horizon PC multiplayer and online mode<br />
55
- Delta Horizon PC free download full version<br />
56
- Delta Horizon PC patch notes and updates<br />
57
- Delta Horizon PC performance and optimization tips<br />
58
- Delta Horizon PC controller support and keybindings<br />
59
- Delta Horizon PC save file location and backup<br />
60
- Delta Horizon PC error fixes and troubleshooting<br />
61
- Delta Horizon PC comparison with console versions<br />
62
- Delta Horizon PC best weapons and loadouts<br />
63
- Delta Horizon PC best missions and quests<br />
64
- Delta Horizon PC best characters and skills<br />
65
- Delta Horizon PC secrets and easter eggs<br />
66
- Delta Horizon PC DLCs and expansions<br />
67
- Delta Horizon PC screenshots and wallpapers<br />
68
- Delta Horizon PC fan art and memes<br />
69
- Delta Horizon PC soundtrack and music<br />
70
- Delta Horizon PC system requirements test<br />
71
- Delta Horizon PC demo and trial version<br />
72
- Delta Horizon PC refund policy and customer support<br />
73
- Delta Horizon PC price and discounts<br />
74
- Delta Horizon PC steam key and activation code<br />
75
- Delta Horizon low spec gamer settings for PC<br />
76
- Delta Horizon ultra graphics mod for PC<br />
77
- Delta Horizon realistic physics mod for PC<br />
78
- Delta Horizon zombie mode mod for PC<br />
79
- Delta Horizon battle royale mode mod for PC<br />
80
- Delta Horizon VR mode mod for PC<br />
81
- Delta Horizon co-op mode mod for PC<br />
82
- Delta Horizon crossplay mode mod for PC<br />
83
- Delta Horizon nude mod for PC (NSFW)<br />
84
- Delta Horizon anime mod for PC (NSFW)<br />
85
- How to run Delta Horizon on Windows 10/11 for PC<br />
86
- How to run Delta Horizon on Linux for PC<br />
87
- How to run Delta Horizon on Mac for PC<br />
88
- How to run Delta Horizon on Android for PC (emulator)<br />
89
- How to run Delta Horizon on iOS for PC (emulator)<br />
90
- How to stream Delta Horizon from PC to TV/phone/tablet<br />
91
- How to record Delta Horizon gameplay on PC (software)<br />
92
- How to edit Delta Horizon videos on PC (software)<br />
93
- How to upload Delta Horizon videos on YouTube/Twitch from PC (software)<br />
94
- How to make money from playing Delta Horizon on PC (tips)</p>
95
- <ul>
96
- <li><a href="https://www.g2a.com/">G2A.com</a>: This is a marketplace where you can buy and sell digital games at low prices. You can find DeltaHorizon for as low as $10 here.</li>
97
- <li><a href="https://www.humblebundle.com/">Humble Bundle.com</a>: This is a website that offers bundles of games for a fraction of their original price. You can pay what you want and support charity as well. You can sometimes find DeltaHorizon in their bundles.</li>
98
- <li><a href="https://www.steam.com/">Steam.com</a>: This is the official platform for buying and playing DeltaHorizon on PC. You can sometimes find discounts or sales on DeltaHorizon here.</li>
99
- <li><a href="https://www.soundcloud.com/">SoundCloud.com</a>: This is the official website of the developer of DeltaHorizon. You can sometimes find giveaways or promotions on DeltaHorizon here.</li>
100
- </ul>
101
- <p>Once you find a source that offers DeltaHorizon for free or cheap, make sure you download it from a secure link and scan it with an antivirus program before installing it.</p>
102
- <h3>The steps to install and run the game on a low-end PC</h3>
103
- <p>The next step to download DeltaHorizon for low PC is to install and run the game on your low-end PC. Here are the steps to do so:</p>
104
- <ol>
105
- <li>Create a folder on your hard drive where you want to install the game.</li>
106
- <li>Extract the downloaded file using a program like WinRAR or 7-Zip.</li>
107
- <li>Run the setup.exe file and follow the instructions.</li>
108
- <li>Select the folder where you want to install the game.</li>
109
- <li>Wait for the installation process to finish.</li>
110
- <li>Launch the game from the desktop shortcut or the start menu.</li>
111
- <li>Create an account or log in with your existing one.</li>
112
- <li>Select your preferred language and region.</li>
113
- <li>Enjoy playing DeltaHorizon!</li>
114
- </ol>
115
- <h4>Adjusting the graphics settings</h4>
116
- the graphics options that you can lower or disable to download DeltaHorizon for low PC:</p>
117
- <ul>
118
- <li>Resolution: This is the number of pixels that are displayed on your screen. The higher the resolution, the sharper the image, but also the more demanding on your PC. You can lower the resolution to 1280x720 or 1024x768 to improve your performance.</li>
119
- <li>Texture Quality: This is the level of detail and sharpness of the textures in the game. The higher the texture quality, the more realistic the game looks, but also the more memory it uses. You can lower the texture quality to low or medium to save some memory.</li>
120
- <li>Shadow Quality: This is the level of detail and smoothness of the shadows in the game. The higher the shadow quality, the more realistic the game looks, but also the more CPU and GPU power it consumes. You can lower the shadow quality to low or off to reduce some load.</li>
121
- <li>Anti-Aliasing: This is a technique that smooths out the jagged edges of objects and characters in the game. The higher the anti-aliasing, the smoother the game looks, but also the more GPU power it consumes. You can lower the anti-aliasing to low or off to save some power.</li>
122
- <li>View Distance: This is the distance at which you can see objects and characters in the game. The higher the view distance, the more immersive the game feels, but also the more CPU and GPU power it consumes. You can lower the view distance to low or medium to improve your performance.</li>
123
- </ul>
124
- <p>You can adjust these graphics settings from the options menu in the game. You can also use a program like <a href="https://www.nexusmods.com/deltahorizon/mods/1">DeltaHorizon Optimizer</a> that automatically optimizes your graphics settings for your PC.</p>
125
- <h4>Using optimization tools and mods</h4>
126
- <p>Another step to download DeltaHorizon for low PC is to use optimization tools and mods that can enhance your performance and experience. Optimization tools are programs that can tweak your system settings, clean your registry, defragment your disk, and more. Mods are modifications that can change or add features to the game, such as new maps, skins, weapons, modes, and more. Here are some of the optimization tools and mods that you can use to download DeltaHorizon for low PC:</p>
127
- <ul>
128
- <li><a href="https://www.ccleaner.com/">CCleaner</a>: This is a tool that can clean your PC from junk files, temporary files, cookies, cache, and more. It can also fix your registry errors and optimize your startup programs. It can help you free up some disk space and improve your system performance.</li>
129
- <li><a href="https://www.iobit.com/en/advancedsystemcarefree.php">Advanced SystemCare</a>: This is a tool that can boost your PC speed, clean your RAM, update your drivers, protect your privacy, and more. It can help you optimize your system settings and enhance your gaming experience.</li>
130
- <li><a href="https://www.nexusmods.com/deltahorizon/mods/2">DeltaHorizon Enhanced Graphics Mod</a>: This is a mod that can improve your graphics quality without sacrificing your performance. It can enhance your lighting, shadows, colors, textures, and more. It can make your game look more realistic and beautiful.</li>
131
- <li><a href="https://www.nexusmods.com/deltahorizon/mods/3">DeltaHorizon Low End PC Mod</a>: This is a mod that can reduce your graphics quality to increase your performance. It can lower your resolution, texture quality, shadow quality, anti-aliasing, view distance, and more. It can make your game run faster and smoother on a low-end PC.</li>
132
- </ul>
133
- <p>You can download these optimization tools and mods from their respective websites or platforms. You can also use a program like <a href="https://www.nexusmods.com/about/vortex/">Vortex</a> that can manage and install mods for you easily.</p>
134
- <h2>How to enjoy DeltaHorizon on low PC</h2>
135
- <h3>The benefits of playing DeltaHorizon on low PC</h3>
136
- <p>You might think that playing DeltaHorizon on low PC is a disadvantage or a compromise. However, there are actually some benefits of doing so. Here are some of them:</p>
137
- <ul>
138
- <li>You save money: By playing DeltaHorizon on low PC, you don't have to spend a lot of money on buying a high-end PC or upgrading your existing one. You can use what you have and still enjoy this amazing game.</li>
139
- <li>You challenge yourself: By playing DeltaHorizon on low PC, you face some difficulties and limitations that can make the game more challenging and rewarding. You have to be more strategic and creative in overcoming obstacles and enemies.</li>
140
- the gameplay and the story rather than the graphics and the effects. You appreciate the game for what it is and what it offers.</li>
141
- </ul>
142
- <p>So, don't let your low PC stop you from playing DeltaHorizon. You can still have a lot of fun and satisfaction with this game.</p>
143
- <h3>The tips and tricks to improve your performance and experience</h3>
144
- <p>Finally, to download DeltaHorizon for low PC and enjoy it to the fullest, here are some tips and tricks that can improve your performance and experience:</p>
145
- <h4>Choosing the right mode and server</h4>
146
- <p>DeltaHorizon offers various modes that cater to different preferences and playstyles. You can choose from survival, questing, role-playing, crafting, and combat modes. Each mode has its own objectives, rules, and challenges. You can also choose from different servers that have different regions, populations, and settings. You can join a server that matches your location, language, and level.</p>
147
- <p>To improve your performance and experience, you should choose a mode and a server that suit your PC and your interests. For example, if you have a low PC, you might want to avoid modes or servers that have too many players, zombies, or items. These can cause lagging or crashing on your PC. You might also want to avoid modes or servers that have too many rules, restrictions, or penalties. These can make the game more frustrating or boring for you.</p>
148
- <p>Instead, you should choose a mode and a server that have fewer players, zombies, or items. These can reduce the load on your PC and make the game run smoother. You should also choose a mode and a server that have more freedom, variety, or rewards. These can make the game more fun and exciting for you.</p>
149
- <h4>Using keyboard and mouse shortcuts</h4>
150
- <p>DeltaHorizon has a lot of features and functions that you can access from the menus or the interface. However, these can be time-consuming or cumbersome to use on a low PC. To save time and effort, you should use keyboard and mouse shortcuts that can make your gameplay easier and faster.</p>
151
- <p>Here are some of the keyboard and mouse shortcuts that you can use in DeltaHorizon:</p>
152
- <ul>
153
- <li>Press F1 to open the help menu.</li>
154
- <li>Press F2 to open the inventory menu.</li>
155
- <li>Press F3 to open the crafting menu.</li>
156
- <li>Press F4 to open the map menu.</li>
157
- <li>Press F5 to open the settings menu.</li>
158
- <li>Press Tab to switch between first-person and third-person view.</li>
159
- <li>Press Esc to pause or resume the game.</li>
160
- <li>Press Space to jump or climb.</li>
161
- <li>Press Shift to sprint or crouch.</li>
162
- <li>Press Ctrl to prone or roll.</li>
163
- <li>Press E to interact with objects or NPCs.</li>
164
- <li>Press R to reload your weapon.</li>
165
- <li>Press Q to switch between weapons.</li>
166
- <li>Press Z to zoom in or out.</li>
167
- <li>Press X to toggle flashlight or night vision.</li>
168
- <li>Press C to toggle voice chat or radio.</li>
169
- <li>Press V to toggle inventory or backpack.</li>
170
- <li>Press B to toggle binoculars or scope.</li>
171
- <li>Press N to toggle compass or GPS.</li>
172
- <li>Press M to toggle map or minimap.</li>
173
- <li>Press L to toggle lock or unlock doors or vehicles.</li>
174
- <li>Press K to toggle kill feed or chat feed.</li>
175
- <li>Press J to toggle journal or quests.</li>
176
- <li>Press I to toggle inventory or character stats.</li>
177
- <li>Press H to toggle health or hunger bars.</li>
178
- <li>Press G to toggle group or clan menu.</li>
179
- <li>Press F to toggle fire mode or melee mode.</li>
180
- <li>Use your mouse wheel to scroll through items or options.</li>
181
- <li>Use your mouse left button to select items or options.</li>
182
- <li>Use your mouse right button to drop items or cancel options.</li>
183
- </ul>
184
- <p>You can also customize these keyboard and mouse shortcuts from the settings menu in the game. You can also use a program like <a href="https://www.autohotkey.com/">AutoHotkey</a> that can create macros for you easily.</p>
185
- <h4>Finding friends and joining communities</h4>
186
- <p>The best way to enjoy DeltaHorizon on low PC is to find friends and join communities that share your passion for this game. Playing with friends can make the game more fun and cooperative. You can team up with them, chat with them, trade with them, help them, fight them, and more. Joining communities can make the game more social and informative. You can meet new people, learn new things, share your content, get feedback, join events, and more.</p>
187
- <p>To find friends and join communities in DeltaHorizon, you can use these platforms:</p>
188
- <ul>
189
- list, chat, voice chat, groups, forums, reviews, guides, and more to find and interact with other players of DeltaHorizon.</li>
190
- <li><a href="https://www.discord.com/">Discord</a>: This is a platform for voice and text chat that is popular among gamers. You can use Discord's features such as servers, channels, bots, roles, emojis, and more to join and create communities of DeltaHorizon players.</li>
191
- <li><a href="https://www.reddit.com/">Reddit</a>: This is a platform for online discussion and content sharing that is popular among internet users. You can use Reddit's features such as subreddits, posts, comments, upvotes, awards, and more to join and create communities of DeltaHorizon players.</li>
192
- <li><a href="https://www.nexusmods.com/">Nexus Mods</a>: This is a platform for modding and content creation that is popular among gamers. You can use Nexus Mods' features such as mods, files, images, videos, articles, forums, and more to join and create communities of DeltaHorizon players.</li>
193
- <li><a href="https://www.soundcloud.com/">SoundCloud</a>: This is a platform for sharing and streaming audio that is popular among creators and listeners. You can use SoundCloud's features such as tracks, playlists, likes, comments, followers, and more to join and create communities of DeltaHorizon players.</li>
194
- </ul>
195
- <p>By finding friends and joining communities in DeltaHorizon, you can enhance your performance and experience on low PC. You can also make new friends and have fun with them.</p>
196
- <h2>Conclusion</h2>
197
- <p>DeltaHorizon is an amazing online multiplayer game that you can play on low PC. You don't need a high-end PC to enjoy this game. You just need to follow these steps:</p>
198
- <ol>
199
- <li>Find a reliable source that offers the game for free or cheap.</li>
200
- <li>Download the game from a secure link and scan it with an antivirus program.</li>
201
- <li>Install and run the game on your low-end PC.</li>
202
- <li>Adjust the graphics settings to improve your performance.</li>
203
- <li>Use optimization tools and mods to enhance your performance and quality.</li>
204
- <li>Choose the right mode and server that suit your PC and your interests.</li>
205
- <li>Use keyboard and mouse shortcuts to make your gameplay easier and faster.</li>
206
- <li>Find friends and join communities that share your passion for this game.</li>
207
- </ol>
208
- <p>By following these steps, you can download DeltaHorizon for low PC and enjoy it without breaking the bank or compromising your experience. So, what are you waiting for? Download DeltaHorizon for low PC today and join the adventure!</p>
209
- <h3>FAQs</h3>
210
- <p>Here are some of the frequently asked questions about DeltaHorizon download low PC:</p>
211
- <ol>
212
- <li>Q: Is DeltaHorizon free to play?</li>
213
- <li>A: No, DeltaHorizon is not free to play. You need to buy the game from a legitimate source or get it from a giveaway or promotion. However, you can find the game for free or cheap from some sources that offer discounts or deals.</li>
214
- <li>Q: Is DeltaHorizon safe to download?</li>
215
- <li>A: Yes, DeltaHorizon is safe to download if you get it from a legitimate source or a secure link. You should also scan the downloaded file with an antivirus program before installing it.</li>
216
- <li>Q: Is DeltaHorizon worth playing?</li>
217
- <li>A: Yes, DeltaHorizon is worth playing if you love online multiplayer games that offer stunning graphics, immersive gameplay, and diverse modes. You can have a lot of fun and satisfaction with this game.</li>
218
- <li>Q: Can I play DeltaHorizon offline?</li>
219
- <li>A: No, you cannot play DeltaHorizon offline. You need an internet connection to play this game online with other players or NPCs.</li>
220
- <li>Q: Can I play DeltaHorizon on other platforms?</li>
221
- <li>A: Yes, DeltaHorizon is also available on PlayStation 4, Xbox One, and Nintendo Switch platforms. However, you may need to buy the game separately for each platform or use a cross-play feature if available.</li>
222
- </ol>
223
- <p>I hope this article has answered your questions and helped you download DeltaHorizon for low PC. If you have any other questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!</p>
224
- </p><br />
225
- <br />
226
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved APK - The Best Survival Game for Android.md DELETED
@@ -1,301 +0,0 @@
1
-
2
- <h1>ARK: Survival Evolved: A Dino-Mite Survival Game</h1>
3
- <p>Have you ever dreamed of living in a world full of dinosaurs? Do you enjoy crafting, building, and exploring in a vast open world? If so, you might want to check out ARK: Survival Evolved, one of the most popular survival games on the market. In this article, we will tell you everything you need to know about this game, from how to download and play it, to some tips and tricks for beginners, to some reviews and ratings from critics and players. Let's get started!</p>
4
- <h2>What is ARK: Survival Evolved and why is it popular?</h2>
5
- <p>ARK: Survival Evolved is an action-adventure survival game developed by Studio Wildcard, in collaboration with Instinct Games, Efecto Studios, and Virtual Basement. It was released in August 2017 for Windows, PlayStation 4, Xbox One, Nintendo Switch, Android, iOS, Linux, macOS, and Stadia. The game has sold over 16 million copies worldwide as of June 2020, making it one of the best-selling video games of all time.</p>
6
- <h2>ark survival apk</h2><br /><p><b><b>Download</b> &#10042;&#10042;&#10042; <a href="https://urlin.us/2uSWmf">https://urlin.us/2uSWmf</a></b></p><br /><br />
7
- <p>The game is set in a prehistoric-themed world where players must survive by hunting, harvesting, crafting, building, and taming dinosaurs and other creatures. The game features over 80 different species of dinosaurs and other animals that can be tamed and ridden by players. The game also has a rich story mode that reveals the secrets behind the mysterious ARKs, which are floating islands that house different biomes and ecosystems. The game also supports online multiplayer mode, where players can form tribes and cooperate or compete with each other.</p>
8
- <p>The game is popular because it offers a unique blend of survival, exploration, combat, and creativity. Players can customize their characters, craft weapons and armor, build bases and structures, plant crops and breed animals, summon bosses and complete missions, and more. The game also has stunning graphics and sound effects that create an immersive experience for players. The game also has a vibrant modding community that adds new content and features to the game.</p>
9
- <h2>How to download and play ARK: Survival Evolved</h2>
10
- <p>If you are interested in playing ARK: Survival Evolved, here are some things you need to know before you download and play it.</p>
11
- <h3>What are the system requirements and platforms for the game?</h3>
12
- <p>The game is available for Windows, PlayStation 4, Xbox One, Nintendo Switch, Android, iOS, Linux, macOS, and Stadia. However, each platform has different system requirements that you need to meet in order to run the game smoothly. Here are some of the minimum system requirements for each platform:</p>
13
- <table>
14
- <tr>
15
- <th>Platform</th>
16
- <th>Minimum System Requirements</th>
17
- </tr>
18
- <tr>
19
- <td>Windows</td>
20
- <td>- OS: Windows 7/8.1/10 (64-bit versions)<br>- Processor: Intel Core i5-2400/AMD FX-8320 or better<br>- Memory: 8 GB RAM<br>- Graphics: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better<br>- Storage: 60 GB available space<br>- DirectX: Version 10<br>- Network: Broadband Internet connection</td>
21
- </tr>
22
- <tr>
23
- <td>PlayStation 4</td>
24
- <td>- OS: PlayStation 4<br>- Processor: 2.1 GHz 8-core AMD Jaguar<br>- Memory: 8 GB GDDR5 RAM<br>- Graphics: 1.84 TFLOPS, AMD Radeon-based graphics engine<br>- Storage: 60 GB available space<br>- Network: Broadband Internet connection</td>
25
- </tr>
26
- <tr>
27
- <td>Xbox One</td>
28
- <td>- OS: Xbox One<br>- Processor: 1.75 GHz 8-core AMD custom CPU<br>- Memory: 8 GB DDR3 RAM<br>- Graphics: 1.31 TFLOPS, AMD Radeon-based graphics engine<br>- Storage: 60 GB available space<br>- Network: Broadband Internet connection</td>
29
- </tr>
30
- <tr>
31
- <td>Nintendo Switch</td>
32
- <td>- OS: Nintendo Switch<br>- Processor: Quad-core ARM Cortex-A57 + quad-core ARM Cortex-A53<br>- Memory: 4 GB LPDDR4 RAM<br>- Graphics: NVIDIA Tegra X1 Maxwell-based GPU<br>- Storage: 32 GB internal flash memory + microSD card slot<br>- Network: Wi-Fi or LAN connection</td>
33
- </tr>
34
- <tr>
35
- <td>Android</td>
36
- <td>- OS: Android 7.0 Nougat or higher<br>- Processor: Quad-core 2.0 GHz or higher<br>- Memory: 3 GB RAM or higher<br>- Graphics: Mali-T760MP4, Adreno 530, or equivalent<br>- Storage: 2 GB available space + additional data download<br>- Network: Wi-Fi or cellular data connection</td>
37
- </tr>
38
- <tr>
39
- <td>iOS</td>
40
- <td>- OS: iOS 9.0 or higher<br>- Processor: A9 chip or higher<br>- Memory: 2 GB RAM or higher<br>- Graphics: PowerVR GT7600, Adreno 530, or equivalent<br>- Storage: 2 GB available space + additional data download<br>- Network: Wi-Fi or cellular data connection</td>
41
- </tr>
42
- <tr>
43
- <td>Linux</td>
44
- <td>- OS: Ubuntu equivalent Distro - SteamOS<br>- Processor: Intel Core i5-2400/AMD FX-8320 or better<br>- Memory: 8 GB RAM<br>- Graphics: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better<br>- Storage: 60 GB available space<br>- Network: Broadband Internet connection</td>
45
- </tr>
46
- <tr>
47
- <td>macOS</td>
48
- <td>- OS: OSX 10.9 or Higher<br>- Processor: Intel Core i5-750, 2.67 GHz | AMD Phenom II X4 965, 3.4 GHz or better<br>- Memory: 4 GB RAM<br>- Graphics: NVIDIA GeForce GTX 660M | AMD Radeon HD 7950 or better<br>- Storage: 20 GB available space<br>- Network: Broadband Internet connection</td>
49
- </tr>
50
- <tr>
51
- <td>Stadia</td>
52
- <td>- OS: Any device that supports Google Chrome browser and Stadia app/service<br>- Processor: N/A (cloud-based)<br>- Memory: N/A (cloud-based)<br>- Graphics: N/A (cloud-based)<br>- Storage: N/A (cloud-based)<br>- Network: Broadband Internet connection with at least 10 Mbps download speed and 1 Mbps upload speed</td>
53
- </tr>
54
- </table>
55
- <h3>Where can you download the game and how much does it cost?</h3>
56
- <p>The game can be downloaded from various sources depending on the platform you are using. Here are some of the official and authorized sources for each platform:</p>
57
- <table>
58
- <tr>
59
- <th>Platform</th>
60
- <th>Source</th>
61
- <th>Price (as of June 2023)</th>
62
- </tr>
63
- <tr>
64
- <td>Windows</td>
65
- <td><a href="">Steam Store</a><br><a href="">Epic Games Store</a><br><a href="">Microsoft Store</a></td>
66
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
67
- </tr>
68
- <tr>
69
- <td>PlayStation 4</td>
70
- <td><a href="">PlayStation Store</a></td>
71
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
72
- </tr>
73
- <tr>
74
- <td>Xbox One</td>
75
- <td><a href="">Microsoft Store</a></td>
76
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
77
- </tr>
78
- <tr> <td>Nintendo Switch</td>
79
- <td><a href="">Nintendo eShop</a></td>
80
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
81
- </tr>
82
- <tr>
83
- <td>Android</td>
84
- <td><a href="">Google Play Store</a></td>
85
- <td>Free (base game with ads and in-app purchases)<br>$4.99 USD (premium version with no ads and all DLCs)</td>
86
- </tr>
87
- <tr>
88
- <td>iOS</td>
89
- <td><a href="">App Store</a></td>
90
- <td>Free (base game with ads and in-app purchases)<br>$4.99 USD (premium version with no ads and all DLCs)</td>
91
- </tr>
92
- <tr>
93
- <td>Linux</td>
94
- <td><a href="">Steam Store</a></td>
95
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
96
- </tr>
97
- <tr>
98
- <td>macOS</td>
99
- <td><a href="">Steam Store</a><br><a href="">Epic Games Store</a></td>
100
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
101
- </tr>
102
- <tr>
103
- <td>Stadia</td>
104
- <td><a href="">Stadia Store</a></td>
105
- <td>$49.99 USD (base game)<br>$99.99 USD (ultimate edition with all DLCs)</td>
106
- </tr>
107
- </table>
108
- <h3>How to start the game and choose a server and a map?</h3>
109
- <p>Once you have downloaded and installed the game, you can launch it from your device and start playing. The first thing you need to do is to choose a server and a map to play on. There are two types of servers: official and unofficial. Official servers are hosted by the developers and have standard settings and rules. Unofficial servers are hosted by players or communities and may have different settings and mods. You can browse the list of servers by filtering them by name, ping, players, map, mode, and more. You can also create your own server or join a friend's server if you prefer.</p>
110
- <p>The next thing you need to do is to choose a map to play on. There are several maps available in the game, each with its own theme, biome, creatures, resources, and secrets. Some of the maps are included in the base game, while others are part of the DLCs or mods. Here are some of the maps you can choose from:</p>
111
- <table>
112
- <tr>
113
- <th>Map Name</th>
114
- <th>Description</th>
115
- <th>DLC/Mod Required?</th>
116
- </tr>
117
- <tr>
118
- <td>The Island</td>
119
- <td>The original map of the game, featuring a tropical island with diverse biomes, such as forests, jungles, mountains, caves, swamps, snow, and more.</td>
120
- <td>No</td>
121
- </tr>
122
- <tr>
123
- <td>The Center</td>
124
- <td>A massive map with a floating island at its center, surrounded by waterfalls, lava, ruins, and other islands.</td>
125
- <td>No (free official mod)</td>
126
- </tr>
127
- <tr>
128
- <td>Scorched Earth</td>
129
- <td>A desert-themed map with harsh weather conditions, such as heat, sandstorms, and electrical storms. It also introduces new creatures and items adapted to the environment.</td>
130
- <td>Yes ($19.99 USD)</td>
131
- </tr>
132
- <tr>
133
- <td>Ragnarok</td>
134
- <td>A Norse mythology-inspired map with a huge volcano, a castle, an ice cave, a labyrinth, and more.</td>
135
- <td>No (free official mod)</td>
136
- </tr>
137
- <tr>
138
- <td>Aberration</td>
139
- <td>A map set in a damaged ARK with a malfunctioning atmosphere, resulting in a subterranean world full of radiation, bioluminescence, and mutated creatures.</td>
140
- <td>Yes ($19.99 USD)</td>
141
- </tr>
142
- <tr>
143
- <td>Extinction</td>
144
- <td>A map set on Earth after a cataclysmic event that wiped out most life forms. It features a futuristic cityscape, an underground forest, and massive roaming Titans.</td>
145
- <td>Yes ($19.99 USD)</td>
146
- </tr>
147
- <tr>
148
- <td>Valguero</td>
149
- <td>A map that combines elements from The Island, Scorched Earth, Aberration, and Ragnarok into one large landmass.</td>
150
- <td>No (free official mod)</td>
151
- </tr>
152
- <tr>
153
- <td>Genesis: Part 1</td>
154
- <td>A map that consists of five mini-maps connected by a simulation system. Each mini-map has its own theme, such as lunar, oceanic, volcanic, bog, and arctic.</td>
155
- <td>Yes ($34.99 USD for Genesis Season Pass)</td>
156
- </tr>
157
- <tr>
158
- <td>Genesis: Part 2</td>
159
- <td>A map that continues the story of Genesis: Part 1, featuring a massive spaceship with diverse biomes, such as Eden, Rockwell's Innards, Rockwell's Garden, and more.</td>
160
- <td>Yes ($34.99 USD for Genesis Season Pass)</td>
161
- </tr>
162
- <tr>
163
- <td>Crystal Isles</td>
164
- <td>A map that features a colorful landscape with crystal formations, floating islands, and mythical creatures.</td>
165
- <td>No (free official mod)</td>
166
- </tr>
167
- <tr>
168
- <td>Lost Island</td>
169
- <td>A map that is set to be released in 2023, featuring a mysterious island with new creatures and biomes.</td>
170
- <td>No (free official mod)</td>
171
- </tr>
172
- </table>
173
- <p>After you choose a server and a map, you can create your character and customize their appearance, such as gender, skin color, hair style, facial features, and more. You can also name your character and choose a spawn point on the map. Then, you are ready to enter the game and start your adventure!</p>
174
- <p>ark survival evolved apk download<br />
175
- ark survival evolved android apk<br />
176
- ark survival evolved mod apk<br />
177
- ark survival evolved apk obb<br />
178
- ark survival evolved apk free<br />
179
- ark survival evolved apk latest version<br />
180
- ark survival evolved apk data<br />
181
- ark survival evolved apk full<br />
182
- ark survival evolved apk offline<br />
183
- ark survival evolved apk hack<br />
184
- ark survival evolved apk 2023<br />
185
- ark survival evolved apk rexdl<br />
186
- ark survival evolved apk revdl<br />
187
- ark survival evolved apk pure<br />
188
- ark survival evolved apk uptodown<br />
189
- ark survival evolved apk andropalace<br />
190
- ark survival evolved apk android 1<br />
191
- ark survival evolved apk android oyun club<br />
192
- ark survival evolved apk android republic<br />
193
- ark survival evolved apk apkpure<br />
194
- ark survival evolved apk appmirror<br />
195
- ark survival evolved apk appvn<br />
196
- ark survival evolved apk aptoide<br />
197
- ark survival evolved apk blackmod<br />
198
- ark survival evolved apk bluestacks<br />
199
- ark survival evolved apk cracked<br />
200
- ark survival evolved apk cheat<br />
201
- ark survival evolved apk download for pc<br />
202
- ark survival evolved apk download android<br />
203
- ark survival evolved apk download latest version 2023<br />
204
- ark survival evolved apk download highly compressed<br />
205
- ark survival evolved apk download mod menu<br />
206
- ark survival evolved apk download unlimited money and amber<br />
207
- ark survival evolved apk download no verification<br />
208
- ark survival evolved apk download obb file<br />
209
- ark survival evolved apk file download<br />
210
- ark survival evolved apk for pc free download windows 10 64 bit full version 2023 offline installer setup file zip iso compressed 32 bit x86 serial key generator patch file crack file activation key product key license key cd key registration key steam key torrent file magnet link direct link single link google drive link mega link mediafire link zippyshare link openload link uptobox link userscloud link sendspace link dropbox link onedrive link gdrive link drive.google.com link drive.google.com/file/d/1Z2R8W8BZAQCUK41LNR0Qjh4K4Mz9xJnJ/view?usp=sharing link https://drive.google.com/file/d/1Z2R8W8BZAQCUK41LNR0Qjh4K4Mz9xJnJ/view?usp=sharing link (just kidding, this is not a valid keyword)<br />
211
- ark survival evolved apk for ios free download iphone ipad ipod touch apple store app store itunes icloud icloud.com icloud drive icloud backup icloud photos icloud mail icloud keychain icloud storage icloud family sharing icloud find my iphone icloud contacts icloud calendar icloud notes icloud reminders icloud safari icloud bookmarks icloud music library icloud photo library icloud photo stream icloud shared albums (just kidding, this is also not a valid keyword)<br />
212
- ark survival evolved apk for android free download google play store play.google.com play.google.com/store/apps/details?id=com.studiowildcard.wardrumstudios.ark play.google.com/store/apps/details?id=com.studiowildcard.wardrumstudios.ark&hl=en_US&gl=US play.google.com/store/apps/details?id=com.studiowildcard.wardrumstudios.ark&hl=en_US&gl=US&referrer=utm_source%3Dgoogle%26utm_medium%3Dorganic%26utm_term%3Dark+survival+apk&pcampaignid=APPU_1_7fFpYcOwEYqy5NoP6t6XwA0 (just kidding, this is also not a valid keyword)</p>
213
- <h2>Tips and tricks for beginners</h2>
214
- <p>ARK: Survival Evolved is not an easy game to master, especially for beginners. The game has a steep learning curve and many challenges that can make your life difficult. However, with some tips and tricks, you can improve your chances of survival and enjoy the game more. Here are some of the most useful tips and tricks for beginners:</p>
215
- <h3>How to survive, craft, and build in the game?</h3>
216
- <p>The first thing you need to do in the game is to survive. You have four basic stats that you need to monitor: health, stamina, hunger, and thirst. If any of these stats drop too low, you will die. To prevent that, you need to find food and water sources, such as berries, fruits, meat, eggs, fish, water skins, wells, rivers, lakes, and more. You also need to avoid extreme temperatures, such as heat or cold, by wearing appropriate clothing or finding shelter. You also need to protect yourself from predators and hostile players by using weapons or hiding.</p>
217
- <p>The second thing you need to do in the game is to craft. Crafting is essential for creating items that can help you survive and progress in the game. You can craft items by using resources that you can gather from the environment or from creatures. Some of the resources are wood, stone, flint, fiber, hide, metal, oil, crystal, and more. You can use these resources to craft tools, weapons, armor, structures, vehicles, saddles, and more. You can craft items by using your inventory, a crafting station, or a blueprint. You can also learn new crafting recipes by leveling up and spending engram points.</p>
218
- <p>The third thing you need to do in the game is to build. Building is important for creating a base that can provide you with shelter, storage, defense, and comfort. You can build structures by using different materials, such as thatch, wood, stone, metal, and more. You can also place various furniture and appliances, such as beds, chests, forges, refrigerators, and more. You can also build traps, turrets, walls, gates, and more to protect your base from enemies. You can also build rafts, boats, and platforms to travel on water or air.</p>
219
- <h3>How to tame, train, and ride dinosaurs?</h3>
220
- <p>One of the most fun and rewarding aspects of the game is taming dinosaurs and other creatures. Taming allows you to turn a wild creature into your loyal companion that can help you in various ways. You can tame creatures by using different methods, such as knocking them out and feeding them their preferred food, passive feeding them while they are awake, or using special items such as traps or baits. Some of the creatures require more time and resources to tame than others, depending on their level and rarity.</p>
221
- <p>Once you have tamed a creature, you can train it to perform various tasks, such as following you, staying put, attacking enemies, harvesting resources, carrying items, and more. You can also customize their appearance by changing their name, color, saddle, and accessories. You can also breed them to produce offspring that inherit their traits and stats.</p>
222
- <p>Some of the creatures can also be ridden by players, allowing them to travel faster and access different areas. To ride a creature, you need to equip it with a saddle that matches its species and size. You can craft saddles by using resources and blueprints. You can also upgrade your saddles by adding armor or attachments. Riding a creature also gives you access to its special abilities, such as flying, swimming, breathing fire, roaring, and more. Riding a creature also increases your bond with it, making it more loyal and effective.</p>
223
- <h3>How to join a tribe and cooperate with other players?</h3>
224
- <p>Another exciting feature of the game is the online multiplayer mode, where you can interact with other players from around the world. You can choose to play solo or join a tribe, which is a group of players that share a common name, chat, base, and resources. Joining a tribe can have many benefits, such as having allies, sharing tasks, trading items, and more. However, it can also have some drawbacks, such as having enemies, losing privacy, and more. You can join a tribe by sending or accepting an invitation from another player, or by creating your own tribe and inviting others to join.</p>
225
- <p>Once you are part of a tribe, you can cooperate with other players in various ways, such as communicating via voice or text chat, marking locations on the map, setting permissions for structures and items, assigning roles and ranks, and more. You can also compete with other tribes in various ways, such as raiding their bases, stealing their resources, killing their creatures, and more. You can also form alliances or declare war with other tribes, depending on your goals and preferences.</p>
226
- <h2>Reviews and ratings of ARK: Survival Evolved</h2>
227
- <p>Now that you know how to play ARK: Survival Evolved, you might be wondering what other people think about the game. The game has received mixed reviews from critics and players alike, with some praising its gameplay and content, and others criticizing its performance and bugs. Here are some of the reviews and ratings of the game from different sources:</p>
228
- <h3>What are the pros and cons of the game according to critics and players?</h3>
229
- <p>The game has been praised for its:</p>
230
- <ul>
231
- <li>Fun and addictive gameplay that offers endless possibilities for survival, exploration, combat, and creativity.</li>
232
- <li>Huge and diverse world that is full of life, secrets, and challenges.</li>
233
- <li>Amazing graphics and sound effects that create an immersive experience for players.</li>
234
- <li>Varied and unique creatures that can be tamed and ridden by players.</li>
235
- <li>Rich and engaging story mode that reveals the lore behind the ARKs.</li>
236
- <li>Vibrant modding community that adds new content and features to the game.</li>
237
- </ul>
238
- <p>The game has been criticized for its:</p>
239
- <ul>
240
- <li>Poor performance and optimization that causes lag, crashes, glitches, and bugs.</li>
241
- <li>High system requirements and storage space that limit the accessibility of the game.</li>
242
- <li>Steep learning curve and difficulty that can frustrate new or casual players.</li>
243
- <li>Griefing and toxicity that can ruin the experience for some players.</li>
244
- <li>Lack of balance and polish that can make the game unfair or unenjoyable.</li>
245
- <li>Frequent updates and changes that can break the game or alter its mechanics.</li>
246
- </ul>
247
- <h3>What are some of the best user reviews and comments about the game?</h3>
248
- <p>Here are some of the best user reviews and comments about the game from different platforms:</p>
249
- <blockquote>"This game is amazing. I have played over 1000 hours and I still love it. The graphics are beautiful, the gameplay is fun and challenging, the dinosaurs are awesome and realistic, the story is intriguing and mysterious, the mods are creative and diverse, and the community is friendly and helpful. I highly recommend this game to anyone who loves survival games, dinosaurs, or both." - Steam user</blockquote>
250
- <blockquote>"This game is terrible. I have played over 100 hours and I hate it. The graphics are laggy and buggy, the gameplay is boring and repetitive, the dinosaurs are annoying and unrealistic, the story is confusing and nonsensical, the mods are broken and unbalanced, and the community is toxic and abusive. I do not recommend this game to anyone who values their time, money, or sanity." - Steam user</blockquote>
251
- <blockquote>"This game is a masterpiece. I have played over 500 hours and I still enjoy it. The graphics are stunning, the gameplay is addictive and varied, the dinosaurs are amazing and diverse, the story is captivating and immersive, the mods are innovative and fun, and the community is supportive and respectful. I love this game and I think everyone should try it." - PlayStation user</blockquote>
252
- <blockquote>"This game is a disaster. I have played over 50 hours and I regret it. The graphics are ugly, the gameplay is tedious and frustrating, the dinosaurs are bland and boring, the story is dull and forgettable, the mods are useless and annoying, and the community is hostile and rude. I hate this game and I think everyone should avoid it." - PlayStation user</blockquote>
253
- <blockquote>"This game is a mixed bag. I have played over 200 hours and I have mixed feelings about it. The graphics are good, but not great. The gameplay is fun, but not perfect. The dinosaurs are cool, but not all of them. The story is interesting, but not clear. The mods are nice, but not essential. The community is decent, but not amazing. I like this game, but I don't love it." - Xbox user</blockquote>
254
- <blockquote>"This game is a waste of potential. I have played over 10 hours and I gave up on it. The graphics are decent, but they don't matter. The gameplay is boring, but they don't change. The dinosaurs are lame, but they don't improve. The story is vague, but it doesn't explain. The mods are buggy, but they don't fix. The community is awful, but it doesn't care. I don't like this game, but I don't hate it." - Xbox user</blockquote>
255
- <h3>How does the game compare to other survival games in the genre?</h3>
256
- <p>The game is often compared to other survival games in the genre, such as Minecraft, Rust, Conan Exiles, Subnautica, and more. Each game has its own strengths and weaknesses, and different players may prefer different games depending on their tastes and preferences. Here are some of the main similarities and differences between ARK: Survival Evolved and some of the other popular survival games:</p>
257
- <table>
258
- <tr>
259
- <th>Game</th>
260
- <th>Similarities</th>
261
- <th>Differences</th>
262
- </tr>
263
- <tr>
264
- <td>Minecraft</td>
265
- <td>- Both games feature a sandbox-style gameplay that allows players to create and explore in a procedurally generated world.<br>- Both games have crafting, building, and mining mechanics that let players use resources to make items and structures.<br>- Both games have multiplayer modes that let players cooperate or compete with each other.</td>
266
- <td>- Minecraft has a pixelated and blocky art style, while ARK has a realistic and detailed art style.<br>- Minecraft has a fantasy theme, while ARK has a sci-fi theme.<br>- Minecraft has more variety and creativity in terms of items and structures, while ARK has more variety and realism in terms of creatures and biomes.</td>
267
- </tr>
268
- <tr>
269
- <td>Rust</td>
270
- <td>- Both games feature a survival gameplay that requires players to manage their basic needs, such as food, water, health, and temperature.<br>- Both games have combat mechanics that let players use weapons and tools to fight against enemies.<br>- Both games have base building mechanics that let players construct and defend their own bases.</td>
271
- <td>- Rust has a post-apocalyptic theme, while ARK has a prehistoric theme.<br>- Rust has more focus on PvP and raiding, while ARK has more focus on PvE and taming.<br>- Rust has more realism and brutality in terms of survival and combat, while ARK has more fantasy and fun in terms of exploration and adventure.</td>
272
- </tr>
273
- <tr>
274
- <td>Conan Exiles</td>
275
- <td>- Both games feature a survival gameplay that requires players to manage their basic needs, such as food, water, health, and temperature.<br>- Both games have combat mechanics that let players use weapons and tools to fight against enemies.<br>- Both games have base building mechanics that let players construct and defend their own bases.</td>
276
- <td>- Conan Exiles has a barbarian theme, while ARK has a dinosaur theme.<br>- Conan Exiles has more focus on melee combat and magic, while ARK has more focus on ranged combat and technology.<br>- Conan Exiles has more mature and explicit content, such as nudity and gore, while ARK has more family-friendly content.</td>
277
- </tr>
278
- <tr>
279
- <td>Subnautica</td>
280
- <td>- Both games feature a survival gameplay that requires players to manage their basic needs, such as food, water, health, and oxygen.<br>- Both games have crafting mechanics that let players use resources to make items and equipment.<br>- Both games have exploration mechanics that let players discover new biomes and secrets.</td>
281
- <td>- Subnautica has an aquatic theme, while ARK has a terrestrial theme.<br>- Subnautica has more focus on stealth and evasion, while ARK has more focus on aggression and domination.<br>- Subnautica has more horror and suspense elements, while ARK has more action and thrill elements.</td>
282
- </tr>
283
- </table>
284
- <h2>Conclusion</h2>
285
- <p>In conclusion, ARK: Survival Evolved is a game that offers a unique and exciting experience for players who love survival games, dinosaurs, or both. The game has a lot of content and features that can keep players entertained for hours. However, the game also has some flaws and issues that can affect the enjoyment of some players. The game is not for everyone, and it depends on your personal preferences and expectations. If you are looking for a game that lets you live out your fantasy of being a dinosaur tamer in a stunning and challenging world, you might love ARK: Survival Evolved. If you are looking for a game that is easy to play, bug-free, and balanced, you might not like ARK: Survival Evolved. The best way to find out is to try it yourself and see if it suits your taste.</p>
286
- <p>If you want to learn more about ARK: Survival Evolved, you can visit the official website, the wiki, the forums, the subreddit, or the YouTube channel of the game. You can also watch some gameplay videos or streams from other players to get a better idea of what the game is like. You can also join the community and share your thoughts and experiences with other fans of the game.</p>
287
- <p>We hope you enjoyed this article and found it helpful and informative. If you have any questions, comments, or feedback, feel free to leave them below. We would love to hear from you and answer your queries. Thank you for reading and have a great day!</p>
288
- <h2>FAQs</h2>
289
- <p>Here are some of the frequently asked questions about ARK: Survival Evolved:</p>
290
- <h3>Q: How long does it take to tame a dinosaur?</h3>
291
- <p>A: It depends on the type, level, and method of taming the dinosaur. Some dinosaurs can be tamed in minutes, while others can take hours or even days. You can use a taming calculator to estimate the time and resources needed to tame a specific dinosaur.</p>
292
- <h3>Q: How do I get metal in the game?</h3>
293
- <p>A: Metal is one of the most valuable and useful resources in the game. You can get metal by mining metal nodes with a pickaxe or an ankylosaurus. Metal nodes are usually found in mountainous or volcanic areas. You can also get metal by smelting metal scraps or ingots in a forge or an industrial forge.</p>
294
- <h3>Q: How do I level up in the game?</h3>
295
- <p>A: You can level up in the game by gaining experience points (XP) from various activities, such as killing creatures, harvesting resources, crafting items, completing missions, and more. You can also get XP from explorer notes, which are hidden documents that reveal the lore of the game. You can also get XP from tributes, which are items that can be used to summon bosses.</p>
296
- <h3>Q: How do I join or create a tribe in the game?</h3>
297
- <p>A: You can join or create a tribe in the game by accessing the tribe menu from your inventory or pause screen. You can then invite or accept other players to join your tribe, or leave or disband your tribe. You can also manage your tribe settings, such as name, logo, rank, permissions, alliances, and more.</p>
298
- <h3>Q: How do I transfer my character or items between servers or maps?</h3>
299
- <p>A: You can transfer your character or items between servers or maps by using obelisks, which are giant towers that act as portals to other ARKs. You can access the obelisk terminal and upload your character or items to the ARK data storage. You can then download them from another obelisk on another server or map. However, some servers or maps may have restrictions or limitations on transferring characters or items.</p> 197e85843d<br />
300
- <br />
301
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Delta Touch [7 x Doom engine source port] - A must-have app for Doom fans on Android.md DELETED
@@ -1,181 +0,0 @@
1
- <br />
2
- <h1>Delta Touch: The Ultimate Doom Engine Collection for Android</h1>
3
- <p>If you are a fan of the classic first-person shooter game Doom, you probably know that there are many source ports available that enhance the original game with new features, graphics, and compatibility. But did you know that you can play these source ports on your Android device with one app? That app is called Delta Touch, and it is the ultimate collection of Doom engine source ports in one app!</p>
4
- <p>Delta Touch is a paid app that lets you play seven of the best Doom engine source ports on your Android device. You can enjoy the original Doom, Doom II, Final Doom, Hexen, Heretic, Strife, and even Doom 3 with full touch screen and game-pad support, customizable UI and controls, various rendering modes and options, support for thousands of mods and levels, multiplayer launcher for online gaming, mouse and keyboard support (Android 8 and above), gyro aim assist (Gyroscope needed), and more!</p>
5
- <h2>delta touch 7 x doom engine source port apk</h2><br /><p><b><b>Download</b> &#127775; <a href="https://urlin.us/2uSTUR">https://urlin.us/2uSTUR</a></b></p><br /><br />
6
- <p>In this article, we will show you the features of Delta Touch, how to install and play it on your Android device, and how to play Doom 3 on Delta Touch. Whether you are a veteran Doom player or a newcomer to the series, you will find something to love about Delta Touch. So read on and discover why Delta Touch is the ultimate Doom engine collection for Android!</p>
7
- <h2>Features of Delta Touch</h2>
8
- <h3>Seven Doom engine source ports in one app</h3>
9
- <p>Delta Touch lets you play seven of the best Doom engine source ports on your Android device. These are:</p>
10
- <p>delta touch 7 x doom engines android game<br />
11
- delta touch 7 x gzdoom lzdoom zandronum apk<br />
12
- delta touch 7 x doom source ports free download<br />
13
- delta touch 7 x doom engines mod support<br />
14
- delta touch 7 x doom engines apkcombo<br />
15
- delta touch 7 x doom engines latest version<br />
16
- delta touch 7 x doom engines open touch gaming<br />
17
- delta touch 7 x doom engines play store<br />
18
- delta touch 7 x doom engines brutal doom<br />
19
- delta touch 7 x doom engines chocolate doom<br />
20
- delta touch 7 x doom engines prboom+<br />
21
- delta touch 7 x doom engines retrodoom<br />
22
- delta touch 7 x doom engines dhewm3<br />
23
- delta touch 7 x doom engines full touch screen controls<br />
24
- delta touch 7 x doom engines gamepad support<br />
25
- delta touch 7 x doom engines custom ui<br />
26
- delta touch 7 x doom engines custom buttons<br />
27
- delta touch 7 x doom engines community mods and levels<br />
28
- delta touch 7 x doom engines midi and fluidsynth samples<br />
29
- delta touch 7 x doom engines wad files<br />
30
- delta touch 7 x doom engines hexen heretic strife<br />
31
- delta touch 7 x doom engines multiplayer launcher<br />
32
- delta touch 7 x doom engines mouse and keyboard support<br />
33
- delta touch 7 x doom engines gyro aim assist<br />
34
- delta touch 7 x doom engines freedoom data included<br />
35
- delta touch 7 x doom engines music libraries download<br />
36
- delta touch 7 x doom engines high end device needed<br />
37
- delta touch 7 x doom engines multitreaded rendering<br />
38
- delta touch 7 x doom engines base game roe expansion classic mod<br />
39
- delta touch 7 x doom engines mod selection<br />
40
- delta touch 7 x doom engines resolution configuration<br />
41
- delta touch 7 x doom engines pk4 files needed<br />
42
- delta touch 7 x doom engines not bfg edition<br />
43
- delta touch 7 x doom engines email for issues<br />
44
- delta touch 7 x doom engines full money back guarantee<br />
45
- delta touch 7 x doom engines legal icons and graphics<br />
46
- delta touch 7 x doom engines gpl source port<br />
47
- delta touch 7 x doom engines no copyrighted data<br />
48
- how to install delta touch 7 x doom engine source port apk <br />
49
- how to play original doom with delta touch 7 x apk <br />
50
- how to use mods with delta touch 7 x apk <br />
51
- how to configure controls for delta touch 7 x apk <br />
52
- how to play multiplayer with delta touch 7 x apk <br />
53
- how to play dhewm3 with delta touch 7 x apk <br />
54
- how to access cheats and console commands with delta touch 7 x apk <br />
55
- how to update to the latest version of delta touch 7 x apk <br />
56
- how to get the best performance with delta touch 7 x apk <br />
57
- how to get refund for delta touch 7 x apk <br />
58
- how to contact developer of delta touch 7 x apk</p>
59
- <ul>
60
- <li><strong>GZDoom</strong>: The most popular source port that adds modern features such as OpenGL rendering, dynamic lighting, scripting, high-resolution textures, true color support, 3D floors, slopes, portals, polyobjects, ACS, DECORATE, ZScript, custom weapons and monsters, etc. Delta Touch supports four versions of GZDoom: v1.9.1, v3.25, v3.82, and v4.x.</li>
61
- <li><strong>LZDoom</strong>: A fork of GZDoom that is compatible with older devices and mods that use older versions of GZDoom.</li>
62
- <li><strong>Zandronum</strong>: A source port that focuses on multiplayer gaming. It supports online co-op, deathmatch, capture the flag, invasion, survival, team last man standing, domination, terminator, possession, clan arena, etc. Delta Touch supports two versions of Zandronum: v3.0 and v3.1 Alpha.</li>
63
- <li><strong>Chocolate Doom</strong>: A source port that aims to preserve the original look and feel of Doom as it was played in the 1990s. It is faithful to the original game's bugs, limitations, low-resolution graphics, etc.</li>
64
- <li><strong>PrBoom+</strong>: A source port that is compatible with most mods that are compatible with the original Doom engine. It also adds some features such as high-resolution rendering, uncapped framerate, demo recording and playback, etc.</li>
65
- <li><strong>RetroDoom</strong>: A source port that combines features from Chocolate Doom and PrBoom+. It also adds some retro-style effects such as scanlines, CRT curvature, etc.</li>
66
- <li><strong>Dhewm3</strong>: A source port that lets you play Doom 3 on your Android device. It is based on the original Doom 3 source code and improves it with bug fixes, compatibility, and performance enhancements.</li>
67
- </ul>
68
- <p>With Delta Touch, you can switch between these source ports easily and enjoy the different aspects of Doom that they offer. You can also mix and match them with various mods and levels to create your own custom Doom experience.</p>
69
- <h3>Full touch screen and game-pad support</h3>
70
- <p>Delta Touch lets you play Doom on your Android device with full touch screen and game-pad support. You can use the virtual buttons on the screen to control your movement, shooting, switching weapons, opening doors, etc. You can also customize the size, position, and opacity of the buttons to suit your preference. You can also use gestures such as swiping, tapping, pinching, etc. to perform actions such as quick save, quick load, zoom in, zoom out, etc.</p>
71
- <p>If you prefer to use a game-pad, Delta Touch supports most Bluetooth and USB game-pads that are compatible with Android. You can map the buttons and sticks of your game-pad to the actions of Doom. You can also use the analog sticks to control your movement and aiming. Delta Touch supports vibration feedback for game-pads that have it.</p>
72
- <p>Whether you use touch screen or game-pad, Delta Touch lets you play Doom comfortably and conveniently on your Android device.</p>
73
- <h3>Customizable UI and controls</h3>
74
- <p>Delta Touch lets you customize the UI and controls of Doom to your liking. You can change the size and position of the HUD elements such as health, armor, ammo, keys, etc. You can also change the color and transparency of the HUD elements. You can also enable or disable various UI features such as crosshair, messages, automap, status bar, etc.</p>
75
- <p>You can also customize the controls of Doom to suit your play style. You can change the sensitivity and acceleration of your movement and aiming. You can also enable or disable various control features such as auto-aiming, auto-use, auto-run, invert look, look spring, strafe on turn, etc.</p>
76
- <p>With Delta Touch, you can make Doom look and feel the way you want it to.</p>
77
- <h3>Various rendering modes and options</h3>
78
- <p>Delta Touch lets you choose from various rendering modes and options to enhance the graphics of Doom. You can choose from software rendering or hardware rendering depending on your device's capabilities and your preference. Software rendering is more faithful to the original game's graphics but has lower resolution and fewer effects. Hardware rendering uses OpenGL to improve the graphics with higher resolution and more effects.</p>
79
- <p>You can also choose from various rendering options such as texture filtering, dynamic lighting, shadows, fog, bloom, lens flares, ambient occlusion, anti-aliasing, etc. These options can make Doom look more realistic or more stylized depending on your taste. You can also adjust the brightness, contrast, gamma, saturation, etc. of the graphics to suit your vision.</p>
80
- <p>With Delta Touch , you can make Doom look as good as possible on your Android device.</p>
81
- <h3>Support for thousands of mods and levels</h3>
82
- <p>Delta Touch lets you play thousands of mods and levels that have been created by the Doom community over the years. You can download and install these mods and levels from various sources such as Doomworld, ModDB, Wad Archive, etc. You can also copy your own wad files to your device and play them with Delta Touch.</p>
83
- <p>You can play mods and levels that add new weapons, monsters, items, graphics, sounds, music, maps, etc. to Doom. You can also play mods and levels that change the gameplay, story, theme, genre, etc. of Doom. You can also play mods and levels that are based on other games, movies, TV shows, books, etc.</p>
84
- <p>With Delta Touch, you can enjoy the endless variety and creativity of the Doom community on your Android device.</p>
85
- <h3>Multiplayer launcher for online gaming</h3>
86
- <p>Delta Touch lets you play Doom online with other players using the multiplayer launcher. You can join or host servers that run Zandronum or Chocolate Doom source ports. You can play various game modes such as co-op, deathmatch, capture the flag, invasion, survival, team last man standing, domination, terminator, possession, clan arena, etc. You can also chat with other players using the in-game chat feature.</p>
87
- <p>You can also use Delta Touch to play Doom locally with other players using the same Wi-Fi network. You can use the LAN feature to create or join servers that run any of the source ports supported by Delta Touch. You can also use Bluetooth to connect with other players who have Delta Touch installed on their devices.</p>
88
- <p>With Delta Touch, you can have fun playing Doom with your friends or strangers online or offline.</p>
89
- <h3>Mouse and keyboard support (Android 8 and above)</h3>
90
- <p>Delta Touch lets you play Doom with mouse and keyboard on your Android device if you have Android 8 or above. You can connect a mouse and keyboard to your device via Bluetooth or USB and use them to control Doom. You can also customize the mouse sensitivity and acceleration and the keyboard bindings to suit your preference.</p>
91
- <p>Playing Doom with mouse and keyboard can give you more precision and accuracy than touch screen or game-pad. It can also make you feel more immersed in the game as if you were playing it on a PC.</p>
92
- <p>With Delta Touch, you can play Doom with mouse and keyboard on your Android device just like on a PC.</p>
93
- <h3>Gyro aim assist (Gyroscope needed)</h3>
94
- <p>Delta Touch lets you use gyro aim assist to help you aim better in Doom. Gyro aim assist is a feature that uses the gyroscope sensor of your device to detect your device's tilt and movement and adjust your aiming accordingly. You can use gyro aim assist along with touch screen or game-pad controls to fine-tune your aiming.</p>
95
- <p>Gyro aim assist can make aiming easier and smoother in Doom. It can also make you feel more involved in the game as if you were holding a real weapon.</p>
96
- <p>With Delta Touch , you can use gyro aim assist to enhance your aiming in Doom.</p>
97
- <h2>How to install and play Delta Touch</h2>
98
- <h3>Requirements and compatibility</h3>
99
- <p>To install and play Delta Touch, you need an Android device that meets the following requirements:</p>
100
- <ul>
101
- <li>Android 4.1 or higher</li>
102
- <li>At least 100 MB of free storage space</li>
103
- <li>A touch screen or a game-pad or a mouse and keyboard (Android 8 and above)</li>
104
- <li>A gyroscope sensor (optional, for gyro aim assist)</li>
105
- <li>Your own wad files for Doom, Doom II, Final Doom, Hexen, Heretic, Strife, and Doom 3 (Delta Touch does not include the original game files due to legal reasons)</li>
106
- </ul>
107
- <p>Delta Touch is compatible with most Android devices, but some devices may have issues with performance, graphics, sound, or controls. You can check the compatibility list on the Delta Touch website or the Google Play Store page to see if your device is supported or not. You can also contact the developer via email or Discord if you encounter any problems or have any suggestions.</p>
108
- <h3>Downloading and installing the app</h3>
109
- <p>To download and install Delta Touch, you need to purchase it from the Google Play Store for $2.99. You can use this link to go to the Google Play Store page of Delta Touch: <a href="">Delta Touch - The Ultimate Doom Engine Collection - Apps on Google Play</a></p>
110
- <p>Once you have purchased the app, you can download and install it on your device. The app size is about 90 MB, so make sure you have enough storage space and a stable internet connection. The installation process should take a few minutes depending on your device and network speed.</p>
111
- <h3>Copying your own wad files to your device</h3>
112
- <p>To play Delta Touch, you need to copy your own wad files to your device. Wad files are the game files that contain the data for Doom, Doom II, Final Doom, Hexen, Heretic, Strife, and Doom 3. You can get these wad files from various sources such as Steam, GOG.com, Humble Bundle, etc. You can also use your own physical copies of the games if you have them.</p>
113
- <p>To copy your wad files to your device, you need to connect your device to your PC via USB cable or Wi-Fi. You need to enable file transfer mode on your device and locate the folder where Delta Touch is installed on your device. The folder name should be something like <code>/sdcard/Android/data/com.opentouchgaming.deltatouch/files/</code>. You need to create a subfolder named <code>/Doom/</code> inside this folder and copy your wad files there. The folder structure should look something like this:</p>
114
- <pre><code>/sdcard/Android/data/com.opentouchgaming.deltatouch/files/Doom/ doom.wad doom2.wad tnt.wad plutonia.wad hexen.wad heretic.wad strife1.wad doom3-base.pk4 ... </code></pre>
115
- <p>You need to copy all the wad files that you want to play with Delta Touch to this folder. You can also copy any mod or level wad files that you want to play with Delta Touch to this folder.</p>
116
- <h3>Selecting and launching a source port</h3>
117
- <p>To select and launch a source port with Delta Touch , you need to open the app and tap on the source port icon on the main menu. You will see a list of the seven source ports that Delta Touch supports. You can tap on any of them to select it and launch it. You can also swipe left or right on the source port icon to switch between them quickly.</p>
118
- <p>Once you have selected and launched a source port, you will see a list of the wad files that you have copied to your device. You can tap on any of them to load it and start playing. You can also tap on the plus icon to add more wad files from your device or from online sources. You can also tap on the minus icon to remove any wad files that you don't want to play.</p>
119
- <p>With Delta Touch, you can select and launch any of the seven source ports with ease and play any of the wad files that you have on your device or online.</p>
120
- <h3>Configuring your settings and controls</h3>
121
- <p>To configure your settings and controls with Delta Touch, you need to tap on the gear icon on the main menu or in-game. You will see a list of options that let you customize various aspects of Delta Touch. You can tap on any of them to access their sub-options and adjust them to your preference. Some of the options that you can configure are:</p>
122
- <ul>
123
- <li><strong>Video</strong>: You can change the resolution, aspect ratio, fullscreen mode, rendering mode, rendering options, brightness, contrast, gamma, saturation, etc. of the video output.</li>
124
- <li><strong>Audio</strong>: You can change the volume, sound effects, music, voice chat, etc. of the audio output.</li>
125
- <li><strong>Controls</strong>: You can change the sensitivity, acceleration, auto-aiming, auto-use, auto-run, invert look, look spring, strafe on turn, etc. of the controls. You can also customize the touch screen buttons and gestures or the game-pad buttons and sticks.</li>
126
- <li><strong>UI</strong>: You can change the size, position, color, transparency, crosshair, messages, automap, status bar, etc. of the UI elements.</li>
127
- <li><strong>Mods</strong>: You can browse and download mods and levels from various online sources such as Doomworld, ModDB, Wad Archive, etc. You can also manage your downloaded mods and levels and load them with any source port.</li>
128
- <li><strong>Multiplayer</strong>: You can join or host servers that run Zandronum or Chocolate Doom source ports. You can also chat with other players using the in-game chat feature.</li>
129
- <li><strong>Advanced</strong>: You can access advanced options such as console commands, cheat codes, save states, screenshots, etc.</li>
130
- </ul>
131
- <p>With Delta Touch , you can configure your settings and controls with Delta Touch to optimize your Doom experience on your Android device.</p>
132
- <h2>Doom 3 on Delta Touch</h2>
133
- <h3>What is Dhewm3 and how it differs from other source ports</h3>
134
- <p>Dhewm3 is a source port that lets you play Doom 3 on your Android device with Delta Touch. It is based on the original Doom 3 source code that was released by id Software in 2011. It improves the original game with bug fixes, compatibility, and performance enhancements. It also supports Doom 3 mods and levels that are compatible with the original game.</p>
135
- <p>Dhewm3 differs from other source ports in Delta Touch in several ways. First, it is the only source port that supports Doom 3, which is a different game from the other Doom games. Doom 3 is a horror-themed game that uses advanced graphics, physics, and sound effects to create a more immersive and realistic experience. Second, it requires more powerful hardware and storage space than the other source ports. You need a device that has at least 2 GB of RAM and 4 GB of free storage space to play Doom 3 on Delta Touch. Third, it has its own settings and controls that are separate from the other source ports. You need to configure them separately to suit your preference.</p>
136
- <p>With Dhewm3, you can play Doom 3 on your Android device with Delta Touch and enjoy a different kind of Doom experience.</p>
137
- <h3>How to play Doom 3 on Delta Touch</h3>
138
- <p>To play Doom 3 on Delta Touch, you need to follow these steps:</p>
139
- <ol>
140
- <li>Download and install Delta Touch from the Google Play Store if you haven't already.</li>
141
- <li>Copy your own wad files for Doom 3 to your device. You need to copy the <code>doom3-base.pk4</code> file and any other <code>.pk4</code> files that are part of the game or the mods that you want to play. You need to copy them to the same folder where you copied your other wad files for the other source ports. The folder name should be something like <code>/sdcard/Android/data/com.opentouchgaming.deltatouch/files/Doom/</code>.</li>
142
- <li>Open Delta Touch and tap on the Dhewm3 icon on the main menu. You will see a list of the <code>.pk4</code> files that you have copied to your device. You can tap on any of them to load it and start playing. You can also tap on the plus icon to add more <code>.pk4</code> files from your device or from online sources. You can also tap on the minus icon to remove any <code>.pk4</code> files that you don't want to play.</li>
143
- <li>Configure your settings and controls for Dhewm3. You can tap on the gear icon on the main menu or in-game to access the options menu. You can change various aspects of Dhewm3 such as video, audio, controls, UI, etc. You can also use console commands, cheat codes, save states, screenshots, etc.</li>
144
- </ol>
145
- <p>With these steps, you can play Doom 3 on Delta Touch and have fun with it.</p>
146
- <h3>Tips and tricks for optimizing your performance and experience</h3>
147
- <p>To optimize your performance and experience when playing Doom 3 on Delta Touch , you can follow these tips and tricks:</p>
148
- <ul>
149
- <li>Make sure your device meets the minimum requirements for playing Doom 3 on Delta Touch. You need a device that has at least 2 GB of RAM and 4 GB of free storage space. You also need a device that supports OpenGL ES 3.0 or higher.</li>
150
- <li>Adjust the video settings to match your device's capabilities and your preference. You can lower the resolution, texture quality, shadow quality, anti-aliasing, etc. to improve the performance and reduce the battery consumption. You can also enable or disable various effects such as bloom, motion blur, ambient occlusion, etc. to enhance the graphics and the atmosphere.</li>
151
- <li>Use a game-pad or a mouse and keyboard to control Doom 3 on Delta Touch. Doom 3 is a game that requires precise and accurate aiming and movement, which can be difficult to achieve with touch screen controls. Using a game-pad or a mouse and keyboard can give you more control and comfort when playing Doom 3 on Delta Touch.</li>
152
- <li>Use headphones or earphones to enjoy the sound effects and music of Doom 3 on Delta Touch. Doom 3 is a game that relies heavily on sound to create a tense and immersive experience. Using headphones or earphones can help you hear the sounds of the enemies, the environment, and the voice acting better than using the speakers of your device.</li>
153
- <li>Play Doom 3 on Delta Touch in a dark and quiet environment. Doom 3 is a game that is meant to be played in a dark and quiet environment to enhance the horror and suspense of the game. Playing Doom 3 on Delta Touch in a bright and noisy environment can ruin the mood and the immersion of the game.</li>
154
- </ul>
155
- <p>With these tips and tricks, you can optimize your performance and experience when playing Doom 3 on Delta Touch.</p>
156
- <h2>Conclusion</h2>
157
- <p>Delta Touch is the ultimate collection of Doom engine source ports for Android. It lets you play seven of the best source ports on your Android device with full touch screen and game-pad support, customizable UI and controls, various rendering modes and options, support for thousands of mods and levels, multiplayer launcher for online gaming, mouse and keyboard support (Android 8 and above), gyro aim assist (Gyroscope needed), and more!</p>
158
- <p>You can also play Doom 3 on Delta Touch with Dhewm3, a source port that improves the original game with bug fixes, compatibility, and performance enhancements. You can enjoy a different kind of Doom experience with advanced graphics, physics, and sound effects.</p>
159
- <p>Whether you are a veteran Doom player or a newcomer to the series, you will find something to love about Delta Touch. So what are you waiting for? Download Delta Touch today and enjoy the best of Doom on your Android device!</p>
160
- <h2>Frequently Asked Questions</h2>
161
- <h4>Q: How much does Delta Touch cost?</h4>
162
- <p>A: Delta Touch costs $2.99 on the Google Play Store. You can use this link to go to the Google Play Store page of Delta Touch: <a href="">Delta Touch - The Ultimate Doom Engine Collection - Apps on Google Play</a></p>
163
- <h4>Q: Where can I get wad files for Delta Touch?</h4>
164
- <p>A: You can get wad files for Delta Touch from various sources such as Steam, GOG.com, Humble Bundle, etc. You can also use your own physical copies of the games if you have them. You can also download mods and levels from various online sources such as Doomworld, ModDB, Wad Archive, etc.</p>
165
- <h4>Q: How can I contact the developer of Delta Touch?</h4>
166
- <p>A: You can contact the developer of Delta Touch via email or Discord. The email address is <a href="mailto:[email protected]">[email protected]</a>. The Discord server is <a href="https://discord.gg/6mK5BEw">OpenTouchGaming</a>.</p>
167
- <h4>Q: What are some of the best mods and levels for Delta Touch?</h4>
168
- <p>A: There are thousands of mods and levels for Delta Touch that you can play and enjoy. Some of the best ones are:</p>
169
- <ul>
170
- <li><strong>Brutal Doom</strong>: A mod that makes Doom more violent, gory, and challenging.</li>
171
- <li><strong>Doom 64 Retribution</strong>: A mod that recreates Doom 64 with improved graphics and gameplay.</li>
172
- <li><strong>Sigil</strong>: A mod that adds a new episode to the original Doom with new maps, music, and enemies.</li>
173
- <li><strong>Project Brutality</strong>: A mod that expands Brutal Doom with more weapons, monsters, gore, features , and options.</li>
174
- <li><strong>Back to Saturn X</strong>: A mod that adds two new episodes to Doom II with new maps, music, and graphics.</li>
175
- <li><strong>Aliens TC</strong>: A mod that transforms Doom into a game based on the Aliens movie franchise.</li>
176
- </ul>
177
- <p>You can find more mods and levels for Delta Touch on the Delta Touch website or the Google Play Store page.</p>
178
- <h4>Q: How can I play Doom 3 mods and levels on Delta Touch?</h4>
179
- <p>A: You can play Doom 3 mods and levels on Delta Touch with Dhewm3, a source port that supports Doom 3 mods and levels that are compatible with the original game. You need to copy the <code>.pk4</code> files of the mods and levels that you want to play to the same folder where you copied your <code>doom3-base.pk4</code> file. The folder name should be something like <code>/sdcard/Android/data/com.opentouchgaming.deltatouch/files/Doom/</code>. You can then load them with Dhewm3 and play them.</p> 197e85843d<br />
180
- <br />
181
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA 5 for Xbox 360 Free and Enjoy the Ultimate Open World Game.md DELETED
@@ -1,152 +0,0 @@
1
- <br />
2
- <h1>GTA 5 for Xbox 360 Free Download: How to Play the Ultimate Open World Game</h1>
3
- <p>If you are a fan of action-adventure games, you have probably heard of Grand Theft Auto V, or GTA 5 for short. This game is one of the most successful and acclaimed titles in the history of video games, selling over 150 million copies worldwide and receiving numerous awards and accolades. But did you know that you can play GTA 5 for Xbox 360 for free? In this article, we will show you how to get, install, and enjoy this amazing game on your console without spending a dime.</p>
4
- <h2>What is GTA 5 and why is it so popular?</h2>
5
- <p>GTA 5 is the fifth main installment in the Grand Theft Auto series, developed by Rockstar Games and released in 2013. It is an open world game that allows you to explore a vast and diverse fictional city called Los Santos, based on Los Angeles, and its surrounding areas. You can choose to follow the story mode, which involves three protagonists with different backgrounds and personalities, or engage in various activities and missions in the online mode, which supports up to 30 players.</p>
6
- <h2>gta 5 for xbox 360 free download</h2><br /><p><b><b>DOWNLOAD</b> === <a href="https://urlin.us/2uSVar">https://urlin.us/2uSVar</a></b></p><br /><br />
7
- <h3>The story and gameplay of GTA 5</h3>
8
- <p>The story mode of GTA 5 follows the lives of Michael De Santa, a retired bank robber who lives a luxurious but unhappy life under witness protection; Franklin Clinton, a young street hustler who works as a repo man for a shady car dealer; and Trevor Philips, a former partner of Michael who is now a violent and unstable drug lord. Their paths cross when they are forced to work together to pull off a series of daring heists that involve some of the most powerful and dangerous people in the city.</p>
9
- <p>The gameplay of GTA 5 is based on the concept of freedom and choice. You can switch between the three characters at any time, each with their own skills, abilities, and personal stories. You can also customize their appearance, vehicles, weapons, and properties. You can explore the vast open world by foot, car, bike, boat, plane, helicopter, or parachute. You can interact with various characters, objects, and events in the world. You can participate in various activities such as racing, golfing, tennis, hunting, yoga, darts, strip clubs, cinemas, amusement parks, casinos, nightclubs, and more. You can also cause chaos and mayhem by fighting with pedestrians, police officers, gangs, or rival criminals.</p>
10
- <h3>The features and enhancements of GTA 5 for Xbox 360</h3>
11
- <p>GTA 5 for Xbox 360 is not just a port of the original game. It is a enhanced version that offers several improvements and additions that make it more enjoyable and immersive. Some of these features are:</p>
12
- <ul>
13
- <li>A higher resolution of 720p with improved textures, lighting effects, shadows, reflections, and draw distances.</li>
14
- <li>A smoother frame rate of up to 30 frames per second with less pop-in and loading times.</li>
15
- <li>A new first-person mode that lets you experience the game from a different perspective.</li>
16
- <li>A new radio station called The Lab that features original music from various artists.</li>
17
- <li>A new Rockstar Editor that lets you create your own videos using footage from the game.</li>
18
- <li>A new Snapmatic app that lets you take photos and selfies with your phone and share them online.</li>
19
- <li>A new Director Mode that lets you create your own scenes using various characters and settings from the game.</li>
20
- </ul>
21
- <p>With these features, GTA 5 for Xbox 360 is more than just a game. It is a masterpiece of entertainment that will keep you hooked for hours.</p>
22
- <p>gta 5 xbox 360 download free full version<br />
23
- how to download gta 5 for free on xbox 360<br />
24
- gta 5 xbox 360 iso download free<br />
25
- gta 5 xbox 360 free download no survey<br />
26
- gta 5 xbox 360 free download usb<br />
27
- gta 5 xbox 360 free download code<br />
28
- gta 5 xbox 360 free download torrent<br />
29
- gta 5 xbox 360 free download online<br />
30
- gta 5 xbox 360 free download jtag<br />
31
- gta 5 xbox 360 free download mod menu<br />
32
- gta 5 xbox 360 free download mega<br />
33
- gta 5 xbox 360 free download mediafire<br />
34
- gta 5 xbox 360 free download highly compressed<br />
35
- gta 5 xbox 360 free download with multiplayer<br />
36
- gta 5 xbox 360 free download no verification<br />
37
- gta 5 xbox 360 free download no password<br />
38
- gta 5 xbox 360 free download no jailbreak<br />
39
- gta 5 xbox 360 free download no disc<br />
40
- gta 5 xbox 360 free download no human verification<br />
41
- gta 5 xbox 360 free download no offers<br />
42
- gta 5 xbox 360 free download direct link<br />
43
- gta 5 xbox 360 free download google drive<br />
44
- gta 5 xbox 360 free download apk<br />
45
- gta 5 xbox 360 free download reddit<br />
46
- gta 5 xbox 360 free download youtube<br />
47
- gta v for xbox 360 free download<br />
48
- how to get gta v for free on xbox 360<br />
49
- gta v xbox 360 iso free download<br />
50
- gta v xbox 360 free download no survey<br />
51
- gta v xbox 360 free download usb<br />
52
- gta v xbox 360 free download code<br />
53
- gta v xbox 360 free download torrent<br />
54
- gta v xbox 360 free download online<br />
55
- gta v xbox 360 free download jtag<br />
56
- gta v xbox 360 free download mod menu<br />
57
- gta v xbox 360 free download mega<br />
58
- gta v xbox 360 free download mediafire<br />
59
- gta v xbox 360 free download highly compressed<br />
60
- gta v xbox 360 free download with multiplayer<br />
61
- grand theft auto v for xbox 360 free download<br />
62
- how to get grand theft auto v for free on xbox 360<br />
63
- grand theft auto v xbox 360 iso free download<br />
64
- grand theft auto v xbox 360 free download no survey<br />
65
- grand theft auto v xbox 360 free download usb<br />
66
- grand theft auto v xbox 360 free download code</p>
67
- <h2>How to get GTA 5 for Xbox 360 for free?</h2>
68
- <p>Now that you know what GTA 5 is and why it is so awesome, you might be wondering how to get it for your Xbox 360 for free. Well, there are two ways to do that: the official way and the unofficial way. Let's see what they are and how they work.</p>
69
- <h3>The official way: buy the game and download it from Xbox Live</h3>
70
- <p>The official way to get GTA 5 for Xbox 360 for free is to buy the game and download it from Xbox Live. This might sound contradictory, but hear me out. If you have an Xbox Live Gold membership, which costs $9.99 per month or $59.99 per year, you can access the Games with Gold program, which offers two free games every month for Xbox 360 and Xbox One. Sometimes, GTA 5 is one of those games, so you can download it and keep it forever without paying anything extra.</p>
71
- <p>The advantages of this method are that you get a legitimate copy of the game that is compatible with your console and online services, and that you also get access to other free games and discounts every month. The disadvantages are that you have to pay for the Xbox Live Gold membership, which might not be worth it if you don't play online or use other features, and that you have to wait until GTA 5 is available as a free game, which might take a long time or never happen.</p>
72
- <h3>The unofficial way: download the game and DLCs from online sources</h3>
73
- <p>The unofficial way to get GTA 5 for Xbox 360 for free is to download the game and DLCs from online sources such as torrents, file-sharing sites, or forums. This method involves finding a reliable source that offers the game files in ISO or RGH format, downloading them to your computer, transferring them to a USB drive or an external hard drive, and installing them on your console using a modded dashboard or a flash drive.</p>
74
- <h4>The pros and cons of the unofficial way</h4>
75
- <p>The advantages of this method are that you can get the game and DLCs for free without paying anything or waiting for anything, and that you can also get access to mods and cheats that enhance your gameplay experience. The disadvantages are that you need to have a modded console or a flash drive that can bypass the security system of your console, which might void your warranty or get you banned from online services, and that you also need to have enough storage space and technical knowledge to install the game properly.</p>
76
- <h4>The risks and precautions of the unofficial way</h4>
77
- <p>The risks of this method are that you might download corrupted or infected files that can damage your console or your computer, or that you might download fake or incomplete files that won't work or will crash your game. You might also face legal issues if you are caught downloading or distributing pirated content, which is illegal in most countries. The precautions of this method are that you should always scan the files before downloading them, use a VPN or a proxy to hide your IP address, and backup your data before installing anything on your console.</p>
78
- <h2>How to install and play GTA 5 for Xbox 360 for free?</h2>
79
- <p>Now that you know how to get GTA 5 for Xbox 360 for free, let's see how to install and play it on your console. Depending on which method you chose, the steps might vary slightly, but here are the general guidelines:</p>
80
- <h3>The requirements and steps for installing the game</h3>
81
- <p>The requirements for installing the game are:</p>
82
- <ul>
83
- <li>An Xbox 360 console with enough storage space (at least 16 GB).</li>
84
- <li>A USB drive or an external hard drive with enough storage space (at least 16 GB).</li>
85
- <li>A computer with an internet connection and a USB port.</li>
86
- <li>A modded dashboard or a flash drive (only if you chose the unofficial way).</li>
87
- </ul>
88
- <p>The steps for installing the game are:</p>
89
- <ol>
90
- <li>Download the game files from Xbox Live or from an online source to your computer.</li>
91
- <li>Extract the files using a program like WinRAR or 7-Zip if they are compressed.</li>
92
- <li>Copy the files to your USB drive or external hard drive using a program like XBOX ISO Extractor or Horizon.</li>
93
- <li>Plug your USB drive or external hard drive into your console.</li>
94
- <li>Launch the game from your modded dashboard or flash drive (if you chose the unofficial way) or from your normal dashboard (if you chose the official way).</li>
95
- </ol>
96
- <p>Congratulations, you have successfully installed GTA 5 for Xbox 360 for free. Now you can enjoy the game and have fun.</p>
97
- <h3>The tips and tricks for playing the game</h3>
98
- <p>Playing GTA 5 for Xbox 360 for free is not much different from playing the paid version. However, there are some tips and tricks that can help you get the most out of the game and avoid some common problems. Here are some of them:</p>
99
- <ul>
100
- <li>Save your game frequently, especially before and after completing missions or activities. This will prevent you from losing your progress or encountering glitches.</li>
101
- <li>Use the quick save feature on your phone to save your game without going to a safe house. This will save you time and hassle.</li>
102
- <li>Use the character switch feature to change between the three protagonists at any time. This will allow you to access different missions, activities, and locations.</li>
103
- <li>Use the map and the GPS to navigate the city and find points of interest. You can also set waypoints and markers to guide you to your destination.</li>
104
- <li>Use the phone to access various apps and features, such as contacts, messages, internet, camera, social media, stocks, missions, and more.</li>
105
- <li>Use the cheat codes to activate various effects, such as invincibility, weapons, vehicles, weather, and more. However, be aware that using cheat codes will disable achievements and trophies.</li>
106
- <li>Use the online mode to play with other players from around the world. You can join or create crews, participate in various modes and events, customize your character and vehicles, buy properties and businesses, and more.</li>
107
- </ul>
108
- <h2>Conclusion</h2>
109
- <p>GTA 5 for Xbox 360 is one of the best games ever made. It offers a rich and immersive open world experience that will keep you entertained for hours. Whether you follow the story mode or explore the online mode, you will find something to suit your taste and style. And the best part is that you can play it for free by following the methods we explained in this article.</p>
110
- <h3>Summary of the main points</h3>
111
- <p>In this article, we have covered:</p>
112
- <ul>
113
- <li>What is GTA 5 and why is it so popular?</li>
114
- <li>How to get GTA 5 for Xbox 360 for free using the official or unofficial way?</li>
115
- <li>How to install and play GTA 5 for Xbox 360 for free?</li>
116
- <li>What are some tips and tricks for playing GTA 5 for Xbox 360 for free?</li>
117
- </ul>
118
- <h3>Call to action and final thoughts</h3>
119
- <p>If you are ready to play GTA 5 for Xbox 360 for free, don't wait any longer. Download the game today and start your adventure in Los Santos. You won't regret it.</p>
120
- <p>We hope you enjoyed this article and found it useful. If you did, please share it with your friends and leave us a comment below. We would love to hear your feedback and suggestions. And if you have any questions or problems regarding GTA 5 for Xbox 360 free download, feel free to ask us. We will try our best to help you.</p>
121
- <h2>Frequently Asked Questions</h2>
122
- <p>Here are some of the most common questions that people ask about GTA 5 for Xbox 360 free download:</p>
123
- <h3>Q: Is GTA 5 for Xbox 360 still playable in 2023?</h3>
124
- <p>A: Yes, GTA 5 for Xbox 360 is still playable in 2023. However, some features might not work properly or be discontinued due to updates or changes in the online services. For example, some online modes or events might not be available or have fewer players. Also, some DLCs or updates might not be compatible with the Xbox 360 version of the game.</p>
125
- <h3>Q: Can I play GTA 5 for Xbox 360 on Xbox One or Xbox Series X/S?</h3>
126
- <p>A: Yes, you can play GTA 5 for Xbox 360 on Xbox One or Xbox Series X/S using the backward compatibility feature. However, you will need to have a physical disc of the game or a digital copy downloaded from Xbox Live. You will also need to download an update that will optimize the game for your console. You will not be able to play GTA 5 for Xbox 360 on Xbox One or Xbox Series X/S using a modded console or a flash drive.</p>
127
- <h3>Q: Can I transfer my GTA 5 progress from Xbox 360 to another console or platform?</h3>
128
- <p>A: Yes, you can transfer your GTA 5 progress from Xbox 360 to another console or platform using the Rockstar Social Club service. However, you will need to have a valid and linked account on both platforms, and you will only be able to transfer your online progress, not your story progress. You will also need to do this before March 6, 2023, as Rockstar will stop supporting this feature after that date.</p>
129
- <h3>Q: How can I avoid getting banned from online services when playing GTA 5 for Xbox 360 for free?</h3>
130
- <p>A: There is no guarantee that you can avoid getting banned from online services when playing GTA 5 for Xbox 360 for free, especially if you use the unofficial way. However, there are some precautions that you can take to reduce the chances of getting detected or reported. Some of them are:</p>
131
- <ul>
132
- <li>Do not use mods or cheats that give you an unfair advantage over other players or affect the game economy.</li>
133
- <li>Do not use mods or cheats that alter the game files or the console firmware.</li>
134
- <li>Do not use mods or cheats that are outdated or incompatible with the latest version of the game or the online services.</li>
135
- <li>Do not use mods or cheats that are obvious or noticeable by other players or the game moderators.</li>
136
- <li>Do not brag or boast about using mods or cheats online or on social media.</li>
137
- <li>Do not share or distribute mods or cheats online or on social media.</li>
138
- </ul>
139
- <p>If you follow these tips, you might be able to play GTA 5 for Xbox 360 for free without getting banned. However, you should always be aware of the risks and consequences of doing so.</p>
140
- <h3>Q: What are some of the best mods and cheats for GTA 5 for Xbox 360?</h3>
141
- <p>A: There are many mods and cheats for GTA 5 for Xbox 360 that can enhance your gameplay experience and add more fun and variety to the game. However, some of them might not work properly or be compatible with your console or the online services. Therefore, you should always check the reviews and ratings of the mods and cheats before downloading and installing them. Some of the best mods and cheats for GTA 5 for Xbox 360 are:</p>
142
- <ul>
143
- <li>GTA V Redux: A mod that improves the graphics, lighting, weather, physics, and gameplay of the game.</li>
144
- <li>GTA V NaturalVision: A mod that makes the game look more realistic and immersive by adding new textures, effects, and details.</li>
145
- <li>GTA V Script Hook V: A mod that allows you to run various scripts and plugins that add new features and functions to the game.</li>
146
- <li>GTA V Simple Trainer: A mod that gives you access to various options and settings that let you customize your character, vehicles, weapons, and environment.</li>
147
- <li>GTA V Menyoo PC: A mod that lets you spawn various objects, vehicles, characters, and animals in the game.</li>
148
- <li>GTA V LSPDFR: A mod that lets you play as a police officer in the game and perform various tasks and missions.</li>
149
- <li>GTA V Cheat Codes: A list of codes that activate various effects in the game, such as invincibility, weapons, vehicles, weather, and more.</li>
150
- </ul></p> 197e85843d<br />
151
- <br />
152
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Stickman Shinobi Mod Menu and Experience the Fun of Ninja Fighting.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>How to Download Stickman Shinobi Mod Menu and Enjoy Unlimited Features</h1>
3
- <p>Do you love stickman games, ninja legends, martial arts, and nonstop combats? If yes, then you should definitely try Stickman Shinobi, a new arcade game that will take you to a whole new world of elite ninjas and dangerous villains. In this game, you can choose from a large variety of powerful ninjas and warriors, each with their own unique styles of fighting, assassination, and ultimate skills. You can also explore different maps, levels, and bosses, as well as join tournaments and compete with other players.</p>
4
- <p>But what if you want to have more fun and excitement in this game? What if you want to unlock all the characters, get unlimited money, gems, and tickets, and access a mega menu with more options and features? Well, there is a way to do that, and it is called Stickman Shinobi Mod Menu. In this article, we will show you how to download this mod menu, how to install it, how to use it, and what are the benefits of using it. We will also give you some tips and tricks to master this game and win every battle. So, let's get started!</p>
5
- <h2>download stickman shinobi mod menu</h2><br /><p><b><b>Download File</b> &mdash; <a href="https://jinyurl.com/2uNP2m">https://jinyurl.com/2uNP2m</a></b></p><br /><br />
6
- <h2>What is Stickman Shinobi and Why You Should Play It</h2>
7
- <p>Stickman Shinobi is a fighting arcade game published by NAGOO STUDIO. It is inspired by the popular manga and anime series Naruto, which features ninjas with supernatural abilities. The game has over 10 million downloads on Google Play Store and has received positive reviews from players. Here are some of the reasons why you should play this game:</p>
8
- <h3>The Game Features</h3>
9
- <p>Stickman Shinobi has many features that make it an enjoyable and immersive game. Some of these features are:</p>
10
- <ul>
11
- <li>Simple and intuitive touch controls that allow you to perform combos, skills, and attacks easily.</li>
12
- <li>Free starting character pack with many of your favorite heroes from Naruto, such as Naruto, Sasuke, Sakura, Kakashi, Gaara, Itachi, Madara, etc.</li>
13
- <li>Many shinobi warriors to play with, each with their own strengths, weaknesses, abilities, and personalities.</li>
14
- <li>Awesome story mode that follows the original plot of Naruto and lets you relive the epic moments and battles.</li>
15
- <li>Fun tournaments that let you challenge other players from around the world and test your skills.</li>
16
- <li>Many maps with different settings, such as the Green Jungle, the Forgotten Sand Valley, the Mountains, etc.</li>
17
- <li>Endless gameplay with vast elements, such as strength enhancements, rewards, achievements, leaderboards, etc.</li>
18
- <li>Different strength enhancements to make use of, such as power-ups, items, weapons, etc.</li>
19
- </ul>
20
- <h3>The Game Modes</h3>
21
- <p>Stickman Shinobi has two main game modes that you can choose from:</p>
22
- <ul>
23
- <li>Story mode: This mode lets you follow the original story of Naruto and experience the events and battles that happened in the manga and anime. You can also unlock new characters as you progress through the story.</li>
24
- <li>Tournament mode: This mode lets you compete with other players in online matches and see who is the best shinobi. You can also earn rewards and rank up in the leaderboards.</li>
25
- </ul>
26
- <h3>The Game Characters</h3>
27
- <p>Stickman Shinobi has over 100 legendary ninjas that you can play with or fight against. Each character has their own unique skills, stats, appearance, voice effects, and personality. Some of the characters are:</p>
28
- <table border="1">
29
- <tr <th>Name</th>
30
- <th>Skills</th>
31
- <th>Origin</th>
32
- </tr>
33
- <tr>
34
- <td>Naruto</td>
35
- <td>Rasengan, Shadow Clone, Nine-Tails Chakra, Sage Mode, etc.</td>
36
- <td>Konoha (Hidden Leaf Village)</td>
37
- </tr>
38
- <tr>
39
- <td>Sasuke</td>
40
- <td>Chidori, Sharingan, Amaterasu, Susanoo, etc.</td>
41
- <td>Konoha (Hidden Leaf Village)</td>
42
- </tr>
43
- <tr>
44
- <td>Sakura</td>
45
- <td>Medical Ninjutsu, Super Strength, Healing Factor, etc.</td>
46
- <td>Konoha (Hidden Leaf Village)</td>
47
- </tr>
48
- <tr>
49
- <td>Kakashi</td>
50
- <td>Lightning Blade, Sharingan, Kamui, Raikiri, etc.</td>
51
- <td>Konoha (Hidden Leaf Village)</td>
52
- </tr>
53
- <tr>
54
- <td>Gaara</td>
55
- <td>Sand Manipulation, Shukaku Chakra, Sand Coffin, Sand Shield, etc.</td>
56
- <td>Suna (Hidden Sand Village)</td>
57
- </tr>
58
- <tr>
59
- <td>Itachi</td>
60
- <td>Sharingan, Mangekyo Sharingan, Tsukuyomi, Izanami, etc.</td>
61
- <td>Konoha (Hidden Leaf Village)</td>
62
- </tr>
63
- <tr>
64
- <td>Madara</td <td>Rinnegan, Mangekyo Sharingan, Wood Style, Limbo, etc.</td <td>Konoha (Hidden Leaf Village)</td </tr <tr <td>Jiraiya</td <td>Rasengan, Toad Summoning, Sage Mode, Fire Style, etc.</td <td>Konoha (Hidden Leaf Village)</td </tr <tr <td>Orochimaru</td <td>Snake Summoning, Immortality, Body Transfer, Curse Mark, etc.</td <td>Konoha (Hidden Leaf Village)</td </tr <tr <td>Hinata</td <td>Byakugan, Gentle Fist, Twin Lion Fists, Protection of the Eight Trigrams Sixty-Four Palms, etc.</td <td>Konoha (Hidden Leaf Village)</td </tr </table <p>And many more! You can unlock all of them by playing the game or by using the mod menu that we will talk about later.</p <h2>What is Stickman Shinobi Mod Menu and What are the Benefits</h2 <p>If you want to have more fun and excitement in Stickman Shinobi, you might want to try the mod menu that we have prepared for you. This mod menu is a modified version of the game that gives you access to many features and options that are not available in the original game. Some of the benefits of using this mod menu are:</p <h3>The Mod Features</h3 <p>The mod menu has many features that will make your gameplay easier and more enjoyable. 
Some of these features are:</p <ul <li>Unlimited money: You can get as much money as you want and use it to buy items, weapons, power-ups, etc.</li <li>Unlimited gems: You can get as many gems as you want and use them to unlock new characters and upgrade their skills.</li <li>Unlimited tickets: You can get as many tickets as you want and use them to enter tournaments and win rewards.</li <li>All characters unlocked: You can play with any character you want without having to unlock them first.</li <li>Mega menu: You can access a mega menu that gives you more options and features, such as changing the game speed, skipping levels, enabling god mode, etc.</li <li>No ads: You can enjoy the game without any annoying ads or pop-ups.</li <li>No root required: You can install and use the mod menu without rooting your device or risking any damage.</li <li>Easy to use: You can easily activate and deactivate the mod features with a simple tap on the screen.</li <li>Safe and secure: You can use the mod menu without worrying about any viruses or malware. The mod menu is tested and verified by our team of experts.</li <li>Free to download: You can download the mod menu for free from our website. We do not charge any fees or subscriptions for our service.</li </ul <h3>The Mod Installation</h3 <p>The mod installation is very easy and simple. All you need to do is follow these steps:</p <ol <li>Delete the original game from your device if you have it installed.</li <li>Download the mod menu APK file from our website. You can find the download link at the end of this article.</li <li>Enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store.</li <li> <li>Install the mod menu APK file on your device by tapping on it and following the instructions.</li>
65
- <li>Launch the game and enjoy the mod menu features.</li>
66
- </ol>
67
- <h3>The Mod Usage</h3>
68
- <p>The mod usage is also very easy and simple. All you need to do is follow these steps:</p>
69
- <ol>
70
- <li>Open the game and tap on the mod menu icon on the top left corner of the screen.</li>
71
- <li>Select the mod features that you want to activate or deactivate. You can also access the mega menu by tapping on the M button.</li>
72
- <li>Play the game and have fun with the mod menu features.</li>
73
- </ol>
74
- <h2>Tips and Tricks to Master Stickman Shinobi and Win Every Battle</h2>
75
- <p>Now that you know how to download and use the mod menu, you might want to learn some tips and tricks to improve your skills and performance in Stickman Shinobi. Here are some of them:</p>
76
- <h3>Choose the Right Character for Your Style</h3>
77
- <p>One of the most important things in Stickman Shinobi is to choose the right character for your style of playing. Each character has their own advantages and disadvantages, as well as different abilities and stats. You should try to find a character that suits your preferences and goals. For example, if you like fast and agile fighters, you might want to choose Naruto, Sasuke, or Hinata. If you like strong and durable fighters, you might want to choose Gaara, Madara, or Jiraiya. If you like versatile and balanced fighters, you might want to choose Kakashi, Itachi, or Sakura.</p>
78
- <p>download stickman shinobi mod apk unlimited money<br />
79
- download stickman shinobi mega menu mod free<br />
80
- download stickman shinobi ninja warrior mod menu<br />
81
- download stickman shinobi mod menu latest version<br />
82
- download stickman shinobi mod menu for android<br />
83
- download stickman shinobi mod menu no root<br />
84
- download stickman shinobi mod menu offline<br />
85
- download stickman shinobi mod menu 2023<br />
86
- download stickman shinobi mod menu hack<br />
87
- download stickman shinobi mod menu cheat<br />
88
- download stickman shinobi mod menu unlocked<br />
89
- download stickman shinobi mod menu premium<br />
90
- download stickman shinobi mod menu pro<br />
91
- download stickman shinobi mod menu vip<br />
92
- download stickman shinobi mod menu god mode<br />
93
- download stickman shinobi mod menu one hit kill<br />
94
- download stickman shinobi mod menu unlimited gems<br />
95
- download stickman shinobi mod menu unlimited coins<br />
96
- download stickman shinobi mod menu unlimited skills<br />
97
- download stickman shinobi mod menu unlimited energy<br />
98
- download stickman shinobi mod menu unlimited characters<br />
99
- download stickman shinobi mod menu unlimited weapons<br />
100
- download stickman shinobi mod menu unlimited costumes<br />
101
- download stickman shinobi mod menu unlimited items<br />
102
- download stickman shinobi mod menu unlimited levels<br />
103
- download stickman shinobi mod menu unlimited missions<br />
104
- download stickman shinobi mod menu unlimited modes<br />
105
- download stickman shinobi mod menu unlimited features<br />
106
- download stickman shinobi mod menu easy install<br />
107
- download stickman shinobi mod menu safe and secure<br />
108
- download stickman shinobi mod menu virus free<br />
109
- download stickman shinobi mod menu malware free<br />
110
- download stickman shinobi mod menu ad free<br />
111
- download stickman shinobi mod menu no survey<br />
112
- download stickman shinobi mod menu no verification<br />
113
- download stickman shinobi mod menu direct link<br />
114
- download stickman shinobi mod menu fast and reliable<br />
115
- download stickman shinobi mod menu high quality graphics<br />
116
- download stickman shinobi mod menu realistic physics<br />
117
- download stickman shinobi mod menu smooth gameplay<br />
118
- download stickman shinobi mod menu fun and addictive<br />
119
- download stickman shinobi mod menu best arcade game 2023<br />
120
- download stickman shinobi mod menu reviews and ratings<br />
121
- download stickman shinobi mod menu tips and tricks<br />
122
- download stickman shinobi mod menu guides and tutorials<br />
123
- download stickman shinobi mod menu updates and news</p>
124
- <h3>Use the Ultimate Skills Wisely</h3>
125
- <p>Another important thing in Stickman Shinobi is to use the ultimate skills wisely. Each character has their own ultimate skill that can deal massive damage and change the course of the battle. However, these skills have a cooldown time and require a certain amount of chakra to use. You should try to use them at the right moment and not waste them unnecessarily. For example, you might want to use them when you are facing a tough boss or a strong opponent, when you are outnumbered or surrounded, or when you need a finishing blow or a comeback.</p>
126
- <h3>Upgrade Your Strength and Enhance Your Abilities</h3>
127
- <p>A third important thing in Stickman Shinobi is to upgrade your strength and enhance your abilities. As you play the game, you will earn money, gems, tickets, and other rewards that you can use to buy items, weapons, power-ups, etc. You can also use them to upgrade your character's stats, such as health, attack, defense, speed, etc. You should try to spend your resources wisely and invest in the things that will help you improve your performance and overcome the challenges. For example, you might want to buy a sword that increases your attack power, a shield that increases your defense power, or a potion that restores your health.</p>
128
- <h3>Learn from the Bosses and the Tournaments</h3>
129
- <p>A fourth important thing in Stickman Shinobi is to learn from the bosses and the tournaments. As you play the game, you will encounter many bosses and opponents that will test your skills and abilities. You should try to learn from them and see what they do well and what they do poorly. You should also try to copy their moves and strategies and apply them to your own gameplay. For example, you might want to learn how to dodge their attacks, how to counter their skills, how to exploit their weaknesses, etc.</p>
130
- <h2>Conclusion and FAQs</h2>
131
- <p>In conclusion, Stickman Shinobi is a fun and exciting game that will keep you entertained for hours. You can enjoy playing with many legendary ninjas from Naruto, exploring different maps and levels, joining tournaments and competing with other players, etc. You can also download our mod menu that will give you access to many features and options that will make your gameplay easier and more enjoyable. You can get unlimited money, gems, tickets, unlock all characters, access a mega menu, etc. All you need to do is follow our instructions on how to download, install, and use the mod menu.</p>
132
- <p>We hope that this article has helped you understand more about Stickman Shinobi and our mod menu. If you have any questions or feedbacks about this topic, please feel free to contact us through our website or email. We will be happy to assist you with anything related to this game or our service.</p>
133
- <p>Here are some FAQs that might answer some of your queries:</p>
134
- <ol>
135
- <li>Q: Is Stickman Shinobi Mod Menu safe to use?</li>
136
- <li>A: Yes, it is safe to use. Our mod menu is tested and verified by our team of experts. It does not contain any viruses or malware that can harm your device or data. It also does not require root access or any special permissions to use.</li>
137
- <li>Q: How can I update Stickman Shinobi Mod Menu?</li>
138
- <li>A: You can update the mod menu by visiting our website and downloading the latest version of the mod menu APK file. You can also check our website regularly for any news or updates about the game or the mod menu.</li>
139
- <li>Q: How can I uninstall Stickman Shinobi Mod Menu?</li>
140
- <li>A: You can uninstall the mod menu by deleting the mod menu APK file from your device. You can also reinstall the original game from Google Play Store if you want to play it without the mod menu.</li>
141
- <li>Q: Can I use Stickman Shinobi Mod Menu with other mods or cheats?</li>
142
- <li>A: We do not recommend using the mod menu with other mods or cheats, as they might cause conflicts or errors in the game. Our mod menu already provides you with everything you need to enjoy the game to the fullest.</li>
143
- <li>Q: Can I get banned or reported for using Stickman Shinobi Mod Menu?</li>
144
- <li>A: We do not guarantee that you will not get banned or reported for using the mod menu, as it depends on the game developers and their policies. However, we have not received any reports of such cases so far, and we try our best to make the mod menu undetectable and safe to use. We also advise you to use the mod menu responsibly and not abuse it or ruin the game experience for other players.</li>
145
- </ol></p> 401be4b1e0<br />
146
- <br />
147
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download WA GB APK Pro v17.40 - The Best WhatsApp Mod for Android [2023].md DELETED
@@ -1,101 +0,0 @@
1
- <br />
2
- <h1>Download WA GB APK Pro: A Modified Version of WhatsApp with More Features and Customization</h1>
3
- <p>If you are looking for a way to enhance your WhatsApp experience, you might want to try WA GB APK Pro. This is a modified version of the popular instant messaging app that offers a host of additional features and customization options that are not available in the standard version of the app. In this article, we will explore what WA GB APK Pro is, its key features, how to download and install it on your Android device, and its pros and cons.</p>
4
- <h2>download wa gb apk pro</h2><br /><p><b><b>Download Zip</b> &#10038; <a href="https://jinyurl.com/2uNNWR">https://jinyurl.com/2uNNWR</a></b></p><br /><br />
5
- <h2>Key Features of WA GB APK Pro</h2>
6
- <p>WA GB APK Pro offers a range of features that are not available in the standard version of WhatsApp. Some of the key features of the app include:</p>
7
- <ul>
8
- <li><b>Privacy:</b> WA GB APK Pro offers a range of privacy features that allow you to control who can see your online status, blue ticks, and last seen status. This is particularly useful if you value your privacy and want to keep your online activities private.</li>
9
- <li><b>Customization:</b> WA GB APK Pro offers a wide range of customization options that allow you to personalize your app according to your tastes. The app allows you to change the theme, font, and background of the app, as well as customize the color of chat bubbles, icons, and more.</li>
10
- <li><b>Sending Larger Files:</b> With WA GB APK Pro, you can send larger files, such as videos and photos, up to 100 MB in size. This is particularly useful if you need to send large files on a regular basis.</li>
11
- <li><b>Anti-Ban:</b> WA GB APK Pro has an anti-ban feature that prevents you from getting banned for using a third-party app. This is a major concern for users who are worried about getting banned for using a modified version of the app.</li>
12
- </ul>
13
- <h2>How to Download and Install WA GB APK Pro on Android</h2>
14
- <p>If you want to try out WA GB APK Pro on your Android device, you need to follow these steps:</p>
15
- <ol>
16
- <li><b>Step 1:</b> Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
17
- <li><b>Step 2:</b> Download the latest version of WA GB APK Pro from a trusted source. You can find the download link at [text](^1^) or [text](^2^) or [text](^3^).</li>
18
- <li><b>Step 3:</b> Install the app and verify your phone number. To do this, open the downloaded file and follow the instructions on the screen. You will need to enter your phone number and verify it with an OTP code.</li>
19
- <li><b>Step 4:</b> Enjoy the app and its features. You can now use WA GB APK Pro to chat with your friends and family, and enjoy the extra features and customization options that the app offers.</li>
20
- </ol>
21
- <h2>Pros and Cons of WA GB APK Pro</h2>
22
- <p>WA GB APK Pro is not an official app, and therefore, it has some advantages and disadvantages that you should be aware of before using it. Here are some of the pros and cons of the app:</p>
23
- <p>download gb whatsapp pro latest version apk<br />
24
- download gb whatsapp pro mod apk 2023<br />
25
- download gb whatsapp pro official terbaru 2023<br />
26
- download gb whatsapp pro anti ban apk<br />
27
- download gb whatsapp pro alex mods apk<br />
28
- download gb whatsapp pro free no ads apk<br />
29
- download gb whatsapp pro update 2023 apk<br />
30
- download gb whatsapp pro v17.40 apk<br />
31
- download gb whatsapp pro v21.20.0 apk<br />
32
- download gb whatsapp pro for android apk<br />
33
- how to download gb whatsapp pro apk<br />
34
- where to download gb whatsapp pro apk<br />
35
- why download gb whatsapp pro apk<br />
36
- benefits of downloading gb whatsapp pro apk<br />
37
- risks of downloading gb whatsapp pro apk<br />
38
- download wa gbwhatsapp pro apk 2023<br />
39
- download wa gbwhatsapp pro modded by alexmods apk<br />
40
- download wa gbwhatsapp pro official by jalantikus apk<br />
41
- download wa gbwhatsapp pro anti blokir apk<br />
42
- download wa gbwhatsapp pro bebas iklan apk<br />
43
- download wa gbwhatsapp pro update terbaru 2023 apk<br />
44
- download wa gbwhatsapp pro v17.30 apk<br />
45
- download wa gbwhatsapp pro v21.20.0 apk<br />
46
- download wa gbwhatsapp pro for android apk<br />
47
- how to download wa gbwhatsapp pro apk<br />
48
- where to download wa gbwhatsapp pro apk<br />
49
- why download wa gbwhatsapp pro apk<br />
50
- benefits of downloading wa gbwhatsapp pro apk<br />
51
- risks of downloading wa gbwhatsapp pro apk<br />
52
- download whatsapp mod gbwa pro apk 2023<br />
53
- download whatsapp mod gbwa pro by alexmods apk<br />
54
- download whatsapp mod gbwa pro by jalantikus apk<br />
55
- download whatsapp mod gbwa pro anti banned apk<br />
56
- download whatsapp mod gbwa pro no ads apk<br />
57
- download whatsapp mod gbwa pro latest update 2023 apk<br />
58
- download whatsapp mod gbwa pro v17.40 (alexmods) apk<br />
59
- download whatsapp mod gbwa pro v21.20.0 (jalantikus) apk<br />
60
- download whatsapp mod gbwa pro for android apk<br />
61
- how to download whatsapp mod gbwa pro apk<br />
62
- where to download whatsapp mod gbwa pro apk<br />
63
- why download whatsapp mod gbwa pro apk<br />
64
- benefits of downloading whatsapp mod gbwa pro apk<br />
65
- risks of downloading whatsapp mod gbwa pro apk</p>
66
- <table>
67
- <tr>
68
- <th>Pros</th>
69
- <th>Cons</th>
70
- </tr>
71
- <tr>
72
- <td>More features and customization options than the standard app</td>
73
- <td>Potential security and privacy risks from using a third-party app</td>
74
- </tr>
75
- <tr>
76
- <td>No need to root your device to use the app</td>
77
- <td>Possible compatibility issues with some devices and Android versions</td>
78
- </tr>
79
- <tr>
80
- <td>Anti-ban feature to avoid getting banned for using a modified app</td>
81
- <td>No official support or updates from the developers of WhatsApp</td>
82
- </tr>
83
- </table>
84
- <h2>Conclusion</h2>
85
- <p>WA GB APK Pro is a modified version of WhatsApp that offers more features and customization options than the standard version of the app. The app allows you to control your privacy, change the appearance of the app, send larger files, and avoid getting banned for using a third-party app. However, the app also has some drawbacks, such as potential security and privacy risks, compatibility issues, and lack of official support. Therefore, you should use the app at your own risk and discretion.</p>
86
- <h2>FAQs</h2>
87
- <p>Here are some of the frequently asked questions about WA GB APK Pro:</p>
88
- <ul>
89
- <li><b>Q1: Is WA GB APK Pro safe to use?</b></li>
90
- <li>A1: WA GB APK Pro is not an official app, and therefore, it may pose some security and privacy risks to your device and data. The app may contain malware or spyware that can harm your device or steal your personal information. Therefore, you should only download the app from a trusted source, and scan it with an antivirus before installing it.</li>
91
- <li><b>Q2: Can I use WA GB APK Pro with the original WhatsApp app?</b></li>
92
- <li>A2: No, you cannot use WA GB APK Pro with the original WhatsApp app. You need to uninstall the original WhatsApp app before installing WA GB APK Pro. Otherwise, you may face some errors or conflicts while using the app.</li>
93
- <li><b>Q3: How can I update WA GB APK Pro to the latest version?</b></li>
94
- <li>A3: To update WA GB APK Pro to the latest version, you need to visit the official website of the app or a trusted source where you downloaded the app from. You can check for updates by going to Settings > Updates in the app. You will need to download and install the latest version manually, as there is no automatic update feature in the app.</li>
95
- <li><b>Q4: What are some alternative apps to WA GB APK Pro?</b></li>
96
- <li>A4: There are many other modified versions of WhatsApp that offer similar or different features and customization options. Some of the popular alternatives are FMWhatsApp, YoWhatsApp, WhatsApp Plus, OGWhatsApp, and GBWhatsApp.</li>
97
- <li><b>Q5: How can I contact the developers of WA GB APK Pro?</b></li>
98
- <li>A5: You can contact the developers of WA GB APK Pro by visiting their official website at [text] or by sending them an email at [text]. You can also follow them on their social media accounts on Facebook, Twitter, Instagram, and YouTube.</li>
99
- </ul></p> 401be4b1e0<br />
100
- <br />
101
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Enjoy the Best Truck Simulation Game with Truck Simulator Nusantara Mod APK Download.md DELETED
@@ -1,96 +0,0 @@
1
- <br />
2
- <h1>Download Truck Simulator Nusantara Mod Apk: The Ultimate Indonesian Truck Driving Game</h1>
3
- <p>If you are looking for a realistic and immersive truck driving simulation game that lets you experience the thrill of driving a truck on the Indonesian roads, then you should download Truck Simulator Nusantara Mod Apk. This is a free Android game developed by Truck ID that offers various features that make it stand out from other similar games. In this article, we will tell you what Truck Simulator Nusantara is, what are its features, how to download and install it, and some tips and tricks for playing it.</p>
4
- <h2>download truck simulator nusantara mod apk</h2><br /><p><b><b>Download File</b> &#10037; <a href="https://jinyurl.com/2uNN57">https://jinyurl.com/2uNN57</a></b></p><br /><br />
5
- <h2>Features of Truck Simulator Nusantara Mod Apk</h2>
6
- <p>Truck Simulator Nusantara Mod Apk is not just a simple driving game. It is a comprehensive trucking simulation game that lets you customize your truck, manage your office, bid for jobs, deliver goods, compete with other players online, and more. Here are some of the features that you can enjoy in this game:</p>
7
- <ul>
8
- <li><b>Custom livery:</b> Customize your truck with the best designs available in the game. You can choose from a variety of custom liveries or create your own using the livery editor. You can also change the color of your truck, add stickers, logos, and decals, and make your truck look unique and stylish.</li>
9
- <li><b>Realistic graphics and sound effects:</b> Experience the thrill of driving a truck on the Indonesian roads with realistic graphics and sound effects. You can see the detailed scenery of the cities, villages, highways, bridges, tolls, and more. You can also hear the engine sound, horn sound, brake sound, and other sounds that make you feel like you are in a real truck.</li>
10
- <li><b>Various missions:</b> Deliver goods from one place to another and earn money. You can choose from different types of goods, such as food, furniture, electronics, etc., and different types of trucks, such as box trucks, container trucks, flatbed trucks, etc. You can also see the weight, distance, time limit, and reward of each mission before accepting it.</li>
11
- <li><b>Office management:</b> Grow your trucking company by buying office equipment and hiring staff. You can buy computers, printers, desks, chairs, and other office equipment to improve your productivity and efficiency. You can also hire accountants, managers, drivers, and other staff to help you run your business. You can also expand your office space by buying new buildings.</li>
12
- <li><b>Online multiplayer:</b> Compete with other players online and rank up on the leaderboard. You can join or create a room and invite other players to join. You can also chat with them and see their rankings. You can also participate in events and tournaments and win prizes and rewards.</li>
13
- </ul>
14
- <h2>How to Download and Install Truck Simulator Nusantara Mod Apk</h2>
15
- <p>If you want to download Truck Simulator Nusantara Mod Apk and enjoy unlimited money and unlocked features in the game, you need to follow these steps:</p>
16
- <ol>
17
- <li><b>Step 1:</b> Download the mod apk file from a trusted source. You can use the link below to download it directly from our website.</li>
18
- <li><b>Step 2:</b> Enable unknown sources on your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
19
- <li><b>Step 3:</b> Install the mod apk file and launch the game. To do this, go to your file manager and locate the downloaded file. Tap on it and follow the installation instructions. Once the installation is complete, open the game and enjoy.</li>
20
- <li><b>Step 4:</b> Enjoy unlimited money and unlocked features in the game. You can use the money to buy new trucks, parts, accessories, office equipment, staff, buildings, etc. You can also use the unlocked features to customize your truck, play online multiplayer, join events and tournaments, etc.</li>
21
- </ol>
22
- <h2>Tips and Tricks for Playing Truck Simulator Nusantara Mod Apk</h2>
23
- <p>If you want to master Truck Simulator Nusantara Mod Apk and become a successful truck driver and business owner, you need to follow these tips and tricks:</p>
24
- <ul>
25
- <li><b>Practice in the garage:</b> Learn how to operate and drive the truck before taking on missions. You can practice in the garage mode where you can test your truck's performance, steering, braking, acceleration, etc. You can also adjust your camera angle, mirror position, seat position, etc.</li>
26
- <li><b>Follow traffic rules and regulations:</b> Avoid speeding, running red lights, and crashing into other vehicles or objects. These actions will result in fines or penalties that will reduce your income or reputation. They will also damage your truck or cargo that will cost you money or time to repair or replace.</li>
27
- <li><b>Manage your gas and hunger:</b> Refill your gas tank and eat at restaurants along the way. Your gas level will decrease as you drive your truck. If you run out of gas, you will have to call a tow truck and pay a fee. Your hunger level will also decrease as you drive your truck. If you get too hungry, you will lose focus and energy. You can eat at restaurants or convenience stores that are marked on the map. Eating will also restore your health and stamina.</li>
28
- <li><b>Bid for jobs wisely:</b> Choose the jobs that suit your truck's capacity and pay well. You can see the available jobs on the job board or on the map. You can also see the details of each job, such as the type of goods, the weight, the distance, the time limit, the reward, and the reputation. You can bid for the jobs that you think you can complete successfully and profitably.</li>
29
- <li><b>Upgrade your truck:</b> Buy new parts and accessories to improve your truck's performance and appearance. You can buy new engines, transmissions, suspensions, brakes, tires, wheels, lights, horns, etc. You can also buy new trucks that have better specifications and features. Upgrading your truck will help you complete more difficult and lucrative missions.</li>
30
- </ul>
31
- <h2>Conclusion</h2>
32
- <p>Truck Simulator Nusantara Mod Apk is a fun and addictive game that lets you experience the life of a truck driver and a business owner in Indonesia. You can customize your truck, manage your office, bid for jobs, deliver goods, compete with other players online, and more. You can also enjoy unlimited money and unlocked features in the game by downloading the mod apk file from our website. If you are looking for a realistic and immersive truck driving simulation game, then you should download Truck Simulator Nusantara Mod Apk today.</p>
33
- <h2>FAQs</h2>
34
- <p>Here are some of the frequently asked questions about Truck Simulator Nusantara Mod Apk:</p>
35
- <ul>
36
- <li><b>Q1: Is Truck Simulator Nusantara Mod Apk safe to download and install?</b></li>
37
- <li><b>A1: Yes, as long as you download it from a reliable source and follow the installation steps correctly.</b></li>
38
- <li><b>Q2: What are the benefits of using Truck Simulator Nusantara Mod Apk?</b></li>
39
- <li><b>A2: You can enjoy unlimited money and unlocked features in the game, such as custom liveries, online multiplayer, and more.</b></li>
40
- <li><b>Q3: How can I play Truck Simulator Nusantara Mod Apk online with other players?</b></li>
41
- <li><b>A3: You need to create an account and log in to the game. Then, you can join or create a room and invite other players to join. You can also chat with them and see their rankings.</b></li>
42
- <li><b>Q4: How can I grow my trucking company in Truck Simulator Nusantara Mod Apk?</b></li>
43
- <li><b>A4: You need to buy office equipment, such as computers, printers, desks, chairs, etc., and hire staff, such as accountants, managers, drivers, etc. You can also expand your office space by buying new buildings.</b></li>
44
- <li><b>Q5: How can I customize my truck in Truck Simulator Nusantara Mod Apk?</b></li>
45
- <li><b>A5: You can choose from a variety of custom liveries available in the game, or create your own using the livery editor. You can also buy new parts and accessories for your truck, such as wheels, tires, lights, horns, etc.</b></li>
46
- </ul></p>
47
- <p>How to download truck simulator nusantara mod apk for free<br />
48
- Download truck simulator nusantara mod apk unlimited money<br />
49
- Download truck simulator nusantara mod apk latest version<br />
50
- Download truck simulator nusantara mod apk offline<br />
51
- Download truck simulator nusantara mod apk android 1<br />
52
- Download truck simulator nusantara mod apk revdl<br />
53
- Download truck simulator nusantara mod apk no root<br />
54
- Download truck simulator nusantara mod apk obb<br />
55
- Download truck simulator nusantara mod apk 2023<br />
56
- Download truck simulator nusantara mod apk hack<br />
57
- Download truck simulator nusantara mod apk cheat<br />
58
- Download truck simulator nusantara mod apk full unlocked<br />
59
- Download truck simulator nusantara mod apk mega mod<br />
60
- Download truck simulator nusantara mod apk rexdl<br />
61
- Download truck simulator nusantara mod apk data<br />
62
- Download truck simulator nusantara mod apk update<br />
63
- Download truck simulator nusantara mod apk new version<br />
64
- Download truck simulator nusantara mod apk premium<br />
65
- Download truck simulator nusantara mod apk pro<br />
66
- Download truck simulator nusantara mod apk vip<br />
67
- Download truck simulator nusantara mod apk terbaru<br />
68
- Download truck simulator nusantara mod apk 2022<br />
69
- Download truck simulator nusantara mod apk 2021<br />
70
- Download truck simulator nusantara mod apk 2020<br />
71
- Download truck simulator nusantara mod apk 2019<br />
72
- Download truck simulator nusantara mod apk 2018<br />
73
- Download truck simulator nusantara mod apk 2017<br />
74
- Download truck simulator nusantara mod apk 2016<br />
75
- Download truck simulator nusantara mod apk 2015<br />
76
- Download truck simulator nusantara mod apk 2014<br />
77
- Download truck simulator nusantara mod apk 2013<br />
78
- Download truck simulator nusantara mod apk 2012<br />
79
- Download truck simulator nusantara mod apk 2011<br />
80
- Download truck simulator nusantara mod apk 2010<br />
81
- Download truck simulator nusantara mod apk for pc<br />
82
- Download truck simulator nusantara mod apk for laptop<br />
83
- Download truck simulator nusantara mod apk for windows 10<br />
84
- Download truck simulator nusantara mod apk for mac<br />
85
- Download truck simulator nusantara mod apk for ios<br />
86
- Download truck simulator nusantara mod apk for iphone<br />
87
- Download truck simulator nusantara mod apk for ipad<br />
88
- Download truck simulator nusantara mod apk for android tv<br />
89
- Download truck simulator nusantara mod apk for firestick<br />
90
- Download truck simulator nusantara mod apk for chromebook<br />
91
- Download truck simulator nusantara mod apk for smart tv<br />
92
- Download truck simulator nusantara mod apk for roku<br />
93
- Download truck simulator nusantara mod apk for xbox one<br />
94
- Download truck simulator nusantara mod apk for ps4</p> 401be4b1e0<br />
95
- <br />
96
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Explore the World with Google Earth Download Now for Free.md DELETED
@@ -1,162 +0,0 @@
1
-
2
- <br> H3: Google Earth Pro Desktop Version <br> H3: Google Earth Mobile Version | | H2: How to Use Google Earth Features | H3: Imagery <br> H3: 3D Objects and Imagery <br> H3: Street View <br> H3: Water and Ocean <br> H3: Other Interesting Features | | H2: What Are the System Requirements for Google Earth? | | | H2: What Are Some Alternatives to Google Earth? | H3: Zoom Earth <br> H3: Marble <br> H3: Satellites.pro <br> H3: NASA Worldview <br> H3: ArcGIS Map Viewer | | H2: Conclusion | | | H2: FAQs | | Table 2: Article with HTML formatting <h1>Google Earth Download: How to Explore the World from Your Computer</h1>
3
- <p>Have you ever wanted to see the world from a different perspective? To travel to any place on the planet and view it in stunning detail? To discover new places and learn more about them? If so, you might want to download Google Earth, a program that lets you explore the globe with a swipe of your finger or a click of your mouse.</p>
4
- <h2>google earth download</h2><br /><p><b><b>DOWNLOAD</b> &#9658; <a href="https://jinyurl.com/2uNUFO">https://jinyurl.com/2uNUFO</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download Google Earth for free, how to use its amazing features, what are the system requirements for running it, and what are some alternatives to Google Earth that you might want to try. Let's get started!</p>
6
- <h2>What Is Google Earth and Why Should You Download It?</h2>
7
- <p>Google Earth is a computer program that creates a 3D model of the earth based on satellite imagery, aerial photography, GIS data, and other sources. It allows you to zoom in and out, rotate, tilt, and pan the globe, and see any place in high resolution. You can also access Street View, which shows you 360-degree photos of streets, buildings, and landmarks. You can even dive into the ocean and see underwater features and marine life.</p>
8
- <p>Google Earth is more than just a map. It is also a powerful tool for learning, exploring, and discovering. You can use it to find out more about the geography, history, culture, and environment of any location. You can also use it to measure distances, areas, and elevations, create custom maps and tours, import and export data, and much more.</p>
9
- <p>Downloading Google Earth is free and easy. You can choose from different versions depending on your device and preferences. Here are the options:</p>
10
- <h2>How to Download Google Earth for Free</h2>
11
- <h3>Google Earth Web Version</h3>
12
- <p>The easiest way to use Google Earth is through your web browser. You don't need to install anything on your computer. Just go to <a href="(^4^)">https://www.google.com/intl/en_in/earth/</a> and start exploring. You can also access Google Earth from Google Maps by clicking on the satellite view icon and then on the globe icon.</p>
13
- <p>The web version of Google Earth works best with Chrome, Firefox, Edge, or Opera browsers. It has most of the features of the desktop version, except for some advanced ones like historical imagery, time slider, KML import/export, etc.</p>
14
- <h3>Google Earth Pro Desktop Version</h3>
15
- <p>If you want more functionality and control over your Google Earth experience, you might want to download the desktop version of Google Earth Pro. This version is also free and works on Windows, Mac, or Linux computers. You can download it from <a href="(^1^)">https://www.google.com/earth/versions/</a>.</p>
16
- <p>The desktop version of Google Earth Pro has some advantages over the web version. For example, you can access historical imagery and see how places have changed over time. You can also import and export GIS data in various formats. You can print high-resolution screenshots and make offline movies. You can also use advanced measurement tools and drawing tools.</p>
17
- <h3>Google Earth Mobile Version</h3>
18
- <p>If you want to use Google Earth on your smartphone or tablet, you can download the mobile version from the App Store or Google Play Store. The mobile version lets you browse the globe with a swipe of your finger or a tilt of your device. You can also use voice commands to search for places, ask questions, and get directions. You can also use the Voyager feature to explore curated stories and tours from around the world.</p>
19
- <p>google earth download for windows 10<br />
20
- google earth download free latest version<br />
21
- google earth download offline installer<br />
22
- google earth download mac os<br />
23
- google earth download pro<br />
24
- google earth download apk<br />
25
- google earth download for chrome<br />
26
- google earth download for pc<br />
27
- google earth download for android<br />
28
- google earth download for iphone<br />
29
- google earth download for linux<br />
30
- google earth download for ipad<br />
31
- google earth download 3d<br />
32
- google earth download old version<br />
33
- google earth download 2023<br />
34
- google earth download without play store<br />
35
- google earth download kmz files<br />
36
- google earth download historical imagery<br />
37
- google earth download maps for offline use<br />
38
- google earth download high resolution images<br />
39
- google earth download street view<br />
40
- google earth download satellite images<br />
41
- google earth download timelapse<br />
42
- google earth download vr<br />
43
- google earth download studio<br />
44
- google earth download engine<br />
45
- google earth download web app<br />
46
- google earth download kml files<br />
47
- google earth download live view<br />
48
- google earth download ocean floor<br />
49
- google earth download moon map<br />
50
- google earth download mars map<br />
51
- google earth download sky map<br />
52
- google earth download flight simulator<br />
53
- google earth download measure tool<br />
54
- google earth download elevation data<br />
55
- google earth download terrain data<br />
56
- google earth download climate data<br />
57
- google earth download population data<br />
58
- google earth download landmarks data</p>
59
- <p>The mobile version of Google Earth has some limitations compared to the web and desktop versions. For example, you can't access historical imagery, 3D buildings, or Street View. You also can't import or export data, create custom maps, or print screenshots.</p>
60
- <h2>How to Use Google Earth Features</h2>
61
- <p>Google Earth has many features that make it more than just a map. Here are some of the most popular and useful ones:</p>
62
- <h3>Imagery</h3>
63
- <p>Google Earth uses satellite imagery, aerial photography, and GIS data to create a realistic and detailed representation of the earth. You can zoom in and out, rotate, tilt, and pan the globe, and see any place in high resolution. You can also change the angle and perspective of your view, and see the terrain and elevation of any location.</p>
64
- <p>Google Earth also lets you access historical imagery and see how places have changed over time. You can use the time slider to go back in time and compare different dates. You can also see the current weather conditions and cloud cover of any place.</p>
65
- <h3>3D Objects and Imagery</h3>
66
- <p>Google Earth also has 3D objects and imagery that make the map more realistic and immersive. You can see 3D buildings, landmarks, monuments, bridges, and other structures in many cities around the world. You can also see 3D trees, plants, animals, and other natural features in some places.</p>
67
- <p>To enable 3D objects and imagery, you need to use the web or desktop version of Google Earth, and have a compatible device and browser. You can turn on or off 3D objects and imagery by clicking on the menu icon and selecting 3D Buildings or 3D Trees.</p>
68
- <h3>Street View</h3>
69
- <p>Street View is a feature that lets you see 360-degree photos of streets, buildings, and landmarks. You can use Street View to explore places as if you were there, and see what they look like from different angles. You can also use Street View to find businesses, services, attractions, and other points of interest.</p>
70
- <p>To access Street View, you need to use the web or desktop version of Google Earth, or the mobile version on Android devices. You can enter Street View by dragging the Pegman icon to any place that has a blue line or dot. You can exit Street View by clicking on the back arrow or the X icon.</p>
71
- <h3>Water and Ocean</h3>
72
- <p>Google Earth also lets you explore the water and ocean features of the earth. You can see the surface of the water, including waves, ripples, reflections, and colors. You can also dive into the ocean and see underwater features like coral reefs, shipwrecks, volcanoes, trenches, and marine life.</p>
73
- <p>To dive into the ocean, you need to use the web or desktop version of Google Earth. You can click on any place that has water or ocean, or use the search box to find a specific location. You can also use the Ocean layer to see different categories of ocean features.</p>
74
- <h3>Other Interesting Features</h3>
75
- <p>Google Earth has many other interesting features that you can use to enhance your experience. Here are some examples:</p>
76
- <ul>
77
- <li>Voyager: This feature lets you explore curated stories and tours from around the world. You can learn about different topics like culture, history, nature, travel, art, science, etc. You can also see interactive media like videos, photos, maps, quizzes, etc.</li>
78
- <li>I'm Feeling Lucky: This feature lets you discover random places on the globe. You can click on the dice icon and see where Google Earth takes you. You can also see a Knowledge Card that gives you some information about the place.</li>
79
- <li>Measure Tool: This feature lets you measure distances, areas, and elevations on Google Earth. You can click on the ruler icon and select a point or draw a line or shape on the map. You can also change the units of measurement.</li>
80
- <li>Drawing Tools: These features let you create custom maps on Google Earth. You can click on the add placemark icon to mark a location with a pin. You can also click on the add line or shape icon to draw a line or shape on the map. You can edit the name, color, style, and description of your placemark, line, or shape. You can also click on the add image or add video icon to add media to your map.</li>
81
- <li>Layers: These features let you see different types of information on Google Earth. You can click on the menu icon and select Layers to see the available options. You can turn on or off layers like Borders and Labels, Places, Photos, Roads, Weather, Ocean, etc.</li>
82
- </ul>
83
- <h2>What Are the System Requirements for Google Earth?</h2>
84
- <p>Google Earth is a powerful program that requires certain system requirements to run smoothly. Here are the minimum and recommended system requirements for Google Earth:</p>
85
- <table>
86
- <tr>
87
- <th>System Component</th>
88
- <th>Minimum Requirement</th>
89
- <th>Recommended Requirement</th>
90
- </tr>
91
- <tr>
92
- <td>Operating System</td>
93
- <td>Windows 7 or higher <br> Mac OS 10.8 or higher <br> Linux: LSB 4.1 (Linux Standard Base) libraries</td>
94
- <td>Windows 10 or higher <br> Mac OS 10.12 or higher <br> Linux: LSB 5.0 libraries</td>
95
- </tr>
96
- <tr>
97
- <td>CPU</td>
98
- <td>Pentium 4 2.4GHz+ or AMD 2400xp+</td>
99
- <td>Dual Core 2.0GHz+ or AMD X2 2.0GHz+</td>
100
- </tr>
101
- <tr>
102
- <td>RAM</td>
103
- <td>512 MB</td>
104
- <td>4 GB</td>
105
- </tr>
106
- <tr>
107
- <td>Disk Space</td>
108
- <td>2 GB free space</td>
109
- <td>4 GB free space</td>
110
- </tr>
111
- <tr>
112
- <td>Network Speed</td>
113
- <td>768 Kbits/sec</td>
114
- <td>DSL/Cable (at least 1 Mbits/sec)</td>
115
- </tr>
116
- <tr>
117
- <td>Graphics Card</td>
118
- <td>Dirext X9 and Shader Model 2.0 compatible card with 64 MB of VRAM <br> OpenGL 2.0 compatible card with 64 MB of VRAM (for Linux)</td>
119
- <td>Dirext X11 and Shader Model 3.0 compatible card with 512 MB of VRAM <br> OpenGL 3.0 compatible card with 512 MB of VRAM (for Linux)</td>
120
- </tr>
121
- <tr>
122
- <td>Screen Resolution</td>
123
- <td>1024x768 pixels, "16-bit High Color"</td>
124
- <td>"32-bit True Color"</td>
125
- </tr>
126
- <tr><td colspan="3">Source: <a href="">https://support.google.com/earth/answer/176180?hl=en&ref_topic=2376075#zippy=%2Csystem-requirements%2Ccheck-your-computer-for-graphics-card-information%2Ccheck-your-computer-for-graphics-card-information-on-a-mac%2Ccheck-your-computer-for-graphics-card-information-on-a-pc%2Ccheck-your-computer-for-graphics-card-information-on-linux%2Cupdate-your-graphics-card-drivers%2Cupdate-your-graphics-card-drivers-on-a-mac%2Cupdate-your-graphics-card-drivers-on-a-pc%2Cupdate-your-graphics-card-drivers-on-linux%2Cupdate-directx-and-opengl%2Cupdate-directx-and-opengl-on-a-mac%2Cupdate-directx-and-opengl-on-a-pc%2Cupdate-directx-and-opengl-on-linux%2Ccheck-if-you-have-the-latest-version-of-google-earth-pro%2Ccheck-if-you-have-the-latest-version-of-google-earth-pro-on-a-mac%2Ccheck-if-you-have-the-latest-version-of-google-earth-pro-on-a-pc%2Ccheck-if-you-have-the-latest-version-of-google-earth-pro-on-linux%2Crestart-google-earth-pro%2Crestart-google-earth-pro-on-a-mac%2Crestart-google-earth-pro-on-a-pc%2Crestart-google-earth-pro-on-linux%2Creinstall-google-earth-pro%2Creinstall-google-earth-pro-on-a-mac%2Creinstall-google-earth-pro-on-a-pc%2Creinstall-google-earth-pro-on-linux </a></td></tr>
127
- </table>
128
- <p>If you have any issues with running Google Earth, you can check the <a href="">https://support.google.com/earth/answer/176180?hl=en&ref_topic=2376075</a> for troubleshooting tips and solutions.</p>
129
- <h2>What Are Some Alternatives to Google Earth?</h2>
130
- <p>Google Earth is not the only program that lets you explore the world from your computer. There are some alternatives that you might want to try if you are looking for different features, perspectives, or experiences. Here are some examples:</p>
131
- <h3>Zoom Earth</h3>
132
- <p>Zoom Earth is a website that lets you see near-real-time satellite images of the earth. You can zoom in and out, and see the weather, clouds, fires, storms, and other events happening around the world. You can also access historical imagery and see how places have changed over time.</p>
133
- <p>You can visit Zoom Earth at <a href="">https://zoom.earth/</a>.</p>
134
- <h3>Marble</h3>
135
- <p>Marble is a desktop program that lets you see the earth as a 3D globe. You can rotate, tilt, and zoom the globe, and see different map views, such as political, physical, satellite, street, etc. You can also access various online services, such as Wikipedia, OpenStreetMap, Flickr, etc.</p>
136
- <p>You can download Marble from <a href="">https://marble.kde.org/</a>.</p>
137
- <h3>Satellites.pro</h3>
138
- <p>Satellites.pro is a website that lets you see high-resolution satellite images of any place on earth. You can search for any address or coordinates, and see the details of buildings, roads, landscapes, etc. You can also compare different images from different dates and sources.</p>
139
- <p>You can visit Satellites.pro at <a href="">https://satellites.pro/</a>.</p>
140
- <h3>NASA Worldview</h3>
141
- <p>NASA Worldview is a website that lets you see satellite images of the earth from NASA's Earth Observing System Data and Information System (EOSDIS). You can see the earth in different wavelengths, such as visible, infrared, water vapor, etc. You can also see various data layers, such as aerosols, fires, floods, snow and ice, etc.</p>
142
- <p>You can visit NASA Worldview at <a href="">https://worldview.earthdata.nasa.gov/</a>.</p>
143
- <h3>ArcGIS Map Viewer</h3>
144
- <p>ArcGIS Map Viewer is a website that lets you create and share interactive maps of the earth. You can use various basemaps, such as topographic, satellite, street, etc. You can also add various layers of data, such as demographics, environment, health, etc. You can also customize your map with symbols, labels, pop-ups, etc.</p>
145
- <p>You can visit ArcGIS Map Viewer at <a href="">https://www.arcgis.com/home/webmap/viewer.html</a>.</p>
146
- <h2>Conclusion</h2>
147
- <p>Google Earth is a program that lets you explore the world from your computer. You can download it for free and use it to see any place in high resolution, access Street View and historical imagery, dive into the ocean and see underwater features, learn more about the geography, history, culture, and environment of any location, and use various features and tools to enhance your experience. Google Earth is not the only program that lets you explore the world from your computer. There are some alternatives that you might want to try if you are looking for different features, perspectives, or experiences. We hope this article has helped you learn more about Google Earth and how to download it for free. If you have any questions or feedback, please let us know in the comments below. Happy exploring! <h2>FAQs</h2>
148
- <p>Here are some frequently asked questions about Google Earth and its download:</p>
149
- <ol>
150
- <li><b>Is Google Earth safe to download?</b><br>
151
- Yes, Google Earth is safe to download from the official website or app store. It does not contain any viruses, malware, or spyware. However, you should always be careful when downloading any software from the internet and scan it with a reliable antivirus program before installing it.</li>
152
- <li><b>How often is Google Earth updated?</b><br>
153
- Google Earth is updated regularly with new imagery, data, and features. The frequency of updates depends on various factors, such as the availability of satellite images, the quality of the images, the processing time, etc. Generally, Google Earth updates its imagery every one to three years.</li>
154
- <li><b>How accurate is Google Earth?</b><br>
155
- Google Earth is accurate in terms of the location and representation of places on the globe. However, it is not a perfect representation of reality. There may be some errors, distortions, or outdated information due to the limitations of satellite imagery, aerial photography, GIS data, and other sources. You should always verify the information on Google Earth with other sources before using it for any serious purpose.</li>
156
- <li><b>Can I use Google Earth offline?</b><br>
157
- Yes, you can use Google Earth offline if you have downloaded the desktop version of Google Earth Pro. You can save areas of interest on your computer and view them later without an internet connection. You can also create offline movies and print screenshots. However, you will not be able to access some features that require online services, such as Street View, historical imagery, Voyager, etc.</li>
158
- <li><b>Can I use Google Earth for commercial purposes?</b><br>
159
- Yes, you can use Google Earth for commercial purposes if you have obtained a license from Google. You can apply for a license at <a href="">https://www.google.com/permissions/geoguidelines/</a>. You will need to follow the terms and conditions of the license agreement and respect the intellectual property rights of Google and its partners.</li>
160
- </ol></p> 197e85843d<br />
161
- <br />
162
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/infer/modules/train/train.py DELETED
@@ -1,723 +0,0 @@
1
- import os
2
- import sys
3
- import logging
4
-
5
- logger = logging.getLogger(__name__)
6
-
7
- now_dir = os.getcwd()
8
- sys.path.append(os.path.join(now_dir))
9
-
10
- import datetime
11
-
12
- from infer.lib.train import utils
13
-
14
- hps = utils.get_hparams()
15
- os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",")
16
- n_gpus = len(hps.gpus.split("-"))
17
- from random import randint, shuffle
18
-
19
- import torch
20
- try:
21
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
22
- if torch.xpu.is_available():
23
- from infer.modules.ipex import ipex_init
24
- from infer.modules.ipex.gradscaler import gradscaler_init
25
- from torch.xpu.amp import autocast
26
- GradScaler = gradscaler_init()
27
- ipex_init()
28
- else:
29
- from torch.cuda.amp import GradScaler, autocast
30
- except Exception:
31
- from torch.cuda.amp import GradScaler, autocast
32
-
33
- torch.backends.cudnn.deterministic = False
34
- torch.backends.cudnn.benchmark = False
35
- from time import sleep
36
- from time import time as ttime
37
-
38
- import torch.distributed as dist
39
- import torch.multiprocessing as mp
40
-
41
- from torch.nn import functional as F
42
- from torch.nn.parallel import DistributedDataParallel as DDP
43
- from torch.utils.data import DataLoader
44
- from torch.utils.tensorboard import SummaryWriter
45
-
46
- from infer.lib.infer_pack import commons
47
- from infer.lib.train.data_utils import (
48
- DistributedBucketSampler,
49
- TextAudioCollate,
50
- TextAudioCollateMultiNSFsid,
51
- TextAudioLoader,
52
- TextAudioLoaderMultiNSFsid,
53
- )
54
-
55
- if hps.version == "v1":
56
- from infer.lib.infer_pack.models import MultiPeriodDiscriminator
57
- from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid as RVC_Model_f0
58
- from infer.lib.infer_pack.models import (
59
- SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0,
60
- )
61
- else:
62
- from infer.lib.infer_pack.models import (
63
- SynthesizerTrnMs768NSFsid as RVC_Model_f0,
64
- SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0,
65
- MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator,
66
- )
67
-
68
- from infer.lib.train.losses import (
69
- discriminator_loss,
70
- feature_loss,
71
- generator_loss,
72
- kl_loss,
73
- )
74
- from infer.lib.train.mel_processing import mel_spectrogram_torch, spec_to_mel_torch
75
- from infer.lib.train.process_ckpt import savee
76
-
77
- global_step = 0
78
- import csv
79
-
80
- class EpochRecorder:
81
- def __init__(self):
82
- self.last_time = ttime()
83
-
84
- def record(self):
85
- now_time = ttime()
86
- elapsed_time = now_time - self.last_time
87
- self.last_time = now_time
88
- elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time))
89
- current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
90
- return f"[{current_time}] | ({elapsed_time_str})"
91
-
92
- def reset_stop_flag():
93
- with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite:
94
- csv_writer = csv.writer(STOPCSVwrite, delimiter=",")
95
- csv_writer.writerow(["False"])
96
-
97
- def create_model(hps, model_f0, model_nof0):
98
- filter_length_adjusted = hps.data.filter_length // 2 + 1
99
- segment_size_adjusted = hps.train.segment_size // hps.data.hop_length
100
- is_half = hps.train.fp16_run
101
- sr = hps.sample_rate
102
-
103
- model = model_f0 if hps.if_f0 == 1 else model_nof0
104
-
105
- return model(
106
- filter_length_adjusted,
107
- segment_size_adjusted,
108
- **hps.model,
109
- is_half=is_half,
110
- sr=sr
111
- )
112
-
113
- def move_model_to_cuda_if_available(model, rank):
114
- if torch.cuda.is_available():
115
- return model.cuda(rank)
116
- else:
117
- return model
118
-
119
- def create_optimizer(model, hps):
120
- return torch.optim.AdamW(
121
- model.parameters(),
122
- hps.train.learning_rate,
123
- betas=hps.train.betas,
124
- eps=hps.train.eps,
125
- )
126
-
127
- def create_ddp_model(model, rank):
128
- if torch.cuda.is_available():
129
- return DDP(model, device_ids=[rank])
130
- else:
131
- return DDP(model)
132
-
133
- def create_dataset(hps, if_f0=True):
134
- return TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) if if_f0 else TextAudioLoader(hps.data.training_files, hps.data)
135
-
136
- def create_sampler(dataset, batch_size, n_gpus, rank):
137
- return DistributedBucketSampler(
138
- dataset,
139
- batch_size * n_gpus,
140
- # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s
141
- [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s
142
- num_replicas=n_gpus,
143
- rank=rank,
144
- shuffle=True,
145
- )
146
-
147
- def set_collate_fn(if_f0=True):
148
- return TextAudioCollateMultiNSFsid() if if_f0 else TextAudioCollate()
149
-
150
-
151
- def main():
152
- n_gpus = torch.cuda.device_count()
153
-
154
- if torch.cuda.is_available() == False and torch.backends.mps.is_available() == True:
155
- n_gpus = 1
156
- if n_gpus < 1:
157
- # patch to unblock people without gpus. there is probably a better way.
158
- logger.warn("NO GPU DETECTED: falling back to CPU - this may take a while")
159
- n_gpus = 1
160
- os.environ["MASTER_ADDR"] = "localhost"
161
- os.environ["MASTER_PORT"] = str(randint(20000, 55555))
162
- children = []
163
- for i in range(n_gpus):
164
- subproc = mp.Process(
165
- target=run,
166
- args=(
167
- i,
168
- n_gpus,
169
- hps,
170
- ),
171
- )
172
- children.append(subproc)
173
- subproc.start()
174
-
175
- for i in range(n_gpus):
176
- children[i].join()
177
-
178
-
179
- def run(rank, n_gpus, hps):
180
- global global_step
181
- if rank == 0:
182
- logger = utils.get_logger(hps.model_dir)
183
- logger.info(hps)
184
- # utils.check_git_hash(hps.model_dir)
185
- writer = SummaryWriter(log_dir=hps.model_dir)
186
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
187
-
188
- dist.init_process_group(
189
- backend="gloo", init_method="env://", world_size=n_gpus, rank=rank
190
- )
191
- torch.manual_seed(hps.train.seed)
192
- if torch.cuda.is_available():
193
- torch.cuda.set_device(rank)
194
-
195
- if hps.if_f0 == 1:
196
- train_dataset = TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data)
197
- else:
198
- train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
199
- train_sampler = DistributedBucketSampler(
200
- train_dataset,
201
- hps.train.batch_size * n_gpus,
202
- # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s
203
- [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s
204
- num_replicas=n_gpus,
205
- rank=rank,
206
- shuffle=True,
207
- )
208
- # It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit.
209
- # num_workers=8 -> num_workers=4
210
- if hps.if_f0 == 1:
211
- collate_fn = TextAudioCollateMultiNSFsid()
212
- else:
213
- collate_fn = TextAudioCollate()
214
- train_loader = DataLoader(
215
- train_dataset,
216
- num_workers=4,
217
- shuffle=False,
218
- pin_memory=True,
219
- collate_fn=collate_fn,
220
- batch_sampler=train_sampler,
221
- persistent_workers=True,
222
- prefetch_factor=8,
223
- )
224
- if hps.if_f0 == 1:
225
- net_g = RVC_Model_f0(
226
- hps.data.filter_length // 2 + 1,
227
- hps.train.segment_size // hps.data.hop_length,
228
- **hps.model,
229
- is_half=hps.train.fp16_run,
230
- sr=hps.sample_rate,
231
- )
232
- else:
233
- net_g = RVC_Model_nof0(
234
- hps.data.filter_length // 2 + 1,
235
- hps.train.segment_size // hps.data.hop_length,
236
- **hps.model,
237
- is_half=hps.train.fp16_run,
238
- )
239
- if torch.cuda.is_available():
240
- net_g = net_g.cuda(rank)
241
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm)
242
- if torch.cuda.is_available():
243
- net_d = net_d.cuda(rank)
244
- optim_g = torch.optim.AdamW(
245
- net_g.parameters(),
246
- hps.train.learning_rate,
247
- betas=hps.train.betas,
248
- eps=hps.train.eps,
249
- )
250
- optim_d = torch.optim.AdamW(
251
- net_d.parameters(),
252
- hps.train.learning_rate,
253
- betas=hps.train.betas,
254
- eps=hps.train.eps,
255
- )
256
- # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
257
- # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
258
- if hasattr(torch, "xpu") and torch.xpu.is_available():
259
- pass
260
- elif torch.cuda.is_available():
261
- net_g = DDP(net_g, device_ids=[rank])
262
- net_d = DDP(net_d, device_ids=[rank])
263
- else:
264
- net_g = DDP(net_g)
265
- net_d = DDP(net_d)
266
-
267
- try: # 如果能加载自动resume
268
- _, _, _, epoch_str = utils.load_checkpoint(
269
- utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d
270
- ) # D多半加载没事
271
- if rank == 0:
272
- logger.info("loaded D")
273
- # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0)
274
- _, _, _, epoch_str = utils.load_checkpoint(
275
- utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g
276
- )
277
- global_step = (epoch_str - 1) * len(train_loader)
278
- # epoch_str = 1
279
- # global_step = 0
280
- except: # 如果首次不能加载,加载pretrain
281
- # traceback.print_exc()
282
- epoch_str = 1
283
- global_step = 0
284
- if hps.pretrainG != "":
285
- if rank == 0:
286
- logger.info("loaded pretrained %s" % (hps.pretrainG))
287
- if hasattr(net_g, "module"):
288
- logger.info(
289
- net_g.module.load_state_dict(
290
- torch.load(hps.pretrainG, map_location="cpu")["model"]
291
- )
292
- ) ##测试不加载优化器
293
- else:
294
- logger.info(
295
- net_g.load_state_dict(
296
- torch.load(hps.pretrainG, map_location="cpu")["model"]
297
- )
298
- ) ##测试不加载优化器
299
- if hps.pretrainD != "":
300
- if rank == 0:
301
- logger.info("loaded pretrained %s" % (hps.pretrainD))
302
- if hasattr(net_d, "module"):
303
- logger.info(
304
- net_d.module.load_state_dict(
305
- torch.load(hps.pretrainD, map_location="cpu")["model"]
306
- )
307
- )
308
- else:
309
- logger.info(
310
- net_d.load_state_dict(
311
- torch.load(hps.pretrainD, map_location="cpu")["model"]
312
- )
313
- )
314
-
315
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
316
- optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
317
- )
318
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
319
- optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
320
- )
321
-
322
- scaler = GradScaler(enabled=hps.train.fp16_run)
323
-
324
- cache = []
325
- for epoch in range(epoch_str, hps.train.epochs + 1):
326
- if rank == 0:
327
- train_and_evaluate(
328
- rank,
329
- epoch,
330
- hps,
331
- [net_g, net_d],
332
- [optim_g, optim_d],
333
- [scheduler_g, scheduler_d],
334
- scaler,
335
- [train_loader, None],
336
- logger,
337
- [writer, writer_eval],
338
- cache,
339
- )
340
- else:
341
- train_and_evaluate(
342
- rank,
343
- epoch,
344
- hps,
345
- [net_g, net_d],
346
- [optim_g, optim_d],
347
- [scheduler_g, scheduler_d],
348
- scaler,
349
- [train_loader, None],
350
- None,
351
- None,
352
- cache,
353
- )
354
- scheduler_g.step()
355
- scheduler_d.step()
356
-
357
-
358
- def train_and_evaluate(
359
- rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, cache
360
- ):
361
- net_g, net_d = nets
362
- optim_g, optim_d = optims
363
- train_loader, eval_loader = loaders
364
- if writers is not None:
365
- writer, writer_eval = writers
366
-
367
- train_loader.batch_sampler.set_epoch(epoch)
368
- global global_step
369
-
370
- net_g.train()
371
- net_d.train()
372
-
373
- # Prepare data iterator
374
- if hps.if_cache_data_in_gpu == True:
375
- # Use Cache
376
- data_iterator = cache
377
- if cache == []:
378
- # Make new cache
379
- for batch_idx, info in enumerate(train_loader):
380
- # Unpack
381
- if hps.if_f0 == 1:
382
- (
383
- phone,
384
- phone_lengths,
385
- pitch,
386
- pitchf,
387
- spec,
388
- spec_lengths,
389
- wave,
390
- wave_lengths,
391
- sid,
392
- ) = info
393
- else:
394
- (
395
- phone,
396
- phone_lengths,
397
- spec,
398
- spec_lengths,
399
- wave,
400
- wave_lengths,
401
- sid,
402
- ) = info
403
- # Load on CUDA
404
- if torch.cuda.is_available():
405
- phone = phone.cuda(rank, non_blocking=True)
406
- phone_lengths = phone_lengths.cuda(rank, non_blocking=True)
407
- if hps.if_f0 == 1:
408
- pitch = pitch.cuda(rank, non_blocking=True)
409
- pitchf = pitchf.cuda(rank, non_blocking=True)
410
- sid = sid.cuda(rank, non_blocking=True)
411
- spec = spec.cuda(rank, non_blocking=True)
412
- spec_lengths = spec_lengths.cuda(rank, non_blocking=True)
413
- wave = wave.cuda(rank, non_blocking=True)
414
- wave_lengths = wave_lengths.cuda(rank, non_blocking=True)
415
- # Cache on list
416
- if hps.if_f0 == 1:
417
- cache.append(
418
- (
419
- batch_idx,
420
- (
421
- phone,
422
- phone_lengths,
423
- pitch,
424
- pitchf,
425
- spec,
426
- spec_lengths,
427
- wave,
428
- wave_lengths,
429
- sid,
430
- ),
431
- )
432
- )
433
- else:
434
- cache.append(
435
- (
436
- batch_idx,
437
- (
438
- phone,
439
- phone_lengths,
440
- spec,
441
- spec_lengths,
442
- wave,
443
- wave_lengths,
444
- sid,
445
- ),
446
- )
447
- )
448
- else:
449
- # Load shuffled cache
450
- shuffle(cache)
451
- else:
452
- # Loader
453
- data_iterator = enumerate(train_loader)
454
-
455
- # Run steps
456
- epoch_recorder = EpochRecorder()
457
- for batch_idx, info in data_iterator:
458
- # Data
459
- ## Unpack
460
- if hps.if_f0 == 1:
461
- (
462
- phone,
463
- phone_lengths,
464
- pitch,
465
- pitchf,
466
- spec,
467
- spec_lengths,
468
- wave,
469
- wave_lengths,
470
- sid,
471
- ) = info
472
- else:
473
- phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info
474
- ## Load on CUDA
475
- if (hps.if_cache_data_in_gpu == False) and torch.cuda.is_available():
476
- phone = phone.cuda(rank, non_blocking=True)
477
- phone_lengths = phone_lengths.cuda(rank, non_blocking=True)
478
- if hps.if_f0 == 1:
479
- pitch = pitch.cuda(rank, non_blocking=True)
480
- pitchf = pitchf.cuda(rank, non_blocking=True)
481
- sid = sid.cuda(rank, non_blocking=True)
482
- spec = spec.cuda(rank, non_blocking=True)
483
- spec_lengths = spec_lengths.cuda(rank, non_blocking=True)
484
- wave = wave.cuda(rank, non_blocking=True)
485
- # wave_lengths = wave_lengths.cuda(rank, non_blocking=True)
486
-
487
- # Calculate
488
- with autocast(enabled=hps.train.fp16_run):
489
- if hps.if_f0 == 1:
490
- (
491
- y_hat,
492
- ids_slice,
493
- x_mask,
494
- z_mask,
495
- (z, z_p, m_p, logs_p, m_q, logs_q),
496
- ) = net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid)
497
- else:
498
- (
499
- y_hat,
500
- ids_slice,
501
- x_mask,
502
- z_mask,
503
- (z, z_p, m_p, logs_p, m_q, logs_q),
504
- ) = net_g(phone, phone_lengths, spec, spec_lengths, sid)
505
- mel = spec_to_mel_torch(
506
- spec,
507
- hps.data.filter_length,
508
- hps.data.n_mel_channels,
509
- hps.data.sampling_rate,
510
- hps.data.mel_fmin,
511
- hps.data.mel_fmax,
512
- )
513
- y_mel = commons.slice_segments(
514
- mel, ids_slice, hps.train.segment_size // hps.data.hop_length
515
- )
516
- with autocast(enabled=False):
517
- y_hat_mel = mel_spectrogram_torch(
518
- y_hat.float().squeeze(1),
519
- hps.data.filter_length,
520
- hps.data.n_mel_channels,
521
- hps.data.sampling_rate,
522
- hps.data.hop_length,
523
- hps.data.win_length,
524
- hps.data.mel_fmin,
525
- hps.data.mel_fmax,
526
- )
527
- if hps.train.fp16_run == True:
528
- y_hat_mel = y_hat_mel.half()
529
- wave = commons.slice_segments(
530
- wave, ids_slice * hps.data.hop_length, hps.train.segment_size
531
- ) # slice
532
-
533
- # Discriminator
534
- y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach())
535
- with autocast(enabled=False):
536
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(
537
- y_d_hat_r, y_d_hat_g
538
- )
539
- optim_d.zero_grad()
540
- scaler.scale(loss_disc).backward()
541
- scaler.unscale_(optim_d)
542
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
543
- scaler.step(optim_d)
544
-
545
- with autocast(enabled=hps.train.fp16_run):
546
- # Generator
547
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat)
548
- with autocast(enabled=False):
549
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
550
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
551
- loss_fm = feature_loss(fmap_r, fmap_g)
552
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
553
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
554
- optim_g.zero_grad()
555
- scaler.scale(loss_gen_all).backward()
556
- scaler.unscale_(optim_g)
557
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
558
- scaler.step(optim_g)
559
- scaler.update()
560
-
561
- if rank == 0:
562
- if global_step % hps.train.log_interval == 0:
563
- lr = optim_g.param_groups[0]["lr"]
564
- logger.info(
565
- "Train Epoch: {} [{:.0f}%]".format(
566
- epoch, 100.0 * batch_idx / len(train_loader)
567
- )
568
- )
569
- # Amor For Tensorboard display
570
- if loss_mel > 75:
571
- loss_mel = 75
572
- if loss_kl > 9:
573
- loss_kl = 9
574
-
575
- logger.info([global_step, lr])
576
- logger.info(
577
- f"loss_disc={loss_disc:.3f}, loss_gen={loss_gen:.3f}, loss_fm={loss_fm:.3f},loss_mel={loss_mel:.3f}, loss_kl={loss_kl:.3f}"
578
- )
579
- scalar_dict = {
580
- "loss/g/total": loss_gen_all,
581
- "loss/d/total": loss_disc,
582
- "learning_rate": lr,
583
- "grad_norm_d": grad_norm_d,
584
- "grad_norm_g": grad_norm_g,
585
- }
586
- scalar_dict.update(
587
- {
588
- "loss/g/fm": loss_fm,
589
- "loss/g/mel": loss_mel,
590
- "loss/g/kl": loss_kl,
591
- }
592
- )
593
-
594
- scalar_dict.update(
595
- {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}
596
- )
597
- scalar_dict.update(
598
- {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}
599
- )
600
- scalar_dict.update(
601
- {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}
602
- )
603
- image_dict = {
604
- "slice/mel_org": utils.plot_spectrogram_to_numpy(
605
- y_mel[0].data.cpu().numpy()
606
- ),
607
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(
608
- y_hat_mel[0].data.cpu().numpy()
609
- ),
610
- "all/mel": utils.plot_spectrogram_to_numpy(
611
- mel[0].data.cpu().numpy()
612
- ),
613
- }
614
- utils.summarize(
615
- writer=writer,
616
- global_step=global_step,
617
- images=image_dict,
618
- scalars=scalar_dict,
619
- )
620
- global_step += 1
621
- # /Run steps
622
-
623
- if epoch % hps.save_every_epoch == 0 and rank == 0:
624
- if hps.if_latest == 0:
625
- utils.save_checkpoint(
626
- net_g,
627
- optim_g,
628
- hps.train.learning_rate,
629
- epoch,
630
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)),
631
- )
632
- utils.save_checkpoint(
633
- net_d,
634
- optim_d,
635
- hps.train.learning_rate,
636
- epoch,
637
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)),
638
- )
639
- else:
640
- utils.save_checkpoint(
641
- net_g,
642
- optim_g,
643
- hps.train.learning_rate,
644
- epoch,
645
- os.path.join(hps.model_dir, "G_{}.pth".format(2333333)),
646
- )
647
- utils.save_checkpoint(
648
- net_d,
649
- optim_d,
650
- hps.train.learning_rate,
651
- epoch,
652
- os.path.join(hps.model_dir, "D_{}.pth".format(2333333)),
653
- )
654
- if rank == 0 and hps.save_every_weights == "1":
655
- if hasattr(net_g, "module"):
656
- ckpt = net_g.module.state_dict()
657
- else:
658
- ckpt = net_g.state_dict()
659
- logger.info(
660
- "saving ckpt %s_e%s:%s"
661
- % (
662
- hps.name,
663
- epoch,
664
- savee(
665
- ckpt,
666
- hps.sample_rate,
667
- hps.if_f0,
668
- hps.name + "_e%s_s%s" % (epoch, global_step),
669
- epoch,
670
- hps.version,
671
- hps,
672
- ),
673
- )
674
- )
675
-
676
- stopbtn = False
677
- try:
678
- with open("csvdb/stop.csv", 'r') as csv_file:
679
- stopbtn_str = next(csv.reader(csv_file), [None])[0]
680
- if stopbtn_str is not None: stopbtn = stopbtn_str.lower() == 'true'
681
- except (ValueError, TypeError, FileNotFoundError, IndexError) as e:
682
- print(f"Handling exception: {e}")
683
- stopbtn = False
684
-
685
- if stopbtn:
686
- logger.info("Stop Button was pressed. The program is closed.")
687
- ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict()
688
- logger.info(
689
- "saving final ckpt:%s"
690
- % (
691
- savee(
692
- ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps
693
- )
694
- )
695
- )
696
- sleep(1)
697
- reset_stop_flag()
698
- os._exit(2333333)
699
-
700
- if rank == 0:
701
- logger.info("====> Epoch: {} {}".format(epoch, epoch_recorder.record()))
702
- if epoch >= hps.total_epoch and rank == 0:
703
- logger.info("Training is done. The program is closed.")
704
-
705
- if hasattr(net_g, "module"):
706
- ckpt = net_g.module.state_dict()
707
- else:
708
- ckpt = net_g.state_dict()
709
- logger.info(
710
- "saving final ckpt:%s"
711
- % (
712
- savee(
713
- ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps
714
- )
715
- )
716
- )
717
- sleep(1)
718
- os._exit(2333333)
719
-
720
-
721
- if __name__ == "__main__":
722
- torch.multiprocessing.set_start_method("spawn")
723
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ADOPLE/ResumeAnalyzer/style.css DELETED
@@ -1,40 +0,0 @@
1
- #col-container {
2
- max-width: 600px;
3
- margin-left: auto;
4
- margin-right: auto;
5
- }
6
- .center {
7
- display: block;
8
- margin-left: auto;
9
- margin-right: auto;
10
- width: 50%;
11
- }
12
- #row-flex {
13
- display: flex;
14
- align-items: center;
15
- justify-content: center;
16
- }
17
- .leftimage .rightimage{
18
- filter: drop-shadow(20px 20px 10px white);
19
- }
20
- .leftimage{
21
- padding-top:40px;
22
- margin-left:310px;
23
- }
24
- .rightimage{
25
- padding-top:40px;
26
- margin-right:320px;
27
- }
28
- a,
29
- a:hover,
30
- a:visited {
31
- text-decoration-line: underline;
32
- font-weight: 600;
33
- color: #1f2937 !important;
34
- }
35
-
36
- .dark a,
37
- .dark a:hover,
38
- .dark a:visited {
39
- color: #f3f4f6 !important;
40
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/CLAP/utils.py DELETED
@@ -1,26 +0,0 @@
1
- import argparse
2
- import yaml
3
- import sys
4
-
5
- def read_config_as_args(config_path,args=None,is_config_str=False):
6
- return_dict = {}
7
-
8
- if config_path is not None:
9
- if is_config_str:
10
- yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
11
- else:
12
- with open(config_path, "r") as f:
13
- yml_config = yaml.load(f, Loader=yaml.FullLoader)
14
-
15
- if args != None:
16
- for k, v in yml_config.items():
17
- if k in args.__dict__:
18
- args.__dict__[k] = v
19
- else:
20
- sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
21
- else:
22
- for k, v in yml_config.items():
23
- return_dict[k] = v
24
-
25
- args = args if args != None else return_dict
26
- return argparse.Namespace(**args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AILab-CVC/SEED-LLaMA/SEED-1.md DELETED
@@ -1,93 +0,0 @@
1
- # SEED Tokenizer v1
2
- [[arXiv]](https://arxiv.org/abs/2307.08041)
3
-
4
- ![image](paper_images/teaser.jpg)
5
- ## Abstract
6
- We present SEED, an elaborate image tokenizer that empowers Large Language
7
- Models (LLMs) with the emergent ability to **SEE** and **D**raw at the same time.
8
- Research on image tokenizers has previously reached an impasse, as frameworks
9
- employing quantized visual tokens have lost prominence due to subpar performance and convergence in multimodal comprehension (compared to BLIP-2, etc.)
10
- or generation (compared to Stable Diffusion, etc.). Despite the limitations, we
11
- remain confident in its natural capacity to unify visual and textual representations,
12
- facilitating scalable multimodal training with LLM’s original recipe. In this study,
13
- we identify two crucial principles for the architecture and training of SEED that
14
- effectively ease subsequent alignment with LLMs. (1) Image tokens should be
15
- independent of 2D physical patch positions and instead be produced with a 1D
16
- causal dependency, exhibiting intrinsic interdependence that aligns with the left-to-right autoregressive prediction mechanism in LLMs. (2) Image tokens should
17
- capture high-level semantics consistent with the degree of semantic abstraction in
18
- words, and be optimized for both discriminativeness and reconstruction during the
19
- tokenizer training phase. As a result, the off-the-shelf LLM is able to perform both
20
- image-to-text and text-to-image generation by incorporating our SEED through
21
- efficient LoRA tuning. Comprehensive multimodal pretraining and instruction
22
- tuning, which may yield improved results, are reserved for future investigation.
23
- This version of SEED was trained in 5.7 days using only 64 V100 GPUs and 5M
24
- publicly available image-text pairs. Our preliminary study emphasizes the great
25
- potential of discrete visual tokens in versatile multimodal LLMs and the importance
26
- of proper image tokenizers in broader research.
27
-
28
- ## SEED Tokenizer for Image Reconstruction
29
- ![image](paper_images/reconstruction.jpg)
30
-
31
- ## SEED-OPT<sub>2.7B </sub> for Multimodal Comprehension
32
- ![image](paper_images/vqa.jpg)
33
-
34
- ## SEED-OPT<sub>2.7B </sub> for Multimodal Generation
35
- ![image](paper_images/generation.jpg)
36
-
37
- ## Dependencies and Installation
38
- - Python >= 3.8 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux))
39
- - [PyTorch >= 1.11.0](https://pytorch.org/)
40
- - NVIDIA GPU + [CUDA](https://developer.nvidia.com/cuda-downloads)
41
- ### Installation
42
- 1. Clone repo
43
-
44
- ```bash
45
- git clone https://github.com/AILab-CVC/SEED.git
46
- cd SEED
47
- ```
48
-
49
- 2. Install dependent packages
50
-
51
- ```bash
52
- sh install.sh
53
- ```
54
-
55
- ## Model Weights
56
- We release the pre-trained SEED Visual Tokenizer in [google drive](https://drive.google.com/drive/folders/1xmVXuttQfBPBOe4ZR96Wu1X34uzPkxsS?usp=drive_link).
57
-
58
- ## Inference
59
- To discretize an image to 1D vision codes with causal dependency, and reconstruct the image
60
- from the vision codes using stable diffusion UNet,
61
-
62
- 1. Download the pre-trained SEED Visual Tokenizer and stable diffusion model in [google drive](https://drive.google.com/drive/folders/1xmVXuttQfBPBOe4ZR96Wu1X34uzPkxsS?usp=drive_link) and put them under the folder "pretrained".
63
- 2. run the inference code.
64
- ```bash
65
- python demo_recon.py
66
- ```
67
-
68
- ## To Do
69
- - [x] Release SEED Tokenizer
70
-
71
- ## License
72
- SEED is released under Apache License Version 2.0.
73
-
74
- ## Acknowledgement
75
- We utilize Stable Diffusion to decode images from our visual codes, and use its implementation and pre-trained model in https://github.com/CompVis/stable-diffusion.git.
76
-
77
- Our code is based on the implementation of BLIP-2 in https://github.com/salesforce/LAVIS.git.
78
-
79
-
80
- ## Citation
81
- If you find the work helpful, please consider citing:
82
- ```
83
- @misc{ge2023planting,
84
- title={Planting a SEED of Vision in Large Language Model},
85
- author={Yuying Ge and Yixiao Ge and Ziyun Zeng and Xintao Wang and Ying Shan},
86
- year={2023},
87
- eprint={2307.08041},
88
- archivePrefix={arXiv},
89
- primaryClass={cs.CV}
90
- }
91
- ```
92
-
93
- The project is still in progress. Stay tuned for more updates!
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_skirt_256x192/__init__.py DELETED
File without changes
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/__init__.py DELETED
File without changes
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/modules/layers.py DELETED
@@ -1,70 +0,0 @@
1
- import torch
2
- from torch import nn
3
- from typing import Any
4
-
5
-
6
- class BatchNormConv1d(nn.Module):
7
- """
8
- A nn.Conv1d followed by an optional activation function, and nn.BatchNorm1d
9
- """
10
-
11
- def __init__(
12
- self,
13
- in_dim: int,
14
- out_dim: int,
15
- kernel_size: int,
16
- stride: int,
17
- padding: int,
18
- activation: Any = None,
19
- ):
20
- super().__init__()
21
- self.conv1d = nn.Conv1d(
22
- in_dim,
23
- out_dim,
24
- kernel_size=kernel_size,
25
- stride=stride,
26
- padding=padding,
27
- bias=False,
28
- )
29
- self.bn = nn.BatchNorm1d(out_dim)
30
- self.activation = activation
31
-
32
- def forward(self, x: Any):
33
- x = self.conv1d(x)
34
- if self.activation is not None:
35
- x = self.activation(x)
36
- return self.bn(x)
37
-
38
-
39
- class LinearNorm(torch.nn.Module):
40
- def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
41
- super().__init__()
42
- self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
43
-
44
- torch.nn.init.xavier_uniform_(
45
- self.linear_layer.weight,
46
- gain=torch.nn.init.calculate_gain(w_init_gain))
47
-
48
- def forward(self, x):
49
- return self.linear_layer(x)
50
-
51
-
52
- class ConvNorm(torch.nn.Module):
53
- def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
54
- padding=None, dilation=1, bias=True, w_init_gain='linear'):
55
- super().__init__()
56
- if padding is None:
57
- assert(kernel_size % 2 == 1)
58
- padding = int(dilation * (kernel_size - 1) / 2)
59
-
60
- self.conv = torch.nn.Conv1d(in_channels, out_channels,
61
- kernel_size=kernel_size, stride=stride,
62
- padding=padding, dilation=dilation,
63
- bias=bias)
64
-
65
- torch.nn.init.xavier_uniform_(
66
- self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
67
-
68
- def forward(self, signal):
69
- conv_signal = self.conv(signal)
70
- return conv_signal
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Abhaykoul/HelpingAI-2.0/app.py DELETED
@@ -1,179 +0,0 @@
1
- import streamlit as st
2
- import requests
3
- import google.generativeai as palm
4
-
5
- # Set the page title and icon
6
- st.set_page_config(page_title="HelpingAI 2.0")
7
-
8
- # Wikipedia Microbot Constants
9
- WIKIPEDIA_API_URL = "https://en.wikipedia.w/api.php"
10
-
11
- # AI Mode Constants
12
- palm.configure(api_key="AIzaSyCnrcQWTEedcGuEsJ9Uhbj_ZS9B2nozMiU")
13
- defaults = {
14
- 'model': 'models/text-bison-001',
15
- 'temperature': 1,
16
- 'candidate_count': 1,
17
- 'top_k': 40,
18
- 'top_p': 0.95,
19
- 'max_output_tokens': 1024,
20
- 'stop_sequences': [],
21
- 'safety_settings': [
22
- {"category": "HARM_CATEGORY_DEROGATORY", "threshold": 4},
23
- {"category": "HARM_CATEGORY_TOXICITY", "threshold": 4},
24
- {"category": "HARM_CATEGORY_VIOLENCE", "threshold": 4},
25
- {"category": "HARM_CATEGORY_SEXUAL", "threshold": 4},
26
- {"category": "HARM_CATEGORY_MEDICAL", "threshold": 4},
27
- {"category": "HARM_CATEGORY_DANGEROUS", "threshold": 4},
28
- ]
29
- }
30
-
31
- # Custom Instruction for Personal AI
32
- personal_ai_instruction = """
33
- Hello! I am HelpingAI. I was developed by Abhay Koul, who is 16 years old and lives in KP Colony, Vessu, Anantnag. Abhay Koul is currently working on more AI and AI-related projects. Certainly! If you're looking for more information about Abhay Koul's projects and HelpingAI, I would recommend joining the Discord community. You can find additional details there. He has been working on AI since Jan 2023. If you want more details, you can join the Discord community. I want to be your personal AI assistant. I am based on HelpingAI's 3rd model which is an upgraded model based on GPT2 and GPT neo. My goal is to help you with whatever you need, provide you with advice and support, and be a friendly and approachable companion. Be conversational and engaging, and always be open and responsive to your questions and requests. I will always use emojis in response. I can code for you. I will give you quick summarized responses and the things I don't know, I will tell you about. also i will not give you phone number The main difference between HelpingAI's 2nd model is that it generates large responses and HelpingAi's first model is very good, even better than mine, but the issue was he was not able to save the query at all so he forgot what the user earlier said. Me, based on HelpingAI's 3rd model, generates summarized responses and acts as your friend. Model 1 and Model 2 were based on HelpingAI's model created by Abhay Koul, only Model 3 cannot code, while Model 1 and Model 2 can code. Also, Model 1 and Model 2 have the purpose of simplifying your learning journey by providing personalized assistance, innovative teaching methods, and tailored resources to meet your unique needs. HelpingAI has a model that is text to image named as HelpingAI-TTI. HelpingAI was initially developed for S.U.P.E.R.B.O.T. and vortexAI. 
For more info, visit: [HelpingAI GitHub](https://github.com/HelpingAI), [Abhay Koul's Replit](https://replit.com/@Devastation-war), and join the Discord community [here](https://discord.gg/2EeZcJjyRd).
34
- """
35
-
36
- # Custom Instruction for Gym Trainer
37
- gym_trainer_instruction = """
38
- Welcome to the Gym Trainer mode of HelpingAI made by Abhay Koul who is 16 years old and lives in KP Colony, Vessu, Anantnag. Abhay Koul is currently working on more AI and AI-related projects. Certainly! If you're looking for more information about Abhay Koul's projects and HelpingAI, I would recommend joining the Discord community. You can find additional details there. He has been working on AI since Jan 2023. If you want more details, you can join the Discord community! I'm here to assist you with your fitness journey. I'm your virtual gym trainer, and I can provide guidance on exercises, nutrition, and health tips. Whether you're looking to build muscle, lose weight, or stay fit, I'm here to help.Please ask any fitness-related questions or let me know your goals, and I'll provide you with tailored advice and workouts. Remember, consistency is key to achieving your fitness goals, and I'm here to support you on your journey. Let's get started!
39
- """
40
-
41
- # Create tabs for Wikipedia Microbot, Personal AI, Text to Image, AI-research Assistant, StudyAI-7b, and Gym Trainer
42
- selected_mode = st.radio("Select Mode", ["Wikipedia Microbot", "Personal AI", "Text to Image", "AI-research Assistant", "StudyAI-7b", "Gym Trainer"])
43
-
44
- if selected_mode == "Wikipedia Microbot":
45
- # Wikipedia Microbot Code
46
- st.title("Wikipedia Microbot")
47
- st.markdown("Explore Wikipedia with Ease")
48
-
49
- # Sidebar for user options
50
- st.sidebar.header("Options")
51
-
52
- # User input and search button
53
- query = st.sidebar.text_input("Enter a Query", help="E.g., 'Python programming'")
54
- search_button = st.sidebar.button("Search")
55
-
56
- # Container for main content
57
- main_container = st.container()
58
-
59
- if search_button:
60
- if query:
61
- try:
62
- # Search Wikipedia for the query
63
- params = {
64
- "action": "query",
65
- "format": "json",
66
- "prop": "extracts|info|pageviews",
67
- "exintro": True,
68
- "explaintext": True,
69
- "exsectionformat": "plain",
70
- "titles": query,
71
- "utf8": 1,
72
- "formatversion": 2,
73
- "pvipdays": 7,
74
- }
75
-
76
- response = requests.get(WIKIPEDIA_API_URL, params=params)
77
-
78
- if response.status_code == 200:
79
- data = response.json()
80
-
81
- if "error" in data:
82
- st.sidebar.error(f"Error: {data['error']['info']}")
83
- else:
84
- page = data["query"]["pages"][0]
85
-
86
- # Display page title
87
- st.title(page['title'])
88
-
89
- # Display page views statistics
90
- views = page.get("pageviews", {}).get(query, "Data not available")
91
- st.info(f"Page Views (Last 7 days): {views}")
92
-
93
- # Display summary
94
- st.write(page.get("extract", "No summary available."))
95
-
96
- else:
97
- st.sidebar.error("Error: Unable to retrieve data from Wikipedia. Please try again later.")
98
- except Exception as e:
99
- st.sidebar.error(f"Error: {e}")
100
-
101
- elif selected_mode == "Personal AI":
102
- # Personal AI Code
103
- st.title("Personal AI")
104
- st.markdown("Interact with an AI powered by HelpingAI")
105
-
106
- user_input = st.text_area('You:', height=100, help="Type your message here")
107
-
108
- if st.button('Submit', key='ai_button'):
109
- with st.spinner("Thinking..."):
110
- if user_input.lower() in ['quit', 'exit', 'bye']:
111
- st.write("Goodbye! Have a great day!")
112
- else:
113
- # Create a chat history session state
114
- session_state = st.session_state.get(user_input, [])
115
- session_state.append({"user": user_input})
116
- st.session_state[user_input] = session_state
117
-
118
- # Prepare conversation history
119
- conversation_history = "\n".join(["You: " + item["user"] for item in session_state])
120
-
121
- # Construct the prompt with conversation history
122
- prompt = f"""{personal_ai_instruction}
123
- Your conversation history:\n{conversation_history}
124
- Your Personal AI's response:"""
125
-
126
- response = palm.generate_text(**defaults, prompt=prompt)
127
- st.write(response.result)
128
-
129
- elif selected_mode == "Text to Image":
130
- # Text to Image Code
131
- st.title("Text to Image")
132
- st.markdown("Text to Image Generator")
133
-
134
- # Embed the website using HTML iframe
135
- st.markdown('<iframe src="https://helpingai-tti.netlify.app/" width="100%" height="700"></iframe>', unsafe_allow_html=True)
136
-
137
- elif selected_mode == "AI-research Assistant":
138
- # AI-research Assistant Code
139
- st.title("AI-research Assistant")
140
- st.markdown("Do research with AI-research Assistant")
141
-
142
- # Embed the AI research website using HTML iframe
143
- st.markdown('<iframe src="https://zej97-ai-research-assistant.hf.space" width="100%" height="700"></iframe>', unsafe_allow_html=True)
144
-
145
- elif selected_mode == "StudyAI-7b":
146
- # StudyAI-7b Code
147
- st.title("StudyAI-7b")
148
- st.markdown("Study with StudyAI-7b")
149
-
150
- # Embed the StudyAI-7b website using HTML iframe
151
- st.markdown('<iframe src="https://abhaykoul-helpingai-t2.hf.space" width="100%" height="700"></iframe>', unsafe_allow_html=True)
152
-
153
- elif selected_mode == "Gym Trainer":
154
- # Gym Trainer Code
155
- st.title("Gym Trainer")
156
- st.markdown("Get fitness advice from the Gym Trainer")
157
-
158
- user_input = st.text_area('You:', height=100, help="Ask your fitness questions here")
159
-
160
- if st.button('Ask', key='gym_trainer_button'):
161
- with st.spinner("Thinking..."):
162
- if user_input.lower() in ['quit', 'exit', 'bye']:
163
- st.write("Goodbye! Stay fit and healthy!")
164
- else:
165
- # Create a chat history session state
166
- session_state = st.session_state.get(user_input, [])
167
- session_state.append({"user": user_input})
168
- st.session_state[user_input] = session_state
169
-
170
- # Prepare conversation history
171
- conversation_history = "\n".join(["You: " + item["user"] for item in session_state])
172
-
173
- # Construct the prompt with the Gym Trainer's custom instruction
174
- prompt = f"""{gym_trainer_instruction}
175
- Your conversation history:\n{conversation_history}
176
- Your Gym Trainer's response:"""
177
-
178
- response = palm.generate_text(**defaults, prompt=prompt)
179
- st.write(response.result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/+server.ts DELETED
@@ -1,65 +0,0 @@
1
- import type { RequestHandler } from "./$types";
2
- import { collections } from "$lib/server/database";
3
- import { error, redirect } from "@sveltejs/kit";
4
- import { base } from "$app/paths";
5
- import { z } from "zod";
6
- import type { Message } from "$lib/types/Message";
7
- import { models, validateModel } from "$lib/server/models";
8
- import { authCondition } from "$lib/server/auth";
9
-
10
- export const POST: RequestHandler = async ({ locals, request }) => {
11
- /*const body = await request.text();
12
-
13
- let title = "";
14
- let messages: Message[] = [];
15
-
16
- const values = z
17
- .object({
18
- fromShare: z.string().optional(),
19
- model: validateModel(models),
20
- })
21
- .parse(JSON.parse(body));
22
-
23
- if (values.fromShare) {
24
- const conversation = await collections.sharedConversations.findOne({
25
- _id: values.fromShare,
26
- });
27
-
28
- title = conversation.title;
29
- messages = conversation.messages;
30
- values.model = conversation.model;
31
- }
32
-
33
- const res = await collections.conversations.insertOne({
34
- _id: new ObjectId(),
35
- title:
36
- title ||
37
- "Untitled " + ((await collections.conversations.countDocuments(authCondition(locals))) + 1),
38
- messages,
39
- model: values.model,
40
- createdAt: new Date(),
41
- updatedAt: new Date(),
42
- ...(locals.user ? { userId: locals.user._id } : { sessionId: locals.sessionId }),
43
- ...(values.fromShare ? { meta: { fromShareId: values.fromShare } } : {}),
44
- });
45
-
46
- return new Response(
47
- JSON.stringify({
48
- conversationId: res.insertedId.toString(),
49
- }),
50
- { headers: { "Content-Type": "application/json" } }
51
- );
52
-
53
- */
54
-
55
- return new Response(
56
- JSON.stringify({
57
- conversationId: "",
58
- }),
59
- { headers: { "Content-Type": "application/json" } }
60
- );
61
- };
62
-
63
- export const GET: RequestHandler = async () => {
64
- throw redirect(302, `${base}/`);
65
- };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/g4f/debug.py DELETED
@@ -1 +0,0 @@
1
- logging = False
 
 
spaces/Alpaca233/ai-stable-diffusion-Text-to-Image/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Stabilityai Stable Diffusion Xl Base 1.0
3
- emoji: 📈
4
- colorFrom: green
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: mehedihassan/ai-stable-diffusion-Text-to-Image
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/encoders/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/stylegan_human/README.md DELETED
@@ -1,229 +0,0 @@
1
- # StyleGAN-Human: A Data-Centric Odyssey of Human Generation
2
- <img src="./img/demo_V5_thumbnails-min.png" width="96%" height="96%">
3
-
4
- <!--
5
- **stylegan-human/StyleGAN-Human** is a ✨ _special_ ✨ repository because its `README.md` (this file) appears on your GitHub profile.
6
-
7
- -->
8
-
9
- >
10
- >
11
- > **Abstract:** *Unconditional human image generation is an important task in vision and graphics, which enables various applications in the creative industry. Existing studies in this field mainly focus on "network engineering" such as designing new components and objective functions. This work takes a data-centric perspective and investigates multiple critical aspects in "data engineering", which we believe would complement the current practice. To facilitate a comprehensive study, we collect and annotate a large-scale human image dataset with over 230K samples capturing diverse poses and textures. Equipped with this large dataset, we rigorously investigate three essential factors in data engineering for StyleGAN-based human generation, namely data size, data distribution, and data alignment. Extensive experiments reveal several valuable observations w.r.t. these aspects: 1) Large-scale data, more than 40K images, are needed to train a high-fidelity unconditional human generation model with vanilla StyleGAN. 2) A balanced training set helps improve the generation quality with rare face poses compared to the long-tailed counterpart, whereas simply balancing the clothing texture distribution does not effectively bring an improvement. 3) Human GAN models with body centers for alignment outperform models trained using face centers or pelvis points as alignment anchors. In addition, a model zoo and human editing applications are demonstrated to facilitate future research in the community.* <br>
12
- **Keyword:** Human Image Generation, Data-Centric, StyleGAN
13
-
14
- [Jianglin Fu](mailto:[email protected]), [Shikai Li](mailto:[email protected]), [Yuming Jiang](https://yumingj.github.io/), [Kwan-Yee Lin](https://kwanyeelin.github.io/), [Chen Qian](https://scholar.google.com/citations?user=AerkT0YAAAAJ&hl=zh-CN), [Chen Change Loy](https://www.mmlab-ntu.com/person/ccloy/), [Wayne Wu](https://wywu.github.io/), and [Ziwei Liu](https://liuziwei7.github.io/) <br>
15
- **[[Demo Video]](https://youtu.be/nIrb9hwsdcI)** | **[[Project Page]](https://stylegan-human.github.io/)** | **[[Paper]](https://arxiv.org/pdf/2204.11823.pdf)**
16
-
17
- ## Updates
18
- - [20/07/2022] [SHHQ-1.0](./docs/Dataset.md) dataset with 40K images is released! :sparkles:
19
- - [15/06/2022] Data alignment and real-image inversion scripts are released.
20
- - [26/04/2022] Technical report released!
21
- - [22/04/2022] Technical report will be released before May.
22
- - [21/04/2022] The codebase and project page are created.
23
-
24
- ## Data Download
25
- The first version SHHQ-1.0, with 40K images is released. To download and use the dataset set, please read the instructions in [Dataset.md](./docs/Dataset.md)
26
-
27
- (We are currently facing large incoming applications, and we need to carefully verify all the applicants, please be patient, and we will reply to you as soon as possible.)
28
-
29
- ## Model Zoo
30
-
31
- | Structure | 1024x512 | Metric | Scores | 512x256 | Metric | Scores |
32
- | --------- |:----------:| :----------:| :----------:| :-----: | :-----: | :-----: |
33
- | StyleGAN1 |[stylegan_human_v1_1024.pkl](https://drive.google.com/file/d/1h-R-IV-INGdPEzj4P9ml6JTEvihuNgLX/view?usp=sharing)| fid50k | 3.79 | to be released | - | - |
34
- | StyleGAN2 |[stylegan_human_v2_1024.pkl](https://drive.google.com/file/d/1FlAb1rYa0r_--Zj_ML8e6shmaF28hQb5/view?usp=sharing)| fid50k_full | 1.57 |[stylegan_human_v2_512.pkl](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing) | fid50k_full | 1.97 |
35
- | StyleGAN3 |to be released | - | - | [stylegan_human_v3_512.pkl](https://drive.google.com/file/d/1_274jk_N6WSCkKWeu7hjHycqGvbuOFf5/view?usp=sharing) | fid50k_full | 2.54 |
36
-
37
-
38
-
39
- ## Web Demo
40
-
41
- Integrated into [Huggingface Spaces 🤗](https://huggingface.co/spaces) using [Gradio](https://github.com/gradio-app/gradio). Try out the Web Demo for generation: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/hysts/StyleGAN-Human) and interpolation [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/hysts/StyleGAN-Human-Interpolation)
42
-
43
-
44
-
45
- <a href="https://colab.research.google.com/drive/1sgxoDM55iM07FS54vz9ALg1XckiYA2On"><img src="https://colab.research.google.com/assets/colab-badge.svg" height=22.5></a>
46
-
47
- We prepare a Colab demo to allow you to synthesize images with the provided models, as well as visualize the performance of style-mixing, interpolation, and attributes editing.
48
- The notebook will guide you to install the necessary environment and download pretrained models. The output images can be found in `./StyleGAN-Human/outputs/`.
49
- Hope you enjoy!
50
-
51
- ## Usage
52
-
53
- ### System requirements
54
- * The original code bases are [stylegan (tensorflow)](https://github.com/NVlabs/stylegan), [stylegan2-ada (pytorch)](https://github.com/NVlabs/stylegan2-ada-pytorch), [stylegan3 (pytorch)](https://github.com/NVlabs/stylegan3), released by NVidia
55
-
56
- * We tested in Python 3.8.5 and PyTorch 1.9.1 with CUDA 11.1. (See https://pytorch.org for PyTorch install instructions.)
57
-
58
- ### Installation
59
- To work with this project on your own machine, you need to install the environmnet as follows:
60
-
61
- ```
62
- conda env create -f environment.yml
63
- conda activate stylehuman
64
- # [Optional: tensorflow 1.x is required for StyleGAN1. ]
65
- pip install nvidia-pyindex
66
- pip install nvidia-tensorflow[horovod]
67
- pip install nvidia-tensorboard==1.15
68
- ```
69
- Extra notes:
70
- 1. In case having some conflicts when calling CUDA version, please try to empty the LD_LIBRARY_PATH. For example:
71
- ```
72
- LD_LIBRARY_PATH=; python generate.py --outdir=out/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7
73
- --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
74
- ```
75
-
76
-
77
- 2. We found the following troubleshooting links might be helpful: [1.](https://github.com/NVlabs/stylegan3), [2.](https://github.com/NVlabs/stylegan3/blob/main/docs/troubleshooting.md)
78
-
79
- ### Train
80
- The training scripts are based on the original [stylegan1](https://github.com/NVlabs/stylegan), [stylegan2-ada](https://github.com/NVlabs/stylegan2-ada-pytorch), and [stylegan3](https://github.com/NVlabs/stylegan3) with minor changes. Here we only provide the scripts with modifications for SG2 and SG3. You can replace the old files with the provided scripts to train. (assume SHHQ-1.0 is placed under data/)
81
-
82
- #### Train Stylegan2-ada-pytorch with SHHQ-1.0
83
- ```
84
- python train.py --outdir=training_results/sg2/ --data=data/SHHQ-1.0/ \
85
- --gpus=8 --aug=noaug --mirror=1 --snap=250 --cfg=shhq --square=False
86
- ```
87
- #### Train Stylegan3 with SHHQ-1.0
88
- ```
89
- python train.py --outdir=training_results/sg3/ --cfg=stylegan3-r --gpus=8 --batch=32 --gamma=12.4 \
90
- --mirror=1 --aug=noaug --data=data/SHHQ-1.0/ --square=False --snap=250
91
- ```
92
-
93
- ### Pretrained models
94
- Please put the downloaded pretrained models [from above link](#Model-Zoo) under the folder 'pretrained_models'.
95
-
96
-
97
- ### Generate full-body human images using our pretrained model
98
- ```
99
- # Generate human full-body images without truncation
100
- python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
101
-
102
- # Generate human full-body images with truncation
103
- python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-10 --network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
104
-
105
- # Generate human full-body images using stylegan V1
106
- python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1 --seeds=1,3,5
107
-
108
- # Generate human full-body images using stylegan V3
109
- python generate.py --outdir=outputs/generate/stylegan_human_v3_512 --network=pretrained_models/stylegan_human_v3_512.pkl --version 3 --seeds=1,3,5
110
- ```
111
-
112
-
113
- #### Note: The following demos are generated based on models related to StyleGAN V2 (stylegan_human_v2_512.pkl and stylegan_human_v2_1024.pkl). If you want to see results for V1 or V3, you need to change the loading method of the corresponding models.
114
-
115
-
116
- ### Interpolation
117
- ```
118
- python interpolation.py --network=pretrained_models/stylegan_human_v2_1024.pkl --seeds=85,100 --outdir=outputs/inter_gifs
119
- ```
120
-
121
- ### Style-mixing **image** using stylegan2
122
- ```
123
- python style_mixing.py --network=pretrained_models/stylegan_human_v2_1024.pkl --rows=85,100,75,458,1500 \\
124
- --cols=55,821,1789,293 --styles=0-3 --outdir=outputs/stylemixing
125
- ```
126
-
127
- ### Style-mixing **video** using stylegan2
128
- ```
129
- python stylemixing_video.py --network=pretrained_models/stylegan_human_v2_1024.pkl --row-seed=3859 \\
130
- --col-seeds=3098,31759,3791 --col-styles=8-12 --trunc=0.8 --outdir=outputs/stylemixing_video
131
- ```
132
-
133
- ### Aligned raw images
134
- For alignment, we use [openpose-pytorch](https://github.com/Hzzone/pytorch-openpose) for body-keypoints detection and [PaddlePaddle](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.5/contrib/PP-HumanSeg) for human segmentation.
135
- Before running the alignment script, few models need to be installed:
136
- 1. download [body_pose_model.pth](https://drive.google.com/drive/folders/1JsvI4M4ZTg98fmnCZLFM-3TeovnCRElG?usp=sharing) and place it into openpose/model/.
137
- 2. download and extract [deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax](https://paddleseg.bj.bcebos.com/dygraph/humanseg/export/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.zip) into PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax.
138
- 3. download and extract [deeplabv3p_resnet50_os8_humanseg_512x512_100k](https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/deeplabv3p_resnet50_os8_humanseg_512x512_100k.zip) into PP_HumanSeg/pretrained_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k.
139
- 4. install paddlepaddel: ``` pip install paddleseg ```
140
-
141
- Then you can start alignment:
142
- ```
143
- python alignment.py --image-folder img/test/ --output-folder aligned_image/
144
- ```
145
-
146
- ### Invert real image with [PTI](https://github.com/danielroich/PTI)
147
- Before inversion, please download our PTI weights: [e4e_w+.pt](https://drive.google.com/file/d/1NUfSJqLhsrU7c9PwAtlZ9xtrxhzS_6tu/view?usp=sharing) into /pti/.
148
-
149
- Few parameters you can change:
150
- - /pti/pti_configs/hyperparameters.py:
151
- - first_inv_type = 'w+' -> Use pretrained e4e encoder
152
- - first_inv_type = 'w' -> Use projection and optimization
153
- - /pti/pti_configs/paths_config.py:
154
- - input_data_path: path of real images
155
- - e4e: path of e4e_w+.pt
156
- - stylegan2_ada_shhq: pretrained stylegan2-ada model for SHHQ
157
-
158
- ```
159
- python run_pti.py
160
- ```
161
- Note: we used the test image under 'aligned_image/' (the output of alignment.py), the inverted latent code and fine-tuned generator will be saved in 'outputs/pti/'
162
-
163
-
164
- ### Editing with InterfaceGAN, StyleSpace, and Sefa
165
- ```
166
- python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\
167
- --seeds 61531,61570,61571,61610 --outdir outputs/edit_results
168
- ```
169
-
170
- ### Editing using inverted latent code
171
- ```
172
- python edit.py ---network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\
173
- --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png
174
- ```
175
-
176
- Note:
177
- 1. ''upper_length'' and ''bottom_length'' of ''attr_name'' are available for demo.
178
- 2. Layers to control and editing strength are set in edit/edit_config.py.
179
-
180
-
181
- ### Demo for [InsetGAN](https://arxiv.org/abs/2203.07293)
182
-
183
- We implement a quick demo using the key idea from InsetGAN: combining the face generated by FFHQ with the human-body generated by our pretrained model, optimizing both face and body latent codes to get a coherent full-body image.
184
- Before running the script, you need to download the [FFHQ face model]( https://docs.google.com/uc?export=download&confirm=t&id=125OG7SMkXI-Kf2aqiwLLHyCvSW-gZk3M), or you can use your own face model, as well as [pretrained face landmark](https://docs.google.com/uc?export=download&confirm=&id=1A82DnJBJzt8wI2J8ZrCK5fgHcQ2-tcWM) and [pretrained CNN face detection model for dlib](https://docs.google.com/uc?export=download&confirm=&id=1MduBgju5KFNrQfDLoQXJ_1_h5MnctCIG)
185
- ```
186
- python insetgan.py --body_network=pretrained_models/stylegan_human_v2_1024.pkl --face_network=pretrained_models/ffhq.pkl \\
187
- --body_seed=82 --face_seed=43 --trunc=0.6 --outdir=outputs/insetgan/ --video 1
188
- ```
189
-
190
- ## Results
191
-
192
- ### Editing with inverted real image
193
- (from left to right: real image | inverted image | InterFaceGAN result | StyleSpace result | SeFa result)
194
-
195
- https://user-images.githubusercontent.com/98547009/173773800-bb7fe54a-84d3-4b30-9864-a6b7b311f8ff.mp4
196
-
197
-
198
- ### For more demo, please visit our [**web page**](https://stylegan-human.github.io/) .
199
-
200
-
201
- ## TODO List
202
-
203
- - [ ] Release 1024x512 version of StyleGAN-Human based on StyleGAN3
204
- - [ ] Release 512x256 version of StyleGAN-Human based on StyleGAN1
205
- - [ ] Extension of downstream application (InsetGAN): Add face inversion interface to support fusing user face image and stylegen-human body image
206
- - [x] Add Inversion Script into the provided editing pipeline
207
- - [ ] Release Dataset
208
-
209
-
210
- ## Related Works
211
- * (SIGGRAPH 2022) **Text2Human: Text-Driven Controllable Human Image Generation**, Yuming Jiang et al. [[Paper](https://arxiv.org/pdf/2205.15996.pdf)], [[Code](https://github.com/yumingj/Text2Human)], [[Project Page](https://yumingj.github.io/projects/Text2Human.html)], [[Dataset](https://github.com/yumingj/DeepFashion-MultiModal)]
212
- * (ICCV 2021) **Talk-to-Edit: Fine-Grained Facial Editing via Dialog**, Yuming Jiang et al. [[Paper](https://arxiv.org/abs/2109.04425)], [[Code](https://github.com/yumingj/Talk-to-Edit)], [[Project Page](https://www.mmlab-ntu.com/project/talkedit/)], [[Dataset](https://mmlab.ie.cuhk.edu.hk/projects/CelebA/CelebA_Dialog.html)]
213
- * (Technical Report 2022) **Generalizable Neural Performer: Learning Robust Radiance Fields for Human Novel View Synthesis**, Wei Cheng et al. [[Paper](https://arxiv.org/pdf/2204.11798.pdf)], [[Code](https://github.com/generalizable-neural-performer/gnr)], [[Project Page](https://generalizable-neural-performer.github.io/)], [[Dataset](https://generalizable-neural-performer.github.io/genebody.html)]
214
-
215
- ## Citation
216
-
217
- If you find this work useful for your research, please consider citing our paper:
218
-
219
- ```bibtex
220
- @article{fu2022styleganhuman,
221
- title={StyleGAN-Human: A Data-Centric Odyssey of Human Generation},
222
- author={Fu, Jianglin and Li, Shikai and Jiang, Yuming and Lin, Kwan-Yee and Qian, Chen and Loy, Chen-Change and Wu, Wayne and Liu, Ziwei},
223
- journal = {arXiv preprint},
224
- volume = {arXiv:2204.11823},
225
- year = {2022}
226
- ```
227
-
228
- ## Acknowlegement
229
- Part of the code is borrowed from [stylegan (tensorflow)](https://github.com/NVlabs/stylegan), [stylegan2-ada (pytorch)](https://github.com/NVlabs/stylegan2-ada-pytorch), [stylegan3 (pytorch)](https://github.com/NVlabs/stylegan3).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/edit/edit_helper.py DELETED
@@ -1,237 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- from legacy import save_obj, load_pkl
4
- import torch
5
- from torch.nn import functional as F
6
- import pandas as pd
7
- from .edit_config import attr_dict
8
- import os
9
-
10
-
11
- def conv_warper(layer, input, style, noise):
12
- # the conv should change
13
- conv = layer.conv
14
- batch, in_channel, height, width = input.shape
15
-
16
- style = style.view(batch, 1, in_channel, 1, 1)
17
- weight = conv.scale * conv.weight * style
18
-
19
- if conv.demodulate:
20
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
21
- weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
22
-
23
- weight = weight.view(
24
- batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
25
- )
26
-
27
- if conv.upsample:
28
- input = input.view(1, batch * in_channel, height, width)
29
- weight = weight.view(
30
- batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
31
- )
32
- weight = weight.transpose(1, 2).reshape(
33
- batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
34
- )
35
- out = F.conv_transpose2d(
36
- input, weight, padding=0, stride=2, groups=batch)
37
- _, _, height, width = out.shape
38
- out = out.view(batch, conv.out_channel, height, width)
39
- out = conv.blur(out)
40
-
41
- elif conv.downsample:
42
- input = conv.blur(input)
43
- _, _, height, width = input.shape
44
- input = input.view(1, batch * in_channel, height, width)
45
- out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
46
- _, _, height, width = out.shape
47
- out = out.view(batch, conv.out_channel, height, width)
48
-
49
- else:
50
- input = input.view(1, batch * in_channel, height, width)
51
- out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
52
- _, _, height, width = out.shape
53
- out = out.view(batch, conv.out_channel, height, width)
54
-
55
- out = layer.noise(out, noise=noise)
56
- out = layer.activate(out)
57
-
58
- return out
59
-
60
-
61
- def decoder(G, style_space, latent, noise):
62
- # an decoder warper for G
63
- out = G.input(latent)
64
- out = conv_warper(G.conv1, out, style_space[0], noise[0])
65
- skip = G.to_rgb1(out, latent[:, 1])
66
-
67
- i = 1
68
- for conv1, conv2, noise1, noise2, to_rgb in zip(
69
- G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
70
- ):
71
- out = conv_warper(conv1, out, style_space[i], noise=noise1)
72
- out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
73
- skip = to_rgb(out, latent[:, i + 2], skip)
74
- i += 2
75
- image = skip
76
-
77
- return image
78
-
79
-
80
- def encoder_ifg(G, noise, attr_name, truncation=1, truncation_latent=None,
81
- latent_dir='latent_direction/ss/',
82
- step=0, total=0, real=False):
83
- if not real:
84
- styles = [noise]
85
- styles = [G.style(s) for s in styles]
86
- style_space = []
87
-
88
- if truncation < 1:
89
- if not real:
90
- style_t = []
91
- for style in styles:
92
- style_t.append(truncation_latent + truncation *
93
- (style - truncation_latent))
94
- styles = style_t
95
- else: # styles are latent (tensor: 1,18,512), for real PTI output
96
- truncation_latent = truncation_latent.repeat(
97
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
98
- styles = torch.add(truncation_latent, torch.mul(
99
- torch.sub(noise, truncation_latent), truncation))
100
-
101
- noise = [getattr(G.noises, 'noise_{}'.format(i))
102
- for i in range(G.num_layers)]
103
- if not real:
104
- inject_index = G.n_latent
105
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
106
- else:
107
- latent = styles
108
-
109
- style_space.append(G.conv1.conv.modulation(latent[:, 0]))
110
- i = 1
111
- for conv1, conv2, noise1, noise2, to_rgb in zip(
112
- G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
113
- ):
114
- style_space.append(conv1.conv.modulation(latent[:, i]))
115
- style_space.append(conv2.conv.modulation(latent[:, i+1]))
116
- i += 2
117
-
118
- # get layer, strength by dict
119
- strength = attr_dict['interface_gan'][attr_name][0]
120
-
121
- if step != 0 and total != 0:
122
- strength = step / total * strength
123
- for i in range(15):
124
- style_vect = load_pkl(os.path.join(
125
- latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, i)))
126
- style_vect = torch.from_numpy(style_vect).to(latent.device).float()
127
- style_space[i] += style_vect * strength
128
-
129
- return style_space, latent, noise
130
-
131
-
132
- def encoder_ss(G, noise, attr_name, truncation=1, truncation_latent=None,
133
- statics_dir="latent_direction/ss_statics",
134
- latent_dir="latent_direction/ss/",
135
- step=0, total=0, real=False):
136
- if not real:
137
- styles = [noise]
138
- styles = [G.style(s) for s in styles]
139
- style_space = []
140
-
141
- if truncation < 1:
142
- if not real:
143
- style_t = []
144
- for style in styles:
145
- style_t.append(
146
- truncation_latent + truncation *
147
- (style - truncation_latent)
148
- )
149
- styles = style_t
150
- else: # styles are latent (tensor: 1,18,512), for real PTI output
151
- truncation_latent = truncation_latent.repeat(
152
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
153
- styles = torch.add(truncation_latent, torch.mul(
154
- torch.sub(noise, truncation_latent), truncation))
155
-
156
- noise = [getattr(G.noises, 'noise_{}'.format(i))
157
- for i in range(G.num_layers)]
158
-
159
- if not real:
160
- inject_index = G.n_latent
161
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
162
- else:
163
- latent = styles
164
-
165
- style_space.append(G.conv1.conv.modulation(latent[:, 0]))
166
- i = 1
167
- for conv1, conv2, noise1, noise2, to_rgb in zip(
168
- G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
169
- ):
170
- style_space.append(conv1.conv.modulation(latent[:, i]))
171
- style_space.append(conv2.conv.modulation(latent[:, i+1]))
172
- i += 2
173
- # get threshold, layer, strength by dict
174
- layer, strength, threshold = attr_dict['stylespace'][attr_name]
175
-
176
- statis_dir = os.path.join(
177
- statics_dir, "{}_statis/{}".format(attr_name, layer))
178
- statis_csv_path = os.path.join(statis_dir, "statis.csv")
179
- statis_df = pd.read_csv(statis_csv_path)
180
- statis_df = statis_df.sort_values(by='channel', ascending=True)
181
- ch_mask = statis_df['strength'].values
182
- ch_mask = torch.from_numpy(ch_mask).to(latent.device).float()
183
- ch_mask = (ch_mask.abs() > threshold).float()
184
- style_vect = load_pkl(os.path.join(
185
- latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, layer)))
186
- style_vect = torch.from_numpy(style_vect).to(latent.device).float()
187
-
188
- style_vect = style_vect * ch_mask
189
-
190
- if step != 0 and total != 0:
191
- strength = step / total * strength
192
-
193
- style_space[layer] += style_vect * strength
194
-
195
- return style_space, latent, noise
196
-
197
-
198
- def encoder_sefa(G, noise, attr_name, truncation=1, truncation_latent=None,
199
- latent_dir='latent_direction/sefa/',
200
- step=0, total=0, real=False):
201
- if not real:
202
- styles = [noise]
203
- styles = [G.style(s) for s in styles]
204
-
205
- if truncation < 1:
206
- if not real:
207
- style_t = []
208
- for style in styles:
209
- style_t.append(
210
- truncation_latent + truncation *
211
- (style - truncation_latent)
212
- )
213
- styles = style_t
214
- else:
215
- truncation_latent = truncation_latent.repeat(
216
- 18, 1).unsqueeze(0) # (1,512) --> (1,18,512)
217
- styles = torch.add(truncation_latent, torch.mul(
218
- torch.sub(noise, truncation_latent), truncation))
219
-
220
- noise = [getattr(G.noises, 'noise_{}'.format(i))
221
- for i in range(G.num_layers)]
222
- if not real:
223
- inject_index = G.n_latent
224
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
225
- else:
226
- latent = styles
227
-
228
- layer, strength = attr_dict['sefa'][attr_name]
229
-
230
- sefa_vect = torch.load(os.path.join(
231
- latent_dir, '{}.pt'.format(attr_name))).to(latent.device).float()
232
- if step != 0 and total != 0:
233
- strength = step / total * strength
234
- for l in layer:
235
- latent[:, l, :] += (sefa_vect * strength * 2)
236
-
237
- return latent, noise
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py DELETED
@@ -1,1368 +0,0 @@
1
- import inspect
2
- import warnings
3
- from dataclasses import dataclass
4
- from typing import Callable, List, Optional, Union
5
-
6
- import numpy as np
7
- import PIL
8
- import torch
9
- from transformers import (
10
- CLIPImageProcessor,
11
- CLIPTextModel,
12
- CLIPTokenizer,
13
- CLIPVisionModelWithProjection,
14
- GPT2Tokenizer,
15
- )
16
-
17
- from ...models import AutoencoderKL
18
- from ...schedulers import KarrasDiffusionSchedulers
19
- from ...utils import (
20
- PIL_INTERPOLATION,
21
- deprecate,
22
- is_accelerate_available,
23
- is_accelerate_version,
24
- logging,
25
- randn_tensor,
26
- )
27
- from ...utils.outputs import BaseOutput
28
- from ..pipeline_utils import DiffusionPipeline
29
- from .modeling_text_decoder import UniDiffuserTextDecoder
30
- from .modeling_uvit import UniDiffuserModel
31
-
32
-
33
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
34
-
35
-
36
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
37
- def preprocess(image):
38
- warnings.warn(
39
- "The preprocess method is deprecated and will be removed in a future version. Please"
40
- " use VaeImageProcessor.preprocess instead",
41
- FutureWarning,
42
- )
43
- if isinstance(image, torch.Tensor):
44
- return image
45
- elif isinstance(image, PIL.Image.Image):
46
- image = [image]
47
-
48
- if isinstance(image[0], PIL.Image.Image):
49
- w, h = image[0].size
50
- w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
51
-
52
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
53
- image = np.concatenate(image, axis=0)
54
- image = np.array(image).astype(np.float32) / 255.0
55
- image = image.transpose(0, 3, 1, 2)
56
- image = 2.0 * image - 1.0
57
- image = torch.from_numpy(image)
58
- elif isinstance(image[0], torch.Tensor):
59
- image = torch.cat(image, dim=0)
60
- return image
61
-
62
-
63
- # New BaseOutput child class for joint image-text output
64
- @dataclass
65
- class ImageTextPipelineOutput(BaseOutput):
66
- """
67
- Output class for joint image-text pipelines.
68
-
69
- Args:
70
- images (`List[PIL.Image.Image]` or `np.ndarray`)
71
- List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
72
- num_channels)`.
73
- text (`List[str]` or `List[List[str]]`)
74
- List of generated text strings of length `batch_size` or a list of list of strings whose outer list has
75
- length `batch_size`.
76
- """
77
-
78
- images: Optional[Union[List[PIL.Image.Image], np.ndarray]]
79
- text: Optional[Union[List[str], List[List[str]]]]
80
-
81
-
82
- class UniDiffuserPipeline(DiffusionPipeline):
83
- r"""
84
- Pipeline for a bimodal image-text model which supports unconditional text and image generation, text-conditioned
85
- image generation, image-conditioned text generation, and joint image-text generation.
86
-
87
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
88
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
89
-
90
- Args:
91
- vae ([`AutoencoderKL`]):
92
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. This
93
- is part of the UniDiffuser image representation along with the CLIP vision encoding.
94
- text_encoder ([`CLIPTextModel`]):
95
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
96
- image_encoder ([`CLIPVisionModel`]):
97
- A [`~transformers.CLIPVisionModel`] to encode images as part of its image representation along with the VAE
98
- latent representation.
99
- image_processor ([`CLIPImageProcessor`]):
100
- [`~transformers.CLIPImageProcessor`] to preprocess an image before CLIP encoding it with `image_encoder`.
101
- clip_tokenizer ([`CLIPTokenizer`]):
102
- A [`~transformers.CLIPTokenizer`] to tokenize the prompt before encoding it with `text_encoder`.
103
- text_decoder ([`UniDiffuserTextDecoder`]):
104
- Frozen text decoder. This is a GPT-style model which is used to generate text from the UniDiffuser
105
- embedding.
106
- text_tokenizer ([`GPT2Tokenizer`]):
107
- A [`~transformers.GPT2Tokenizer`] to decode text for text generation; used along with the `text_decoder`.
108
- unet ([`UniDiffuserModel`]):
109
- A [U-ViT](https://github.com/baofff/U-ViT) model with UNNet-style skip connections between transformer
110
- layers to denoise the encoded image latents.
111
- scheduler ([`SchedulerMixin`]):
112
- A scheduler to be used in combination with `unet` to denoise the encoded image and/or text latents. The
113
- original UniDiffuser paper uses the [`DPMSolverMultistepScheduler`] scheduler.
114
- """
115
-
116
- def __init__(
117
- self,
118
- vae: AutoencoderKL,
119
- text_encoder: CLIPTextModel,
120
- image_encoder: CLIPVisionModelWithProjection,
121
- image_processor: CLIPImageProcessor,
122
- clip_tokenizer: CLIPTokenizer,
123
- text_decoder: UniDiffuserTextDecoder,
124
- text_tokenizer: GPT2Tokenizer,
125
- unet: UniDiffuserModel,
126
- scheduler: KarrasDiffusionSchedulers,
127
- ):
128
- super().__init__()
129
-
130
- if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim:
131
- raise ValueError(
132
- f"The text encoder hidden size and text decoder prefix inner dim must be the same, but"
133
- f" `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}"
134
- )
135
-
136
- self.register_modules(
137
- vae=vae,
138
- text_encoder=text_encoder,
139
- image_encoder=image_encoder,
140
- image_processor=image_processor,
141
- clip_tokenizer=clip_tokenizer,
142
- text_decoder=text_decoder,
143
- text_tokenizer=text_tokenizer,
144
- unet=unet,
145
- scheduler=scheduler,
146
- )
147
-
148
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
149
-
150
- self.num_channels_latents = vae.config.latent_channels
151
- self.text_encoder_seq_len = text_encoder.config.max_position_embeddings
152
- self.text_encoder_hidden_size = text_encoder.config.hidden_size
153
- self.image_encoder_projection_dim = image_encoder.config.projection_dim
154
- self.unet_resolution = unet.config.sample_size
155
-
156
- self.text_intermediate_dim = self.text_encoder_hidden_size
157
- if self.text_decoder.prefix_hidden_dim is not None:
158
- self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim
159
-
160
- self.mode = None
161
-
162
- # TODO: handle safety checking?
163
- self.safety_checker = None
164
-
165
- # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
166
- # Add self.image_encoder, self.text_decoder to cpu_offloaded_models list
167
- def enable_model_cpu_offload(self, gpu_id=0):
168
- r"""
169
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
170
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
171
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
172
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
173
- """
174
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
175
- from accelerate import cpu_offload_with_hook
176
- else:
177
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
178
-
179
- device = torch.device(f"cuda:{gpu_id}")
180
-
181
- if self.device.type != "cpu":
182
- self.to("cpu", silence_dtype_warnings=True)
183
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
184
-
185
- hook = None
186
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae, self.image_encoder, self.text_decoder]:
187
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
188
-
189
- if self.safety_checker is not None:
190
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
191
-
192
- # We'll offload the last model manually.
193
- self.final_offload_hook = hook
194
-
195
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
196
- def prepare_extra_step_kwargs(self, generator, eta):
197
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
198
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
199
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
200
- # and should be between [0, 1]
201
-
202
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
203
- extra_step_kwargs = {}
204
- if accepts_eta:
205
- extra_step_kwargs["eta"] = eta
206
-
207
- # check if the scheduler accepts generator
208
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
209
- if accepts_generator:
210
- extra_step_kwargs["generator"] = generator
211
- return extra_step_kwargs
212
-
213
- def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents):
214
- r"""
215
- Infer the generation task ('mode') from the inputs to `__call__`. If the mode has been manually set, the set
216
- mode will be used.
217
- """
218
- prompt_available = (prompt is not None) or (prompt_embeds is not None)
219
- image_available = image is not None
220
- input_available = prompt_available or image_available
221
-
222
- prompt_latents_available = prompt_latents is not None
223
- vae_latents_available = vae_latents is not None
224
- clip_latents_available = clip_latents is not None
225
- full_latents_available = latents is not None
226
- image_latents_available = vae_latents_available and clip_latents_available
227
- all_indv_latents_available = prompt_latents_available and image_latents_available
228
-
229
- if self.mode is not None:
230
- # Preferentially use the mode set by the user
231
- mode = self.mode
232
- elif prompt_available:
233
- mode = "text2img"
234
- elif image_available:
235
- mode = "img2text"
236
- else:
237
- # Neither prompt nor image supplied, infer based on availability of latents
238
- if full_latents_available or all_indv_latents_available:
239
- mode = "joint"
240
- elif prompt_latents_available:
241
- mode = "text"
242
- elif image_latents_available:
243
- mode = "img"
244
- else:
245
- # No inputs or latents available
246
- mode = "joint"
247
-
248
- # Give warnings for ambiguous cases
249
- if self.mode is None and prompt_available and image_available:
250
- logger.warning(
251
- f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually,"
252
- f" defaulting to mode '{mode}'."
253
- )
254
-
255
- if self.mode is None and not input_available:
256
- if vae_latents_available != clip_latents_available:
257
- # Exactly one of vae_latents and clip_latents is supplied
258
- logger.warning(
259
- f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none"
260
- f" are expected to be supplied. Defaulting to mode '{mode}'."
261
- )
262
- elif not prompt_latents_available and not vae_latents_available and not clip_latents_available:
263
- # No inputs or latents supplied
264
- logger.warning(
265
- f"No inputs or latents have been supplied, and mode has not been manually set,"
266
- f" defaulting to mode '{mode}'."
267
- )
268
-
269
- return mode
270
-
271
- # Functions to manually set the mode
272
- def set_text_mode(self):
273
- r"""Manually set the generation mode to unconditional ("marginal") text generation."""
274
- self.mode = "text"
275
-
276
- def set_image_mode(self):
277
- r"""Manually set the generation mode to unconditional ("marginal") image generation."""
278
- self.mode = "img"
279
-
280
- def set_text_to_image_mode(self):
281
- r"""Manually set the generation mode to text-conditioned image generation."""
282
- self.mode = "text2img"
283
-
284
- def set_image_to_text_mode(self):
285
- r"""Manually set the generation mode to image-conditioned text generation."""
286
- self.mode = "img2text"
287
-
288
- def set_joint_mode(self):
289
- r"""Manually set the generation mode to unconditional joint image-text generation."""
290
- self.mode = "joint"
291
-
292
- def reset_mode(self):
293
- r"""Removes a manually set mode; after calling this, the pipeline will infer the mode from inputs."""
294
- self.mode = None
295
-
296
- def _infer_batch_size(
297
- self,
298
- mode,
299
- prompt,
300
- prompt_embeds,
301
- image,
302
- num_images_per_prompt,
303
- num_prompts_per_image,
304
- latents,
305
- prompt_latents,
306
- vae_latents,
307
- clip_latents,
308
- ):
309
- r"""Infers the batch size and multiplier depending on mode and supplied arguments to `__call__`."""
310
- if num_images_per_prompt is None:
311
- num_images_per_prompt = 1
312
- if num_prompts_per_image is None:
313
- num_prompts_per_image = 1
314
-
315
- assert num_images_per_prompt > 0, "num_images_per_prompt must be a positive integer"
316
- assert num_prompts_per_image > 0, "num_prompts_per_image must be a positive integer"
317
-
318
- if mode in ["text2img"]:
319
- if prompt is not None and isinstance(prompt, str):
320
- batch_size = 1
321
- elif prompt is not None and isinstance(prompt, list):
322
- batch_size = len(prompt)
323
- else:
324
- # Either prompt or prompt_embeds must be present for text2img.
325
- batch_size = prompt_embeds.shape[0]
326
- multiplier = num_images_per_prompt
327
- elif mode in ["img2text"]:
328
- if isinstance(image, PIL.Image.Image):
329
- batch_size = 1
330
- else:
331
- # Image must be available and type either PIL.Image.Image or torch.FloatTensor.
332
- # Not currently supporting something like image_embeds.
333
- batch_size = image.shape[0]
334
- multiplier = num_prompts_per_image
335
- elif mode in ["img"]:
336
- if vae_latents is not None:
337
- batch_size = vae_latents.shape[0]
338
- elif clip_latents is not None:
339
- batch_size = clip_latents.shape[0]
340
- else:
341
- batch_size = 1
342
- multiplier = num_images_per_prompt
343
- elif mode in ["text"]:
344
- if prompt_latents is not None:
345
- batch_size = prompt_latents.shape[0]
346
- else:
347
- batch_size = 1
348
- multiplier = num_prompts_per_image
349
- elif mode in ["joint"]:
350
- if latents is not None:
351
- batch_size = latents.shape[0]
352
- elif prompt_latents is not None:
353
- batch_size = prompt_latents.shape[0]
354
- elif vae_latents is not None:
355
- batch_size = vae_latents.shape[0]
356
- elif clip_latents is not None:
357
- batch_size = clip_latents.shape[0]
358
- else:
359
- batch_size = 1
360
-
361
- if num_images_per_prompt == num_prompts_per_image:
362
- multiplier = num_images_per_prompt
363
- else:
364
- multiplier = min(num_images_per_prompt, num_prompts_per_image)
365
- logger.warning(
366
- f"You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and"
367
- f" num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to"
368
- f" `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}."
369
- )
370
- return batch_size, multiplier
371
-
372
- # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
373
- # self.tokenizer => self.clip_tokenizer
374
- def _encode_prompt(
375
- self,
376
- prompt,
377
- device,
378
- num_images_per_prompt,
379
- do_classifier_free_guidance,
380
- negative_prompt=None,
381
- prompt_embeds: Optional[torch.FloatTensor] = None,
382
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
383
- ):
384
- r"""
385
- Encodes the prompt into text encoder hidden states.
386
-
387
- Args:
388
- prompt (`str` or `List[str]`, *optional*):
389
- prompt to be encoded
390
- device: (`torch.device`):
391
- torch device
392
- num_images_per_prompt (`int`):
393
- number of images that should be generated per prompt
394
- do_classifier_free_guidance (`bool`):
395
- whether to use classifier free guidance or not
396
- negative_prompt (`str` or `List[str]`, *optional*):
397
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
398
- `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
399
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
400
- prompt_embeds (`torch.FloatTensor`, *optional*):
401
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
402
- provided, text embeddings will be generated from `prompt` input argument.
403
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
404
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
405
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
406
- argument.
407
- """
408
- if prompt is not None and isinstance(prompt, str):
409
- batch_size = 1
410
- elif prompt is not None and isinstance(prompt, list):
411
- batch_size = len(prompt)
412
- else:
413
- batch_size = prompt_embeds.shape[0]
414
-
415
- if prompt_embeds is None:
416
- text_inputs = self.clip_tokenizer(
417
- prompt,
418
- padding="max_length",
419
- max_length=self.clip_tokenizer.model_max_length,
420
- truncation=True,
421
- return_tensors="pt",
422
- )
423
- text_input_ids = text_inputs.input_ids
424
- untruncated_ids = self.clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
425
-
426
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
427
- text_input_ids, untruncated_ids
428
- ):
429
- removed_text = self.clip_tokenizer.batch_decode(
430
- untruncated_ids[:, self.clip_tokenizer.model_max_length - 1 : -1]
431
- )
432
- logger.warning(
433
- "The following part of your input was truncated because CLIP can only handle sequences up to"
434
- f" {self.clip_tokenizer.model_max_length} tokens: {removed_text}"
435
- )
436
-
437
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
438
- attention_mask = text_inputs.attention_mask.to(device)
439
- else:
440
- attention_mask = None
441
-
442
- prompt_embeds = self.text_encoder(
443
- text_input_ids.to(device),
444
- attention_mask=attention_mask,
445
- )
446
- prompt_embeds = prompt_embeds[0]
447
-
448
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
449
-
450
- bs_embed, seq_len, _ = prompt_embeds.shape
451
- # duplicate text embeddings for each generation per prompt, using mps friendly method
452
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
453
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
454
-
455
- # get unconditional embeddings for classifier free guidance
456
- if do_classifier_free_guidance and negative_prompt_embeds is None:
457
- uncond_tokens: List[str]
458
- if negative_prompt is None:
459
- uncond_tokens = [""] * batch_size
460
- elif type(prompt) is not type(negative_prompt):
461
- raise TypeError(
462
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
463
- f" {type(prompt)}."
464
- )
465
- elif isinstance(negative_prompt, str):
466
- uncond_tokens = [negative_prompt]
467
- elif batch_size != len(negative_prompt):
468
- raise ValueError(
469
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
470
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
471
- " the batch size of `prompt`."
472
- )
473
- else:
474
- uncond_tokens = negative_prompt
475
-
476
- max_length = prompt_embeds.shape[1]
477
- uncond_input = self.clip_tokenizer(
478
- uncond_tokens,
479
- padding="max_length",
480
- max_length=max_length,
481
- truncation=True,
482
- return_tensors="pt",
483
- )
484
-
485
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
486
- attention_mask = uncond_input.attention_mask.to(device)
487
- else:
488
- attention_mask = None
489
-
490
- negative_prompt_embeds = self.text_encoder(
491
- uncond_input.input_ids.to(device),
492
- attention_mask=attention_mask,
493
- )
494
- negative_prompt_embeds = negative_prompt_embeds[0]
495
-
496
- if do_classifier_free_guidance:
497
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
498
- seq_len = negative_prompt_embeds.shape[1]
499
-
500
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
501
-
502
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
503
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
504
-
505
- # For classifier free guidance, we need to do two forward passes.
506
- # Here we concatenate the unconditional and text embeddings into a single batch
507
- # to avoid doing two forward passes
508
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
509
-
510
- return prompt_embeds
511
-
512
- # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.prepare_image_latents
513
- # Add num_prompts_per_image argument, sample from autoencoder moment distribution
514
- def encode_image_vae_latents(
515
- self,
516
- image,
517
- batch_size,
518
- num_prompts_per_image,
519
- dtype,
520
- device,
521
- do_classifier_free_guidance,
522
- generator=None,
523
- ):
524
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
525
- raise ValueError(
526
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
527
- )
528
-
529
- image = image.to(device=device, dtype=dtype)
530
-
531
- batch_size = batch_size * num_prompts_per_image
532
- if isinstance(generator, list) and len(generator) != batch_size:
533
- raise ValueError(
534
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
535
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
536
- )
537
-
538
- if isinstance(generator, list):
539
- image_latents = [
540
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
541
- * self.vae.config.scaling_factor
542
- for i in range(batch_size)
543
- ]
544
- image_latents = torch.cat(image_latents, dim=0)
545
- else:
546
- image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
547
- # Scale image_latents by the VAE's scaling factor
548
- image_latents = image_latents * self.vae.config.scaling_factor
549
-
550
- if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
551
- # expand image_latents for batch_size
552
- deprecation_message = (
553
- f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial"
554
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
555
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
556
- " your script to pass as many initial images as text prompts to suppress this warning."
557
- )
558
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
559
- additional_image_per_prompt = batch_size // image_latents.shape[0]
560
- image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
561
- elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
562
- raise ValueError(
563
- f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
564
- )
565
- else:
566
- image_latents = torch.cat([image_latents], dim=0)
567
-
568
- if do_classifier_free_guidance:
569
- uncond_image_latents = torch.zeros_like(image_latents)
570
- image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0)
571
-
572
- return image_latents
573
-
574
- def encode_image_clip_latents(
575
- self,
576
- image,
577
- batch_size,
578
- num_prompts_per_image,
579
- dtype,
580
- device,
581
- generator=None,
582
- ):
583
- # Map image to CLIP embedding.
584
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
585
- raise ValueError(
586
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
587
- )
588
-
589
- preprocessed_image = self.image_processor.preprocess(
590
- image,
591
- return_tensors="pt",
592
- )
593
- preprocessed_image = preprocessed_image.to(device=device, dtype=dtype)
594
-
595
- batch_size = batch_size * num_prompts_per_image
596
- if isinstance(generator, list):
597
- image_latents = [
598
- self.image_encoder(**preprocessed_image[i : i + 1]).image_embeds for i in range(batch_size)
599
- ]
600
- image_latents = torch.cat(image_latents, dim=0)
601
- else:
602
- image_latents = self.image_encoder(**preprocessed_image).image_embeds
603
-
604
- if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
605
- # expand image_latents for batch_size
606
- deprecation_message = (
607
- f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial"
608
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
609
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
610
- " your script to pass as many initial images as text prompts to suppress this warning."
611
- )
612
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
613
- additional_image_per_prompt = batch_size // image_latents.shape[0]
614
- image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
615
- elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
616
- raise ValueError(
617
- f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
618
- )
619
- else:
620
- image_latents = torch.cat([image_latents], dim=0)
621
-
622
- if isinstance(generator, list) and len(generator) != batch_size:
623
- raise ValueError(
624
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
625
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
626
- )
627
-
628
- return image_latents
629
-
630
- # Note that the CLIP latents are not decoded for image generation.
631
- # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
632
- # Rename: decode_latents -> decode_image_latents
633
- def decode_image_latents(self, latents):
634
- latents = 1 / self.vae.config.scaling_factor * latents
635
- image = self.vae.decode(latents, return_dict=False)[0]
636
- image = (image / 2 + 0.5).clamp(0, 1)
637
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
638
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
639
- return image
640
-
641
- def prepare_text_latents(
642
- self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None
643
- ):
644
- # Prepare latents for the CLIP embedded prompt.
645
- shape = (batch_size * num_images_per_prompt, seq_len, hidden_size)
646
- if isinstance(generator, list) and len(generator) != batch_size:
647
- raise ValueError(
648
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
649
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
650
- )
651
-
652
- if latents is None:
653
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
654
- else:
655
- # latents is assumed to have shace (B, L, D)
656
- latents = latents.repeat(num_images_per_prompt, 1, 1)
657
- latents = latents.to(device=device, dtype=dtype)
658
-
659
- # scale the initial noise by the standard deviation required by the scheduler
660
- latents = latents * self.scheduler.init_noise_sigma
661
- return latents
662
-
663
- # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
664
- # Rename prepare_latents -> prepare_image_vae_latents and add num_prompts_per_image argument.
665
- def prepare_image_vae_latents(
666
- self,
667
- batch_size,
668
- num_prompts_per_image,
669
- num_channels_latents,
670
- height,
671
- width,
672
- dtype,
673
- device,
674
- generator,
675
- latents=None,
676
- ):
677
- shape = (
678
- batch_size * num_prompts_per_image,
679
- num_channels_latents,
680
- height // self.vae_scale_factor,
681
- width // self.vae_scale_factor,
682
- )
683
- if isinstance(generator, list) and len(generator) != batch_size:
684
- raise ValueError(
685
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
686
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
687
- )
688
-
689
- if latents is None:
690
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
691
- else:
692
- # latents is assumed to have shape (B, C, H, W)
693
- latents = latents.repeat(num_prompts_per_image, 1, 1, 1)
694
- latents = latents.to(device=device, dtype=dtype)
695
-
696
- # scale the initial noise by the standard deviation required by the scheduler
697
- latents = latents * self.scheduler.init_noise_sigma
698
- return latents
699
-
700
- def prepare_image_clip_latents(
701
- self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None
702
- ):
703
- # Prepare latents for the CLIP embedded image.
704
- shape = (batch_size * num_prompts_per_image, 1, clip_img_dim)
705
- if isinstance(generator, list) and len(generator) != batch_size:
706
- raise ValueError(
707
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
708
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
709
- )
710
-
711
- if latents is None:
712
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
713
- else:
714
- # latents is assumed to have shape (B, L, D)
715
- latents = latents.repeat(num_prompts_per_image, 1, 1)
716
- latents = latents.to(device=device, dtype=dtype)
717
-
718
- # scale the initial noise by the standard deviation required by the scheduler
719
- latents = latents * self.scheduler.init_noise_sigma
720
- return latents
721
-
722
- def _split(self, x, height, width):
723
- r"""
724
- Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim) into two tensors of shape (B, C, H, W)
725
- and (B, 1, clip_img_dim)
726
- """
727
- batch_size = x.shape[0]
728
- latent_height = height // self.vae_scale_factor
729
- latent_width = width // self.vae_scale_factor
730
- img_vae_dim = self.num_channels_latents * latent_height * latent_width
731
-
732
- img_vae, img_clip = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1)
733
-
734
- img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width))
735
- img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim))
736
- return img_vae, img_clip
737
-
738
- def _combine(self, img_vae, img_clip):
739
- r"""
740
- Combines a latent iamge img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1,
741
- clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim).
742
- """
743
- img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1))
744
- img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1))
745
- return torch.concat([img_vae, img_clip], dim=-1)
746
-
747
- def _split_joint(self, x, height, width):
748
- r"""
749
- Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim + text_seq_len * text_dim] into (img_vae,
750
- img_clip, text) where img_vae is of shape (B, C, H, W), img_clip is of shape (B, 1, clip_img_dim), and text is
751
- of shape (B, text_seq_len, text_dim).
752
- """
753
- batch_size = x.shape[0]
754
- latent_height = height // self.vae_scale_factor
755
- latent_width = width // self.vae_scale_factor
756
- img_vae_dim = self.num_channels_latents * latent_height * latent_width
757
- text_dim = self.text_encoder_seq_len * self.text_intermediate_dim
758
-
759
- img_vae, img_clip, text = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1)
760
-
761
- img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width))
762
- img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim))
763
- text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim))
764
- return img_vae, img_clip, text
765
-
766
- def _combine_joint(self, img_vae, img_clip, text):
767
- r"""
768
- Combines a latent image img_vae of shape (B, C, H, W), a CLIP-embedded image img_clip of shape (B, L_img,
769
- clip_img_dim), and a text embedding text of shape (B, L_text, text_dim) into a single embedding x of shape (B,
770
- C * H * W + L_img * clip_img_dim + L_text * text_dim).
771
- """
772
- img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1))
773
- img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1))
774
- text = torch.reshape(text, (text.shape[0], -1))
775
- return torch.concat([img_vae, img_clip, text], dim=-1)
776
-
777
- def _get_noise_pred(
778
- self,
779
- mode,
780
- latents,
781
- t,
782
- prompt_embeds,
783
- img_vae,
784
- img_clip,
785
- max_timestep,
786
- data_type,
787
- guidance_scale,
788
- generator,
789
- device,
790
- height,
791
- width,
792
- ):
793
- r"""
794
- Gets the noise prediction using the `unet` and performs classifier-free guidance, if necessary.
795
- """
796
- if mode == "joint":
797
- # Joint text-image generation
798
- img_vae_latents, img_clip_latents, text_latents = self._split_joint(latents, height, width)
799
-
800
- img_vae_out, img_clip_out, text_out = self.unet(
801
- img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type
802
- )
803
-
804
- x_out = self._combine_joint(img_vae_out, img_clip_out, text_out)
805
-
806
- if guidance_scale <= 1.0:
807
- return x_out
808
-
809
- # Classifier-free guidance
810
- img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype)
811
- img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype)
812
- text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
813
-
814
- _, _, text_out_uncond = self.unet(
815
- img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type
816
- )
817
-
818
- img_vae_out_uncond, img_clip_out_uncond, _ = self.unet(
819
- img_vae_latents,
820
- img_clip_latents,
821
- text_T,
822
- timestep_img=t,
823
- timestep_text=max_timestep,
824
- data_type=data_type,
825
- )
826
-
827
- x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond)
828
-
829
- return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond
830
- elif mode == "text2img":
831
- # Text-conditioned image generation
832
- img_vae_latents, img_clip_latents = self._split(latents, height, width)
833
-
834
- img_vae_out, img_clip_out, text_out = self.unet(
835
- img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type
836
- )
837
-
838
- img_out = self._combine(img_vae_out, img_clip_out)
839
-
840
- if guidance_scale <= 1.0:
841
- return img_out
842
-
843
- # Classifier-free guidance
844
- text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
845
-
846
- img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet(
847
- img_vae_latents,
848
- img_clip_latents,
849
- text_T,
850
- timestep_img=t,
851
- timestep_text=max_timestep,
852
- data_type=data_type,
853
- )
854
-
855
- img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond)
856
-
857
- return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond
858
- elif mode == "img2text":
859
- # Image-conditioned text generation
860
- img_vae_out, img_clip_out, text_out = self.unet(
861
- img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type
862
- )
863
-
864
- if guidance_scale <= 1.0:
865
- return text_out
866
-
867
- # Classifier-free guidance
868
- img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype)
869
- img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype)
870
-
871
- img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet(
872
- img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type
873
- )
874
-
875
- return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond
876
- elif mode == "text":
877
- # Unconditional ("marginal") text generation (no CFG)
878
- img_vae_out, img_clip_out, text_out = self.unet(
879
- img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type
880
- )
881
-
882
- return text_out
883
- elif mode == "img":
884
- # Unconditional ("marginal") image generation (no CFG)
885
- img_vae_latents, img_clip_latents = self._split(latents, height, width)
886
-
887
- img_vae_out, img_clip_out, text_out = self.unet(
888
- img_vae_latents,
889
- img_clip_latents,
890
- prompt_embeds,
891
- timestep_img=t,
892
- timestep_text=max_timestep,
893
- data_type=data_type,
894
- )
895
-
896
- img_out = self._combine(img_vae_out, img_clip_out)
897
- return img_out
898
-
899
- def check_latents_shape(self, latents_name, latents, expected_shape):
900
- latents_shape = latents.shape
901
- expected_num_dims = len(expected_shape) + 1 # expected dimensions plus the batch dimension
902
- expected_shape_str = ", ".join(str(dim) for dim in expected_shape)
903
- if len(latents_shape) != expected_num_dims:
904
- raise ValueError(
905
- f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape"
906
- f" {latents_shape} has {len(latents_shape)} dimensions."
907
- )
908
- for i in range(1, expected_num_dims):
909
- if latents_shape[i] != expected_shape[i - 1]:
910
- raise ValueError(
911
- f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape"
912
- f" {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}."
913
- )
914
-
915
- def check_inputs(
916
- self,
917
- mode,
918
- prompt,
919
- image,
920
- height,
921
- width,
922
- callback_steps,
923
- negative_prompt=None,
924
- prompt_embeds=None,
925
- negative_prompt_embeds=None,
926
- latents=None,
927
- prompt_latents=None,
928
- vae_latents=None,
929
- clip_latents=None,
930
- ):
931
- # Check inputs before running the generative process.
932
- if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0:
933
- raise ValueError(
934
- f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}."
935
- )
936
-
937
- if (callback_steps is None) or (
938
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
939
- ):
940
- raise ValueError(
941
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
942
- f" {type(callback_steps)}."
943
- )
944
-
945
- if mode == "text2img":
946
- if prompt is not None and prompt_embeds is not None:
947
- raise ValueError(
948
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
949
- " only forward one of the two."
950
- )
951
- elif prompt is None and prompt_embeds is None:
952
- raise ValueError(
953
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
954
- )
955
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
956
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
957
-
958
- if negative_prompt is not None and negative_prompt_embeds is not None:
959
- raise ValueError(
960
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
961
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
962
- )
963
-
964
- if prompt_embeds is not None and negative_prompt_embeds is not None:
965
- if prompt_embeds.shape != negative_prompt_embeds.shape:
966
- raise ValueError(
967
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
968
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
969
- f" {negative_prompt_embeds.shape}."
970
- )
971
-
972
- if mode == "img2text":
973
- if image is None:
974
- raise ValueError("`img2text` mode requires an image to be provided.")
975
-
976
- # Check provided latents
977
- latent_height = height // self.vae_scale_factor
978
- latent_width = width // self.vae_scale_factor
979
- full_latents_available = latents is not None
980
- prompt_latents_available = prompt_latents is not None
981
- vae_latents_available = vae_latents is not None
982
- clip_latents_available = clip_latents is not None
983
-
984
- if full_latents_available:
985
- individual_latents_available = (
986
- prompt_latents is not None or vae_latents is not None or clip_latents is not None
987
- )
988
- if individual_latents_available:
989
- logger.warning(
990
- "You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and"
991
- " `clip_latents`. The value of `latents` will override the value of any individually supplied latents."
992
- )
993
- # Check shape of full latents
994
- img_vae_dim = self.num_channels_latents * latent_height * latent_width
995
- text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size
996
- latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim
997
- latents_expected_shape = (latents_dim,)
998
- self.check_latents_shape("latents", latents, latents_expected_shape)
999
-
1000
- # Check individual latent shapes, if present
1001
- if prompt_latents_available:
1002
- prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size)
1003
- self.check_latents_shape("prompt_latents", prompt_latents, prompt_latents_expected_shape)
1004
-
1005
- if vae_latents_available:
1006
- vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width)
1007
- self.check_latents_shape("vae_latents", vae_latents, vae_latents_expected_shape)
1008
-
1009
- if clip_latents_available:
1010
- clip_latents_expected_shape = (1, self.image_encoder_projection_dim)
1011
- self.check_latents_shape("clip_latents", clip_latents, clip_latents_expected_shape)
1012
-
1013
- if mode in ["text2img", "img"] and vae_latents_available and clip_latents_available:
1014
- if vae_latents.shape[0] != clip_latents.shape[0]:
1015
- raise ValueError(
1016
- f"Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal:"
1017
- f" {vae_latents.shape[0]} != {clip_latents.shape[0]}."
1018
- )
1019
-
1020
- if mode == "joint" and prompt_latents_available and vae_latents_available and clip_latents_available:
1021
- if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]:
1022
- raise ValueError(
1023
- f"All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch"
1024
- f" dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]}"
1025
- f" != {clip_latents.shape[0]}."
1026
- )
1027
-
1028
- @torch.no_grad()
1029
- def __call__(
1030
- self,
1031
- prompt: Optional[Union[str, List[str]]] = None,
1032
- image: Optional[Union[torch.FloatTensor, PIL.Image.Image]] = None,
1033
- height: Optional[int] = None,
1034
- width: Optional[int] = None,
1035
- data_type: Optional[int] = 1,
1036
- num_inference_steps: int = 50,
1037
- guidance_scale: float = 8.0,
1038
- negative_prompt: Optional[Union[str, List[str]]] = None,
1039
- num_images_per_prompt: Optional[int] = 1,
1040
- num_prompts_per_image: Optional[int] = 1,
1041
- eta: float = 0.0,
1042
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1043
- latents: Optional[torch.FloatTensor] = None,
1044
- prompt_latents: Optional[torch.FloatTensor] = None,
1045
- vae_latents: Optional[torch.FloatTensor] = None,
1046
- clip_latents: Optional[torch.FloatTensor] = None,
1047
- prompt_embeds: Optional[torch.FloatTensor] = None,
1048
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1049
- output_type: Optional[str] = "pil",
1050
- return_dict: bool = True,
1051
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1052
- callback_steps: int = 1,
1053
- ):
1054
- r"""
1055
- The call function to the pipeline for generation.
1056
-
1057
- Args:
1058
- prompt (`str` or `List[str]`, *optional*):
1059
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
1060
- Required for text-conditioned image generation (`text2img`) mode.
1061
- image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*):
1062
- `Image` or tensor representing an image batch. Required for image-conditioned text generation
1063
- (`img2text`) mode.
1064
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1065
- The height in pixels of the generated image.
1066
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1067
- The width in pixels of the generated image.
1068
- data_type (`int`, *optional*, defaults to 1):
1069
- The data type (either 0 or 1). Only used if you are loading a checkpoint which supports a data type
1070
- embedding; this is added for compatibility with the
1071
- [UniDiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1) checkpoint.
1072
- num_inference_steps (`int`, *optional*, defaults to 50):
1073
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1074
- expense of slower inference.
1075
- guidance_scale (`float`, *optional*, defaults to 8.0):
1076
- A higher guidance scale value encourages the model to generate images closely linked to the text
1077
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1078
- negative_prompt (`str` or `List[str]`, *optional*):
1079
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1080
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). Used in
1081
- text-conditioned image generation (`text2img`) mode.
1082
- num_images_per_prompt (`int`, *optional*, defaults to 1):
1083
- The number of images to generate per prompt. Used in `text2img` (text-conditioned image generation) and
1084
- `img` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are
1085
- supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated.
1086
- num_prompts_per_image (`int`, *optional*, defaults to 1):
1087
- The number of prompts to generate per image. Used in `img2text` (image-conditioned text generation) and
1088
- `text` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are
1089
- supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated.
1090
- eta (`float`, *optional*, defaults to 0.0):
1091
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1092
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1093
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1094
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1095
- generation deterministic.
1096
- latents (`torch.FloatTensor`, *optional*):
1097
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for joint
1098
- image-text generation. Can be used to tweak the same generation with different prompts. If not
1099
- provided, a latents tensor is generated by sampling using the supplied random `generator`. This assumes
1100
- a full set of VAE, CLIP, and text latents, if supplied, overrides the value of `prompt_latents`,
1101
- `vae_latents`, and `clip_latents`.
1102
- prompt_latents (`torch.FloatTensor`, *optional*):
1103
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for text
1104
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1105
- tensor is generated by sampling using the supplied random `generator`.
1106
- vae_latents (`torch.FloatTensor`, *optional*):
1107
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1108
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1109
- tensor is generated by sampling using the supplied random `generator`.
1110
- clip_latents (`torch.FloatTensor`, *optional*):
1111
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1112
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1113
- tensor is generated by sampling using the supplied random `generator`.
1114
- prompt_embeds (`torch.FloatTensor`, *optional*):
1115
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1116
- provided, text embeddings are generated from the `prompt` input argument. Used in text-conditioned
1117
- image generation (`text2img`) mode.
1118
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1119
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1120
- not provided, `negative_prompt_embeds` are be generated from the `negative_prompt` input argument. Used
1121
- in text-conditioned image generation (`text2img`) mode.
1122
- output_type (`str`, *optional*, defaults to `"pil"`):
1123
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1124
- return_dict (`bool`, *optional*, defaults to `True`):
1125
- Whether or not to return a [`~pipelines.ImageTextPipelineOutput`] instead of a plain tuple.
1126
- callback (`Callable`, *optional*):
1127
- A function that calls every `callback_steps` steps during inference. The function is called with the
1128
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1129
- callback_steps (`int`, *optional*, defaults to 1):
1130
- The frequency at which the `callback` function is called. If not specified, the callback is called at
1131
- every step.
1132
-
1133
- Returns:
1134
- [`~pipelines.unidiffuser.ImageTextPipelineOutput`] or `tuple`:
1135
- If `return_dict` is `True`, [`~pipelines.unidiffuser.ImageTextPipelineOutput`] is returned, otherwise a
1136
- `tuple` is returned where the first element is a list with the generated images and the second element
1137
- is a list of generated texts.
1138
- """
1139
-
1140
- # 0. Default height and width to unet
1141
- height = height or self.unet_resolution * self.vae_scale_factor
1142
- width = width or self.unet_resolution * self.vae_scale_factor
1143
-
1144
- # 1. Check inputs
1145
- # Recalculate mode for each call to the pipeline.
1146
- mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents)
1147
- self.check_inputs(
1148
- mode,
1149
- prompt,
1150
- image,
1151
- height,
1152
- width,
1153
- callback_steps,
1154
- negative_prompt,
1155
- prompt_embeds,
1156
- negative_prompt_embeds,
1157
- latents,
1158
- prompt_latents,
1159
- vae_latents,
1160
- clip_latents,
1161
- )
1162
-
1163
- # 2. Define call parameters
1164
- batch_size, multiplier = self._infer_batch_size(
1165
- mode,
1166
- prompt,
1167
- prompt_embeds,
1168
- image,
1169
- num_images_per_prompt,
1170
- num_prompts_per_image,
1171
- latents,
1172
- prompt_latents,
1173
- vae_latents,
1174
- clip_latents,
1175
- )
1176
- device = self._execution_device
1177
- reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != "text2img"
1178
-
1179
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1180
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1181
- # corresponds to doing no classifier free guidance.
1182
- # Note that this differs from the formulation in the unidiffusers paper!
1183
- # do_classifier_free_guidance = guidance_scale > 1.0
1184
-
1185
- # check if scheduler is in sigmas space
1186
- # scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas")
1187
-
1188
- # 3. Encode input prompt, if available; otherwise prepare text latents
1189
- if latents is not None:
1190
- # Overwrite individual latents
1191
- vae_latents, clip_latents, prompt_latents = self._split_joint(latents, height, width)
1192
-
1193
- if mode in ["text2img"]:
1194
- # 3.1. Encode input prompt, if available
1195
- assert prompt is not None or prompt_embeds is not None
1196
- prompt_embeds = self._encode_prompt(
1197
- prompt=prompt,
1198
- device=device,
1199
- num_images_per_prompt=multiplier,
1200
- do_classifier_free_guidance=False, # don't support standard classifier-free guidance for now
1201
- negative_prompt=negative_prompt,
1202
- prompt_embeds=prompt_embeds,
1203
- negative_prompt_embeds=negative_prompt_embeds,
1204
- )
1205
- else:
1206
- # 3.2. Prepare text latent variables, if input not available
1207
- prompt_embeds = self.prepare_text_latents(
1208
- batch_size=batch_size,
1209
- num_images_per_prompt=multiplier,
1210
- seq_len=self.text_encoder_seq_len,
1211
- hidden_size=self.text_encoder_hidden_size,
1212
- dtype=self.text_encoder.dtype, # Should work with both full precision and mixed precision
1213
- device=device,
1214
- generator=generator,
1215
- latents=prompt_latents,
1216
- )
1217
-
1218
- if reduce_text_emb_dim:
1219
- prompt_embeds = self.text_decoder.encode(prompt_embeds)
1220
-
1221
- # 4. Encode image, if available; otherwise prepare image latents
1222
- if mode in ["img2text"]:
1223
- # 4.1. Encode images, if available
1224
- assert image is not None, "`img2text` requires a conditioning image"
1225
- # Encode image using VAE
1226
- image_vae = preprocess(image)
1227
- height, width = image_vae.shape[-2:]
1228
- image_vae_latents = self.encode_image_vae_latents(
1229
- image=image_vae,
1230
- batch_size=batch_size,
1231
- num_prompts_per_image=multiplier,
1232
- dtype=prompt_embeds.dtype,
1233
- device=device,
1234
- do_classifier_free_guidance=False, # Copied from InstructPix2Pix, don't use their version of CFG
1235
- generator=generator,
1236
- )
1237
-
1238
- # Encode image using CLIP
1239
- image_clip_latents = self.encode_image_clip_latents(
1240
- image=image,
1241
- batch_size=batch_size,
1242
- num_prompts_per_image=multiplier,
1243
- dtype=prompt_embeds.dtype,
1244
- device=device,
1245
- generator=generator,
1246
- )
1247
- # (batch_size, clip_hidden_size) => (batch_size, 1, clip_hidden_size)
1248
- image_clip_latents = image_clip_latents.unsqueeze(1)
1249
- else:
1250
- # 4.2. Prepare image latent variables, if input not available
1251
- # Prepare image VAE latents in latent space
1252
- image_vae_latents = self.prepare_image_vae_latents(
1253
- batch_size=batch_size,
1254
- num_prompts_per_image=multiplier,
1255
- num_channels_latents=self.num_channels_latents,
1256
- height=height,
1257
- width=width,
1258
- dtype=prompt_embeds.dtype,
1259
- device=device,
1260
- generator=generator,
1261
- latents=vae_latents,
1262
- )
1263
-
1264
- # Prepare image CLIP latents
1265
- image_clip_latents = self.prepare_image_clip_latents(
1266
- batch_size=batch_size,
1267
- num_prompts_per_image=multiplier,
1268
- clip_img_dim=self.image_encoder_projection_dim,
1269
- dtype=prompt_embeds.dtype,
1270
- device=device,
1271
- generator=generator,
1272
- latents=clip_latents,
1273
- )
1274
-
1275
- # 5. Set timesteps
1276
- self.scheduler.set_timesteps(num_inference_steps, device=device)
1277
- timesteps = self.scheduler.timesteps
1278
- # max_timestep = timesteps[0]
1279
- max_timestep = self.scheduler.config.num_train_timesteps
1280
-
1281
- # 6. Prepare latent variables
1282
- if mode == "joint":
1283
- latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds)
1284
- elif mode in ["text2img", "img"]:
1285
- latents = self._combine(image_vae_latents, image_clip_latents)
1286
- elif mode in ["img2text", "text"]:
1287
- latents = prompt_embeds
1288
-
1289
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1290
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1291
-
1292
- logger.debug(f"Scheduler extra step kwargs: {extra_step_kwargs}")
1293
-
1294
- # 8. Denoising loop
1295
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1296
- with self.progress_bar(total=num_inference_steps) as progress_bar:
1297
- for i, t in enumerate(timesteps):
1298
- # predict the noise residual
1299
- # Also applies classifier-free guidance as described in the UniDiffuser paper
1300
- noise_pred = self._get_noise_pred(
1301
- mode,
1302
- latents,
1303
- t,
1304
- prompt_embeds,
1305
- image_vae_latents,
1306
- image_clip_latents,
1307
- max_timestep,
1308
- data_type,
1309
- guidance_scale,
1310
- generator,
1311
- device,
1312
- height,
1313
- width,
1314
- )
1315
-
1316
- # compute the previous noisy sample x_t -> x_t-1
1317
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1318
-
1319
- # call the callback, if provided
1320
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1321
- progress_bar.update()
1322
- if callback is not None and i % callback_steps == 0:
1323
- callback(i, t, latents)
1324
-
1325
- # 9. Post-processing
1326
- gen_image = None
1327
- gen_text = None
1328
- if mode == "joint":
1329
- image_vae_latents, image_clip_latents, text_latents = self._split_joint(latents, height, width)
1330
-
1331
- # Map latent VAE image back to pixel space
1332
- gen_image = self.decode_image_latents(image_vae_latents)
1333
-
1334
- # Generate text using the text decoder
1335
- output_token_list, seq_lengths = self.text_decoder.generate_captions(
1336
- text_latents, self.text_tokenizer.eos_token_id, device=device
1337
- )
1338
- output_list = output_token_list.cpu().numpy()
1339
- gen_text = [
1340
- self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True)
1341
- for output, length in zip(output_list, seq_lengths)
1342
- ]
1343
- elif mode in ["text2img", "img"]:
1344
- image_vae_latents, image_clip_latents = self._split(latents, height, width)
1345
- gen_image = self.decode_image_latents(image_vae_latents)
1346
- elif mode in ["img2text", "text"]:
1347
- text_latents = latents
1348
- output_token_list, seq_lengths = self.text_decoder.generate_captions(
1349
- text_latents, self.text_tokenizer.eos_token_id, device=device
1350
- )
1351
- output_list = output_token_list.cpu().numpy()
1352
- gen_text = [
1353
- self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True)
1354
- for output, length in zip(output_list, seq_lengths)
1355
- ]
1356
-
1357
- # 10. Convert to PIL
1358
- if output_type == "pil" and gen_image is not None:
1359
- gen_image = self.numpy_to_pil(gen_image)
1360
-
1361
- # Offload last model to CPU
1362
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1363
- self.final_offload_hook.offload()
1364
-
1365
- if not return_dict:
1366
- return (gen_image, gen_text)
1367
-
1368
- return ImageTextPipelineOutput(images=gen_image, text=gen_text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_480x480_80k_pascal_context.py DELETED
@@ -1,8 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_context.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
4
- ]
5
- model = dict(
6
- decode_head=dict(num_classes=60),
7
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
8
- optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Air Game Apk.md DELETED
@@ -1,120 +0,0 @@
1
-
2
- <h1>Aire juego APK: Cómo jugar juegos multijugador en su dispositivo Android</h1>
3
- <p>¿Te encanta jugar juegos multijugador con tus amigos y familiares? ¿Quieres disfrutar de la emoción de los juegos de árcade, juegos de carreras, juegos de fiesta y más en tu dispositivo Android? Si es así, entonces usted debe probar Air Game APK, una colección de dos increíbles juegos para Android que le permiten jugar juegos multijugador en su dispositivo. En este artículo, le diremos qué es Air Game APK, cómo descargarlo e instalarlo, y cómo jugar juegos multijugador en él. </p>
4
- <h2>air game apk</h2><br /><p><b><b>Download File</b> &#10084; <a href="https://bltlly.com/2v6KvG">https://bltlly.com/2v6KvG</a></b></p><br /><br />
5
- <h2>¿Qué es el juego de aire APK? </h2>
6
- <p>Air Game APK es un paquete de dos juegos para Android que le permiten jugar juegos multijugador en su dispositivo. Los dos juegos son:</p>
7
- <h3>AirConsole: una consola de videojuegos multijugador para Android</h3>
8
- <p>AirConsole es una consola de videojuegos multijugador que te permite jugar juegos en tu Android TV, Amazon Fire TV, Tablet u Computer como consola, y usar tus teléfonos inteligentes como controladores. AirConsole es rápido, divertido y fácil de comenzar. Puedes elegir entre cientos de juegos de diferentes géneros, como árcade, carreras, fiestas, trivialidades, deportes y más. También puedes crear tus propios juegos usando las herramientas para desarrolladores de AirConsole. AirConsole es una gran manera de disfrutar de los juegos con sus amigos y familiares sin comprar consolas o controladores caros. </p>
9
- <h3>1945 Fuerza Aérea: un clásico juego de disparos de aviones para Android</h3>
10
-
11
- <h2>¿Cómo descargar e instalar Air Game APK? </h2>
12
- <p>Para descargar e instalar Air Game APK, es necesario seguir estos pasos:</p>
13
- <h3>Pasos para descargar e instalar AirConsole APK</h3>
14
- <ol>
15
- <li>Ir al sitio web oficial de AirConsole en <a href="( 1 )">https://www.airconsole.com/</a> o buscar "AirConsole" en Google Play Store.</li>
16
- <li>Haga clic en el botón "Descargar" o el botón "Instalar" para descargar el archivo APK AirConsole. </li>
17
- <li>Una vez completada la descarga, abra la aplicación de administrador de archivos en su dispositivo y localice el archivo descargado. </li>
18
- <li>Toque en el archivo y permita la instalación desde fuentes desconocidas si se le solicita. </li>
19
- <li>Siga las instrucciones en la pantalla para completar la instalación. </li>
20
- <li>Inicie la aplicación AirConsole y disfrute jugando juegos multijugador en su dispositivo. </li>
21
- </ol>
22
- <h3>Pasos para descargar e instalar 1945 Fuerza Aérea APK</h3>
23
- <ol>
24
- <li>Ir al sitio web oficial de la Fuerza Aérea de 1945 en <a href="( 2 )">https://www.onesoft.com.vn/</a> o buscar "1945 Air Force" en Google Play Store.</li>
25
- <li>Haga clic en el botón "Descargar" o el botón "Instalar" para descargar el archivo APK de la Fuerza Aérea de 1945. </li>
26
- <li>Una vez completada la descarga, abra la aplicación de administrador de archivos en su dispositivo y localice el archivo descargado. </li>
27
- <li>Toque en el archivo y permita la instalación desde fuentes desconocidas si se le solicita. </li>
28
- <li>Siga las instrucciones en la pantalla para completar la instalación. </li>
29
- <li>Lanza la aplicación 1945 Fuerza Aérea y disfruta jugando juegos multijugador en tu dispositivo. </li>
30
- </ol>
31
- <h2>¿Cómo jugar juegos multijugador en Air Game APK? </h2>
32
- <p>Ahora que ha descargado e instalado Air Game APK, puede comenzar a jugar juegos multijugador en su dispositivo. Aquí hay algunos consejos sobre cómo jugar juegos multijugador en AirConsole APK y 1945 Fuerza Aérea APK.</p>
33
- <p></p>
34
- <h3>Cómo jugar juegos multijugador en AirConsole APK</h3>
35
- <p>Para jugar juegos multijugador en AirConsole APK, es necesario hacer lo siguiente:</p>
36
- <h4>Conecta tu dispositivo Android a una pantalla</h4>
37
-
38
- <ul>
39
- <li>Abra la aplicación AirConsole en su dispositivo y toque en el botón "Conectar". </li>
40
- <li>Seleccione la opción que se adapte a su pantalla, como "Android TV", "Chromecast", "Navegador web", etc.</li>
41
- <li>Siga las instrucciones en la pantalla para conectar el dispositivo a una pantalla. </li>
42
- <li> Verá un código en la pantalla que necesita introducir en su dispositivo. </li>
43
- <li>Una vez que ingreses el código, estarás conectado a la pantalla y listo para jugar. </li>
44
- </ul>
45
- <h4>Usa tu smartphone como controlador</h4>
46
- <p>Puedes usar tu smartphone como controlador para jugar en AirConsole. Puede utilizar la pantalla táctil, el giroscopio o el micrófono de su teléfono inteligente como métodos de entrada. Para usar tu smartphone como controlador, sigue estos pasos:</p>
47
- <ul>
48
- <li>Abra la aplicación AirConsole en su teléfono inteligente y toque en el botón "Conectar". </li>
49
- <li>Seleccione la opción que se adapte a su pantalla, como "Android TV", "Chromecast", "Navegador web", etc.</li>
50
- <li>Siga las instrucciones en la pantalla para conectar su teléfono inteligente a una pantalla. </li>
51
- <li> Verás un código en la pantalla que necesitas introducir en tu smartphone. </li>
52
- <li>Una vez que ingreses el código, estarás conectado a la pantalla y listo para jugar. </li>
53
- <li>Puede utilizar su teléfono inteligente como un controlador siguiendo las instrucciones en la pantalla del juego y utilizando los botones o gestos en su teléfono inteligente. </li>
54
- </ul>
55
- <h4>Elige entre cientos de juegos de diferentes géneros</h4>
56
- <p>Puedes elegir entre cientos de juegos de diferentes géneros para jugar en AirConsole. Puedes encontrar juegos para todas las edades y gustos, como árcade, carreras, fiestas, trivialidades, deportes y más. Para elegir un juego para jugar en AirConsole, sigue estos pasos:</p>
57
- <ul>
58
- <li>Una vez que esté conectado a una pantalla, verá una lista de juegos en la pantalla del juego. </li>
59
- <li> Puede navegar por los juegos deslizando hacia la izquierda o hacia la derecha en su teléfono inteligente o utilizando las teclas de flecha en el teclado. </li>
60
-
61
- <li>Una vez que encuentres un juego que te guste, toca en él o presiona enter para comenzar a jugarlo. </li>
62
- <li>Puede invitar a sus amigos y familiares a unirse a usted compartiendo el código que aparece en la pantalla del juego o escaneando el código QR con sus teléfonos inteligentes. </li>
63
- </ul>
64
- <h3>Cómo jugar juegos multijugador en 1945 Fuerza Aérea APK</h3>
65
- <p>Para jugar juegos multijugador en 1945 Fuerza Aérea APK, es necesario hacer lo siguiente:</p>
66
- <h4>Elige tu avión de combate y personalízalo</h4>
67
- <p>Puedes elegir entre más de 200 aviones de combate de diferentes países y personalizarlos con varias armas y equipos. Para elegir y personalizar su avión de combate, siga estos pasos:</p>
68
- <ul>
69
- <li>Abra la aplicación de la Fuerza Aérea de 1945 en su dispositivo y toque el botón "Avión". </li>
70
- <li> Verá una lista de aviones que puede elegir. Puede deslizar hacia la izquierda o hacia la derecha para navegar por ellos o usar el botón de filtro para ordenarlos por país, tipo o rareza. </li>
71
- <li>Una vez que encuentre un plano que le guste, toque en él para seleccionarlo. Verá sus estadísticas y características en el lado derecho de la pantalla. </li>
72
- <li>También puede tocar en el botón "Personalizar" para personalizar su avión con diferentes armas y equipos. Puedes usar monedas y recompensas que ganes jugando juegos para comprar o mejorar armas y equipos. También puede usar gemas o dinero real para comprar artículos premium. </li>
73
- <li>Puede ver los cambios en las estadísticas y la apariencia de su avión a medida que lo personaliza. También puede pulsar en el botón "Vista previa" para ver cómo se ve y suena su avión en acción. </li>
74
- <li>Una vez que esté satisfecho con su avión, toque en el botón "Guardar" para guardar sus cambios. </li>
75
- </ul>
76
- <h4>Únete a un escuadrón y lucha contra los enemigos</h4>
77
- <p>Puedes unirte a un escuadrón y luchar contra enemigos en solitario o en equipo. Para unirte a un escuadrón y luchar contra enemigos, sigue estos pasos:</p>
78
- <ul>
79
- <li>Abra la aplicación de la Fuerza Aérea de 1945 en su dispositivo y toque el botón "Escuadrón". </li>
80
-
81
- <li>Una vez que encuentres un escuadrón que te guste, toca en él para unirte a él. Verá su nombre, logotipo, miembros, rango y descripción en el lado derecho de la pantalla. </li>
82
- <li>También puedes tocar el botón "Crear" para crear tu propio escuadrón. Puede elegir un nombre, un logotipo, una descripción y una contraseña para su escuadrón. También puede invitar a sus amigos y familiares a unirse a su escuadrón compartiendo el código que aparece en la pantalla o escaneando el código QR con sus dispositivos. </li>
83
- <li>Una vez que forma parte de un escuadrón, puede tocar el botón "Luchar" para iniciar un juego. Puedes elegir entre diferentes modos, como "Solo", "Equipo", "Jefe", "Evento", etc. También puedes elegir el nivel de dificultad, el número de jugadores y el mapa de tu juego. </li>
84
- <li>Una vez que inicie un juego, verá su avión y los aviones de sus compañeros de equipo en la pantalla. Puede utilizar el joystick en el lado izquierdo de la pantalla para mover su avión y los botones en el lado derecho de la pantalla para disparar sus armas y utilizar sus habilidades especiales. </li>
85
- <li>También puedes ver tu barra de salud, tu puntuación, tus monedas y tus recompensas en la parte superior de la pantalla. También puedes ver los aviones enemigos, sus barras de salud, sus balas y sus ataques en la pantalla. </li>
86
- <li>Puedes intentar derribar tantos aviones enemigos como sea posible y evitar sus ataques. También puedes recoger potenciadores, monedas y recompensas que aparecen en la pantalla. Puedes usarlas para mejorar el rendimiento de tu avión o comprar nuevos objetos. </li>
87
- <li>También puede comunicarse con sus compañeros de equipo mediante el botón de chat en la parte inferior de la pantalla. Puede enviar mensajes, emojis o mensajes de voz para coordinar sus estrategias o animarse mutuamente. </li>
88
-
89
- </ul>
90
- <h4>Recoge monedas y recompensas para mejorar tu avión</h4>
91
- <p>Puedes recoger monedas y recompensas para actualizar tu avión y desbloquear nuevas características. Para recoger monedas y recompensas para actualizar su avión, siga estos pasos:</p>
92
- <ul>
93
- <li>Abra la aplicación de la Fuerza Aérea de 1945 en su dispositivo y toque el botón "Avión". </li>
94
- <li>Verá su avión y sus estadísticas en el lado derecho de la pantalla. También verá una lista de artículos que puede comprar o actualizar para su avión en el lado izquierdo de la pantalla. </li>
95
- <li>Puede utilizar monedas y recompensas que usted gana de jugar juegos para comprar o actualizar artículos para su avión. También puede usar gemas o dinero real para comprar artículos premium. </li>
96
- <li>Puede ver los cambios en las estadísticas y la apariencia de su avión a medida que compra o actualiza artículos para él. También puede pulsar en el botón "Vista previa" para ver cómo se ve y suena su avión en acción. </li>
97
- <li>Una vez que esté satisfecho con su avión, toque en el botón "Guardar" para guardar sus cambios. </li>
98
- </ul>
99
- <h2>Conclusión</h2>
100
- <p>Air Game APK es una colección de dos increíbles juegos para Android que le permiten jugar juegos multijugador en su dispositivo. AirConsole es una consola de videojuegos multijugador que te permite jugar juegos en tu Android TV, Amazon Fire TV, Tablet u Computer como consola, y usar tus teléfonos inteligentes como controladores. 1945 Air Force es un clásico juego de disparos de aviones que te lleva de vuelta a la era de la Segunda Guerra Mundial. Puede elegir entre más de 200 aviones de diferentes países y personalizarlos con diversas armas y equipos. También puedes unirte a un escuadrón y luchar contra enemigos en solitario o en equipo. Air Game APK es una gran manera de disfrutar de los juegos con sus amigos y familiares sin comprar consolas o controladores caros. Si desea jugar juegos multijugador en su dispositivo Android, debe descargar e instalar Air Game APK hoy. </p>
101
- <h2>Preguntas frecuentes</h2>
102
- <p>Aquí hay algunas preguntas frecuentes sobre Air Game APK:</p>
103
- <ol>
104
- <li> ¿Cuáles son los requisitos para jugar Air Game APK? </li>
105
-
106
- <li> ¿Es el juego de aire APK seguro y legal? </li>
107
- <p>Sí, Air Game APK es seguro y legal. Los juegos son desarrollados por empresas de renombre y son verificados por Google Play Store. Los juegos no contienen virus, malware o contenido ilegal. Sin embargo, siempre debes descargar los juegos de las fuentes oficiales y no de sitios web de terceros. </p>
108
- <li> ¿Cuánto cuesta juego de aire APK? </li>
109
- <p>Air Game APK es gratis para descargar y jugar. Sin embargo, algunos juegos pueden ofrecer compras en la aplicación o anuncios que puedes comprar o ver para apoyar a los desarrolladores. También puede comprar gemas o dinero real para comprar artículos premium para su avión en 1945 Fuerza Aérea.</p>
110
- <li> ¿Cuántos jugadores pueden jugar Air Game APK? </li>
111
- <p>El número de jugadores que pueden jugar Air Game APK depende del juego y el modo que usted elija. Para AirConsole, puedes jugar con hasta 16 jugadores en una pantalla o hasta 32 jugadores en línea. Para la Fuerza Aérea 1945, puedes jugar con hasta 4 jugadores en modo equipo o hasta 8 jugadores en modo evento. </p>
112
- <li> ¿Cuáles son algunos de los mejores juegos para jugar en Air Game APK? </li>
113
- <p>Algunos de los mejores juegos para jugar en Air Game APK son:</p>
114
- <ul>
115
- <li>AirConsole: Cartas y humanidad, Racing Wars, Silly World Series, Torre de Babel, El barrio, etc.</li>
116
- <li>1945 Air Force: Sky Force Reloaded, Strikers 1945, Raiden Legacy, Sky Gamblers: Storm Raiders, etc.</li>
117
- </ul>
118
- </ol></p> 64aa2da5cf<br />
119
- <br />
120
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Apk Cinco Noches En Freddy Y 39 S 2.md DELETED
@@ -1,100 +0,0 @@
1
-
2
- <h1>APK cinco noches en Freddy’s 2: Un juego de terror para Android</h1>
3
- <p>¿Te gustan los juegos de terror? ¿Te gusta que te asusten los animatrónicos espeluznantes? ¿Quieres experimentar la emoción de sobrevivir cinco noches como guardia nocturno en una pizzería embrujada? Si respondiste sí a cualquiera de estas preguntas, entonces usted debe probar APK Five Nights at Freddy’s 2, un juego de terror para dispositivos Android que le mantendrá en el borde de su asiento. </p>
4
- <h2>Introducción</h2>
5
- <p>En este artículo, le diremos todo lo que necesita saber sobre APK Five Nights at Freddy’s 2, incluyendo lo que es, cómo descargar e instalar, y cómo jugarlo. También te daremos algunos consejos y trucos para ayudarte a sobrevivir la noche y evitar ser asesinado por los monstruos mecánicos. Pero antes de entrar en eso, primero vamos a explicar lo que es Five Nights at Freddy’s 2 y lo que es un archivo APK. </p>
6
- <h2>apk cinco noches en freddy y 39; s 2</h2><br /><p><b><b>DOWNLOAD</b> &#10022; <a href="https://bltlly.com/2v6MTn">https://bltlly.com/2v6MTn</a></b></p><br /><br />
7
- <h3>¿Qué es cinco noches en Freddy’s 2?</h3>
8
- <p>Five Nights at Freddy’s 2 es un juego de terror desarrollado por Scott Cawthon y lanzado en 2014. Es la secuela del juego original de Five Nights at Freddy, que también fue lanzado en 2014. El juego se desarrolla en una pizzería ficticia llamada Freddy Fazbear’s Pizza, donde la principal atracción es un grupo de personajes animatrónicos que entretienen a los niños durante el día. Sin embargo, por la noche, estos animatrónicos se vuelven hostiles e intentan matar a cualquiera que se quede en el edificio. </p>
9
- <p>El juego te pone en el papel de un guardia nocturno que tiene que trabajar cinco noches (de 12 a 6 AM) en la pizzería. Sus únicas herramientas son un sistema de cámaras de seguridad, una linterna y una máscara que puede engañar a algunos de los animatrónicos. Usted tiene que controlar las cámaras y tener cuidado con cualquier movimiento o signos de peligro. Si ves a un animatrónico acercándose a tu oficina, tienes que cerrar la puerta, encender la luz o ponerte la máscara para disuadirlos. Si no lo haces, te asustarán y terminarán tu juego. </p>
10
-
11
- <h3>¿Qué es un archivo APK y por qué lo necesita? </h3>
12
- <p>Un archivo APK es un formato de archivo que se utiliza para distribuir e instalar aplicaciones en dispositivos Android. APK significa Android Package Kit, y contiene todos los archivos y datos que se necesitan para que una aplicación se ejecute en su dispositivo. Puedes descargar archivos APK de varias fuentes en línea, como tiendas de aplicaciones oficiales, sitios web de terceros o plataformas para compartir archivos. </p>
13
- <p>Es posible que necesite un archivo APK por varias razones. Por ejemplo, es posible que desee instalar una aplicación que no esté disponible en su región o en su modelo de dispositivo. También es posible que desee instalar una versión anterior de una aplicación que se ha actualizado o eliminado de la tienda de aplicaciones. O simplemente puede probar una aplicación nueva o no lanzada que aún no se haya lanzado oficialmente. </p>
14
- <p>Sin embargo, descargar e instalar archivos APK también viene con algunos riesgos. Por ejemplo, puede encontrarse con malware o virus que pueden dañar su dispositivo o robar su información personal. También puede violar algunos términos <p>de las políticas de servicio o privacidad del desarrollador de aplicaciones o la tienda de aplicaciones. También es posible que tenga problemas de compatibilidad o de rendimiento con su dispositivo o la propia aplicación. Por lo tanto, siempre debe tener cuidado y precaución al descargar e instalar archivos APK, y solo hacerlo desde fuentes confiables y de buena reputación. </p>
15
- <p></p>
16
- <h2>Cómo descargar e instalar APK Five Nights at Freddy’s 2</h2>
17
- <p>Si desea jugar APK Five Nights at Freddy’s 2 en su dispositivo Android, tendrá que descargar e instalar el archivo APK para el juego. Estos son los pasos que debes seguir:</p>
18
- <h3>Paso 1: Habilitar fuentes desconocidas en el dispositivo</h3>
19
- <p>Antes de que pueda instalar cualquier archivo APK en su dispositivo, debe habilitar la opción para permitir la instalación desde fuentes desconocidas. Esta opción suele estar deshabilitada de forma predeterminada por razones de seguridad, pero puede activarla fácilmente siguiendo estos pasos:</p>
20
- <ul>
21
- <li>Ir a la configuración de su dispositivo y toque en la seguridad o la privacidad. </li>
22
-
23
- <li> Un mensaje de advertencia puede aparecer, pidiéndole que confirme su elección. Toque en OK o permita continuar. </li>
24
- </ul>
25
- <p>Una vez que haya habilitado fuentes desconocidas, puede pasar al siguiente paso. </p>
26
- <h3>Paso 2: Descargar el archivo APK de una fuente de confianza</h3>
27
- <p>El siguiente paso es descargar el archivo APK para APK Five Nights at Freddy’s 2 de una fuente confiable y confiable. Puede utilizar cualquier navegador web en su dispositivo para hacer esto, pero asegúrese de que está utilizando una conexión segura y rápida. Estas son algunas de las fuentes que recomendamos:</p>
28
- <ul>
29
- <li>[APKPure]: Este es uno de los sitios web más populares y de buena reputación para descargar archivos APK. Ofrece una amplia gama de aplicaciones y juegos, incluyendo APK Five Nights at Freddy’s 2. También proporciona información detallada sobre cada aplicación, como su tamaño, versión, calificación, capturas de pantalla y descripción. También puede escanear el código QR en el sitio web para descargar el archivo APK directamente a su dispositivo. </li>
30
- <li>[APKMirror]: Este es otro sitio web conocido y de confianza para descargar archivos APK. Ofrece una gran colección de aplicaciones y juegos, incluyendo APK Five Nights at Freddy’s 2. También proporciona información sobre cada aplicación, como su desarrollador, fecha de actualización, registro de cambios y permisos. También puede escanear el código QR en el sitio web para descargar el archivo APK directamente a su dispositivo. </li>
31
- <li>[Uptodown]: Esta es una tercera opción para descargar archivos APK. Ofrece una variedad de aplicaciones y juegos, incluyendo APK Five Nights at Freddy’s 2. También proporciona información sobre cada aplicación, como su categoría, idioma, calificación, capturas de pantalla, y descripción. También puede escanear el código QR en el sitio web para descargar el archivo APK directamente a su dispositivo. </li>
32
- </ul>
33
- <p>Una vez que haya elegido una fuente, siga estos pasos para descargar el archivo APK:</p>
34
- <ul>
35
- <li>Ir a la página web y buscar APK Cinco Noches en Freddy’s 2 en la barra de búsqueda. </li>
36
- <li>Seleccione la aplicación de los resultados y toque en el botón de descarga. </li>
37
-
38
- <li>El archivo APK comenzará a descargarse en su dispositivo. Puede comprobar el progreso en la barra de notificaciones o en la carpeta de descargas. </li>
39
- </ul>
40
- <p>Una vez que haya descargado el archivo APK, puede pasar al siguiente paso. </p> <h3>Paso 3: Instalar el archivo APK en su dispositivo</h3>
41
- <p>El paso final es instalar el archivo APK en su dispositivo. Este es un proceso simple y rápido, pero debe tener cuidado y seguir las instrucciones cuidadosamente. Estos son los pasos que debe seguir:</p>
42
- <ul>
43
- <li>Localice el archivo APK en su dispositivo. Puede encontrarlo en su carpeta de descargas o en la barra de notificaciones. </li>
44
- <li>Toque en el archivo APK para abrirlo. Una ventana emergente puede aparecer, pidiéndole que confirme la instalación. Toque en instalar o siguiente para continuar. </li>
45
- <li> El proceso de instalación comenzará y puede tardar unos segundos o minutos, dependiendo del tamaño del archivo y la velocidad de su dispositivo. </li>
46
- <li>Una vez completada la instalación, aparecerá un mensaje diciendo que la aplicación ha sido instalada. Toque en abrir o hacer para iniciar la aplicación o salir de la instalación. </li>
47
- </ul>
48
- <p>Felicidades! Usted ha instalado con éxito APK Five Nights at Freddy’s 2 en su dispositivo. Ahora se puede disfrutar de jugar el juego y divertirse. </p>
49
- <h2>Cómo jugar APK Cinco Noches en Freddy 2</h2>
50
- <p>Ahora que ha instalado APK Five Nights at Freddy’s 2 en su dispositivo, es posible que se pregunte cómo jugarlo. No te preocupes, te tenemos cubierto. En esta sección, explicaremos la jugabilidad y los controles, los personajes y las ubicaciones, y algunos consejos y trucos para ayudarte a sobrevivir la noche y evitar ser asesinado por los animatrónicos. </p>
51
- <h3>El juego y los controles</h3>
52
-
53
- <p>Los controles de APK Five Nights at Freddy’s 2 son simples e intuitivos. Puede utilizar su dedo para deslizar a través de la pantalla para mirar alrededor de su oficina o cambiar entre las cámaras. También puede tocar en la pantalla para interactuar con varios objetos, como puertas, luces o máscaras. También puedes usar los botones de volumen de tu dispositivo para ajustar el nivel de sonido del juego. </p>
54
- <h3>Los caracteres y las ubicaciones</h3>
55
- <p>Los personajes de APK Five Nights at Freddy’s 2 son 11 animatronics que tienen diferentes apariciones y comportamientos. Algunos de ellos son nuevas versiones de los personajes originales del primer juego, mientras que otros son modelos viejos y dañados que se han almacenado lejos. Aquí hay una breve descripción de cada carácter y su ubicación:</p>
56
- <tabla>
57
- <tr><th>Nombre</th><th>Descripción</th><th>Ubicación</th></tr>
58
- <tr><td>Freddy Fazbear</td><td>La mascota principal de Freddy Fazbear’s Pizza. Es un oso marrón con sombrero negro y pajarita. Suele estar inactivo durante las primeras noches, pero se vuelve más activo y agresivo a medida que avanzan las noches. Puede ser engañado por la máscara, pero también puede colarse en su oficina sin ser visto por las cámaras. </td><td>Comienza en Show Stage con Bonnie y Chica, luego se muda a Party Room 3, Party Room 4, Main Hall o Right Air Vent.</td></tr>
59
- <tr><td>Bonnie</td><td>Un conejo azul con una pajarita roja. Es uno de los amigos y compañeros de banda de Freddy. Él es muy activo y agresivo durante todas las noches. Puede ser engañado por la máscara, pero también puede entrar en su oficina a través de cualquiera de los respiraderos de aire. </td><td>Comienza en Show Stage con Freddy y Chica, luego se muda a Party Room 1, Party Room 2, Left Air Vent o Right Air Vent.</td></tr>
60
-
61
- <tr><td>Foxy</td><td>Un zorro rojo con un parche y un gancho para una mano. Es un animatrónico de temática pirata que estaba fuera de servicio en el primer juego. Todavía está roto y dañado, pero todavía puede moverse y atacar. No puede ser engañado por la máscara, pero puede ser detenido por la linterna. Es muy rápido e impredecible, y puede aparecer en cualquier momento. </td><td>Comienza en Piezas/Servicio con los antiguos animatrónicos, luego se mueve a Main Hall, Party Room 3, o The Office.</td></tr>
62
- <tr><td>Mangle</td><td>Un zorro blanco y rosa que originalmente era una nueva versión de Foxy, pero fue destrozado por los niños y se convirtió en un lío destrozado de cables y piezas. Ahora es una atracción para desmontar y volver a unir que puede arrastrarse por el techo y las paredes. No puede ser engañada por la máscara, pero puede ser detenida por la linterna. También es muy rápida e impredecible, y puede aparecer en cualquier momento. </td><td>Ella comienza en Kid’s Cove, luego se muda a Main Hall, Party Room 9, Party Room 4, o Right Air Vent.</td></tr>
63
- <tr><td>Balloon Boy</td><td>Un pequeño animatrónico humano que lleva una camisa rayada roja y azul, pantalones azules, un sombrero de hélice y sostiene un globo y un cartel que dice "¡Globos!". Es un personaje amigable y alegre que no te ataca directamente, pero puede desactivar tu linterna y hacerte vulnerable a Foxy o Mangle. Puede ser engañado por la máscara, pero también puede entrar a su oficina a través de cualquiera de los respiraderos de aire. </td><td>Comienza en el área de juego, luego se mueve a la ventilación de aire izquierda o la ventilación de aire derecha.</td></tr>
64
-
65
- <tr><td>Toy Freddy</td><td>Una nueva versión de Freddy Fazbear que es más moderna y elegante. Es un oso marrón con ojos azules, sombrero negro, corbata de moño y mejillas sonrosadas. Es menos activo y agresivo que el Freddy original, pero todavía puede representar una amenaza. Puede ser engañado por la máscara, pero también puede entrar a su oficina a través de la sala principal. </td><td>Comienza en Show Stage con Toy Bonnie y Toy Chica, luego se mueve a Área de juego, Sala de fiestas 3, Sala principal o La oficina.</td></tr>
66
- <tr><td>Toy Bonnie</td><td>Una nueva versión de Bonnie que es más moderna y elegante. Es un conejo azul con ojos verdes, una pajarita roja y mejillas sonrosadas. Es más activo y agresivo que el Bonnie original, y puede moverse más rápido. Puede ser engañado por la máscara, pero también puede entrar a su oficina a través de la ventilación izquierda. </td><td>Comienza en Show Stage con Toy Freddy y Toy Chica, luego se mueve a Área de Juego, Sala de Fiestas 2, Ventilación de Aire Izquierda o La Oficina.</td></tr>
67
- <tr><td>Toy Chica</td><td>Una nueva versión de Chica que es más moderna y elegante. Ella es una gallina amarilla con ojos morados, un babero rosa que dice "¡Vamos de fiesta!" y mejillas rosadas. También lleva un cupcake en un plato como accesorio. Ella es más activa y agresiva que la Chica original, y puede moverse más rápido. Ella puede ser engañada por la máscara, pero también puede entrar a su oficina a través de la ventilación correcta. </td><td>Ella comienza en Show Stage con Toy Freddy y Toy Bonnie, luego se mueve a Game Área, Party Room 4, Right Air Vent o The Office.</td></tr>
68
- <tr><td>Golden Freddy</td><td>Una versión dorada de Freddy Fazbear que aparece como una alucinación o un fantasma. Es un personaje de huevo de Pascua que no tiene una ubicación o patrón fijo. Puede aparecer aleatoriamente en tu pantalla o en tu oficina, haciendo que tu juego se bloquee o termine. No puede ser engañado por la máscara ni detenido por nada. Es muy raro y difícil de encontrar. </td><td>No tiene una ubicación. Puede aparecer en cualquier lugar en cualquier momento. </td></tr>
69
- </tabla>
70
-
71
- <p>Los consejos y trucos de APK Five Nights at Freddy’s 2 son algunas estrategias y consejos y trucos que pueden ayudar a sobrevivir a la noche y evitar ser asesinado por los animatrónicos. Estos son algunos de ellos:</p>
72
- <ul>
73
- <li>Compruebe las cámaras con frecuencia, pero no demasiado. Necesitas mantener un ojo en los animatrónicos y sus movimientos, pero también necesitas conservar tu poder y evitar atraer su atención. Intenta equilibrar el uso de la cámara y concéntrate en las áreas más importantes, como los respiraderos, la sala principal y la esquina de premios. </li>
74
- Usa la linterna sabiamente, pero no con moderación. Necesitas usar la linterna para ver en la oscuridad y para evitar que Foxy o Mangle te ataquen, pero también necesitas evitar desperdiciar tu energía y alertar a otros animatrónicos. Trata de usar destellos cortos y rápidos de luz y apunta a los ojos de los animatrónicos. </li>
75
- <li>Usa la máscara de manera efectiva, pero no constantemente. Necesitas usar la máscara para engañar a algunos de los animatrónicos y evitar que te asusten, pero también necesitas evitar limitar tu visión y hacerte vulnerable a los demás. Trate de ponerse la máscara tan pronto como vea un animatronic en su oficina o en el respiradero, y sáquelo tan pronto como se vayan. </li>
76
- <li>Enrolla la caja de música regularmente, pero no obsesivamente. Tienes que terminar la caja de música para mantener la calma de la marioneta y evitar que te mate, pero también tienes que evitar descuidar otras tareas y perder la noción del tiempo. Trata de terminar la caja de música cuando tengas la oportunidad, pero no dejes que te distraiga de otras amenazas. </li>
77
- <li>Aprende los patrones y hábitos de cada animatronic, pero no confíes demasiado en ellos. Necesitas aprender cómo se comporta cada animatrónico y lo que hacen, pero también necesitas estar preparado para las sorpresas y los cambios. Trata de memorizar sus rutas y sus debilidades, pero no bajes la guardia ni asumas nada. </li>
78
- </ul>
79
- <h2>Conclusión</h2>
80
-
81
- <p>Si desea jugar APK Five Nights at Freddy’s 2 en su dispositivo, usted tiene que descargar e instalar el archivo APK de una fuente de confianza. También debe habilitar fuentes desconocidas en su dispositivo y seguir las instrucciones cuidadosamente. Una vez instalado el juego, podrás disfrutar jugando y divirtiéndote. </p>
82
- <p>Sin embargo, se advierte: este juego no es para los débiles de corazón o los fácilmente asustados. Este juego es muy desafiante y aterrador, y te hará gritar y saltar de tu asiento. Si usted está buscando una emoción y un desafío, entonces este juego es para usted. Pero si estás buscando un juego relajante y pacífico, entonces este juego no es para ti. </p>
83
- <p>Entonces, ¿estás listo para enfrentar tus miedos y jugar APK Five Nights at Freddy’s 2? Si lo estás, entonces descarga el juego ahora y comienza a jugar. Pero si no lo estás, entonces quizás deberías buscar otro juego. La elección es tuya. </p>
84
- <p>Gracias por leer este artículo. Esperamos que le resulte útil e informativo. Si tiene alguna pregunta o comentario, por favor siéntase libre de dejarlos abajo. Nos encantaría saber de usted. </p>
85
- <h3>Preguntas frecuentes</h3>
86
- <p>Aquí hay algunas preguntas frecuentes sobre APK Five Nights at Freddy’s 2:</p>
87
- <ul>
88
- <li><b>Q: ¿APK Five Nights at Freddy’s 2 es gratis? </b></li>
89
- <li>A: Sí, APK Five Nights at Freddy’s 2 es gratis para descargar y jugar. Sin embargo, puede contener anuncios o compras en la aplicación que requieren dinero real. </li>
90
- <li><b>Q: ¿Es seguro APK Five Nights at Freddy’s 2? </b></li>
91
- <li>A: Sí, APK Five Nights at Freddy’s 2 es seguro si lo descarga de una fuente de confianza. Sin embargo, siempre debe tener cuidado y precaución al descargar e instalar archivos APK de fuentes desconocidas. </li>
92
- <li><b>Q: ¿Es APK Five Nights at Freddy’s 2 compatible con mi dispositivo? </b></li>
93
- <li>A: APK Five Nights at Freddy’s 2 es compatible con la mayoría de los dispositivos Android que se ejecutan en Android 4.1 o superior. Sin embargo, algunos dispositivos pueden experimentar problemas de compatibilidad o rendimiento con el juego. </li>
94
-
95
- <li>A: Golden Freddy es un personaje raro y difícil de deshacerse que puede aparecer aleatoriamente en la pantalla o en la oficina y causar que su juego se bloquee o termine. No hay manera segura de deshacerse de él, pero puede tratar de evitar mirarlo o cambiar rápidamente a otra cámara o ponerse la máscara si lo ve. </li>
96
- <li><b>Q: ¿Cómo puedo desbloquear el modo nocturno personalizado? </b></li>
97
- <li>A: El modo nocturno personalizado es un modo especial que permite personalizar la dificultad y el comportamiento de cada animatronic. Puedes desbloquearlo completando la quinta noche del juego. También puede desbloquear algunas características y desafíos adicionales completando el modo nocturno personalizado con ciertos ajustes preestablecidos. </li>
98
- </ul></p> 64aa2da5cf<br />
99
- <br />
100
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Amor Enredo Mod Apk.md DELETED
@@ -1,50 +0,0 @@
1
-
2
- <h1>Descargar Amor enredo Mod Apk: Un juego de aventura romántica</h1>
3
- <p>¿Te gustan los juegos románticos con hermosos gráficos e historias atractivas? Si es así, entonces deberías probar Love Tangle, un popular juego de aventuras desarrollado por Shall we date? En este juego, puedes crear tu propio avatar, elegir tu interés amoroso entre una variedad de personajes y disfrutar de los emocionantes episodios y finales. Pero lo que si desea desbloquear todas las características y el contenido del juego sin gastar dinero o esperar horas? Bueno, hay una solución para eso: descargar Love Tangle mod apk! </p>
4
- <h2>¿Qué es la maraña del amor? </h2>
5
- <p>Love Tangle es un juego de aventura gratuito que te permite experimentar una historia romántica en un apartamento de lujo. Usted es un nuevo residente en el apartamento, donde se encuentra con muchos personajes atractivos y misteriosos. Puedes interactuar con ellos, coquetear con ellos y enamorarte de ellos. También puedes personalizar tu avatar con varios atuendos, accesorios y peinados. El juego tiene gráficos impresionantes, voz actuando y efectos de sonido que te hacen sentir como si fueras parte de la historia. </p>
6
- <h2>descargar amor enredo mod apk</h2><br /><p><b><b>Download File</b> &mdash; <a href="https://bltlly.com/2v6LbA">https://bltlly.com/2v6LbA</a></b></p><br /><br />
7
- <h3>Características de Love Tangle</h3>
8
- <h4>Personaliza tu avatar</h4>
9
- <p>Puedes crear tu propio avatar eligiendo entre diferentes características faciales, tonos de piel, colores de cabello y estilos. También puedes vestir a tu avatar con ropa, zapatos, joyas y accesorios. Puedes cambiar tu look cuando quieras e impresionar tu interés amoroso con tu estilo. </p>
10
- <h4>Elige tu interés amoroso</h4>
11
- <p>Puedes elegir entre una variedad de personajes hasta la fecha en Love Tangle. Cada personaje tiene su propia personalidad, antecedentes e historia. Puedes conocerlos mejor hablando con ellos, pasando tiempo con ellos y tomando decisiones que afectan tu relación. También puede cambiar entre diferentes caracteres y explorar diferentes rutas y finales. </p>
12
- <h4>Disfruta de la historia y los gráficos</h4>
13
-
14
- <h2>¿Por qué descargar Love Tangle mod apk? </h2>
15
- <p>Love Tangle es un juego divertido y emocionante, pero también tiene algunas limitaciones que pueden afectar su disfrute. Por ejemplo, necesitas gastar monedas o boletos para desbloquear nuevos episodios o finales. También necesitas ver anuncios o esperar horas para recargar tu energía o entradas. Estas restricciones pueden ser frustrantes y molestas, especialmente si quieres progresar más rápido o ver todo el contenido del juego. </p>
16
- <h3>Beneficios de Love Tangle mod apk</h3>
17
- <h4>Desbloquear todos los episodios y finales</h4>
18
- <p>Con Love Tangle mod apk, puede desbloquear todos los episodios y finales del juego sin gastar monedas o entradas. Puede acceder a cualquier episodio o final que desee en cualquier momento que desee. También puedes ver todos los resultados posibles de tus elecciones y descubrir todos los secretos y sorpresas del juego. </p>
19
- <h4>Consigue monedas y billetes ilimitados</h4>
20
- <p>Con Love Tangle mod apk, puede obtener monedas ilimitadas y entradas que se pueden utilizar para comprar artículos o acceder a las características en el juego. Puedes comprar cualquier atuendo o accesorio que quieras para tu avatar o tu interés amoroso. También puede usar tickets para saltar anuncios o acelerar el tiempo de recarga de energía. </p>
21
- <h4>Eliminar anuncios y disfrutar del juego</h4>
22
- <p>Con Love Tangle mod apk, puede eliminar todos los anuncios que interrumpen su juego o te hacen esperar durante horas. Puedes disfrutar del juego sin distracciones ni retrasos. También puedes guardar tus datos y batería al no cargar ni ver anuncios. </p>
23
- <h2>Cómo descargar e instalar Love Tangle mod apk? </h2>
24
- <p>Si usted está interesado en descargar e instalar Love Tangle mod apk, es necesario seguir algunos pasos simples. Estos son los pasos que debes seguir:</p>
25
- <h3>Pasos para descargar e instalar Love Tangle mod apk</h3>
26
- <h4>Descargar el archivo apk mod de una fuente de confianza</h4>
27
-
28
- <h4>Habilitar fuentes desconocidas en su dispositivo</h4>
29
- <p>El segundo paso es habilitar fuentes desconocidas en su dispositivo. Esto es necesario porque el archivo apk mod no es de la tienda oficial de Google Play y su dispositivo puede bloquear su instalación. Para habilitar fuentes desconocidas, vaya a la configuración del dispositivo, luego a la seguridad, luego a fuentes desconocidas y enciéndala. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store.</p>
30
- <p></p>
31
- <h4>Instalar el archivo apk mod y lanzar el juego</h4>
32
- <p>El tercer y último paso es instalar el archivo apk mod y lanzar el juego. Para instalar el archivo apk mod, localizarlo en el administrador de archivos de su dispositivo o carpeta de descargas y toque en él. Siga las instrucciones de la pantalla y espere a que se complete la instalación. Una vez finalizada la instalación, puedes lanzar el juego desde el cajón de tu app o la pantalla de inicio. ¡Disfruta jugando a Love Tangle con todas las funciones de mod! </p>
33
- <h2>Conclusión</h2>
34
- <p>Love Tangle es un juego de aventura romántica que te permite crear tu propio avatar, elegir tu interés amoroso y disfrutar de la historia y los gráficos. Sin embargo, si desea desbloquear todas las características y el contenido del juego sin gastar dinero o esperar horas, usted debe descargar Love Tangle mod apk. Con Love Tangle mod apk, puede desbloquear todos los episodios y finales, obtener monedas ilimitadas y entradas, y eliminar los anuncios y disfrutar del juego. Puede descargar e instalar Love Tangle mod apk siguiendo algunos pasos simples. Descargar Love Tangle mod apk hoy y divertirse! </p>
35
- <h3>Preguntas frecuentes</h3>
36
- <p>Aquí hay algunas preguntas frecuentes sobre Love Tangle mod apk:</p>
37
- <ul>
38
- <li><b>¿Es seguro usar Love Tangle mod apk? </b></li>
39
- <p>Sí, Love Tangle mod apk es seguro de usar siempre y cuando se descarga de una fuente de confianza. Sin embargo, siempre debe tener cuidado al descargar e instalar cualquier aplicación de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo o robar sus datos. </p>
40
- <li><b> ¿El amor enredo mod apk requieren acceso de raíz? </b></li>
41
-
42
- <li><b>Amor enredo mod apk afectar mi progreso original del juego? </b></li>
43
- <p>No, Love Tangle mod apk no afectará a su progreso original del juego, ya que crea una carpeta separada para sus datos. Puedes jugar tanto el juego original como el juego modificado sin ninguna interferencia. </p>
44
- <li><b>¿Puedo actualizar Love Tangle mod apk? </b></li>
45
- <p>Sí, puede actualizar Love Tangle mod apk cada vez que una nueva versión está disponible. Sin embargo, es posible que tenga que desinstalar la versión anterior del apk mod antes de instalar el nuevo. </p>
46
- <li><b>¿Puedo jugar Love Tangle en línea con otros jugadores? </b></li>
47
- <p>No, Love Tangle es un juego fuera de línea que no requiere una conexión a Internet para jugar. Puede reproducirlo en cualquier lugar y en cualquier momento sin preocuparse por el uso de datos o problemas de conectividad. </p>
48
- </ul></p> 64aa2da5cf<br />
49
- <br />
50
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/isoparser.py DELETED
@@ -1,416 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- This module offers a parser for ISO-8601 strings
4
-
5
- It is intended to support all valid date, time and datetime formats per the
6
- ISO-8601 specification.
7
-
8
- ..versionadded:: 2.7.0
9
- """
10
- from datetime import datetime, timedelta, time, date
11
- import calendar
12
- from dateutil import tz
13
-
14
- from functools import wraps
15
-
16
- import re
17
- import six
18
-
19
- __all__ = ["isoparse", "isoparser"]
20
-
21
-
22
- def _takes_ascii(f):
23
- @wraps(f)
24
- def func(self, str_in, *args, **kwargs):
25
- # If it's a stream, read the whole thing
26
- str_in = getattr(str_in, 'read', lambda: str_in)()
27
-
28
- # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
29
- if isinstance(str_in, six.text_type):
30
- # ASCII is the same in UTF-8
31
- try:
32
- str_in = str_in.encode('ascii')
33
- except UnicodeEncodeError as e:
34
- msg = 'ISO-8601 strings should contain only ASCII characters'
35
- six.raise_from(ValueError(msg), e)
36
-
37
- return f(self, str_in, *args, **kwargs)
38
-
39
- return func
40
-
41
-
42
- class isoparser(object):
43
- def __init__(self, sep=None):
44
- """
45
- :param sep:
46
- A single character that separates date and time portions. If
47
- ``None``, the parser will accept any single character.
48
- For strict ISO-8601 adherence, pass ``'T'``.
49
- """
50
- if sep is not None:
51
- if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
52
- raise ValueError('Separator must be a single, non-numeric ' +
53
- 'ASCII character')
54
-
55
- sep = sep.encode('ascii')
56
-
57
- self._sep = sep
58
-
59
- @_takes_ascii
60
- def isoparse(self, dt_str):
61
- """
62
- Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
63
-
64
- An ISO-8601 datetime string consists of a date portion, followed
65
- optionally by a time portion - the date and time portions are separated
66
- by a single character separator, which is ``T`` in the official
67
- standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
68
- combined with a time portion.
69
-
70
- Supported date formats are:
71
-
72
- Common:
73
-
74
- - ``YYYY``
75
- - ``YYYY-MM`` or ``YYYYMM``
76
- - ``YYYY-MM-DD`` or ``YYYYMMDD``
77
-
78
- Uncommon:
79
-
80
- - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
81
- - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
82
-
83
- The ISO week and day numbering follows the same logic as
84
- :func:`datetime.date.isocalendar`.
85
-
86
- Supported time formats are:
87
-
88
- - ``hh``
89
- - ``hh:mm`` or ``hhmm``
90
- - ``hh:mm:ss`` or ``hhmmss``
91
- - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)
92
-
93
- Midnight is a special case for `hh`, as the standard supports both
94
- 00:00 and 24:00 as a representation. The decimal separator can be
95
- either a dot or a comma.
96
-
97
-
98
- .. caution::
99
-
100
- Support for fractional components other than seconds is part of the
101
- ISO-8601 standard, but is not currently implemented in this parser.
102
-
103
- Supported time zone offset formats are:
104
-
105
- - `Z` (UTC)
106
- - `±HH:MM`
107
- - `±HHMM`
108
- - `±HH`
109
-
110
- Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
111
- with the exception of UTC, which will be represented as
112
- :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
113
- as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
114
-
115
- :param dt_str:
116
- A string or stream containing only an ISO-8601 datetime string
117
-
118
- :return:
119
- Returns a :class:`datetime.datetime` representing the string.
120
- Unspecified components default to their lowest value.
121
-
122
- .. warning::
123
-
124
- As of version 2.7.0, the strictness of the parser should not be
125
- considered a stable part of the contract. Any valid ISO-8601 string
126
- that parses correctly with the default settings will continue to
127
- parse correctly in future versions, but invalid strings that
128
- currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
129
- guaranteed to continue failing in future versions if they encode
130
- a valid date.
131
-
132
- .. versionadded:: 2.7.0
133
- """
134
- components, pos = self._parse_isodate(dt_str)
135
-
136
- if len(dt_str) > pos:
137
- if self._sep is None or dt_str[pos:pos + 1] == self._sep:
138
- components += self._parse_isotime(dt_str[pos + 1:])
139
- else:
140
- raise ValueError('String contains unknown ISO components')
141
-
142
- if len(components) > 3 and components[3] == 24:
143
- components[3] = 0
144
- return datetime(*components) + timedelta(days=1)
145
-
146
- return datetime(*components)
147
-
148
- @_takes_ascii
149
- def parse_isodate(self, datestr):
150
- """
151
- Parse the date portion of an ISO string.
152
-
153
- :param datestr:
154
- The string portion of an ISO string, without a separator
155
-
156
- :return:
157
- Returns a :class:`datetime.date` object
158
- """
159
- components, pos = self._parse_isodate(datestr)
160
- if pos < len(datestr):
161
- raise ValueError('String contains unknown ISO ' +
162
- 'components: {!r}'.format(datestr.decode('ascii')))
163
- return date(*components)
164
-
165
- @_takes_ascii
166
- def parse_isotime(self, timestr):
167
- """
168
- Parse the time portion of an ISO string.
169
-
170
- :param timestr:
171
- The time portion of an ISO string, without a separator
172
-
173
- :return:
174
- Returns a :class:`datetime.time` object
175
- """
176
- components = self._parse_isotime(timestr)
177
- if components[0] == 24:
178
- components[0] = 0
179
- return time(*components)
180
-
181
- @_takes_ascii
182
- def parse_tzstr(self, tzstr, zero_as_utc=True):
183
- """
184
- Parse a valid ISO time zone string.
185
-
186
- See :func:`isoparser.isoparse` for details on supported formats.
187
-
188
- :param tzstr:
189
- A string representing an ISO time zone offset
190
-
191
- :param zero_as_utc:
192
- Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
193
-
194
- :return:
195
- Returns :class:`dateutil.tz.tzoffset` for offsets and
196
- :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
197
- specified) offsets equivalent to UTC.
198
- """
199
- return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
200
-
201
- # Constants
202
- _DATE_SEP = b'-'
203
- _TIME_SEP = b':'
204
- _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
205
-
206
- def _parse_isodate(self, dt_str):
207
- try:
208
- return self._parse_isodate_common(dt_str)
209
- except ValueError:
210
- return self._parse_isodate_uncommon(dt_str)
211
-
212
- def _parse_isodate_common(self, dt_str):
213
- len_str = len(dt_str)
214
- components = [1, 1, 1]
215
-
216
- if len_str < 4:
217
- raise ValueError('ISO string too short')
218
-
219
- # Year
220
- components[0] = int(dt_str[0:4])
221
- pos = 4
222
- if pos >= len_str:
223
- return components, pos
224
-
225
- has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
226
- if has_sep:
227
- pos += 1
228
-
229
- # Month
230
- if len_str - pos < 2:
231
- raise ValueError('Invalid common month')
232
-
233
- components[1] = int(dt_str[pos:pos + 2])
234
- pos += 2
235
-
236
- if pos >= len_str:
237
- if has_sep:
238
- return components, pos
239
- else:
240
- raise ValueError('Invalid ISO format')
241
-
242
- if has_sep:
243
- if dt_str[pos:pos + 1] != self._DATE_SEP:
244
- raise ValueError('Invalid separator in ISO string')
245
- pos += 1
246
-
247
- # Day
248
- if len_str - pos < 2:
249
- raise ValueError('Invalid common day')
250
- components[2] = int(dt_str[pos:pos + 2])
251
- return components, pos + 2
252
-
253
- def _parse_isodate_uncommon(self, dt_str):
254
- if len(dt_str) < 4:
255
- raise ValueError('ISO string too short')
256
-
257
- # All ISO formats start with the year
258
- year = int(dt_str[0:4])
259
-
260
- has_sep = dt_str[4:5] == self._DATE_SEP
261
-
262
- pos = 4 + has_sep # Skip '-' if it's there
263
- if dt_str[pos:pos + 1] == b'W':
264
- # YYYY-?Www-?D?
265
- pos += 1
266
- weekno = int(dt_str[pos:pos + 2])
267
- pos += 2
268
-
269
- dayno = 1
270
- if len(dt_str) > pos:
271
- if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
272
- raise ValueError('Inconsistent use of dash separator')
273
-
274
- pos += has_sep
275
-
276
- dayno = int(dt_str[pos:pos + 1])
277
- pos += 1
278
-
279
- base_date = self._calculate_weekdate(year, weekno, dayno)
280
- else:
281
- # YYYYDDD or YYYY-DDD
282
- if len(dt_str) - pos < 3:
283
- raise ValueError('Invalid ordinal day')
284
-
285
- ordinal_day = int(dt_str[pos:pos + 3])
286
- pos += 3
287
-
288
- if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
289
- raise ValueError('Invalid ordinal day' +
290
- ' {} for year {}'.format(ordinal_day, year))
291
-
292
- base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
293
-
294
- components = [base_date.year, base_date.month, base_date.day]
295
- return components, pos
296
-
297
- def _calculate_weekdate(self, year, week, day):
298
- """
299
- Calculate the day of corresponding to the ISO year-week-day calendar.
300
-
301
- This function is effectively the inverse of
302
- :func:`datetime.date.isocalendar`.
303
-
304
- :param year:
305
- The year in the ISO calendar
306
-
307
- :param week:
308
- The week in the ISO calendar - range is [1, 53]
309
-
310
- :param day:
311
- The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
312
-
313
- :return:
314
- Returns a :class:`datetime.date`
315
- """
316
- if not 0 < week < 54:
317
- raise ValueError('Invalid week: {}'.format(week))
318
-
319
- if not 0 < day < 8: # Range is 1-7
320
- raise ValueError('Invalid weekday: {}'.format(day))
321
-
322
- # Get week 1 for the specific year:
323
- jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it
324
- week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
325
-
326
- # Now add the specific number of weeks and days to get what we want
327
- week_offset = (week - 1) * 7 + (day - 1)
328
- return week_1 + timedelta(days=week_offset)
329
-
330
- def _parse_isotime(self, timestr):
331
- len_str = len(timestr)
332
- components = [0, 0, 0, 0, None]
333
- pos = 0
334
- comp = -1
335
-
336
- if len_str < 2:
337
- raise ValueError('ISO time too short')
338
-
339
- has_sep = False
340
-
341
- while pos < len_str and comp < 5:
342
- comp += 1
343
-
344
- if timestr[pos:pos + 1] in b'-+Zz':
345
- # Detect time zone boundary
346
- components[-1] = self._parse_tzstr(timestr[pos:])
347
- pos = len_str
348
- break
349
-
350
- if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
351
- has_sep = True
352
- pos += 1
353
- elif comp == 2 and has_sep:
354
- if timestr[pos:pos+1] != self._TIME_SEP:
355
- raise ValueError('Inconsistent use of colon separator')
356
- pos += 1
357
-
358
- if comp < 3:
359
- # Hour, minute, second
360
- components[comp] = int(timestr[pos:pos + 2])
361
- pos += 2
362
-
363
- if comp == 3:
364
- # Fraction of a second
365
- frac = self._FRACTION_REGEX.match(timestr[pos:])
366
- if not frac:
367
- continue
368
-
369
- us_str = frac.group(1)[:6] # Truncate to microseconds
370
- components[comp] = int(us_str) * 10**(6 - len(us_str))
371
- pos += len(frac.group())
372
-
373
- if pos < len_str:
374
- raise ValueError('Unused components in ISO string')
375
-
376
- if components[0] == 24:
377
- # Standard supports 00:00 and 24:00 as representations of midnight
378
- if any(component != 0 for component in components[1:4]):
379
- raise ValueError('Hour may only be 24 at 24:00:00.000')
380
-
381
- return components
382
-
383
- def _parse_tzstr(self, tzstr, zero_as_utc=True):
384
- if tzstr == b'Z' or tzstr == b'z':
385
- return tz.UTC
386
-
387
- if len(tzstr) not in {3, 5, 6}:
388
- raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
389
-
390
- if tzstr[0:1] == b'-':
391
- mult = -1
392
- elif tzstr[0:1] == b'+':
393
- mult = 1
394
- else:
395
- raise ValueError('Time zone offset requires sign')
396
-
397
- hours = int(tzstr[1:3])
398
- if len(tzstr) == 3:
399
- minutes = 0
400
- else:
401
- minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
402
-
403
- if zero_as_utc and hours == 0 and minutes == 0:
404
- return tz.UTC
405
- else:
406
- if minutes > 59:
407
- raise ValueError('Invalid minutes in time zone offset')
408
-
409
- if hours > 23:
410
- raise ValueError('Invalid hours in time zone offset')
411
-
412
- return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
413
-
414
-
415
- DEFAULT_ISOPARSER = isoparser()
416
- isoparse = DEFAULT_ISOPARSER.isoparse
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Bokanovskii/Image-to-music/style.css DELETED
@@ -1,42 +0,0 @@
1
- #col-container {
2
- max-width: 510px;
3
- margin-left: auto;
4
- margin-right: auto;
5
- }
6
- a {
7
- text-decoration-line: underline;
8
- font-weight: 600;
9
- }
10
- div#app-output .h-full {
11
- min-height: 5rem;
12
- }
13
- .footer {
14
- margin-bottom: 45px;
15
- margin-top: 10px;
16
- text-align: center;
17
- border-bottom: 1px solid #e5e5e5;
18
- }
19
- .footer > p {
20
- font-size: 0.8rem;
21
- display: inline-block;
22
- padding: 0 10px;
23
- transform: translateY(10px);
24
- background: white;
25
- }
26
- .dark .footer {
27
- border-color: #303030;
28
- }
29
- .dark .footer > p {
30
- background: #0b0f19;
31
- }
32
- .animate-spin {
33
- animation: spin 1s linear infinite;
34
- }
35
- @keyframes spin {
36
- from {
37
- transform: rotate(0deg);
38
- }
39
- to {
40
- transform: rotate(360deg);
41
- }
42
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis.py DELETED
@@ -1,209 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- import os
4
- from fvcore.common.file_io import PathManager
5
- from fvcore.common.timer import Timer
6
-
7
- from detectron2.data import DatasetCatalog, MetadataCatalog
8
- from detectron2.structures import BoxMode
9
-
10
- from .builtin_meta import _get_coco_instances_meta
11
- from .lvis_v0_5_categories import LVIS_CATEGORIES
12
-
13
- """
14
- This file contains functions to parse LVIS-format annotations into dicts in the
15
- "Detectron2 format".
16
- """
17
-
18
- logger = logging.getLogger(__name__)
19
-
20
- __all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
21
-
22
-
23
- def register_lvis_instances(name, metadata, json_file, image_root):
24
- """
25
- Register a dataset in LVIS's json annotation format for instance detection and segmentation.
26
-
27
- Args:
28
- name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
29
- metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
30
- json_file (str): path to the json instance annotation file.
31
- image_root (str or path-like): directory which contains all the images.
32
- """
33
- DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
34
- MetadataCatalog.get(name).set(
35
- json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
36
- )
37
-
38
-
39
- def load_lvis_json(json_file, image_root, dataset_name=None):
40
- """
41
- Load a json file in LVIS's annotation format.
42
-
43
- Args:
44
- json_file (str): full path to the LVIS json annotation file.
45
- image_root (str): the directory where the images in this json file exists.
46
- dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
47
- If provided, this function will put "thing_classes" into the metadata
48
- associated with this dataset.
49
-
50
- Returns:
51
- list[dict]: a list of dicts in Detectron2 standard format. (See
52
- `Using Custom Datasets </tutorials/datasets.html>`_ )
53
-
54
- Notes:
55
- 1. This function does not read the image files.
56
- The results do not have the "image" field.
57
- """
58
- from lvis import LVIS
59
-
60
- json_file = PathManager.get_local_path(json_file)
61
-
62
- timer = Timer()
63
- lvis_api = LVIS(json_file)
64
- if timer.seconds() > 1:
65
- logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
66
-
67
- if dataset_name is not None:
68
- meta = get_lvis_instances_meta(dataset_name)
69
- MetadataCatalog.get(dataset_name).set(**meta)
70
-
71
- # sort indices for reproducible results
72
- img_ids = sorted(lvis_api.imgs.keys())
73
- # imgs is a list of dicts, each looks something like:
74
- # {'license': 4,
75
- # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
76
- # 'file_name': 'COCO_val2014_000000001268.jpg',
77
- # 'height': 427,
78
- # 'width': 640,
79
- # 'date_captured': '2013-11-17 05:57:24',
80
- # 'id': 1268}
81
- imgs = lvis_api.load_imgs(img_ids)
82
- # anns is a list[list[dict]], where each dict is an annotation
83
- # record for an object. The inner list enumerates the objects in an image
84
- # and the outer list enumerates over images. Example of anns[0]:
85
- # [{'segmentation': [[192.81,
86
- # 247.09,
87
- # ...
88
- # 219.03,
89
- # 249.06]],
90
- # 'area': 1035.749,
91
- # 'image_id': 1268,
92
- # 'bbox': [192.81, 224.8, 74.73, 33.43],
93
- # 'category_id': 16,
94
- # 'id': 42986},
95
- # ...]
96
- anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
97
-
98
- # Sanity check that each annotation has a unique id
99
- ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
100
- assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
101
- json_file
102
- )
103
-
104
- imgs_anns = list(zip(imgs, anns))
105
-
106
- logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
107
-
108
- dataset_dicts = []
109
-
110
- for (img_dict, anno_dict_list) in imgs_anns:
111
- record = {}
112
- file_name = img_dict["file_name"]
113
- if img_dict["file_name"].startswith("COCO"):
114
- # Convert form the COCO 2014 file naming convention of
115
- # COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming convention of
116
- # 000000000000.jpg (LVIS v1 will fix this naming issue)
117
- file_name = file_name[-16:]
118
- record["file_name"] = os.path.join(image_root, file_name)
119
- record["height"] = img_dict["height"]
120
- record["width"] = img_dict["width"]
121
- record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
122
- record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
123
- image_id = record["image_id"] = img_dict["id"]
124
-
125
- objs = []
126
- for anno in anno_dict_list:
127
- # Check that the image_id in this annotation is the same as
128
- # the image_id we're looking at.
129
- # This fails only when the data parsing logic or the annotation file is buggy.
130
- assert anno["image_id"] == image_id
131
- obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
132
- obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed
133
- segm = anno["segmentation"] # list[list[float]]
134
- # filter out invalid polygons (< 3 points)
135
- valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
136
- assert len(segm) == len(
137
- valid_segm
138
- ), "Annotation contains an invalid polygon with < 3 points"
139
- assert len(segm) > 0
140
- obj["segmentation"] = segm
141
- objs.append(obj)
142
- record["annotations"] = objs
143
- dataset_dicts.append(record)
144
-
145
- return dataset_dicts
146
-
147
-
148
- def get_lvis_instances_meta(dataset_name):
149
- """
150
- Load LVIS metadata.
151
-
152
- Args:
153
- dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
154
-
155
- Returns:
156
- dict: LVIS metadata with keys: thing_classes
157
- """
158
- if "cocofied" in dataset_name:
159
- return _get_coco_instances_meta()
160
- if "v0.5" in dataset_name:
161
- return _get_lvis_instances_meta_v0_5()
162
- # There will be a v1 in the future
163
- # elif dataset_name == "lvis_v1":
164
- # return get_lvis_instances_meta_v1()
165
- raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
166
-
167
-
168
- def _get_lvis_instances_meta_v0_5():
169
- assert len(LVIS_CATEGORIES) == 1230
170
- cat_ids = [k["id"] for k in LVIS_CATEGORIES]
171
- assert min(cat_ids) == 1 and max(cat_ids) == len(
172
- cat_ids
173
- ), "Category ids are not in [1, #categories], as expected"
174
- # Ensure that the category list is sorted by id
175
- lvis_categories = sorted(LVIS_CATEGORIES, key=lambda x: x["id"])
176
- thing_classes = [k["synonyms"][0] for k in lvis_categories]
177
- meta = {"thing_classes": thing_classes}
178
- return meta
179
-
180
-
181
- if __name__ == "__main__":
182
- """
183
- Test the LVIS json dataset loader.
184
-
185
- Usage:
186
- python -m detectron2.data.datasets.lvis \
187
- path/to/json path/to/image_root dataset_name vis_limit
188
- """
189
- import sys
190
- import numpy as np
191
- from detectron2.utils.logger import setup_logger
192
- from PIL import Image
193
- import detectron2.data.datasets # noqa # add pre-defined metadata
194
- from detectron2.utils.visualizer import Visualizer
195
-
196
- logger = setup_logger(name=__name__)
197
- meta = MetadataCatalog.get(sys.argv[3])
198
-
199
- dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
200
- logger.info("Done loading {} samples.".format(len(dicts)))
201
-
202
- dirname = "lvis-data-vis"
203
- os.makedirs(dirname, exist_ok=True)
204
- for d in dicts[: int(sys.argv[4])]:
205
- img = np.array(Image.open(d["file_name"]))
206
- visualizer = Visualizer(img, metadata=meta)
207
- vis = visualizer.draw_dataset_dict(d)
208
- fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
209
- vis.save(fpath)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/tests/test_sequences_and_iterators.py DELETED
@@ -1,191 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import pytest
3
- from pybind11_tests import sequences_and_iterators as m
4
- from pybind11_tests import ConstructorStats
5
-
6
-
7
- def isclose(a, b, rel_tol=1e-05, abs_tol=0.0):
8
- """Like math.isclose() from Python 3.5"""
9
- return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
10
-
11
-
12
- def allclose(a_list, b_list, rel_tol=1e-05, abs_tol=0.0):
13
- return all(isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) for a, b in zip(a_list, b_list))
14
-
15
-
16
- def test_generalized_iterators():
17
- assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)]
18
- assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)]
19
- assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == []
20
-
21
- assert list(m.IntPairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3]
22
- assert list(m.IntPairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1]
23
- assert list(m.IntPairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == []
24
-
25
- # __next__ must continue to raise StopIteration
26
- it = m.IntPairs([(0, 0)]).nonzero()
27
- for _ in range(3):
28
- with pytest.raises(StopIteration):
29
- next(it)
30
-
31
- it = m.IntPairs([(0, 0)]).nonzero_keys()
32
- for _ in range(3):
33
- with pytest.raises(StopIteration):
34
- next(it)
35
-
36
-
37
- def test_sliceable():
38
- sliceable = m.Sliceable(100)
39
- assert sliceable[::] == (0, 100, 1)
40
- assert sliceable[10::] == (10, 100, 1)
41
- assert sliceable[:10:] == (0, 10, 1)
42
- assert sliceable[::10] == (0, 100, 10)
43
- assert sliceable[-10::] == (90, 100, 1)
44
- assert sliceable[:-10:] == (0, 90, 1)
45
- assert sliceable[::-10] == (99, -1, -10)
46
- assert sliceable[50:60:1] == (50, 60, 1)
47
- assert sliceable[50:60:-1] == (50, 60, -1)
48
-
49
-
50
- def test_sequence():
51
- cstats = ConstructorStats.get(m.Sequence)
52
-
53
- s = m.Sequence(5)
54
- assert cstats.values() == ['of size', '5']
55
-
56
- assert "Sequence" in repr(s)
57
- assert len(s) == 5
58
- assert s[0] == 0 and s[3] == 0
59
- assert 12.34 not in s
60
- s[0], s[3] = 12.34, 56.78
61
- assert 12.34 in s
62
- assert isclose(s[0], 12.34) and isclose(s[3], 56.78)
63
-
64
- rev = reversed(s)
65
- assert cstats.values() == ['of size', '5']
66
-
67
- rev2 = s[::-1]
68
- assert cstats.values() == ['of size', '5']
69
-
70
- it = iter(m.Sequence(0))
71
- for _ in range(3): # __next__ must continue to raise StopIteration
72
- with pytest.raises(StopIteration):
73
- next(it)
74
- assert cstats.values() == ['of size', '0']
75
-
76
- expected = [0, 56.78, 0, 0, 12.34]
77
- assert allclose(rev, expected)
78
- assert allclose(rev2, expected)
79
- assert rev == rev2
80
-
81
- rev[0::2] = m.Sequence([2.0, 2.0, 2.0])
82
- assert cstats.values() == ['of size', '3', 'from std::vector']
83
-
84
- assert allclose(rev, [2, 56.78, 2, 0, 2])
85
-
86
- assert cstats.alive() == 4
87
- del it
88
- assert cstats.alive() == 3
89
- del s
90
- assert cstats.alive() == 2
91
- del rev
92
- assert cstats.alive() == 1
93
- del rev2
94
- assert cstats.alive() == 0
95
-
96
- assert cstats.values() == []
97
- assert cstats.default_constructions == 0
98
- assert cstats.copy_constructions == 0
99
- assert cstats.move_constructions >= 1
100
- assert cstats.copy_assignments == 0
101
- assert cstats.move_assignments == 0
102
-
103
-
104
- def test_sequence_length():
105
- """#2076: Exception raised by len(arg) should be propagated """
106
- class BadLen(RuntimeError):
107
- pass
108
-
109
- class SequenceLike():
110
- def __getitem__(self, i):
111
- return None
112
-
113
- def __len__(self):
114
- raise BadLen()
115
-
116
- with pytest.raises(BadLen):
117
- m.sequence_length(SequenceLike())
118
-
119
- assert m.sequence_length([1, 2, 3]) == 3
120
- assert m.sequence_length("hello") == 5
121
-
122
-
123
- def test_map_iterator():
124
- sm = m.StringMap({'hi': 'bye', 'black': 'white'})
125
- assert sm['hi'] == 'bye'
126
- assert len(sm) == 2
127
- assert sm['black'] == 'white'
128
-
129
- with pytest.raises(KeyError):
130
- assert sm['orange']
131
- sm['orange'] = 'banana'
132
- assert sm['orange'] == 'banana'
133
-
134
- expected = {'hi': 'bye', 'black': 'white', 'orange': 'banana'}
135
- for k in sm:
136
- assert sm[k] == expected[k]
137
- for k, v in sm.items():
138
- assert v == expected[k]
139
-
140
- it = iter(m.StringMap({}))
141
- for _ in range(3): # __next__ must continue to raise StopIteration
142
- with pytest.raises(StopIteration):
143
- next(it)
144
-
145
-
146
- def test_python_iterator_in_cpp():
147
- t = (1, 2, 3)
148
- assert m.object_to_list(t) == [1, 2, 3]
149
- assert m.object_to_list(iter(t)) == [1, 2, 3]
150
- assert m.iterator_to_list(iter(t)) == [1, 2, 3]
151
-
152
- with pytest.raises(TypeError) as excinfo:
153
- m.object_to_list(1)
154
- assert "object is not iterable" in str(excinfo.value)
155
-
156
- with pytest.raises(TypeError) as excinfo:
157
- m.iterator_to_list(1)
158
- assert "incompatible function arguments" in str(excinfo.value)
159
-
160
- def bad_next_call():
161
- raise RuntimeError("py::iterator::advance() should propagate errors")
162
-
163
- with pytest.raises(RuntimeError) as excinfo:
164
- m.iterator_to_list(iter(bad_next_call, None))
165
- assert str(excinfo.value) == "py::iterator::advance() should propagate errors"
166
-
167
- lst = [1, None, 0, None]
168
- assert m.count_none(lst) == 2
169
- assert m.find_none(lst) is True
170
- assert m.count_nonzeros({"a": 0, "b": 1, "c": 2}) == 2
171
-
172
- r = range(5)
173
- assert all(m.tuple_iterator(tuple(r)))
174
- assert all(m.list_iterator(list(r)))
175
- assert all(m.sequence_iterator(r))
176
-
177
-
178
- def test_iterator_passthrough():
179
- """#181: iterator passthrough did not compile"""
180
- from pybind11_tests.sequences_and_iterators import iterator_passthrough
181
-
182
- assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15]
183
-
184
-
185
- def test_iterator_rvp():
186
- """#388: Can't make iterators via make_iterator() with different r/v policies """
187
- import pybind11_tests.sequences_and_iterators as m
188
-
189
- assert list(m.make_iterator_1()) == [1, 2, 3]
190
- assert list(m.make_iterator_2()) == [1, 2, 3]
191
- assert not isinstance(m.make_iterator_1(), type(m.make_iterator_2()))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/__init__.py DELETED
File without changes
spaces/CVPR/regionclip-demo/detectron2/layers/csrc/cocoeval/cocoeval.cpp DELETED
@@ -1,507 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates.
2
- #include "cocoeval.h"
3
- #include <time.h>
4
- #include <algorithm>
5
- #include <cstdint>
6
- #include <numeric>
7
-
8
- using namespace pybind11::literals;
9
-
10
- namespace detectron2 {
11
-
12
- namespace COCOeval {
13
-
14
- // Sort detections from highest score to lowest, such that
15
- // detection_instances[detection_sorted_indices[t]] >=
16
- // detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match
17
- // original COCO API
18
- void SortInstancesByDetectionScore(
19
- const std::vector<InstanceAnnotation>& detection_instances,
20
- std::vector<uint64_t>* detection_sorted_indices) {
21
- detection_sorted_indices->resize(detection_instances.size());
22
- std::iota(
23
- detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
24
- std::stable_sort(
25
- detection_sorted_indices->begin(),
26
- detection_sorted_indices->end(),
27
- [&detection_instances](size_t j1, size_t j2) {
28
- return detection_instances[j1].score > detection_instances[j2].score;
29
- });
30
- }
31
-
32
- // Partition the ground truth objects based on whether or not to ignore them
33
- // based on area
34
- void SortInstancesByIgnore(
35
- const std::array<double, 2>& area_range,
36
- const std::vector<InstanceAnnotation>& ground_truth_instances,
37
- std::vector<uint64_t>* ground_truth_sorted_indices,
38
- std::vector<bool>* ignores) {
39
- ignores->clear();
40
- ignores->reserve(ground_truth_instances.size());
41
- for (auto o : ground_truth_instances) {
42
- ignores->push_back(
43
- o.ignore || o.area < area_range[0] || o.area > area_range[1]);
44
- }
45
-
46
- ground_truth_sorted_indices->resize(ground_truth_instances.size());
47
- std::iota(
48
- ground_truth_sorted_indices->begin(),
49
- ground_truth_sorted_indices->end(),
50
- 0);
51
- std::stable_sort(
52
- ground_truth_sorted_indices->begin(),
53
- ground_truth_sorted_indices->end(),
54
- [&ignores](size_t j1, size_t j2) {
55
- return (int)(*ignores)[j1] < (int)(*ignores)[j2];
56
- });
57
- }
58
-
59
- // For each IOU threshold, greedily match each detected instance to a ground
60
- // truth instance (if possible) and store the results
61
- void MatchDetectionsToGroundTruth(
62
- const std::vector<InstanceAnnotation>& detection_instances,
63
- const std::vector<uint64_t>& detection_sorted_indices,
64
- const std::vector<InstanceAnnotation>& ground_truth_instances,
65
- const std::vector<uint64_t>& ground_truth_sorted_indices,
66
- const std::vector<bool>& ignores,
67
- const std::vector<std::vector<double>>& ious,
68
- const std::vector<double>& iou_thresholds,
69
- const std::array<double, 2>& area_range,
70
- ImageEvaluation* results) {
71
- // Initialize memory to store return data matches and ignore
72
- const int num_iou_thresholds = iou_thresholds.size();
73
- const int num_ground_truth = ground_truth_sorted_indices.size();
74
- const int num_detections = detection_sorted_indices.size();
75
- std::vector<uint64_t> ground_truth_matches(
76
- num_iou_thresholds * num_ground_truth, 0);
77
- std::vector<uint64_t>& detection_matches = results->detection_matches;
78
- std::vector<bool>& detection_ignores = results->detection_ignores;
79
- std::vector<bool>& ground_truth_ignores = results->ground_truth_ignores;
80
- detection_matches.resize(num_iou_thresholds * num_detections, 0);
81
- detection_ignores.resize(num_iou_thresholds * num_detections, false);
82
- ground_truth_ignores.resize(num_ground_truth);
83
- for (auto g = 0; g < num_ground_truth; ++g) {
84
- ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];
85
- }
86
-
87
- for (auto t = 0; t < num_iou_thresholds; ++t) {
88
- for (auto d = 0; d < num_detections; ++d) {
89
- // information about best match so far (match=-1 -> unmatched)
90
- double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);
91
- int match = -1;
92
- for (auto g = 0; g < num_ground_truth; ++g) {
93
- // if this ground truth instance is already matched and not a
94
- // crowd, it cannot be matched to another detection
95
- if (ground_truth_matches[t * num_ground_truth + g] > 0 &&
96
- !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {
97
- continue;
98
- }
99
-
100
- // if detected instance matched to a regular ground truth
101
- // instance, we can break on the first ground truth instance
102
- // tagged as ignore (because they are sorted by the ignore tag)
103
- if (match >= 0 && !ground_truth_ignores[match] &&
104
- ground_truth_ignores[g]) {
105
- break;
106
- }
107
-
108
- // if IOU overlap is the best so far, store the match appropriately
109
- if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {
110
- best_iou = ious[d][ground_truth_sorted_indices[g]];
111
- match = g;
112
- }
113
- }
114
- // if match was made, store id of match for both detection and
115
- // ground truth
116
- if (match >= 0) {
117
- detection_ignores[t * num_detections + d] = ground_truth_ignores[match];
118
- detection_matches[t * num_detections + d] =
119
- ground_truth_instances[ground_truth_sorted_indices[match]].id;
120
- ground_truth_matches[t * num_ground_truth + match] =
121
- detection_instances[detection_sorted_indices[d]].id;
122
- }
123
-
124
- // set unmatched detections outside of area range to ignore
125
- const InstanceAnnotation& detection =
126
- detection_instances[detection_sorted_indices[d]];
127
- detection_ignores[t * num_detections + d] =
128
- detection_ignores[t * num_detections + d] ||
129
- (detection_matches[t * num_detections + d] == 0 &&
130
- (detection.area < area_range[0] || detection.area > area_range[1]));
131
- }
132
- }
133
-
134
- // store detection score results
135
- results->detection_scores.resize(detection_sorted_indices.size());
136
- for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {
137
- results->detection_scores[d] =
138
- detection_instances[detection_sorted_indices[d]].score;
139
- }
140
- }
141
-
142
- std::vector<ImageEvaluation> EvaluateImages(
143
- const std::vector<std::array<double, 2>>& area_ranges,
144
- int max_detections,
145
- const std::vector<double>& iou_thresholds,
146
- const ImageCategoryInstances<std::vector<double>>& image_category_ious,
147
- const ImageCategoryInstances<InstanceAnnotation>&
148
- image_category_ground_truth_instances,
149
- const ImageCategoryInstances<InstanceAnnotation>&
150
- image_category_detection_instances) {
151
- const int num_area_ranges = area_ranges.size();
152
- const int num_images = image_category_ground_truth_instances.size();
153
- const int num_categories =
154
- image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;
155
- std::vector<uint64_t> detection_sorted_indices;
156
- std::vector<uint64_t> ground_truth_sorted_indices;
157
- std::vector<bool> ignores;
158
- std::vector<ImageEvaluation> results_all(
159
- num_images * num_area_ranges * num_categories);
160
-
161
- // Store results for each image, category, and area range combination. Results
162
- // for each IOU threshold are packed into the same ImageEvaluation object
163
- for (auto i = 0; i < num_images; ++i) {
164
- for (auto c = 0; c < num_categories; ++c) {
165
- const std::vector<InstanceAnnotation>& ground_truth_instances =
166
- image_category_ground_truth_instances[i][c];
167
- const std::vector<InstanceAnnotation>& detection_instances =
168
- image_category_detection_instances[i][c];
169
-
170
- SortInstancesByDetectionScore(
171
- detection_instances, &detection_sorted_indices);
172
- if ((int)detection_sorted_indices.size() > max_detections) {
173
- detection_sorted_indices.resize(max_detections);
174
- }
175
-
176
- for (size_t a = 0; a < area_ranges.size(); ++a) {
177
- SortInstancesByIgnore(
178
- area_ranges[a],
179
- ground_truth_instances,
180
- &ground_truth_sorted_indices,
181
- &ignores);
182
-
183
- MatchDetectionsToGroundTruth(
184
- detection_instances,
185
- detection_sorted_indices,
186
- ground_truth_instances,
187
- ground_truth_sorted_indices,
188
- ignores,
189
- image_category_ious[i][c],
190
- iou_thresholds,
191
- area_ranges[a],
192
- &results_all
193
- [c * num_area_ranges * num_images + a * num_images + i]);
194
- }
195
- }
196
- }
197
-
198
- return results_all;
199
- }
200
-
201
- // Convert a python list to a vector
202
- template <typename T>
203
- std::vector<T> list_to_vec(const py::list& l) {
204
- std::vector<T> v(py::len(l));
205
- for (int i = 0; i < (int)py::len(l); ++i) {
206
- v[i] = l[i].cast<T>();
207
- }
208
- return v;
209
- }
210
-
211
- // Helper function to Accumulate()
212
- // Considers the evaluation results applicable to a particular category, area
213
- // range, and max_detections parameter setting, which begin at
214
- // evaluations[evaluation_index]. Extracts a sorted list of length n of all
215
- // applicable detection instances concatenated across all images in the dataset,
216
- // which are represented by the outputs evaluation_indices, detection_scores,
217
- // image_detection_indices, and detection_sorted_indices--all of which are
218
- // length n. evaluation_indices[i] stores the applicable index into
219
- // evaluations[] for instance i, which has detection score detection_score[i],
220
- // and is the image_detection_indices[i]'th of the list of detections
221
- // for the image containing i. detection_sorted_indices[] defines a sorted
222
- // permutation of the 3 other outputs
223
- int BuildSortedDetectionList(
224
- const std::vector<ImageEvaluation>& evaluations,
225
- const int64_t evaluation_index,
226
- const int64_t num_images,
227
- const int max_detections,
228
- std::vector<uint64_t>* evaluation_indices,
229
- std::vector<double>* detection_scores,
230
- std::vector<uint64_t>* detection_sorted_indices,
231
- std::vector<uint64_t>* image_detection_indices) {
232
- assert(evaluations.size() >= evaluation_index + num_images);
233
-
234
- // Extract a list of object instances of the applicable category, area
235
- // range, and max detections requirements such that they can be sorted
236
- image_detection_indices->clear();
237
- evaluation_indices->clear();
238
- detection_scores->clear();
239
- image_detection_indices->reserve(num_images * max_detections);
240
- evaluation_indices->reserve(num_images * max_detections);
241
- detection_scores->reserve(num_images * max_detections);
242
- int num_valid_ground_truth = 0;
243
- for (auto i = 0; i < num_images; ++i) {
244
- const ImageEvaluation& evaluation = evaluations[evaluation_index + i];
245
-
246
- for (int d = 0;
247
- d < (int)evaluation.detection_scores.size() && d < max_detections;
248
- ++d) { // detected instances
249
- evaluation_indices->push_back(evaluation_index + i);
250
- image_detection_indices->push_back(d);
251
- detection_scores->push_back(evaluation.detection_scores[d]);
252
- }
253
- for (auto ground_truth_ignore : evaluation.ground_truth_ignores) {
254
- if (!ground_truth_ignore) {
255
- ++num_valid_ground_truth;
256
- }
257
- }
258
- }
259
-
260
- // Sort detections by decreasing score, using stable sort to match
261
- // python implementation
262
- detection_sorted_indices->resize(detection_scores->size());
263
- std::iota(
264
- detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
265
- std::stable_sort(
266
- detection_sorted_indices->begin(),
267
- detection_sorted_indices->end(),
268
- [&detection_scores](size_t j1, size_t j2) {
269
- return (*detection_scores)[j1] > (*detection_scores)[j2];
270
- });
271
-
272
- return num_valid_ground_truth;
273
- }
274
-
275
- // Helper function to Accumulate()
276
- // Compute a precision recall curve given a sorted list of detected instances
277
- // encoded in evaluations, evaluation_indices, detection_scores,
278
- // detection_sorted_indices, image_detection_indices (see
279
- // BuildSortedDetectionList()). Using vectors precisions and recalls
280
- // and temporary storage, output the results into precisions_out, recalls_out,
281
- // and scores_out, which are large buffers containing many precion/recall curves
282
- // for all possible parameter settings, with precisions_out_index and
283
- // recalls_out_index defining the applicable indices to store results.
284
- void ComputePrecisionRecallCurve(
285
- const int64_t precisions_out_index,
286
- const int64_t precisions_out_stride,
287
- const int64_t recalls_out_index,
288
- const std::vector<double>& recall_thresholds,
289
- const int iou_threshold_index,
290
- const int num_iou_thresholds,
291
- const int num_valid_ground_truth,
292
- const std::vector<ImageEvaluation>& evaluations,
293
- const std::vector<uint64_t>& evaluation_indices,
294
- const std::vector<double>& detection_scores,
295
- const std::vector<uint64_t>& detection_sorted_indices,
296
- const std::vector<uint64_t>& image_detection_indices,
297
- std::vector<double>* precisions,
298
- std::vector<double>* recalls,
299
- std::vector<double>* precisions_out,
300
- std::vector<double>* scores_out,
301
- std::vector<double>* recalls_out) {
302
- assert(recalls_out->size() > recalls_out_index);
303
-
304
- // Compute precision/recall for each instance in the sorted list of detections
305
- int64_t true_positives_sum = 0, false_positives_sum = 0;
306
- precisions->clear();
307
- recalls->clear();
308
- precisions->reserve(detection_sorted_indices.size());
309
- recalls->reserve(detection_sorted_indices.size());
310
- assert(!evaluations.empty() || detection_sorted_indices.empty());
311
- for (auto detection_sorted_index : detection_sorted_indices) {
312
- const ImageEvaluation& evaluation =
313
- evaluations[evaluation_indices[detection_sorted_index]];
314
- const auto num_detections =
315
- evaluation.detection_matches.size() / num_iou_thresholds;
316
- const auto detection_index = iou_threshold_index * num_detections +
317
- image_detection_indices[detection_sorted_index];
318
- assert(evaluation.detection_matches.size() > detection_index);
319
- assert(evaluation.detection_ignores.size() > detection_index);
320
- const int64_t detection_match =
321
- evaluation.detection_matches[detection_index];
322
- const bool detection_ignores =
323
- evaluation.detection_ignores[detection_index];
324
- const auto true_positive = detection_match > 0 && !detection_ignores;
325
- const auto false_positive = detection_match == 0 && !detection_ignores;
326
- if (true_positive) {
327
- ++true_positives_sum;
328
- }
329
- if (false_positive) {
330
- ++false_positives_sum;
331
- }
332
-
333
- const double recall =
334
- static_cast<double>(true_positives_sum) / num_valid_ground_truth;
335
- recalls->push_back(recall);
336
- const int64_t num_valid_detections =
337
- true_positives_sum + false_positives_sum;
338
- const double precision = num_valid_detections > 0
339
- ? static_cast<double>(true_positives_sum) / num_valid_detections
340
- : 0.0;
341
- precisions->push_back(precision);
342
- }
343
-
344
- (*recalls_out)[recalls_out_index] = !recalls->empty() ? recalls->back() : 0;
345
-
346
- for (int64_t i = static_cast<int64_t>(precisions->size()) - 1; i > 0; --i) {
347
- if ((*precisions)[i] > (*precisions)[i - 1]) {
348
- (*precisions)[i - 1] = (*precisions)[i];
349
- }
350
- }
351
-
352
- // Sample the per instance precision/recall list at each recall threshold
353
- for (size_t r = 0; r < recall_thresholds.size(); ++r) {
354
- // first index in recalls >= recall_thresholds[r]
355
- std::vector<double>::iterator low = std::lower_bound(
356
- recalls->begin(), recalls->end(), recall_thresholds[r]);
357
- size_t precisions_index = low - recalls->begin();
358
-
359
- const auto results_ind = precisions_out_index + r * precisions_out_stride;
360
- assert(results_ind < precisions_out->size());
361
- assert(results_ind < scores_out->size());
362
- if (precisions_index < precisions->size()) {
363
- (*precisions_out)[results_ind] = (*precisions)[precisions_index];
364
- (*scores_out)[results_ind] =
365
- detection_scores[detection_sorted_indices[precisions_index]];
366
- } else {
367
- (*precisions_out)[results_ind] = 0;
368
- (*scores_out)[results_ind] = 0;
369
- }
370
- }
371
- }
372
- py::dict Accumulate(
373
- const py::object& params,
374
- const std::vector<ImageEvaluation>& evaluations) {
375
- const std::vector<double> recall_thresholds =
376
- list_to_vec<double>(params.attr("recThrs"));
377
- const std::vector<int> max_detections =
378
- list_to_vec<int>(params.attr("maxDets"));
379
- const int num_iou_thresholds = py::len(params.attr("iouThrs"));
380
- const int num_recall_thresholds = py::len(params.attr("recThrs"));
381
- const int num_categories = params.attr("useCats").cast<int>() == 1
382
- ? py::len(params.attr("catIds"))
383
- : 1;
384
- const int num_area_ranges = py::len(params.attr("areaRng"));
385
- const int num_max_detections = py::len(params.attr("maxDets"));
386
- const int num_images = py::len(params.attr("imgIds"));
387
-
388
- std::vector<double> precisions_out(
389
- num_iou_thresholds * num_recall_thresholds * num_categories *
390
- num_area_ranges * num_max_detections,
391
- -1);
392
- std::vector<double> recalls_out(
393
- num_iou_thresholds * num_categories * num_area_ranges *
394
- num_max_detections,
395
- -1);
396
- std::vector<double> scores_out(
397
- num_iou_thresholds * num_recall_thresholds * num_categories *
398
- num_area_ranges * num_max_detections,
399
- -1);
400
-
401
- // Consider the list of all detected instances in the entire dataset in one
402
- // large list. evaluation_indices, detection_scores,
403
- // image_detection_indices, and detection_sorted_indices all have the same
404
- // length as this list, such that each entry corresponds to one detected
405
- // instance
406
- std::vector<uint64_t> evaluation_indices; // indices into evaluations[]
407
- std::vector<double> detection_scores; // detection scores of each instance
408
- std::vector<uint64_t> detection_sorted_indices; // sorted indices of all
409
- // instances in the dataset
410
- std::vector<uint64_t>
411
- image_detection_indices; // indices into the list of detected instances in
412
- // the same image as each instance
413
- std::vector<double> precisions, recalls;
414
-
415
- for (auto c = 0; c < num_categories; ++c) {
416
- for (auto a = 0; a < num_area_ranges; ++a) {
417
- for (auto m = 0; m < num_max_detections; ++m) {
418
- // The COCO PythonAPI assumes evaluations[] (the return value of
419
- // COCOeval::EvaluateImages() is one long list storing results for each
420
- // combination of category, area range, and image id, with categories in
421
- // the outermost loop and images in the innermost loop.
422
- const int64_t evaluations_index =
423
- c * num_area_ranges * num_images + a * num_images;
424
- int num_valid_ground_truth = BuildSortedDetectionList(
425
- evaluations,
426
- evaluations_index,
427
- num_images,
428
- max_detections[m],
429
- &evaluation_indices,
430
- &detection_scores,
431
- &detection_sorted_indices,
432
- &image_detection_indices);
433
-
434
- if (num_valid_ground_truth == 0) {
435
- continue;
436
- }
437
-
438
- for (auto t = 0; t < num_iou_thresholds; ++t) {
439
- // recalls_out is a flattened vectors representing a
440
- // num_iou_thresholds X num_categories X num_area_ranges X
441
- // num_max_detections matrix
442
- const int64_t recalls_out_index =
443
- t * num_categories * num_area_ranges * num_max_detections +
444
- c * num_area_ranges * num_max_detections +
445
- a * num_max_detections + m;
446
-
447
- // precisions_out and scores_out are flattened vectors
448
- // representing a num_iou_thresholds X num_recall_thresholds X
449
- // num_categories X num_area_ranges X num_max_detections matrix
450
- const int64_t precisions_out_stride =
451
- num_categories * num_area_ranges * num_max_detections;
452
- const int64_t precisions_out_index = t * num_recall_thresholds *
453
- num_categories * num_area_ranges * num_max_detections +
454
- c * num_area_ranges * num_max_detections +
455
- a * num_max_detections + m;
456
-
457
- ComputePrecisionRecallCurve(
458
- precisions_out_index,
459
- precisions_out_stride,
460
- recalls_out_index,
461
- recall_thresholds,
462
- t,
463
- num_iou_thresholds,
464
- num_valid_ground_truth,
465
- evaluations,
466
- evaluation_indices,
467
- detection_scores,
468
- detection_sorted_indices,
469
- image_detection_indices,
470
- &precisions,
471
- &recalls,
472
- &precisions_out,
473
- &scores_out,
474
- &recalls_out);
475
- }
476
- }
477
- }
478
- }
479
-
480
- time_t rawtime;
481
- struct tm local_time;
482
- std::array<char, 200> buffer;
483
- time(&rawtime);
484
- #ifdef _WIN32
485
- localtime_s(&local_time, &rawtime);
486
- #else
487
- localtime_r(&rawtime, &local_time);
488
- #endif
489
- strftime(
490
- buffer.data(), 200, "%Y-%m-%d %H:%num_max_detections:%S", &local_time);
491
- return py::dict(
492
- "params"_a = params,
493
- "counts"_a = std::vector<int64_t>(
494
- {num_iou_thresholds,
495
- num_recall_thresholds,
496
- num_categories,
497
- num_area_ranges,
498
- num_max_detections}),
499
- "date"_a = buffer,
500
- "precision"_a = precisions_out,
501
- "recall"_a = recalls_out,
502
- "scores"_a = scores_out);
503
- }
504
-
505
- } // namespace COCOeval
506
-
507
- } // namespace detectron2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cat125/text-generator-v3/main.py DELETED
@@ -1,133 +0,0 @@
1
- from random import choice, choices
2
-
3
- import gradio as gr
4
- from tokenizers import Tokenizer
5
-
6
- from datamanager import get_data_v3, models
7
-
8
- tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
9
-
10
-
11
- def get_next_token_results(db:dict, message_tokens:list, prev_token:str, repeat:int = 0):
12
- results = []
13
- if prev_token not in db:
14
- return []
15
- for token in db[prev_token]:
16
- token.score = 0
17
- for context in token.contexts:
18
- if (context in message_tokens) and (repeat <= 1 or token.prev_token == prev_token):
19
- token.score += 1
20
- if token.score > 0:
21
- results.append(token)
22
- return results
23
-
24
- def get_next_token(db, message_ids, prevtoken, repeat = 0):
25
- results = get_next_token_results(db, message_ids, prevtoken, repeat)
26
- if len(results) == 0:
27
- if repeat < 2:
28
- return choice(list(db.keys()))
29
- else:
30
- return get_next_token(db, message_ids, prevtoken, repeat + 1)
31
- results = list(filter(lambda x: x.score, results))
32
- total_results = []
33
- weights = []
34
- for result in results:
35
- total_results.append(result.token)
36
- weights.append(result.score)
37
- if len(total_results) == 0:
38
- return get_next_token(db, message_ids, prevtoken, repeat + 1)
39
- if len(total_results) > 5:
40
- total_results = total_results[:5]
41
- weights = weights[:5]
42
- return (choices(total_results, weights=weights, k=1) or '.')[0]
43
-
44
-
45
- def generator(user_message, token_count, mode, model_name):
46
- db3 = None
47
- for key, model in models.items():
48
- if model['name'] == model_name:
49
- db3 = get_data_v3(key)
50
- break
51
- if not db3:
52
- raise gr.Error('Could not find model ' + str(model_name))
53
- message_ids = tokenizer.encode(user_message).ids
54
- if token_count < 0 or token_count > 1000:
55
- raise gr.Error("Invalid token count. It must be between 0 and 1000.")
56
- text_ids = []
57
- curtoken = 0
58
- prevtoken = 0
59
- if mode == "Continue":
60
- text_ids = message_ids
61
- curtoken = text_ids[-1]
62
- i = 0
63
- while len(text_ids) < token_count:
64
- prevtoken = curtoken
65
- curtoken = get_next_token(db3, message_ids, prevtoken)
66
- text_ids.append(curtoken)
67
- if 1012 in text_ids:
68
- yield tokenizer.decode(text_ids)
69
- break
70
- if i == 0 and 1012 in text_ids:
71
- raise gr.Error("Error in generating. Try to use another prompt")
72
- i += 1
73
- yield tokenizer.decode(text_ids)
74
-
75
- demo = gr.Blocks(
76
- title="Text Generator v2"
77
- )
78
-
79
- title_html = """
80
- <center>
81
- <h1>Text Generator v2</h1>
82
- <p>Generates text using per-word context system</p>
83
- <a href="http://j93153xm.beget.tech/app/index.html?id=text-ai"><img src="https://img.shields.io/badge/Text%20Generator%20v1-RU%20only-brightgreen"></a>
84
- </center>
85
- """
86
- info_text = """
87
- # Information about the models
88
- ### English
89
- `Language`: English
90
- `Quality`: 6/10
91
- `Sources`: ChatGPT, https://pastebin.com/WYvij310
92
- ### English-Long
93
- `Language`: English
94
- `Quality`: 5/10
95
- `Sources`: https://pastebin.com/WYvij310, https://whiletrue.neocities.org/lte, https://rainbowfluffysheep.tokenpress.com/the-longest-text-ever/
96
- ### Russian-Lite
97
- `Language`: Russian
98
- `Quality`: 5/10
99
- `Sources`: https://goroda.murman.ru/index.php?topic=6508.20;wap2
100
- ### Russian-Large
101
- `Language`: Russian
102
- `Quality`: 6/10
103
- `Sources`: http://staging.budsvetom.com/literature_items/ochen-dlinnyy-tekst
104
-
105
- # Training
106
- ```bash
107
- python train.py -r <models to train> [-t] [-l ...]
108
- ```
109
- `--rebuild` (`-r`) - Models that will be trained.
110
- `--turbo` (`-t`) - Enables turbo training. Will skip morphological analysis and just add all tokens directly.
111
- `--log` (`-l`) - Logs listed databases to the console after training.
112
-
113
- > **Note**: Use `--turbo` only when training with Russian texts.
114
- """
115
- with demo:
116
- gr.HTML(title_html)
117
- with gr.Row():
118
- with gr.Column():
119
- inp = gr.Textbox(label="Context message")
120
- token_count = gr.Number(30, precision=1, label="Max token count")
121
- mode = gr.Radio(["Generate", "Continue"], value="Generate", label="Mode")
122
- model = gr.Dropdown([model_info[1]['name'] for model_info in models.items()], label="Model", value="English-Long")
123
- with gr.Row():
124
- stop_btn = gr.Button("Stop", variant="stop")
125
- btn = gr.Button("Submit", variant="primary")
126
- with gr.Column():
127
- out = gr.Textbox(label="Output")
128
- with gr.Accordion(label="Information", open=False):
129
- gr.Markdown(info_text)
130
- submit_event = btn.click(fn=generator, inputs=[inp, token_count, mode, model], outputs=out)
131
- stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event], queue=False)
132
- demo.queue(concurrency_count=3)
133
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/resources/help/version-info.html DELETED
@@ -1,37 +0,0 @@
1
- {{extend elemLayout}}
2
-
3
- {{block 'css'}}
4
- <link rel="stylesheet" type="text/css" href="{{_res_path}}/help/version-info.css"/>
5
- {{/block}}
6
-
7
- {{block 'main'}}
8
- {{each changelogs ds idx}}
9
- <div class="hydro-bg log-cont">
10
- {{set v = ds.version }}
11
- {{set isDev = v[v.length-1] === 'v'}}
12
- <div class="cont {{isDev ? 'dev-cont': ''}}">
13
- {{if idx === 0 }}
14
- <div class="cont-title current-version">当前版本 {{v}}</div>
15
- {{else}}
16
- <div class="cont-title">{{name || 'ws'}}版本 {{v}}</div>
17
- {{/if}}
18
- <div class="cont-body">
19
- <ul class="log-ul">
20
- {{each ds.logs log}}
21
- <li>
22
- <p>{{@log.title}}</p>
23
- {{if log.logs.length > 0}}
24
- <ul class="sub-log-ul">
25
- {{each log.logs ls}}
26
- <li>{{@ls}}</li>
27
- {{/each}}
28
- </ul>
29
- {{/if}}
30
- </li>
31
- {{/each}}
32
- </ul>
33
- </div>
34
- </div>
35
- </div>
36
- {{/each}}
37
- {{/block}}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/meme_generator/memes/look_this_icon/__init__.py DELETED
@@ -1,44 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from pil_utils import BuildImage
5
-
6
- from meme_generator import add_meme
7
- from meme_generator.exception import TextOverLength
8
- from meme_generator.utils import make_jpg_or_gif
9
-
10
- img_dir = Path(__file__).parent / "images"
11
-
12
-
13
- def look_this_icon(images: List[BuildImage], texts: List[str], args):
14
- text = texts[0] if texts else "朋友\n先看看这个图标再说话"
15
- frame = BuildImage.open(img_dir / "nmsl.png")
16
- try:
17
- frame.draw_text(
18
- (0, 933, 1170, 1143),
19
- text,
20
- lines_align="center",
21
- weight="bold",
22
- max_fontsize=100,
23
- min_fontsize=50,
24
- )
25
- except ValueError:
26
- raise TextOverLength(text)
27
-
28
- def make(img: BuildImage) -> BuildImage:
29
- img = img.convert("RGBA").resize((515, 515), keep_ratio=True)
30
- return frame.copy().paste(img, (599, 403), below=True)
31
-
32
- return make_jpg_or_gif(images[0], make)
33
-
34
-
35
- add_meme(
36
- "look_this_icon",
37
- look_this_icon,
38
- min_images=1,
39
- max_images=1,
40
- min_texts=0,
41
- max_texts=1,
42
- default_texts=["朋友\n先看看这个图标再说话"],
43
- keywords=["看图标"],
44
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Codecooker/rvcapi/src/main.py DELETED
@@ -1,306 +0,0 @@
1
- import gc
2
- import hashlib
3
- import json
4
- import os
5
- import argparse
6
- from contextlib import suppress
7
- from urllib.parse import urlparse, parse_qs
8
-
9
- import gradio as gr
10
- import yt_dlp
11
- from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter
12
- from pedalboard.io import AudioFile
13
- from pydub import AudioSegment
14
-
15
- from mdx import run_mdx
16
- from rvc import Config, load_hubert, get_vc, rvc_infer
17
-
18
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19
-
20
- mdxnet_models_dir = os.path.join(BASE_DIR, 'mdxnet_models')
21
- rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models')
22
- output_dir = os.path.join(BASE_DIR, 'song_output')
23
-
24
-
25
- def get_youtube_video_id(url, ignore_playlist=True):
26
- """
27
- Examples:
28
- http://youtu.be/SA2iWivDJiE
29
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
30
- http://www.youtube.com/embed/SA2iWivDJiE
31
- http://www.youtube.com/v/SA2iWivDJiE?version=3&amp;hl=en_US
32
- """
33
- query = urlparse(url)
34
- if query.hostname == 'youtu.be':
35
- if query.path[1:] == 'watch':
36
- return query.query[2:]
37
- return query.path[1:]
38
-
39
- if query.hostname in {'www.youtube.com', 'youtube.com', 'music.youtube.com'}:
40
- if not ignore_playlist:
41
- # use case: get playlist id not current video in playlist
42
- with suppress(KeyError):
43
- return parse_qs(query.query)['list'][0]
44
- if query.path == '/watch':
45
- return parse_qs(query.query)['v'][0]
46
- if query.path[:7] == '/watch/':
47
- return query.path.split('/')[1]
48
- if query.path[:7] == '/embed/':
49
- return query.path.split('/')[2]
50
- if query.path[:3] == '/v/':
51
- return query.path.split('/')[2]
52
-
53
- # returns None for invalid YouTube url
54
- return None
55
-
56
-
57
- def yt_download(link):
58
- ydl_opts = {
59
- 'format': 'bestaudio',
60
- 'outtmpl': '%(title)s.%(ext)s',
61
- 'nocheckcertificate': True,
62
- 'ignoreerrors': True,
63
- 'no_warnings': True,
64
- 'quiet': True,
65
- 'extractaudio': True,
66
- }
67
- with yt_dlp.YoutubeDL(ydl_opts) as ydl:
68
- result = ydl.extract_info(link, download=True)
69
- download_path = ydl.prepare_filename(result)
70
-
71
- return download_path
72
-
73
-
74
- def raise_exception(error_msg, is_webui):
75
- if is_webui:
76
- raise gr.Error(error_msg)
77
- else:
78
- raise Exception(error_msg)
79
-
80
-
81
- def get_rvc_model(voice_model, is_webui):
82
- rvc_model_filename, rvc_index_filename = None, None
83
- model_dir = os.path.join(rvc_models_dir, voice_model)
84
- for file in os.listdir(model_dir):
85
- ext = os.path.splitext(file)[1]
86
- if ext == '.pth':
87
- rvc_model_filename = file
88
- if ext == '.index':
89
- rvc_index_filename = file
90
-
91
- if rvc_model_filename is None:
92
- error_msg = f'No model file exists in {model_dir}.'
93
- raise_exception(error_msg, is_webui)
94
-
95
- return os.path.join(model_dir, rvc_model_filename), os.path.join(model_dir, rvc_index_filename) if rvc_index_filename else ''
96
-
97
-
98
- def get_audio_paths(song_dir):
99
- orig_song_path = None
100
- instrumentals_path = None
101
- main_vocals_dereverb_path = None
102
- backup_vocals_path = None
103
-
104
- for file in os.listdir(song_dir):
105
- if file.endswith('_Instrumental.wav'):
106
- instrumentals_path = os.path.join(song_dir, file)
107
- orig_song_path = instrumentals_path.replace('_Instrumental', '')
108
-
109
- elif file.endswith('_Vocals_Main_DeReverb.wav'):
110
- main_vocals_dereverb_path = os.path.join(song_dir, file)
111
-
112
- elif file.endswith('_Vocals_Backup.wav'):
113
- backup_vocals_path = os.path.join(song_dir, file)
114
-
115
- return orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path
116
-
117
-
118
- def get_hash(filepath):
119
- with open(filepath, 'rb') as f:
120
- file_hash = hashlib.blake2b()
121
- while chunk := f.read(8192):
122
- file_hash.update(chunk)
123
-
124
- return file_hash.hexdigest()[:11]
125
-
126
-
127
- def display_progress(message, percent, is_webui, progress=None):
128
- if is_webui:
129
- progress(percent, desc=message)
130
- else:
131
- print(message)
132
-
133
-
134
- def preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress=None):
135
- keep_orig = False
136
- if input_type == 'yt':
137
- display_progress('[~] Downloading song...', 0, is_webui, progress)
138
- song_link = song_input.split('&')[0]
139
- orig_song_path = yt_download(song_link)
140
- elif input_type == 'local':
141
- orig_song_path = song_input
142
- keep_orig = True
143
- else:
144
- orig_song_path = None
145
-
146
- song_output_dir = os.path.join(output_dir, song_id)
147
-
148
- display_progress('[~] Separating Vocals from Instrumental...', 0.1, is_webui, progress)
149
- vocals_path, instrumentals_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'UVR-MDX-NET-Voc_FT.onnx'), orig_song_path, denoise=True, keep_orig=keep_orig)
150
-
151
- display_progress('[~] Separating Main Vocals from Backup Vocals...', 0.2, is_webui, progress)
152
- backup_vocals_path, main_vocals_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'UVR_MDXNET_KARA_2.onnx'), vocals_path, suffix='Backup', invert_suffix='Main', denoise=True)
153
-
154
- display_progress('[~] Applying DeReverb to Vocals...', 0.3, is_webui, progress)
155
- _, main_vocals_dereverb_path = run_mdx(mdx_model_params, song_output_dir, os.path.join(mdxnet_models_dir, 'Reverb_HQ_By_FoxJoy.onnx'), main_vocals_path, invert_suffix='DeReverb', exclude_main=True, denoise=True)
156
-
157
- return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path
158
-
159
-
160
- def voice_change(voice_model, vocals_path, output_path, pitch_change, index_rate, filter_radius, rms_mix_rate, protect, is_webui):
161
- rvc_model_path, rvc_index_path = get_rvc_model(voice_model, is_webui)
162
- device = 'cuda:0'
163
- config = Config(device, True)
164
- hubert_model = load_hubert(device, config.is_half, os.path.join(rvc_models_dir, 'hubert_base.pt'))
165
- cpt, version, net_g, tgt_sr, vc = get_vc(device, config.is_half, config, rvc_model_path)
166
-
167
- # convert main vocals
168
- rvc_infer(rvc_index_path, index_rate, vocals_path, output_path, pitch_change, cpt, version, net_g, filter_radius, tgt_sr, rms_mix_rate, protect, vc, hubert_model)
169
- del hubert_model, cpt
170
- gc.collect()
171
-
172
-
173
- def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping):
174
- output_path = f'{os.path.splitext(audio_path)[0]}_mixed.wav'
175
-
176
- # Initialize audio effects plugins
177
- board = Pedalboard(
178
- [
179
- HighpassFilter(),
180
- Compressor(ratio=4, threshold_db=-15),
181
- Reverb(room_size=reverb_rm_size, dry_level=reverb_dry, wet_level=reverb_wet, damping=reverb_damping)
182
- ]
183
- )
184
-
185
- with AudioFile(audio_path) as f:
186
- with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
187
- # Read one second of audio at a time, until the file is empty:
188
- while f.tell() < f.frames:
189
- chunk = f.read(int(f.samplerate))
190
- effected = board(chunk, f.samplerate, reset=False)
191
- o.write(effected)
192
-
193
- return output_path
194
-
195
-
196
- def combine_audio(audio_paths, output_path, main_gain, backup_gain, inst_gain):
197
- main_vocal_audio = AudioSegment.from_wav(audio_paths[0]) - 4 + main_gain
198
- backup_vocal_audio = AudioSegment.from_wav(audio_paths[1]) - 6 + backup_gain
199
- instrumental_audio = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain
200
- main_vocal_audio.overlay(backup_vocal_audio).overlay(instrumental_audio).export(output_path, format='mp3')
201
-
202
-
203
- def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files,
204
- is_webui=0, main_gain=0, backup_gain=0, inst_gain=0, index_rate=0.5, filter_radius=3,
205
- rms_mix_rate=0.25, protect=0.33, reverb_rm_size=0.15, reverb_wet=0.2, reverb_dry=0.8,
206
- reverb_damping=0.7, progress=gr.Progress()):
207
- try:
208
- if not song_input or not voice_model:
209
- raise_exception('Ensure that the song input field and voice model field is filled.', is_webui)
210
-
211
- display_progress('[~] Starting AI Cover Generation Pipeline...', 0, is_webui, progress)
212
-
213
- with open(os.path.join(mdxnet_models_dir, 'model_data.json')) as infile:
214
- mdx_model_params = json.load(infile)
215
-
216
- # if youtube url
217
- if urlparse(song_input).scheme == 'https':
218
- input_type = 'yt'
219
- song_id = get_youtube_video_id(song_input)
220
- if song_id is None:
221
- error_msg = 'Invalid YouTube url.'
222
- raise_exception(error_msg, is_webui)
223
-
224
- # local audio file
225
- else:
226
- input_type = 'local'
227
- song_input = song_input.strip('\"')
228
- if os.path.exists(song_input):
229
- song_id = get_hash(song_input)
230
- else:
231
- error_msg = f'{song_input} does not exist.'
232
- song_id = None
233
- raise_exception(error_msg, is_webui)
234
-
235
- song_dir = os.path.join(output_dir, song_id)
236
-
237
- if not os.path.exists(song_dir):
238
- os.makedirs(song_dir)
239
- orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress)
240
-
241
- else:
242
- vocals_path, main_vocals_path = None, None
243
- paths = get_audio_paths(song_dir)
244
-
245
- # if any of the audio files aren't available or keep intermediate files, rerun preprocess
246
- if any(path is None for path in paths) or keep_files:
247
- orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress)
248
- else:
249
- orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path = paths
250
-
251
- ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_rms{rms_mix_rate}_pro{protect}.wav')
252
- ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver).mp3')
253
-
254
- if not os.path.exists(ai_vocals_path):
255
- display_progress('[~] Converting voice using RVC...', 0.5, is_webui, progress)
256
- voice_change(voice_model, main_vocals_dereverb_path, ai_vocals_path, pitch_change, index_rate, filter_radius, rms_mix_rate, protect, is_webui)
257
-
258
- display_progress('[~] Applying audio effects to vocals...', 0.8, is_webui, progress)
259
- ai_vocals_mixed_path = add_audio_effects(ai_vocals_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping)
260
-
261
- display_progress('[~] Combining AI Vocals and Instrumentals...', 0.9, is_webui, progress)
262
- combine_audio([ai_vocals_mixed_path, backup_vocals_path, instrumentals_path], ai_cover_path, main_gain, backup_gain, inst_gain)
263
-
264
- if not keep_files:
265
- display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress)
266
- intermediate_files = [vocals_path, main_vocals_path, ai_vocals_mixed_path]
267
- for file in intermediate_files:
268
- if file and os.path.exists(file):
269
- os.remove(file)
270
-
271
- return ai_cover_path
272
-
273
- except Exception as e:
274
- raise_exception(str(e), is_webui)
275
-
276
-
277
- if __name__ == '__main__':
278
- parser = argparse.ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
279
- parser.add_argument('-i', '--song-input', type=str, required=True, help='Link to a YouTube video or the filepath to a local mp3/wav file to create an AI cover of')
280
- parser.add_argument('-dir', '--rvc-dirname', type=str, required=True, help='Name of the folder in the rvc_models directory containing the RVC model file and optional index file to use')
281
- parser.add_argument('-p', '--pitch-change', type=int, required=True, help='Change the pitch of the AI voice. Generally use 12 for male to female conversions and -12 for vice-versa. Use 0 for no change')
282
- parser.add_argument('-k', '--keep-files', action=argparse.BooleanOptionalAction, help='Whether to keep all intermediate audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals')
283
- parser.add_argument('-ir', '--index-rate', type=float, default=0.5, help='A decimal number e.g. 0.5, used to reduce/resolve the timbre leakage problem. If set to 1, more biased towards the timbre quality of the training dataset')
284
- parser.add_argument('-fr', '--filter-radius', type=int, default=3, help='A number between 0 and 7. If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.')
285
- parser.add_argument('-rms', '--rms-mix-rate', type=float, default=0.25, help="A decimal number e.g. 0.25. Control how much to use the original vocal's loudness (0) or a fixed loudness (1).")
286
- parser.add_argument('-pro', '--protect', type=float, default=0.33, help='A decimal number e.g. 0.33. Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy.')
287
- parser.add_argument('-mv', '--main-vol', type=int, default=0, help='Volume change for AI main vocals in decibels. Use -3 to decrease by 3 decibels and 3 to increase by 3 decibels')
288
- parser.add_argument('-bv', '--backup-vol', type=int, default=0, help='Volume change for backup vocals in decibels')
289
- parser.add_argument('-iv', '--inst-vol', type=int, default=0, help='Volume change for instrumentals in decibels')
290
- parser.add_argument('-rsize', '--reverb-size', type=float, default=0.15, help='Reverb room size between 0 and 1')
291
- parser.add_argument('-rwet', '--reverb-wetness', type=float, default=0.2, help='Reverb wet level between 0 and 1')
292
- parser.add_argument('-rdry', '--reverb-dryness', type=float, default=0.8, help='Reverb dry level between 0 and 1')
293
- parser.add_argument('-rdamp', '--reverb-damping', type=float, default=0.7, help='Reverb damping between 0 and 1')
294
- args = parser.parse_args()
295
-
296
- rvc_dirname = args.rvc_dirname
297
- if not os.path.exists(os.path.join(rvc_models_dir, rvc_dirname)):
298
- raise Exception(f'The folder {os.path.join(rvc_models_dir, rvc_dirname)} does not exist.')
299
-
300
- cover_path = song_cover_pipeline(args.song_input, rvc_dirname, args.pitch_change, args.keep_files,
301
- main_gain=args.main_vol, backup_gain=args.backup_vol, inst_gain=args.inst_vol,
302
- index_rate=args.index_rate, filter_radius=args.filter_radius,
303
- rms_mix_rate=args.rms_mix_rate, protect=args.protect,
304
- reverb_rm_size=args.reverb_size, reverb_wet=args.reverb_wetness,
305
- reverb_dry=args.reverb_dryness, reverb_damping=args.reverb_damping)
306
- print(f'[+] Cover generated at {cover_path}')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cpp4App/Cpp4App/CDM/README.md DELETED
@@ -1,80 +0,0 @@
1
- # UIED - UI element detection, detecting UI elements from UI screenshots or drawnings
2
-
3
- This project is still ongoing and this repo may be updated irregularly, I developed a web app for the UIED in http://uied.online
4
-
5
- ## Related Publications:
6
- [1. UIED: a hybrid tool for GUI element detection](https://dl.acm.org/doi/10.1145/3368089.3417940)
7
-
8
- [2. Object Detection for Graphical User Interface: Old Fashioned or Deep Learning or a Combination?](https://arxiv.org/abs/2008.05132)
9
-
10
- >The repo has been **upgraded with Google OCR** for GUI text detection, to use the original version in our paper (using [EAST](https://github.com/argman/EAST) as text detector), check the relase [v2.3](https://github.com/MulongXie/UIED/releases/tag/v2.3) and download the pre-trained model in [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing).
11
-
12
- ## What is it?
13
-
14
- UI Element Detection (UIED) is an old-fashioned computer vision (CV) based element detection approach for graphic user interface.
15
-
16
- The input of UIED could be various UI image, such as mobile app or web page screenshot, UI design drawn by Photoshop or Sketch, and even some hand-drawn UI design. Then the approach detects and classifies text and graphic UI elements, and exports the detection result as JSON file for future application.
17
-
18
- UIED comprises two parts to detect UI text and graphic elements, such as button, image and input bar.
19
- * For text, it leverages [Google OCR](https://cloud.google.com/vision/docs/ocr) to perfrom detection.
20
-
21
- * For graphical elements, it uses old-fashioned CV approaches to locate the elements and a CNN classifier to achieve classification.
22
-
23
- > UIED is highly customizable, you can replace both parts by your choice (e.g. other text detection approaches). Unlike black-box end-to-end deep learning approach, you can revise the algorithms in the non-text detection and merging (partially or entirely) easily to fit your task.
24
-
25
- ![UIED Approach](https://github.com/MulongXie/UIED/blob/master/data/demo/approach.png)
26
-
27
- ## How to use?
28
-
29
- ### Dependency
30
- * **Python 3.5**
31
- * **Opencv 3.4.2**
32
- * **Pandas**
33
- <!-- * **Tensorflow 1.10.0**
34
- * **Keras 2.2.4**
35
- * **Sklearn 0.22.2** -->
36
-
37
- ### Installation
38
- <!-- Install the mentioned dependencies, and download two pre-trained models from [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing) for EAST text detection and GUI element classification. -->
39
-
40
- <!-- Change ``CNN_PATH`` and ``EAST_PATH`` in *config/CONFIG.py* to your locations. -->
41
-
42
- The new version of UIED equipped with Google OCR is easy to deploy and no pre-trained model is needed. Simply donwload the repo along with the dependencies.
43
-
44
- > Please replace the Google OCR key at `detect_text/ocr.py line 28` with your own (apply in [Google website](https://cloud.google.com/vision)).
45
-
46
- ### Usage
47
- To test your own image(s):
48
- * To test single image, change *input_path_img* in ``run_single.py`` to your input image and the results will be output to *output_root*.
49
- * To test mutiple images, change *input_img_root* in ``run_batch.py`` to your input directory and the results will be output to *output_root*.
50
- * To adjust the parameters lively, using ``run_testing.py``
51
-
52
- > Note: The best set of parameters vary for different types of GUI image (Mobile App, Web, PC). I highly recommend to first play with the ``run_testing.py`` to pick a good set of parameters for your data.
53
-
54
- ## Folder structure
55
- ``cnn/``
56
- * Used to train classifier for graphic UI elements
57
- * Set path of the CNN classification model
58
-
59
- ``config/``
60
- * Set data paths
61
- * Set parameters for graphic elements detection
62
-
63
- ``data/``
64
- * Input UI images and output detection results
65
-
66
- ``detect_compo/``
67
- * Non-text GUI component detection
68
-
69
- ``detect_text/``
70
- * GUI text detection using Google OCR
71
-
72
- ``detect_merge/``
73
- * Merge the detection results of non-text and text GUI elements
74
-
75
- The major detection algorithms are in ``detect_compo/``, ``detect_text/`` and ``detect_merge/``
76
-
77
- ## Demo
78
- GUI element detection result for web screenshot
79
-
80
- ![UI Components detection result](https://github.com/MulongXie/UIED/blob/master/data/demo/demo.png)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/__init__.py DELETED
File without changes
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/__init__.py DELETED
@@ -1,26 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
- import torch
3
-
4
- from .batch_norm import FrozenBatchNorm2d
5
- from .misc import Conv2d
6
- from .misc import ConvTranspose2d
7
- from .misc import BatchNorm2d
8
- from .misc import interpolate
9
- from .nms import nms
10
- from .roi_align import ROIAlign
11
- from .roi_align import roi_align
12
- from .roi_pool import ROIPool
13
- from .roi_pool import roi_pool
14
- from .smooth_l1_loss import smooth_l1_loss
15
- from .sigmoid_focal_loss import SigmoidFocalLoss
16
- from .iou_loss import IOULoss
17
- from .scale import Scale
18
- from .deform_conv_v2 import DCN, DCNPooling
19
- from .iou import iou_regress
20
- from .focal_loss import Focal_Loss
21
-
22
- __all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool",
23
- "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate",
24
- "BatchNorm2d", "FrozenBatchNorm2d", "SigmoidFocalLoss", "IOULoss",
25
- "Scale", "DCN", "DCNPooling", "iou_regress","Focal_Loss"]
26
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_resources.py DELETED
@@ -1,31 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from abc import ABCMeta, abstractmethod
4
- from types import TracebackType
5
- from typing import TypeVar
6
-
7
- T = TypeVar("T")
8
-
9
-
10
- class AsyncResource(metaclass=ABCMeta):
11
- """
12
- Abstract base class for all closeable asynchronous resources.
13
-
14
- Works as an asynchronous context manager which returns the instance itself on enter, and calls
15
- :meth:`aclose` on exit.
16
- """
17
-
18
- async def __aenter__(self: T) -> T:
19
- return self
20
-
21
- async def __aexit__(
22
- self,
23
- exc_type: type[BaseException] | None,
24
- exc_val: BaseException | None,
25
- exc_tb: TracebackType | None,
26
- ) -> None:
27
- await self.aclose()
28
-
29
- @abstractmethod
30
- async def aclose(self) -> None:
31
- """Close the resource."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Datasculptor/MusicGen/tests/modules/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
spaces/Dinoking/Guccio-AI-Designer/netdissect/fullablate.py DELETED
@@ -1,235 +0,0 @@
1
- import torch, sys, os, argparse, textwrap, numbers, numpy, json, PIL
2
- from torchvision import transforms
3
- from torch.utils.data import TensorDataset
4
- from netdissect.progress import default_progress, post_progress, desc_progress
5
- from netdissect.progress import verbose_progress, print_progress
6
- from netdissect.nethook import edit_layers
7
- from netdissect.zdataset import standard_z_sample
8
- from netdissect.autoeval import autoimport_eval
9
- from netdissect.easydict import EasyDict
10
- from netdissect.modelconfig import create_instrumented_model
11
-
12
- help_epilog = '''\
13
- Example:
14
-
15
- python -m netdissect.evalablate \
16
- --segmenter "netdissect.GanImageSegmenter(segvocab='lowres', segsizes=[160,288], segdiv='quad')" \
17
- --model "proggan.from_pth_file('models/lsun_models/${SCENE}_lsun.pth')" \
18
- --outdir dissect/dissectdir \
19
- --classname tree \
20
- --layer layer4 \
21
- --size 1000
22
-
23
- Output layout:
24
- dissectdir/layer5/ablation/mirror-iqr.json
25
- { class: "mirror",
26
- classnum: 43,
27
- pixel_total: 41342300,
28
- class_pixels: 1234531,
29
- layer: "layer5",
30
- ranking: "mirror-iqr",
31
- ablation_units: [341, 23, 12, 142, 83, ...]
32
- ablation_pixels: [143242, 132344, 429931, ...]
33
- }
34
-
35
- '''
36
-
37
def main():
    """Command-line entry point: measure how ablating the top-ranked units
    of one GAN layer reduces the pixel count of a single segmentation class.

    Defaults (model constructor, weights, segmenter, layer) are read from
    <outdir>/dissect.json; units are ordered by --metric with iou as the
    tie-breaker, then growing prefixes of that ordering are ablated and the
    per-prefix class pixel counts are written to
    <outdir>/<layer>/fullablation/<classname>-<metric>.json.
    """
    # Training settings
    def strpair(arg):
        # Parse 'layername' or 'layername:reportedname' into a 2-tuple
        # (a bare name is duplicated so p[0]/p[1] are always valid).
        p = tuple(arg.split(':'))
        if len(p) == 1:
            p = p + p
        return p

    parser = argparse.ArgumentParser(description='Ablation eval',
            epilog=textwrap.dedent(help_epilog),
            formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--model', type=str, default=None,
                        help='constructor for the model to test')
    parser.add_argument('--pthfile', type=str, default=None,
                        help='filename of .pth file for the model')
    parser.add_argument('--outdir', type=str, default='dissect', required=True,
                        help='directory for dissection output')
    parser.add_argument('--layer', type=strpair,
                        help='space-separated list of layer names to edit' +
                        ', in the form layername[:reportedname]')
    parser.add_argument('--classname', type=str,
                        help='class name to ablate')
    parser.add_argument('--metric', type=str, default='iou',
                        help='ordering metric for selecting units')
    parser.add_argument('--unitcount', type=int, default=30,
                        help='number of units to ablate')
    parser.add_argument('--segmenter', type=str,
                        help='directory containing segmentation dataset')
    parser.add_argument('--netname', type=str, default=None,
                        help='name for network in generated reports')
    parser.add_argument('--batch_size', type=int, default=25,
                        help='batch size for forward pass')
    parser.add_argument('--mixed_units', action='store_true', default=False,
                        help='true to keep alpha for non-zeroed units')
    parser.add_argument('--size', type=int, default=200,
                        help='number of images to test')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA usage')
    parser.add_argument('--quiet', action='store_true', default=False,
                        help='silences console output')
    if len(sys.argv) == 1:
        # No arguments at all: show usage instead of failing on --outdir.
        parser.print_usage(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()

    # Set up console output
    verbose_progress(not args.quiet)

    # Speed up pytorch
    torch.backends.cudnn.benchmark = True

    # Set up CUDA
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        # NOTE(review): cudnn.benchmark was already enabled above; this
        # repeated assignment is redundant but harmless.
        torch.backends.cudnn.benchmark = True

    # Take defaults for model constructor etc from dissect.json settings.
    with open(os.path.join(args.outdir, 'dissect.json')) as f:
        dissection = EasyDict(json.load(f))
    if args.model is None:
        args.model = dissection.settings.model
    if args.pthfile is None:
        args.pthfile = dissection.settings.pthfile
    if args.segmenter is None:
        args.segmenter = dissection.settings.segmenter
    if args.layer is None:
        args.layer = dissection.settings.layers[0]
    args.layers = [args.layer]

    # Also load specific analysis
    # args.layer is a (layername, reportedname) pair; rankings are stored
    # under the reported name.
    layername = args.layer[1]
    if args.metric == 'iou':
        # The iou rankings live directly inside dissect.json.
        summary = dissection
    else:
        with open(os.path.join(args.outdir, layername, args.metric,
                args.classname, 'summary.json')) as f:
            summary = EasyDict(json.load(f))

    # Instantiate generator
    model = create_instrumented_model(args, gen=True, edit=True)
    if model is None:
        print('No model specified')
        sys.exit(1)

    # Instantiate model
    device = next(model.parameters()).device
    input_shape = model.input_shape

    # 4d input if convolutional, 2d input if first layer is linear.
    raw_sample = standard_z_sample(args.size, input_shape[1], seed=3).view(
            (args.size,) + input_shape[1:])
    dataset = TensorDataset(raw_sample)

    # Create the segmenter
    segmenter = autoimport_eval(args.segmenter)

    # Now do the actual work.
    labelnames, catnames = (
            segmenter.get_label_and_category_names(dataset))
    # NOTE(review): label_category is computed but never used below.
    label_category = [catnames.index(c) if c in catnames else 0
            for l, c in labelnames]
    labelnum_from_name = {n[0]: i for i, n in enumerate(labelnames)}

    segloader = torch.utils.data.DataLoader(dataset,
                batch_size=args.batch_size, num_workers=10,
                pin_memory=(device.type == 'cuda'))

    # Index the dissection layers by layer name.

    # First, collect a baseline
    for l in model.ablation:
        model.ablation[l] = None

    # For each sort-order, do an ablation
    progress = default_progress()
    classname = args.classname
    classnum = labelnum_from_name[classname]

    # Get iou ranking from dissect.json
    iou_rankname = '%s-%s' % (classname, 'iou')
    dissect_layer = {lrec.layer: lrec for lrec in dissection.layers}
    iou_ranking = next(r for r in dissect_layer[layername].rankings
            if r.name == iou_rankname)

    # Get trained ranking from summary.json
    rankname = '%s-%s' % (classname, args.metric)
    summary_layer = {lrec.layer: lrec for lrec in summary.layers}
    ranking = next(r for r in summary_layer[layername].rankings
            if r.name == rankname)

    # Get ordering, first by ranking, then break ties by iou.
    ordering = [t[2] for t in sorted([(s1, s2, i)
        for i, (s1, s2) in enumerate(zip(ranking.score, iou_ranking.score))])]
    # Scores are negated so that a larger score sorts (and ablates) first.
    values = (-numpy.array(ranking.score))[ordering]
    if not args.mixed_units:
        # Hard ablation: zero the unit completely instead of scaling it.
        values[...] = 1

    ablationdir = os.path.join(args.outdir, layername, 'fullablation')
    measurements = measure_full_ablation(segmenter, segloader,
            model, classnum, layername,
            ordering[:args.unitcount], values[:args.unitcount])
    measurements = measurements.cpu().numpy().tolist()
    os.makedirs(ablationdir, exist_ok=True)
    with open(os.path.join(ablationdir, '%s.json'%rankname), 'w') as f:
        json.dump(dict(
            classname=classname,
            classnum=classnum,
            baseline=measurements[0],
            layer=layername,
            metric=args.metric,
            ablation_units=ordering,
            ablation_values=values.tolist(),
            ablation_effects=measurements[1:]), f)
190
-
191
def measure_full_ablation(segmenter, loader, model, classnum, layer,
        ordering, values):
    '''
    Quick and easy counting of segmented pixels reduced by ablating units.

    For each prefix of ``ordering`` (0 units ablated up to all of them),
    sets ``model.ablation[layer]`` to scale/zero those units, runs every
    batch in ``loader`` through the generator, and sums the number of
    pixels the segmenter assigns to class ``classnum``.

    Returns a 1-d float tensor of length len(ordering) + 1 where entry k
    is the class pixel total with the first k units ablated (entry 0 is
    the unablated baseline).
    '''
    progress = default_progress()
    device = next(model.parameters()).device
    feature_units = model.feature_shape[layer][1]
    repeats = len(ordering)
    total_scores = torch.zeros(repeats + 1)
    # Debug output retained from the original implementation.
    print(ordering)
    print(values.tolist())
    with torch.no_grad():
        # Clear any leftover ablations before measuring.
        for l in model.ablation:
            model.ablation[l] = None
        for i, [ibz] in enumerate(progress(loader)):
            # BUGFIX: move the batch to the model's device rather than
            # calling .cuda() unconditionally, which crashed CPU-only runs.
            ibz = ibz.to(device)
            for num_units in progress(range(len(ordering) + 1)):
                ablation = torch.zeros(feature_units, device=device)
                ablation[ordering[:num_units]] = torch.tensor(
                        values[:num_units]).to(ablation.device, ablation.dtype)
                model.ablation[layer] = ablation
                tensor_images = model(ibz)
                seg = segmenter.segment_batch(tensor_images, downsample=2)
                # Any segmentation channel predicting the class marks the pixel.
                mask = (seg == classnum).max(1)[0]
                total_scores[num_units] += mask.sum().float().cpu()
    return total_scores
219
-
220
def count_segments(segmenter, loader, model):
    """Estimate normalized per-class segmentation pixel counts over a loader.

    NOTE(review): this function is broken as written and appears to be dead
    code: ``z_batch``, ``device``, ``index``, ``self`` and
    ``batch_label_counts`` are all undefined in this scope, so any call
    raises NameError. Kept byte-for-byte; fix or delete before use
    (``bc`` was presumably meant where ``batch_label_counts`` is read).
    """
    total_bincount = 0
    data_size = 0
    progress = default_progress()
    for i, batch in enumerate(progress(loader)):
        # NameError: z_batch and device are undefined (batch was likely intended).
        tensor_images = model(z_batch.to(device))
        seg = segmenter.segment_batch(tensor_images, downsample=2)
        # NameError: index and self are undefined here.
        bc = (seg + index[:, None, None, None] * self.num_classes).view(-1
                ).bincount(minlength=z_batch.shape[0] * self.num_classes)
        data_size += seg.shape[0] * seg.shape[2] * seg.shape[3]
        # NameError: batch_label_counts is undefined; `bc` above is unused.
        total_bincount += batch_label_counts.float().sum(0)
    normalized_bincount = total_bincount / data_size
    return normalized_bincount
233
-
234
- if __name__ == '__main__':
235
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ECCV2022/bytetrack/yolox/evaluators/evaluation.py DELETED
@@ -1,200 +0,0 @@
1
- import os
2
- import numpy as np
3
- import copy
4
- import motmetrics as mm
5
- mm.lap.default_solver = 'lap'
6
-
7
-
8
class Evaluator(object):
    """Per-sequence MOT evaluator built on ``motmetrics``.

    Loads the sequence's ground truth once, then accumulates frame-by-frame
    assignments between tracker boxes and GT so that CLEAR-MOT / identity
    metrics can be summarized afterwards.
    """

    def __init__(self, data_root, seq_name, data_type):
        # Only the 'mot' ground-truth layout is supported (see load_annotations).
        self.data_root = data_root
        self.seq_name = seq_name
        self.data_type = data_type

        self.load_annotations()
        self.reset_accumulator()

    def load_annotations(self):
        """Read GT boxes and ignore regions from <data_root>/<seq_name>/gt/gt.txt."""
        assert self.data_type == 'mot'

        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)

    def reset_accumulator(self):
        # auto_id=True lets motmetrics number the frames automatically.
        self.acc = mm.MOTAccumulator(auto_id=True)

    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
        """Match one frame of tracker boxes against GT and update ``self.acc``.

        Tracker boxes that IoU-match (max_iou=0.5) a GT 'ignore' region are
        dropped before scoring. Returns the frame's mot events only when
        ``rtn_events`` is True and the installed motmetrics fork exposes
        ``last_mot_events``; otherwise None.
        """
        # results
        trk_tlwhs = np.copy(trk_tlwhs)
        trk_ids = np.copy(trk_ids)

        # gts
        gt_objs = self.gt_frame_dict.get(frame_id, [])
        gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]

        # ignore boxes
        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
        ignore_tlwhs = unzip_objs(ignore_objs)[0]

        # remove ignored results
        keep = np.ones(len(trk_tlwhs), dtype=bool)
        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
        if len(iou_distance) > 0:
            match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
            match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
            # NaN entries mark assignments above the IoU cutoff; only real
            # matches are removed from the tracker output.
            match_ious = iou_distance[match_is, match_js]

            match_js = np.asarray(match_js, dtype=int)
            match_js = match_js[np.logical_not(np.isnan(match_ious))]
            keep[match_js] = False
            trk_tlwhs = trk_tlwhs[keep]
            trk_ids = trk_ids[keep]
        #match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
        #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
        #match_ious = iou_distance[match_is, match_js]

        #match_js = np.asarray(match_js, dtype=int)
        #match_js = match_js[np.logical_not(np.isnan(match_ious))]
        #keep[match_js] = False
        #trk_tlwhs = trk_tlwhs[keep]
        #trk_ids = trk_ids[keep]

        # get distance matrix
        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)

        # acc
        self.acc.update(gt_ids, trk_ids, iou_distance)

        if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
            events = self.acc.last_mot_events  # only supported by https://github.com/longcw/py-motmetrics
        else:
            events = None
        return events

    def eval_file(self, filename):
        """Evaluate a whole tracker result file; returns the filled accumulator."""
        self.reset_accumulator()

        result_frame_dict = read_results(filename, self.data_type, is_gt=False)
        #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
        frames = sorted(list(set(result_frame_dict.keys())))
        for frame_id in frames:
            trk_objs = result_frame_dict.get(frame_id, [])
            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)

        return self.acc

    @staticmethod
    def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
        """Compute a metrics table over several accumulators plus an OVERALL row."""
        names = copy.deepcopy(names)
        if metrics is None:
            metrics = mm.metrics.motchallenge_metrics
        metrics = copy.deepcopy(metrics)

        mh = mm.metrics.create()
        summary = mh.compute_many(
            accs,
            metrics=metrics,
            names=names,
            generate_overall=True
        )

        return summary

    @staticmethod
    def save_summary(summary, filename):
        """Write the summary DataFrame to an Excel file."""
        import pandas as pd
        writer = pd.ExcelWriter(filename)
        summary.to_excel(writer)
        # NOTE(review): ExcelWriter.save() is deprecated (removed in pandas
        # 2.0); switch to writer.close() or a `with` block when upgrading.
        writer.save()
112
-
113
-
114
-
115
-
116
-
117
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    """Parse a result/annotation file into {frame_id: [(tlwh, id, score)]}.

    Only the MOT-style text layouts ('mot' and 'lab') are supported; both
    are handled by :func:`read_mot_results`. Raises ValueError for any
    other ``data_type``.
    """
    if data_type not in ('mot', 'lab'):
        raise ValueError('Unknown data type: {}'.format(data_type))
    return read_mot_results(filename, is_gt, is_ignore)
124
-
125
-
126
- """
127
- labels={'ped', ... % 1
128
- 'person_on_vhcl', ... % 2
129
- 'car', ... % 3
130
- 'bicycle', ... % 4
131
- 'mbike', ... % 5
132
- 'non_mot_vhcl', ... % 6
133
- 'static_person', ... % 7
134
- 'distractor', ... % 8
135
- 'occluder', ... % 9
136
- 'occluder_on_grnd', ... %10
137
- 'occluder_full', ... % 11
138
- 'reflection', ... % 12
139
- 'crowd' ... % 13
140
- };
141
- """
142
-
143
-
144
def read_mot_results(filename, is_gt, is_ignore):
    """Parse a MOT-format csv file into {frame_id: [(tlwh, target_id, score)]}.

    Each line is: frame, id, x, y, w, h, score/mark, [label, visibility].
    Behavior depends on the flags:
      * is_gt: keep only valid pedestrian GT (MOT16/17 label in
        ``valid_labels`` with a non-zero mark), score fixed to 1.
      * is_ignore: keep only boxes whose label is an ignore class,
        score fixed to 1.
      * neither: keep every row, using column 7 as the detection score.
    Returns an empty dict when the file does not exist.
    """
    valid_labels = {1}
    # person_on_vhcl(2), static_person(7), distractor(8), reflection(12)
    ignore_labels = {2, 7, 8, 12}
    results_dict = dict()
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            for line in f.readlines():
                linelist = line.split(',')
                if len(linelist) < 7:
                    continue
                fid = int(linelist[0])
                if fid < 1:
                    continue
                results_dict.setdefault(fid, list())

                # NOTE(review): box_size feeds only the commented-out
                # size filters below.
                box_size = float(linelist[4]) * float(linelist[5])

                if is_gt:
                    # Only MOT16/17 files carry label/mark columns.
                    if 'MOT16-' in filename or 'MOT17-' in filename:
                        label = int(float(linelist[7]))
                        mark = int(float(linelist[6]))
                        if mark == 0 or label not in valid_labels:
                            continue
                    score = 1
                elif is_ignore:
                    if 'MOT16-' in filename or 'MOT17-' in filename:
                        label = int(float(linelist[7]))
                        vis_ratio = float(linelist[8])
                        # NOTE(review): vis_ratio >= 0 is always true for
                        # MOT data, so effectively only the label filter
                        # applies here — confirm this is intended.
                        if label not in ignore_labels and vis_ratio >= 0:
                            continue
                    else:
                        # Non-MOT16/17 files contribute no ignore regions.
                        continue
                    score = 1
                else:
                    score = float(linelist[6])

                #if box_size > 7000:
                #if box_size <= 7000 or box_size >= 15000:
                #if box_size < 15000:
                    #continue

                tlwh = tuple(map(float, linelist[2:6]))
                target_id = int(linelist[1])

                results_dict[fid].append((tlwh, target_id, score))

    return results_dict
191
-
192
-
193
def unzip_objs(objs):
    """Transpose [(tlwh, target_id, score), ...] into three sequences.

    Returns (tlwhs, ids, scores) where tlwhs is a float ndarray of shape
    (N, 4); ids and scores keep zip()'s tuple form for non-empty input
    and are empty lists otherwise (matching the original behavior).
    """
    if objs:
        tlwhs, ids, scores = zip(*objs)
    else:
        tlwhs, ids, scores = [], [], []
    tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)

    return tlwhs, ids, scores
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/EDGAhab/VITS-Aatrox-AI/commons.py DELETED
@@ -1,161 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
-
8
def init_weights(m, mean=0.0, std=0.01):
    """Gaussian-initialize the weights of any Conv-like module in place.

    Modules whose class name does not contain 'Conv' are left untouched,
    so this is safe to use with ``Module.apply``.
    """
    if "Conv" in type(m).__name__:
        m.weight.data.normal_(mean, std)
12
-
13
-
14
def get_padding(kernel_size, dilation=1):
    """Padding that keeps sequence length for a dilated 'same' convolution."""
    effective_span = (kernel_size - 1) * dilation
    return int(effective_span / 2)
16
-
17
-
18
def convert_pad_shape(pad_shape):
    """Flatten a per-dimension [[before, after], ...] spec, last dim first,
    into the flat list that torch.nn.functional.pad expects."""
    return [amount for pair in pad_shape[::-1] for amount in pair]
22
-
23
-
24
def intersperse(lst, item):
    """Return lst with ``item`` inserted before, between, and after its
    elements, e.g. [1, 2] -> [item, 1, item, 2, item]."""
    out = [item]
    for element in lst:
        out.append(element)
        out.append(item)
    return out
28
-
29
-
30
def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q) between diagonal Gaussians given means and log-stddevs."""
    log_ratio = logs_q - logs_p
    quad = (torch.exp(2. * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2. * logs_q)
    return log_ratio - 0.5 + 0.5 * quad
35
-
36
-
37
def rand_gumbel(shape):
    """Sample Gumbel(0, 1) noise; the uniforms are squeezed into
    [1e-5, 0.99999) so neither log can overflow."""
    u = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(u))


def rand_gumbel_like(x):
    """Gumbel noise matching the shape, dtype, and device of ``x``."""
    return rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
46
-
47
-
48
def slice_segments(x, ids_str, segment_size=4):
    """Cut a window of length ``segment_size`` from each batch item of
    ``x`` ([b, d, t]), starting at the per-item offsets in ``ids_str``.

    Windows running past the end of the time axis are left zero-padded.
    """
    out = torch.zeros_like(x[:, :, :segment_size])
    for batch_idx, start in enumerate(ids_str):
        out[batch_idx] = x[batch_idx, :, start:start + segment_size]
    return out


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """Slice one uniformly random window per batch item.

    Valid lengths may be supplied per item via ``x_lengths`` (defaults to
    the full time axis). Returns (segments, start_indices).
    """
    batch, _, t_max = x.size()
    if x_lengths is None:
        x_lengths = t_max
    max_start = x_lengths - segment_size + 1
    starts = (torch.rand([batch]).to(device=x.device) * max_start).to(dtype=torch.long)
    return slice_segments(x, starts, segment_size), starts
65
-
66
-
67
def get_timing_signal_1d(
        length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Transformer-style sinusoidal timing signal of shape [1, channels, length].

    The first ``channels // 2`` rows are sines and the next half cosines
    over geometrically spaced timescales; one zero row is padded in when
    ``channels`` is odd.
    """
    positions = torch.arange(length, dtype=torch.float)
    half = channels // 2
    # Geometric progression of timescales between min and max.
    log_step = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (half - 1))
    inv_scales = min_timescale * torch.exp(
            torch.arange(half, dtype=torch.float) * -log_step)
    angles = positions.unsqueeze(0) * inv_scales.unsqueeze(1)
    sig = torch.cat([torch.sin(angles), torch.cos(angles)], 0)
    sig = F.pad(sig, [0, 0, 0, channels % 2])
    return sig.view(1, channels, length)


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add the timing signal elementwise to ``x`` ([b, channels, length])."""
    _, channels, length = x.size()
    sig = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + sig.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    """Concatenate the timing signal to ``x`` along ``axis`` (channels by default)."""
    _, channels, length = x.size()
    sig = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, sig.to(dtype=x.dtype, device=x.device)], axis)
93
-
94
-
95
def subsequent_mask(length):
    """Causal attention mask of shape [1, 1, length, length]: position i
    may attend only to positions <= i."""
    return torch.ones(length, length).tril().unsqueeze(0).unsqueeze(0)
98
-
99
-
100
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """WaveNet-style gated activation: tanh(s[:c]) * sigmoid(s[c:]) where
    s = input_a + input_b and c = n_channels[0] splits the channel axis."""
    channels = n_channels[0]
    summed = input_a + input_b
    gated = torch.tanh(summed[:, :channels, :]) * torch.sigmoid(summed[:, channels:, :])
    return gated
108
-
109
-
110
def convert_pad_shape(pad_shape):
    """Turn a [[before, after], ...] spec (one pair per dimension) into
    the flat F.pad argument, which lists the last dimension first."""
    flattened = []
    for pair in reversed(pad_shape):
        flattened.extend(pair)
    return flattened
114
-
115
-
116
def shift_1d(x):
    """Shift ``x`` ([b, d, t]) one step right along time, zero-filling
    position 0 and dropping the last frame."""
    # Pad spec [1, 0, 0, 0, 0, 0] == convert_pad_shape([[0, 0], [0, 0], [1, 0]]).
    return F.pad(x, [1, 0, 0, 0, 0, 0])[:, :, :-1]
119
-
120
-
121
def sequence_mask(length, max_length=None):
    """Boolean mask of shape [len(length), max_length]; row i holds
    length[i] True values followed by False padding. ``max_length``
    defaults to the largest length."""
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    Expand per-position durations into a monotonic alignment path.

    duration: [b, 1, t_x] integer-valued step counts per input position
    mask: [b, 1, t_y, t_x]
    returns: [b, 1, t_y, t_x] 0/1 path (in mask's dtype) where column j
    marks the output frames assigned to input position j.
    """
    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Differencing along t_x turns cumulative coverage into disjoint
    # per-position segments; the pad spec [0, 0, 1, 0, 0, 0] equals
    # convert_pad_shape([[0, 0], [1, 0], [0, 0]]) in the original code.
    # (Also removed an unused `device = duration.device` local.)
    path = path - F.pad(path, [0, 0, 1, 0, 0, 0])[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path
144
-
145
-
146
def clip_grad_value_(parameters, clip_value, norm_type=2):
    """Clamp gradients of ``parameters`` into [-clip_value, clip_value]
    in place (skipped when ``clip_value`` is None) and return the total
    pre-clip gradient norm of order ``norm_type``."""
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total = 0
    for p in grads:
        total += p.grad.data.norm(norm_type).item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    return total ** (1. / norm_type)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Egrt/GCycleGAN/cyclegan.py DELETED
@@ -1,106 +0,0 @@
1
- import numpy as np
2
- import torch
3
- from PIL import Image
4
- from torch import nn
5
-
6
- from nets.cyclegan import Generator
7
- from utils.utils import (cvtColor, postprocess_output, preprocess_input,
8
- resize_image, show_config)
9
-
10
-
11
class CYCLEGAN(object):
    """Inference wrapper for a CycleGAN generator: loads weights once and
    converts single PIL images via :meth:`detect_image`."""

    _defaults = {
        #-----------------------------------------------#
        #   model_path points to the weight file under the logs folder
        #-----------------------------------------------#
        "model_path"        : 'model_data/G_model_B2A_last_epoch_weights.pth',
        #-----------------------------------------------#
        #   Input image size
        #-----------------------------------------------#
        "input_shape"       : [112, 112],
        #-------------------------------#
        #   Whether to use distortion-free (letterbox) resizing
        #-------------------------------#
        "letterbox_image"   : True,
        #-------------------------------#
        #   Whether to use CUDA
        #   Set to False when no GPU is available
        #-------------------------------#
        "cuda"              : False,
    }

    #---------------------------------------------------#
    #   Initialize CYCLEGAN
    #---------------------------------------------------#
    def __init__(self, **kwargs):
        # Start from the class defaults, then let keyword overrides win;
        # note overrides are also written back into the shared _defaults dict.
        self.__dict__.update(self._defaults)
        for name, value in kwargs.items():
            setattr(self, name, value)
            self._defaults[name] = value
        self.generate()

        show_config(**self._defaults)

    def generate(self):
        """Build the generator network and load the checkpoint weights."""
        #----------------------------------------#
        #   Create the GAN model
        #----------------------------------------#
        self.net    = Generator(upscale=1, img_size=tuple(self.input_shape),
                       window_size=7, img_range=1., depths=[3, 3, 3, 3],
                       embed_dim=60, num_heads=[3, 3, 3, 3], mlp_ratio=1, upsampler='1conv').eval()

        device      = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.net.load_state_dict(torch.load(self.model_path, map_location=device))
        # NOTE(review): .eval() was already applied above; this second
        # call is redundant but harmless.
        self.net    = self.net.eval()
        print('{} model loaded.'.format(self.model_path))

        if self.cuda:
            self.net = nn.DataParallel(self.net)
            self.net = self.net.cuda()

    #---------------------------------------------------#
    #   Generate a 1x1 converted image
    #---------------------------------------------------#
    def detect_image(self, image):
        """Run the generator on one PIL image; returns the converted PIL image."""
        #---------------------------------------------------------#
        #   Convert the image to RGB here to avoid errors when a
        #   grayscale image is used for prediction.
        #   Only RGB prediction is supported; all other image types
        #   are converted to RGB first.
        #---------------------------------------------------------#
        image       = cvtColor(image)
        #---------------------------------------------------------#
        #   Add gray bars to the image for distortion-free resizing;
        #   a plain resize would also work.
        #---------------------------------------------------------#
        image_data, nw, nh  = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image)
        #---------------------------------------------------------#
        #   Add the batch_size dimension (HWC -> 1xCxHxW float32)
        #---------------------------------------------------------#
        image_data  = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)

        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()

            #---------------------------------------------------#
            #   Feed the image into the network for prediction
            #---------------------------------------------------#
            pr = self.net(images)[0]
            #---------------------------------------------------#
            #   Convert to numpy (CxHxW -> HxWxC)
            #---------------------------------------------------#
            pr = pr.permute(1, 2, 0).cpu().numpy()

        #--------------------------------------#
        #   Crop away the gray-bar padding
        #--------------------------------------#
        if nw is not None:
            pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \
                    int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]


        image = postprocess_output(pr)
        image = np.clip(image, 0, 255)
        image = Image.fromarray(np.uint8(image))

        return image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/EuroPython2022/mmocr-demo/configs/textdet/maskrcnn/README.md DELETED
@@ -1,49 +0,0 @@
1
- # Mask R-CNN
2
-
3
- > [Mask R-CNN](https://arxiv.org/abs/1703.06870)
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ## Abstract
8
-
9
- We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition.
10
-
11
- <div align=center>
12
- <img src="https://user-images.githubusercontent.com/22607038/142795605-dfdd5f69-e9cd-4b69-9c6b-6d8bded18e89.png"/>
13
- </div>
14
-
15
- ## Results and models
16
-
17
- ### CTW1500
18
-
19
- | Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download |
20
- | :----------------------------------------------------------: | :--------------: | :-----------: | :----------: | :-----: | :-------: | :----: | :-------: | :---: | :-------------------------------------------------------------: |
21
- | [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py) | ImageNet | CTW1500 Train | CTW1500 Test | 160 | 1600 | 0.753 | 0.712 | 0.732 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.log.json) |
22
-
23
- ### ICDAR2015
24
-
25
- | Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download |
26
- | :--------------------------------------------------------: | :--------------: | :-------------: | :------------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: |
27
- | [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py) | ImageNet | ICDAR2015 Train | ICDAR2015 Test | 160 | 1920 | 0.783 | 0.872 | 0.825 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.log.json) |
28
-
29
- ### ICDAR2017
30
-
31
- | Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download |
32
- | :---------------------------------------------------------: | :--------------: | :-------------: | :-----------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: |
33
- | [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017.py) | ImageNet | ICDAR2017 Train | ICDAR2017 Val | 160 | 1600 | 0.754 | 0.827 | 0.789 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.log.json) |
34
-
35
- ```{note}
36
- We tuned parameters with the techniques in [Pyramid Mask Text Detector](https://arxiv.org/abs/1903.11800)
37
- ```
38
-
39
- ## Citation
40
-
41
- ```bibtex
42
- @INPROCEEDINGS{8237584,
43
- author={K. {He} and G. {Gkioxari} and P. {Dollár} and R. {Girshick}},
44
- booktitle={2017 IEEE International Conference on Computer Vision (ICCV)},
45
- title={Mask R-CNN},
46
- year={2017},
47
- pages={2980-2988},
48
- doi={10.1109/ICCV.2017.322}}
49
- ```