parquet-converter commited on
Commit
f3e8bf4
·
1 Parent(s): c1a869e

Update parquet files (step 64 of 249)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/101-5/gpt4free/g4f/.v1/testing/gptworldai_test.py +0 -18
  2. spaces/1368565466ki/Satdia/monotonic_align/__init__.py +0 -20
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chaos Group V-Ray Next ADV v4.30.01 for 3ds Max 2013-2020 Win x64 Free Trial and Discount Offers.md +0 -170
  4. spaces/1gistliPinn/ChatGPT4/Examples/Download Film Babuji Ek Ticket Bambai Love Full Movie !NEW!.md +0 -18
  5. spaces/1line/AutoGPT/autogpt/spinner.py +0 -65
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Getting Over It for PC - The Ultimate Challenge Game from Ocean of Games.md +0 -133
  7. spaces/1phancelerku/anime-remove-background/ TikTok .md +0 -101
  8. spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate 1.5.2 Mod Apk - Drive Realistic Buses with Amazing Features.md +0 -119
  9. spaces/1phancelerku/anime-remove-background/Download Dr Driving 3 Mod APK and Learn Driving with Fun.md +0 -98
  10. spaces/1phancelerku/anime-remove-background/Explore the Thrilling World of Monster Life with Free Shopping Mod APK.md +0 -96
  11. spaces/1phancelerku/anime-remove-background/Extreme car driving simulator apk Free download and play the most realistic car game ever.md +0 -99
  12. spaces/AIConsultant/MusicGen/tests/modules/test_seanet.py +0 -115
  13. spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/classifier.py +0 -267
  14. spaces/ALSv/FSW/roop/processors/frame/__init__.py +0 -0
  15. spaces/Aaaad/Dddde/app.py +0 -3
  16. spaces/Abhilashvj/planogram-compliance/utils/autoanchor.py +0 -219
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/base/Base.js +0 -112
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/EaseMoveMethods.js +0 -120
  19. spaces/Aloento/9Nine-PITS/text/frontend/normalizer/acronyms.py +0 -13
  20. spaces/Amrrs/DragGan-Inversion/PTI/utils/ImagesDataset.py +0 -43
  21. spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/__init__.py +0 -0
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/other-formats.md +0 -194
  23. spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py +0 -12
  24. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py +0 -63
  25. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py +0 -7
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deprecated_wrappers.py +0 -43
  27. spaces/Artbogdanov/monet-manet/README.md +0 -12
  28. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/demo/inference_on_a_image.py +0 -214
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/__init__.py +0 -0
  30. spaces/BalaBhaskarudu/mygenAIChatbot/app.py +0 -34
  31. spaces/Benson/text-generation/Examples/Animal Rebelin Batalla Simulador Mod Apk Desbloqueado Todo.md +0 -61
  32. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/vqvae/quantize.py +0 -329
  33. spaces/BetterAPI/BetterChat_new/src/lib/utils/sum.ts +0 -3
  34. spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/share/+server.ts +0 -54
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py +0 -46
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py +0 -705
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/johabprober.py +0 -47
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/terminal.py +0 -127
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/make_specs.py +0 -431
  40. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/par.h +0 -62
  41. spaces/CVPR/WALT/mmdet/models/detectors/htc.py +0 -15
  42. spaces/CVPR/regionclip-demo/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp +0 -522
  43. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/slio.py +0 -177
  44. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/__init__.py +0 -15
  45. spaces/CassBunny/anything-v3.0/app.py +0 -276
  46. spaces/ChandraMohanNayal/AutoGPT/data_ingestion.py +0 -96
  47. spaces/ChandraMohanNayal/AutoGPT/run_continuous.sh +0 -3
  48. spaces/Cropinky/esrgan/realesrgan/archs/srvgg_arch.py +0 -69
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/reverseContourPen.py +0 -96
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Image-0fe369ad.js +0 -2
spaces/101-5/gpt4free/g4f/.v1/testing/gptworldai_test.py DELETED
@@ -1,18 +0,0 @@
1
- import gptworldAi
2
-
3
- # single completion
4
- for chunk in gptworldAi.Completion.create("你是谁", "127.0.0.1:7890"):
5
- print(chunk, end="", flush=True)
6
- print()
7
-
8
- # chat completion
9
- message = []
10
- while True:
11
- prompt = input("请输入问题:")
12
- message.append({"role": "user", "content": prompt})
13
- text = ""
14
- for chunk in gptworldAi.ChatCompletion.create(message, '127.0.0.1:7890'):
15
- text = text + chunk
16
- print(chunk, end="", flush=True)
17
- print()
18
- message.append({"role": "assistant", "content": text})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1368565466ki/Satdia/monotonic_align/__init__.py DELETED
@@ -1,20 +0,0 @@
1
- from numpy import zeros, int32, float32
2
- from torch import from_numpy
3
-
4
- from .core import maximum_path_jit
5
-
6
-
7
- def maximum_path(neg_cent, mask):
8
- """ numba optimized version.
9
- neg_cent: [b, t_t, t_s]
10
- mask: [b, t_t, t_s]
11
- """
12
- device = neg_cent.device
13
- dtype = neg_cent.dtype
14
- neg_cent = neg_cent.data.cpu().numpy().astype(float32)
15
- path = zeros(neg_cent.shape, dtype=int32)
16
-
17
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
18
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
19
- maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
20
- return from_numpy(path).to(device=device, dtype=dtype)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chaos Group V-Ray Next ADV v4.30.01 for 3ds Max 2013-2020 Win x64 Free Trial and Discount Offers.md DELETED
@@ -1,170 +0,0 @@
1
-
2
- <h1>Chaos Group V-Ray Next ADV v4.30.01 for 3ds Max 2013-2020 Win x64</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are a 3D artist, designer, or animator who uses Autodesk 3ds Max, you probably know how important it is to have a powerful and reliable rendering plugin. Rendering is the process of turning your 3D models and scenes into realistic images or animations that can be used for various purposes, such as presentations, marketing, entertainment, or education.</p>
5
- <h2>Chaos Group V-Ray Next ADV v4.30.01 for 3ds Max 2013-2020 Win x64</h2><br /><p><b><b>Download</b> &mdash; <a href="https://byltly.com/2uKxXJ">https://byltly.com/2uKxXJ</a></b></p><br /><br />
6
- <p>One of the most popular and widely used rendering plugins for 3ds Max is V-Ray, developed by Chaos Group. V-Ray is a versatile and flexible tool that can handle any type of project, from architectural visualization to visual effects. V-Ray has been used by many professionals and studios around the world, such as Digital Domain, Blur Studio, Method Studios, Framestore, and more.</p>
7
- <p>In this article, we will introduce you to the latest version of V-Ray for 3ds Max, which is V-Ray Next ADV v4.30.01. We will show you how to install it, how to use it, and how to optimize it for your workflow and projects. We will also highlight some of the new features and improvements that make V-Ray Next faster, smarter, and more powerful than ever before.</p>
8
- <h2>How to install V-Ray Next for 3ds Max?</h2>
9
- <h3>System requirements</h3>
10
- <p>Before you install V-Ray Next for 3ds Max, you need to make sure that your system meets the minimum requirements for running it smoothly. Here are the system requirements for V-Ray Next:</p>
11
- <table>
12
- <tr>
13
- <th>Component</th>
14
- <th>Minimum</th>
15
- <th>Recommended</th>
16
- </tr>
17
- <tr>
18
- <td>Processor</td>
19
- <td>Intel® Pentium® IV or compatible processor with SSE4.2 support</td>
20
- <td>Intel® Core® i7 or higher</td>
21
- </tr>
22
- <tr>
23
- <td>RAM</td>
24
- <td>4 GB RAM (8 GB recommended)</td>
25
- <td>16 GB RAM or higher</td>
26
- </tr>
27
- <tr>
28
- <td>Operating system</td>
29
- <td>Windows® 7 SP1 (64-bit), Windows® 8.1 (64-bit), Windows® 10 (64-bit)</td>
30
- <td>Windows® 10 (64-bit)</td>
31
- </tr>
32
- <tr>
33
- <td>Graphics card</td>
34
- <td>NVIDIA® GeForce® GTX 1060 (6 GB VRAM) or equivalent</td>
35
- <td>NVIDIA® GeForce® RTX 2080 Ti (11 GB VRAM) or higher</td>
36
- </tr>
37
- <tr>
38
- <td>Hard disk space</td>
39
- <td>2 GB free disk space (10 GB recommended)</td>
40
- <td>20 GB free disk space or higher</td>
41
- </tr>
42
- <tr>
43
- <td>Software</td>
44
- <td>Autodesk® 3ds Max® versions 2013-2020 (64-bit)</td>
45
- <td>The latest version of Autodesk® 3ds Max® (64-bit)</td>
46
- </tr>
47
- </table>
48
- <p>Note that these are the requirements for running V-Ray Next on a single machine. If you want to use distributed rendering or network rendering, you will need additional hardware and software components. You can find more information about distributed rendering <a href="https://docs.chaosgroup.com/display/VRAYMAX/Distributed+Rendering">here</a>.</p>
49
- <h3>Installation steps</h3>
50
- <p>To install V-Ray Next for 3ds Max, you need to follow these steps:</p>
51
- <ol>
52
- <li>Download the installer from the <a href="https://www.chaos.com/vray/try-vray-3ds-max">official website</a>. You will need to register or log in with your Chaos account to access the download link.</li>
53
- <li>Run the installer as administrator and follow the instructions on the screen. You will need to accept the license agreement, choose the installation type (workstation or render slave), select the components you want to install (V-Ray core files, license server, Swarm manager, etc.), and specify the installation folder.</li>
54
- <li>If you have a previous version of V-Ray installed on your machine, you will be asked if you want to uninstall it or keep it. It is recommended that you uninstall any previous versions of V-Ray before installing V-Ray Next.</li>
55
- <li>If you have chosen to install the license server component, you will need to activate your license online or offline. You can find more information about licensing <a href="https://docs.chaosgroup.com/display/VRAYMAX/Licensing+V-Ray">here</a>.</li>
56
- <li>After the installation is complete, you can launch 3ds Max and start using V-Ray Next.</li>
57
- </ol>
58
- <h2>How to use V-Ray Next for 3ds Max?</h2>
59
- <h3>V-Ray Next interface and settings</h3>
60
- <p>V-Ray Next integrates seamlessly with 3ds Max and adds several new menus, toolbars, panels, and windows to its interface. Here are some of the main elements of the V-Ray interface:</p>
61
- <ul>
62
- <li>The <strong>V-Ray menu</strong>, located in the main menu bar of 3ds Max, gives you access to various commands and options related to V-Ray.</li>
63
- <li>The <strong>VFB window</strong>, which stands for Virtual Frame Buffer, is where you can see your rendered image or animation and adjust its parameters using various tools and controls.</li>
64
- <li>The <strong>VFB toolbar</strong>, located at the top of the VFB window, contains buttons for rendering modes, camera settings, color corrections, denoising options, history settings, etc.</li>
65
- <li>The <strong>VFB history window</strong>, located at the bottom of the VFB window, allows you to compare different versions of your rendered image or animation using thumbnails.</li>
66
- <li>The <strong>VFB color corrections window</strong>, located at the right side of the VFB window, lets you apply various color adjustments to your rendered image or animation using sliders.</li>
67
- <li>The <strong>VFB render elements window</strong>, located at the left side of the VFB window, shows you different layers or channels of your rendered image or animation that can be used for compositing or post-processing.</li>
68
- <li>The <strong>VFB statistics window</strong>, located at the top right corner of the VFB window, displays information about your rendering process such as time elapsed, memory usage, samples per pixel, etc.</li>
69
- <li>The <strong>VFB lens effects window</strong>, located at the bottom right corner of the VFB window, enables you to add various optical effects to your rendered image or animation such as glare, bloom, vignette, etc.</li>
70
- <li>The <strong>VFB settings window</strong>, accessible by clicking on the gear icon in the VFB toolbar, allows you to customize various aspects of the VFB such as resolution, quality, format, output, etc.</li></ul><ul><li>The <strong>V-Ray toolbar </strong>, located in any viewport toolbar area of 3ds Max, contains buttons for quick access to common functions such as rendering, interactive rendering, lighting analysis, camera exposure, etc.</li><li>The <strong>V-Ray Asset Editor </strong>, accessible by clicking on its icon in the main toolbar or in any viewport toolbar area of 3ds Max, is where you can manage all your assets related to V-Ray such as materials, lights, textures, geometries, render elements, etc.</li><li>The <strong>V-Ray Render Settings </strong>, accessible by clicking on its icon in any viewport toolbar area of 3ds Max or by going to Rendering > Render Setup, is where you can adjust all your global settings related to rendering such as engine type, quality presets, sampling parameters, environment options, output options, <h3>V-Ray Next rendering modes and options</h3>
71
- <p>V-Ray Next offers you different rendering modes and options depending on your needs and preferences. You can choose between:</p>
72
- <ul>
73
- <li><strong>Production rendering</strong>, which is the standard mode for creating high-quality images or animations with all the features and settings available in V-Ray.</li>
74
- <li><strong>Interactive rendering</strong>, which is a fast and responsive mode that updates your image as you make changes to your scene, camera, lights, materials, etc. This mode is ideal for testing and previewing your scene before production rendering.</li>
75
- <li><strong>GPU rendering</strong>, which is a mode that uses your graphics card (GPU) instead of your processor (CPU) to render your scene. This mode can be faster and more efficient for certain types of scenes and effects, such as volumetrics, denoising, etc.</li>
76
- <li><strong>Hybrid rendering</strong>, which is a mode that combines both CPU and GPU rendering to utilize all your hardware resources and speed up your rendering process.</li>
77
- </ul>
78
- <p>You can switch between these modes and options in the V-Ray Render Settings window, under the Renderer rollout. You can also adjust various parameters related to sampling, ray tracing, global illumination, motion blur, depth of field, etc.</p>
79
- <p>V-Ray Next Scene Intelligence for 3ds Max 2013-2020 Win x64[^1^]<br />
80
- V-Ray GPU rendering for 3ds Max 2013-2020 Win x64[^1^]<br />
81
- NVIDIA AI Denoiser for V-Ray Next in 3ds Max 2013-2020 Win x64[^1^]<br />
82
- V-Ray Next Update 3 for 3ds Max 2013-2020 Win x64 download[^2^]<br />
83
- V-Ray Physical Camera for 3ds Max 2013-2020 Win x64[^1^]<br />
84
- V-Ray Next free trial for 3ds Max 2013-2020 Win x64[^3^]<br />
85
- V-Ray Next for 3ds Max tutorial[^1^]<br />
86
- V-Ray Next features for 3ds Max 2013-2020 Win x64[^1^]<br />
87
- V-Ray Next vs V-Ray for 3ds Max 2013-2020 Win x64 comparison<br />
88
- V-Ray Next system requirements for 3ds Max 2013-2020 Win x64<br />
89
- V-Ray Next price for 3ds Max 2013-2020 Win x64<br />
90
- V-Ray Next review for 3ds Max 2013-2020 Win x64<br />
91
- V-Ray Next crack for 3ds Max 2013-2020 Win x64<br />
92
- V-Ray Next license for 3ds Max 2013-2020 Win x64<br />
93
- V-Ray Next installation guide for 3ds Max 2013-2020 Win x64<br />
94
- V-Ray Next Adaptive Dome Light for 3ds Max 2013-2020 Win x64[^1^]<br />
95
- V-Ray Next GPU-accelerated volume rendering for 3ds Max 2013-2020 Win x64[^1^]<br />
96
- V-Ray Next Lighting Analysis tools for 3ds Max 2013-2020 Win x64[^1^]<br />
97
- V-Ray Next IPR for interactive production rendering in 3ds Max 2013-2020 Win x64[^1^]<br />
98
- V-Ray Next Denoiser for noise reduction in rendering in 3ds Max 2013-2020 Win x64[^1^]<br />
99
- V-Ray Next Resumable Rendering for stopping and resuming renders in 3ds Max 2013-2020 Win x64[^1^]<br />
100
- V-Ray Next webinars for learning tips and tricks in 3ds Max 2013-2020 Win x64[^1^]<br />
101
- V-Ray Next support for troubleshooting issues in 3ds Max 2013-2020 Win x64<br />
102
- V-Ray Next forum for discussing topics related to V-Ray Next in 3ds Max<br />
103
- V-Ray Next documentation for learning how to use V-Ray Next in 3ds Max<br />
104
- V-Ray Next presets for saving and loading render settings in V-Ray Next in 3ds Max<br />
105
- V-Ray Next materials library for accessing a collection of ready-to-use materials in V-Ray Next in 3ds Max<br />
106
- V-Ray Next render elements for creating render passes in V-Ray Next in 3ds Max<br />
107
- V-Ray Next proxy objects for managing complex geometry in V-Ray Next in 3ds Max<br />
108
- V-Ray Next displacement mapping for adding detail to surfaces in V-Ray Next in 3ds Max</p>
109
- <h3>V-Ray Next lighting and materials</h3>
110
- <p>V-Ray Next provides you with a wide range of lighting and material options to create realistic and stunning scenes. You can use:</p>
111
- <ul>
112
- <li><strong>V-Ray lights</strong>, which are special types of lights that are optimized for V-Ray rendering. You can create different kinds of V-Ray lights such as dome light, sun light, sky light, sphere light, rectangle light, mesh light, etc.</li>
113
- <li><strong>V-Ray materials</strong>, which are special types of materials that are optimized for V-Ray rendering. You can create different kinds of V-Ray materials such as standard material, blend material, car paint material, hair material, subsurface scattering material, etc.</li>
114
- <li><strong>V-Ray textures</strong>, which are special types of textures that are optimized for V-Ray rendering. You can use different kinds of V-Ray textures such as bitmap texture, noise texture, gradient texture, dirt texture, curvature texture, etc.</li>
115
- <li><strong>V-Ray shaders</strong>, which are special types of nodes that can be used to create custom effects and functions for your materials and textures. You can use different kinds of V-Ray shaders such as color correction shader, falloff shader, fresnel shader, triplanar shader, etc.</li>
116
- </ul>
117
- <p>You can manage all your lighting and material assets in the V-Ray Asset Editor window, where you can create, edit, assign, organize, and preview them. You can also import and export assets from external sources such as Substance Designer or PBR materials.</p>
118
- <h3>V-Ray Next effects and post-processing</h3>
119
- <p>V-Ray Next allows you to add various effects and post-processing adjustments to your rendered image or animation without leaving 3ds Max or using external applications. You can use:</p>
120
- <ul>
121
- <li><strong>V-Ray render elements</strong>, which are separate layers or channels of your rendered image or animation that can be used for compositing or post-processing. You can create different kinds of V-Ray render elements such as diffuse element, specular element, reflection element, refraction element, shadow element, lighting element, etc.</li><li><strong>V-Ray frame buffer tools</strong>, which are tools and controls that you can use to modify your rendered image or animation in the VFB window. You can use different kinds of VFB tools such as color corrections tool, lens effects tool, denoiser tool, history tool, statistics tool, etc.</li></ul><p>You can access all your render elements and frame buffer tools in the VFB window, where you can enable, disable, edit, save, load, and compare them. You can also export them to external applications such as Photoshop or Nuke.</p <h2>How to optimize V-Ray Next for 3ds Max?</h2>
122
- <h3>V-Ray Next scene intelligence</h3>
123
- <p>V-Ray Next introduces a new feature called scene intelligence that automatically analyzes your scene and optimizes your rendering settings accordingly. Scene intelligence includes:</p>
124
- <ul>
125
- <li><strong>Automatic exposure and white balance</strong>, which adjusts the camera exposure and color temperature based on the lighting conditions of your scene. This feature eliminates the need for manual tweaking and ensures a balanced and realistic image.</li>
126
- <li><strong>Adaptive dome light</strong>, which samples only the parts of the dome light that contribute to the illumination of your scene. This feature speeds up your rendering time by up to 7 times for scenes with image-based lighting.</li>
127
- <li><strong>Point-and-shoot camera</strong>, which sets the camera focus distance automatically based on where you click in the viewport. This feature simplifies the process of creating depth of field effects.</li>
128
- <li><strong>Automatic memory management</strong>, which optimizes the memory usage of your scene by dynamically loading and unloading assets as needed. This feature allows you to render large scenes with complex geometries and textures without running out of memory.</li>
129
- </ul>
130
- <p>You can enable or disable these features in the V-Ray Render Settings window, under the Camera rollout (for automatic exposure and white balance), Environment rollout (for adaptive dome light), Physical Camera rollout (for point-and-shoot camera), and System rollout (for automatic memory management).</p>
131
- <h3>V-Ray Next adaptive dome light</h3>
132
- <p>V-Ray Next introduces a new feature called adaptive dome light that automatically samples only the parts of the dome light that contribute to the illumination of your scene. This feature speeds up your rendering time by up to 7 times for scenes with image-based lighting.</p>
133
- <h3>V-Ray Next GPU rendering and denoising</h3>
134
- <p>V-Ray Next introduces a new feature called GPU rendering and denoising that allows you to use your graphics card (GPU) instead of your processor (CPU) to render your scene and remove noise from your image or animation. This feature can be faster and more efficient for certain types of scenes and effects, such as volumetrics, denoising, etc.</p>
135
- <p>GPU rendering is a mode that uses your graphics card (GPU) instead of your processor (CPU) to render your scene. This mode can be faster and more efficient for certain types of scenes and effects, such as volumetrics, denoising, etc. You can switch to GPU rendering in the V-Ray Render Settings window, under the Renderer rollout, by choosing CUDA or RTX as the engine type. You can also select which GPUs or CPUs you want to use for rendering.</p>
136
- <p>Denoising is a process that removes noise from your image or animation without losing detail or quality. Noise is a common problem in rendering, especially when using low sampling settings or complex lighting scenarios. V-Ray Next offers you different options for denoising, such as:</p>
137
- <ul>
138
- <li><strong>V-Ray Denoiser</strong>, which is a built-in denoiser that works on both CPU and GPU rendering modes. You can enable it in the V-Ray Render Settings window, under the V-Ray Denoiser rollout. You can also adjust various parameters related to quality, blend amount, radius, etc.</li>
139
- <li><strong>NVIDIA AI Denoiser</strong>, which is an external denoiser that works only on GPU rendering mode. You can enable it in the VFB window, by clicking on its icon in the VFB toolbar. This denoiser uses artificial intelligence to remove noise instantly and interactively.</li>
140
- <li><strong>Render element denoiser</strong>, which is a new feature that allows you to denoise individual render elements separately. You can enable it in the Render Elements tab of the Render Setup window, by checking the Denoise option for each render element. This feature ensures that the denoised render elements match the denoised beauty image.</li>
141
- </ul>
142
- <p>You can access all your denoising options and tools in the VFB window, where you can enable, disable, edit, save, load, and compare them. You can also export them to external applications such as Photoshop or Nuke.</p>
143
- <h2>Conclusion</h2>
144
- <h3>Summary of the main points</h3>
145
- <p>In this article, we have introduced you to V-Ray Next ADV v4.30.01 for 3ds Max 2013-2020 Win x64, which is the latest version of V-Ray for 3ds Max. We have shown you how to install it, how to use it, and how to optimize it for your workflow and projects. We have also highlighted some of the new features and improvements that make V-Ray Next faster, smarter, and more powerful than ever before.</p>
146
- <p>Some of the main features and improvements of V-Ray Next are:</p>
147
- <ul>
148
- <li><strong>Scene intelligence</strong>, which automatically analyzes your scene and optimizes your rendering settings accordingly.</li>
149
- <li><strong>Adaptive dome light</strong>, which samples only the parts of the dome light that contribute to the illumination of your scene.</li>
150
- <li><strong>GPU rendering and denoising</strong>, which allows you to use your graphics card (GPU) instead of your processor (CPU) to render your scene and remove noise from your image or animation.</li>
151
- <li><strong>Render element denoiser</strong>, which allows you to denoise individual render elements separately.</li>
152
- <li><strong>New lighting analysis tools</strong>, which make it easier to visualize a scene’s real-world illumination values in lux or footcandles.</li>
153
- <li><strong>New metalness material properties</strong>, which offer improved compatibility with Substance Designer and PBR materials.</li>
154
- </ul>
155
- <h3>Call to action and links</h3>
156
- <p>If you are interested in trying out V-Ray Next for 3ds Max yourself, you can download a free 30-day trial from the <a href="https://www.chaos.com/vray/try-vray-3ds-max">official website</a>. You will need to register or log in with your Chaos account to access the download link.</p>
157
- <p>If you want to learn more about V-Ray Next for 3ds Max, you can visit the following links:</p>
158
- <ul>
159
- <li><a href="https://docs.chaos.com/display/VMAX/V-Ray+Next+for+3ds+Max+Help+Home">V-Ray Next for 3ds Max Help Home</a>, which contains detailed documentation and tutorials on how to use V-Ray Next for 3ds Max.</li>
160
- <li><a href="https://www.chaos.com/blog/category/v-ray-for-3ds-max">V-Ray for 3ds Max Blog</a>, which features news, updates, tips, tricks, and showcases on V-Ray for 3ds Max.</li>
161
- <li><a href="https://www.chaos.com/vray/academy">V-Ray Academy</a>, which offers online courses and webinars on V-Ray for 3ds Max and other Chaos products.</li>
162
- <li><a href="https://forums.chaos.com/c/v-ray-for-3ds-max">V-Ray for 3ds Max Forum</a>, which is a place where you can ask questions, get answers, share feedback, and connect with other V-Ray users.</li></ul><p>We hope you enjoyed this article and found it useful. If you have any comments or suggestions, please let us know in the comments section below. Thank you for reading!</p <h2>FAQs</h2>
163
- <h4>What is V-Ray Next?</h4>
164
- <p>V-Ray Next is the latest version of V-Ray for 3ds Max, which is a powerful and versatile rendering plugin that can handle any type of project, from architectural visualization to visual effects.</p <h4>What are the main features of V-Ray Next?</h4>
165
- <p>Some of the main features of V-Ray Next are scene intelligence, adaptive dome light, GPU rendering and denoising, render element denoiser, new lighting analysis tools, and new metalness material properties.</p <h4>How to install V-Ray Next for 3ds Max?</h4>
166
- <p>To install V-Ray Next for 3ds Max, you need to download the installer from the official website, run it as administrator, follow the instructions on the screen, uninstall any previous versions of V-Ray, and activate your license online or offline.</p <h4>How to use V-Ray Next for 3ds Max?</h4>
167
- <p>To use V-Ray Next for 3ds Max, you need to switch the renderer to V-Ray GPU in the Render Setup window, access various commands and options from the V-Ray menu, manage your assets in the V-Ray Asset Editor window, adjust your settings in the V-Ray Render Settings window, and view your results in the VFB window.</p <h4>How to optimize V-Ray Next for 3ds Max?</h4>
168
- <p>To optimize V-Ray Next for 3ds Max, you need to enable or disable various features in the Render Setup window, such as automatic exposure and white balance, adaptive dome light, GPU rendering and denoising, and automatic memory management.</p </p> 0a6ba089eb<br />
169
- <br />
170
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Download Film Babuji Ek Ticket Bambai Love Full Movie !NEW!.md DELETED
@@ -1,18 +0,0 @@
1
- <h2>Download Film Babuji Ek Ticket Bambai Love Full Movie</h2><br /><p><b><b>Download File</b> &#127383; <a href="https://imgfil.com/2uxZZ9">https://imgfil.com/2uxZZ9</a></b></p><br /><br />
2
-
3
- Our understanding of mother culture has to do with a lot of work, sustained over many generations, of feeding children and supporting them emotionally, physically, intellectually, culturally, spiritually and psychologically and that is what is being lost. This film tries to understand the powerful culture of India, how it is integral to us and then what happens when there are no families and children left to support it. This piece of work is made in the hope that other directors, who work in film and television, will be inspired and will make works in this genre that respect that culture and make a small contribution to saving it.
4
-
5
- Ajantha Matiyar: So, there is this curious notion that you are trying to depict a dying culture. What is it that makes it dying?
6
-
7
- Sarojini Rikhye: The urban-based Bengali culture that you see around you when you step out of the airport, the metro station or the bus stand -- that is not just about Bengali culture but that is an indicator of what is happening all over India. It is the culture of the housewife, of the woman who has no work and has to be ‘put in a position’ to have children, and we need to understand that this phenomenon is not a Bengali phenomenon. It is happening all over India. The culture of so many generations of women and children has been trying to support their families through a woman’s right to her own body. They have no land to get agricultural produce from, to sell. There is no daycare centre for the children, no schools for the children, no health centres, no spaces for the men to socialise and their entire value system is against women. As a result of this, the children are faced with a lot of physical and mental agony and the women are facing depression and other mental health issues.
8
-
9
- AM: But, what is interesting is that, while many of us have heard about so many negative things that are happening in India, we do not know that this culture is facing a crisis.
10
-
11
- SR: The focus should be on the culture of the society, not just on this one thing or another.
12
-
13
- AM: So, what is it that makes it dying?
14
-
15
- SR: A lot of change that you see in Bengal since the 1960s has been the result of educated, urban middle-class, mobile and progressive young women, who came from all over Bengal, from all over India, from other cities and towns, and came to Bengal 4fefd39f24<br />
16
- <br />
17
- <br />
18
- <p></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1line/AutoGPT/autogpt/spinner.py DELETED
@@ -1,65 +0,0 @@
1
- """A simple spinner module"""
2
- import itertools
3
- import sys
4
- import threading
5
- import time
6
-
7
-
8
- class Spinner:
9
- """A simple spinner class"""
10
-
11
- def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None:
12
- """Initialize the spinner class
13
-
14
- Args:
15
- message (str): The message to display.
16
- delay (float): The delay between each spinner update.
17
- """
18
- self.spinner = itertools.cycle(["-", "/", "|", "\\"])
19
- self.delay = delay
20
- self.message = message
21
- self.running = False
22
- self.spinner_thread = None
23
-
24
- def spin(self) -> None:
25
- """Spin the spinner"""
26
- while self.running:
27
- sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
28
- sys.stdout.flush()
29
- time.sleep(self.delay)
30
- sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
31
-
32
- def __enter__(self):
33
- """Start the spinner"""
34
- self.running = True
35
- self.spinner_thread = threading.Thread(target=self.spin)
36
- self.spinner_thread.start()
37
-
38
- return self
39
-
40
- def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
41
- """Stop the spinner
42
-
43
- Args:
44
- exc_type (Exception): The exception type.
45
- exc_value (Exception): The exception value.
46
- exc_traceback (Exception): The exception traceback.
47
- """
48
- self.running = False
49
- if self.spinner_thread is not None:
50
- self.spinner_thread.join()
51
- sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
52
- sys.stdout.flush()
53
-
54
- def update_message(self, new_message, delay=0.1):
55
- """Update the spinner message
56
- Args:
57
- new_message (str): New message to display
58
- delay: Delay in seconds before updating the message
59
- """
60
- time.sleep(delay)
61
- sys.stdout.write(
62
- f"\r{' ' * (len(self.message) + 2)}\r"
63
- ) # Clear the current message
64
- sys.stdout.flush()
65
- self.message = new_message
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Getting Over It for PC - The Ultimate Challenge Game from Ocean of Games.md DELETED
@@ -1,133 +0,0 @@
1
-
2
- <h1>Download Getting Over It PC Ocean of Games: A Guide to the Most Frustrating Game Ever</h1>
3
- <p>Have you ever played a game that made you want to smash your keyboard, throw your mouse, or scream at your monitor? If not, then you might want to try Getting Over It with Bennett Foddy, a game that is designed to hurt you. And if you are looking for a way to download this game for free, then you might be interested in Ocean of Games, a website that offers a variety of PC games for download. But before you do that, let's take a look at what Getting Over It is, why it is so frustrating, how to download it from Ocean of Games, and how to beat it.</p>
4
- <h2>What is Getting Over It?</h2>
5
- <p>Getting Over It with Bennett Foddy is an action game that was released in 2017 for Windows, Mac, iOS, and Android. It is developed by Bennett Foddy, an independent game designer and academic who is known for creating experimental and challenging games such as QWOP, GIRP, and CLOP.</p>
6
- <h2>download getting over it pc ocean of games</h2><br /><p><b><b>Download Zip</b> &#9675; <a href="https://urlin.us/2uT2pw">https://urlin.us/2uT2pw</a></b></p><br /><br />
7
- <h3>The premise and the gameplay</h3>
8
- <p>The premise of Getting Over It is simple: you are a man named Diogenes who is stuck in a cauldron and has a hammer as his only tool. Your goal is to climb up an enormous mountain made of various objects such as rocks, trees, furniture, pipes, and even other games. You move the hammer with the mouse or the touch screen, and that's all there is. There are no checkpoints, no saves, no levels, no scores, no achievements. Just you and the mountain.</p>
9
- <p>The gameplay of Getting Over It is deceptively simple as well. You can use the hammer to push yourself off the ground, hook onto objects, swing around them, or launch yourself into the air. With practice, you'll be able to jump, swing, climb, and fly. However, this is easier said than done. The game's physics are realistic but unforgiving. A slight mistake can send you tumbling down the mountain, losing all your progress in an instant. And there is nothing to stop you from falling all the way back to the beginning.</p>
10
- <h3>The developer and the inspiration</h3>
11
- <p>Bennett Foddy is an Australian-born game designer who is currently a professor at New York University's Game Center. He has a PhD in philosophy from Oxford University and has written several papers on topics such as ethics, aesthetics, and game design. He is also a musician who plays bass guitar for the band Cut Copy.</p>
12
- <p>Foddy has stated that he made Getting Over It as a tribute to Jazzuo's 2002 B-Game classic Sexy Hiking, which has a similar concept of climbing a mountain with a hammer. He also said that he wanted to make a game for a certain kind of person: someone who likes hard games, who likes frustration games, who likes roguelikes, who likes speedrunning. He wanted to make a game that would hurt them.</p>
13
- <h3>The reception and the reviews</h3>
14
- <p>Getting Over It has received mostly positive reviews from critics and players alike. It has a score of 77/100 on Metacritic and an \"Overwhelmingly Positive\" rating on Steam. Many reviewers praised the game's originality, humor, challenge, and satisfaction. They also appreciated the game's commentary by F <p>oddy, who provides insights, jokes, quotes, and encouragement throughout the game. Some reviewers also noted that the game can be seen as a metaphor for life, art, or game development itself.</p>
15
- <p>However, not everyone enjoyed Getting Over It. Some reviewers criticized the game's difficulty, frustration, and repetitiveness. They also complained about the game's lack of features, options, and accessibility. Some players also reported technical issues, bugs, and crashes. And of course, some players simply hated the game for making them rage quit.</p>
16
- <h2>Why is Getting Over It so frustrating?</h2>
17
- <p>Getting Over It is not a game for everyone. It is a game that tests your patience, skill, and sanity. It is a game that can make you feel angry, sad, hopeless, or even depressed. But why is it so frustrating? Here are some of the reasons:</p>
18
- <p>How to download getting over it for pc free from ocean of games<br />
19
- Getting over it pc game download full version ocean of games<br />
20
- Ocean of games getting over it with bennett foddy pc download<br />
21
- Download getting over it highly compressed pc game ocean of games<br />
22
- Getting over it pc download google drive link ocean of games<br />
23
- Ocean of games getting over it pc system requirements and features<br />
24
- Getting over it pc game review and gameplay ocean of games<br />
25
- Download getting over it for windows 10/8/7 pc ocean of games<br />
26
- Ocean of games getting over it pc installation guide and tips<br />
27
- Getting over it pc download latest version 2023 ocean of games<br />
28
- Ocean of games getting over it pc cheats and mods download<br />
29
- Getting over it pc download torrent file ocean of games<br />
30
- Ocean of games getting over it pc best alternatives and similar games<br />
31
- Getting over it pc download no emulator needed ocean of games<br />
32
- Ocean of games getting over it pc offline mode and multiplayer support<br />
33
- Getting over it pc download crack and patch ocean of games<br />
34
- Ocean of games getting over it pc minimum and recommended specs<br />
35
- Getting over it pc download size and file format ocean of games<br />
36
- Ocean of games getting over it pc direct download link no survey<br />
37
- Getting over it pc download error and bug fixes ocean of games</p>
38
- <h3>The controls and the physics</h3>
39
- <p>The controls of Getting Over It are simple but hard to master. You only need to move the mouse or the touch screen to control the hammer, but that's easier said than done. The hammer's movement is sensitive and precise, but also erratic and unpredictable. You need to have a good sense of timing, distance, and angle to move effectively. And you need to constantly adjust your grip and position to avoid losing balance or momentum.</p>
40
- <p>The physics of Getting Over It are realistic but unforgiving. The game simulates gravity, friction, inertia, and collision in a realistic way, but that also means that the slightest mistake can have disastrous consequences. You can slip, slide, bounce, or fly off the mountain at any moment. And you can't rely on any safety nets or checkpoints to save you. You have to deal with the consequences of your actions.</p>
41
- <h3>The obstacles and the setbacks</h3>
42
- <p>The obstacles of Getting Over It are varied and challenging. The mountain is made of different objects that have different shapes, sizes, textures, and properties. Some objects are solid and stable, while others are slippery and movable. Some objects are helpful and supportive, while others are harmful and obstructive. Some objects are familiar and recognizable, while others are bizarre and surreal. You never know what to expect next.</p>
43
- <p>The setbacks of Getting Over It are frequent and painful. The mountain is full of traps and pitfalls that can send you back to where you started or even lower. You can fall from great heights or get stuck in narrow spaces. You can lose hours or days of progress in seconds or minutes. And you have to start over again and again until you reach the top.</p>
44
- <h3>The narration and the commentary</h3>
45
- <p>The narration of Getting Over It is witty but cruel. The game features a voice-over by Bennett Foddy himself, who talks to you throughout the game. He tells you about the history and the design of the game, he quotes from various philosophers and artists, he jokes about your situation and your failures, he encourages you to keep going and to not give up. He also sometimes apologizes for making such a hard game.</p>
46
- <p>The commentary of Getting Over It is informative but taunting. The game also features a chat box that shows messages from other players who are playing the game at the same time as you. They can share their thoughts, feelings, tips, or jokes with you. They can also see your progress and your falls on their screens. They can cheer you on or mock you mercilessly.</p>
47
- <h2>How to download Getting Over It PC Ocean of Games?</h2>
48
- <p>If you want to play Getting Over It on your PC for free, then you might want to check out Ocean of Games, a website that offers a variety of PC games for download. However, before you do that, you should be aware of some of the advantages and disadvantages of using this website.</p>
49
- <h3>The advantages and disadvantages of Ocean of Games</h3>
50
- <p>Ocean of Games has some advantages over other websites that offer PC games for download. Some of these advantages are:</p>
51
- <ul>
52
- <li>It has a large collection of games from different genres and categories.</li>
53
- <li>It has an easy-to-use interface and a fast download speed.</li>
54
- <li>It does not require any registration or subscription.</li>
55
- <li>It does not have any annoying ads or pop-ups.</li>
56
- </ul>
57
- <p>However, Ocean of Games also has some disadvantages that you should be aware of before using it. Some of these disadvantages are:</p>
58
- <ul>
59
- <li>It does not have any official license or authorization from the game developers or publishers.</li>
60
- <li>It does not guarantee the quality or the safety of the games it offers.</li>
61
- <li>It may contain viruses, malware, spyware, or other harmful software that can damage your PC or compromise your privacy.</li>
62
- <li>It may violate <p>It may violate the intellectual property rights of the game developers or publishers.</li>
63
- <li>It may expose you to legal risks or penalties for piracy or infringement.</li>
64
- </ul>
65
- <p>Therefore, you should use Ocean of Games at your own risk and discretion. You should also respect the rights and the work of the game developers or publishers and consider buying the game from official sources if you enjoy it.</p>
66
- <h3>The steps to download and install Getting Over It from Ocean of Games</h3>
67
- <p>If you still want to download Getting Over It PC Ocean of Games, then you can follow these steps:</p>
68
- <ol>
69
- <li>Go to the Ocean of Games website and search for Getting Over It in the search box.</li>
70
- <li>Select the game from the list of results and click on the download button.</li>
71
- <li>Wait for the download to finish and then extract the zip file to a folder of your choice.</li>
72
- <li>Open the folder and run the setup.exe file as an administrator.</li>
73
- <li>Follow the instructions on the screen to install the game on your PC.</li>
74
- <li>Launch the game from the desktop shortcut or the start menu and enjoy.</li>
75
- </ol>
76
- <h3>The alternatives to Ocean of Games</h3>
77
- <p>If you are looking for other websites that offer PC games for download, then you might want to check out some of these alternatives:</p>
78
- <ul>
79
- <li>Steam: Steam is the most popular and reputable platform for buying and playing PC games. It has a huge library of games from various genres and categories, as well as features such as cloud saving, achievements, multiplayer, mods, and more. You can also find some free or discounted games on Steam, especially during sales or events.</li>
80
- <li>GOG: GOG is another platform that sells and distributes PC games. It specializes in DRM-free games, meaning that you can play them without any online activation or restriction. It also offers some classic and retro games that are compatible with modern systems.</li>
81
- <li>itch.io: itch.io is a website that hosts indie games from various developers and creators. You can find some unique and original games on itch.io, as well as some free or pay-what-you-want games. You can also support the developers directly by buying or donating to their games.</li>
82
- </ul>
83
- <h2>How to beat Getting Over It?</h2>
84
- <p>Getting Over It is a game that is hard to beat, but not impossible. It requires a lot of practice, patience, and perseverance. It also requires some tips, tricks, and strategies. Here are some of them:</p>
85
- <h3>The tips and tricks to master the game</h3>
86
- <p>Here are some tips and tricks that can help you master the game:</p>
87
- <ul>
88
- <li>Learn how to use the hammer effectively. You can use it to push, pull, hook, swing, launch, or balance yourself. Experiment with different movements and angles to find what works best for you.</li>
89
- <li>Use both hands to control the mouse or the touch screen. This can give you more precision and stability when moving the hammer.</li>
90
- <li>Adjust your mouse sensitivity or touch sensitivity according to your preference. You can do this in the settings menu of the game. You can also adjust your screen resolution or window size to fit your monitor or device.</li>
91
- <li>Take breaks regularly. Getting Over It can be mentally and physically exhausting. You should take breaks every 15 minutes or so to relax your eyes, hands, and mind. You can also save your progress by quitting the game and resuming it later.</li>
92
- <li>Don't give up. Getting Over It is a game that is meant to challenge you and make you frustrated. But it is also a game that can reward you with satisfaction and accomplishment. Don't let your failures discourage you. Learn from them and try again.</li>
93
- </ul>
94
- <h3>The speedruns and the records</h3>
95
- <p>If you want to challenge yourself further, you can try to beat Getting Over It as fast as possible. This is called speedrunning, and it is a popular activity among gamers who like to compete with themselves or others. There are many websites and communities that track and showcase speedruns of various games, such as speedrun.com or Speed Demos Archive. You can also watch some videos of speedruns on YouTube or Twitch.</p>
96
- <p>The current world record for beating Getting Over It is 1 minute 19 seconds by a player named Lockness06. He achieved this feat on June 14th, 2021 using a mouse and keyboard. The previous record was 1 minute 24 seconds by a player named Distortion2. He achieved this feat on May 31st, 2021 using a controller.</p>
97
- <h3>The rewards and the <h3>The rewards and the secrets</h3>
98
- <p>If you manage to beat Getting Over It, you will be rewarded with a special ending that includes a song, a message, and a surprise. We won't spoil it for you, but we can tell you that it is worth the effort. You will also unlock a golden cauldron that you can use to play the game again with a different look.</p>
99
- <p>Getting Over It also has some secrets and easter eggs that you can discover along the way. Some of them are hidden in the mountain, some of them are triggered by certain actions, and some of them are revealed by the narrator. We won't tell you what they are, but we can give you some hints:</p>
100
- <ul>
101
- <li>There is a secret island that you can reach by flying over the ocean.</li>
102
- <li>There is a secret room that you can enter by breaking a wall.</li>
103
- <li>There is a secret message that you can read by zooming in on a sign.</li>
104
- <li>There is a secret mode that you can activate by typing a code.</li>
105
- <li>There is a secret game that you can play by clicking on a button.</li>
106
- </ul>
107
- <h2>Conclusion</h2>
108
- <p>Getting Over It with Bennett Foddy is a game that is not for everyone. It is a game that is hard, frustrating, and sometimes unfair. But it is also a game that is original, humorous, and satisfying. It is a game that challenges you to overcome your limits and to get over it.</p>
109
- <p>If you want to play Getting Over It on your PC for free, you can download it from Ocean of Games, a website that offers a variety of PC games for download. However, you should be careful of the risks and the drawbacks of using this website. You should also respect the rights and the work of the game developers and publishers and consider buying the game from official sources if you enjoy it.</p>
110
- <p>If you want to beat Getting Over It, you will need a lot of practice, patience, and perseverance. You will also need some tips, tricks, and strategies. And you will also need to discover some secrets and easter eggs along the way. But most importantly, you will need to have fun and to not give up.</p>
111
- <p>We hope this guide has helped you to learn more about Getting Over It PC Ocean of Games. If you have any questions or comments, feel free to leave them below. And if you liked this article, please share it with your friends. Thank you for reading and happy climbing!</p>
112
- <h2>FAQs</h2>
113
- <p>Here are some frequently asked questions about Getting Over It PC Ocean of Games:</p>
114
- <ol>
115
- <li><b>Who is Diogenes?</b></li>
116
- <p>Diogenes is the name of the man who is stuck in a cauldron in Getting Over It. He is named after an ancient Greek philosopher who was known for living in a barrel and rejecting conventional values and norms. He was also known for his wit and his cynicism.</p>
117
- <li><b>Who is Bennett Foddy?</b></li>
118
- <p>Bennett Foddy is the developer and the narrator of Getting Over It. He is an independent game designer and an academic who is known for creating experimental and challenging games such as QWOP, GIRP, and CLOP. He is also a professor at New York University's Game Center and a musician who plays bass guitar for the band Cut Copy.</p>
119
- <li><b>What is Ocean of Games?</b></li>
120
- <p>Ocean of Games is a website that offers a variety of PC games for download. It has a large collection of games from different genres and categories, as well as an easy-to-use interface and a fast download speed. However, it also has some disadvantages such as being unauthorized, unsafe, illegal, and unethical.</p>
121
- <li><b>How long does it take to beat Getting Over It?</b></li>
122
- <p>The answer to this question depends on your skill level, your luck, and your persistence. Some players can beat Getting Over It in less than 2 minutes, while others can take more than 200 hours. The average time to beat Getting Over It according to HowLongToBeat.com is 6 hours for the main story and 11 hours for completionists.</p>
123
- <li><b>What are some other games like Getting Over It?</b></li>
124
- <p>If you are looking for some other games that are similar to Getting Over It in terms of concept, difficulty, or humor, then you might want to check out some of these games:</p>
125
- <ul>
126
- <li>Sexy Hiking: The original game that inspired Getting Over It. It has similar gameplay but with worse graphics and sound.</li>
127
- <li>Pogostuck: Rage With Your Friends: A game that involves climbing a mountain with a pogo stick and competing with other players online.</li>
128
- <li>Jump King: A game that involves jumping up a tower with precise timing and landing. It has retro graphics and a dark sense of humor.</li>
129
- <li>I Am Bread: A game that involves controlling a slice of bread and trying to become toast. It has realistic physics and a quirky story.</li>
130
- <li>Surgeon Simulator: A game that involves performing surgery with clumsy controls and hilarious outcomes. It has various scenarios and modes to play.</li>
131
- </ul></p> 197e85843d<br />
132
- <br />
133
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/ TikTok .md DELETED
@@ -1,101 +0,0 @@
1
- <br />
2
- <h1>How to Download TikTok Videos Without Watermark</h1>
3
- <p>TikTok is one of the most popular social media apps in the world, with over 1 billion active users. It allows users to create and share short videos with music, filters, stickers, and other effects. People use TikTok for various purposes, such as entertainment, education, inspiration, or expression.</p>
4
- <h2>tiktok download 07 video ไม่มีลายน้ํา</h2><br /><p><b><b>Download Zip</b> ::: <a href="https://jinyurl.com/2uNUdl">https://jinyurl.com/2uNUdl</a></b></p><br /><br />
5
- <p>However, sometimes people may want to download TikTok videos for offline viewing, editing, sharing on other platforms, or avoiding the annoying watermark that appears on the videos. Unfortunately, TikTok does not provide an official way to download videos without watermark. The only option within the app is to save videos with watermark, which can reduce the quality and aesthetics of the videos.</p>
6
- <p>So how can you download TikTok videos without watermark? Is there a way to do it easily, quickly, safely, and reliably? The answer is yes! In this article, we will introduce you to the best solution to download TikTok videos without watermark online for free: SnapTik.App.</p>
7
- <h2>What are the challenges of downloading TikTok videos without watermark?</h2>
8
- <p>There are many third-party tools and websites that claim to help users download TikTok videos without watermark. However, some of them may have drawbacks or limitations that make them less than ideal for users. Here are some of the common challenges that users may face when trying to download TikTok videos without watermark:</p>
9
- <p>วิธี download video tiktok ไม่มีลายน้ำ<br />
10
- tiktok download video ไม่มีลายน้ำ android<br />
11
- tiktok download video ไม่มีลายน้ำ ios<br />
12
- tiktok download video ไม่มีลายน้ำ pc<br />
13
- tiktok download video ไม่มีลายน้ำ online<br />
14
- tiktok download video ไม่มีลายน้ำ app<br />
15
- tiktok download video ไม่มีลายน้ำ apk<br />
16
- tiktok download video ไม่มีลายน้ำ chrome<br />
17
- tiktok download video ไม่มีลายน้ำ snaptik<br />
18
- tiktok download video ไม่มีลายน้ำ ssstik<br />
19
- tiktok download video ไม่มีลายน้ำ mp4<br />
20
- tiktok download video ไม่มีลายน้ำ hd<br />
21
- tiktok download video ไม่มีลายน้ำ free<br />
22
- tiktok download video ไม่มีลายน้ำ fast<br />
23
- tiktok download video ไม่มีลายน้ำ easy<br />
24
- tiktok download video ไม่มีลายน้ำ website<br />
25
- tiktok download video ไม่มีลายน้ำ link<br />
26
- tiktok download video ไม่มีลายน้ำ url<br />
27
- tiktok download video ไม่มีลายน้ำ code<br />
28
- tiktok download video ไม่มีลายน้ำ script<br />
29
- tiktok download video ไม่มีลายน้ำ python<br />
30
- tiktok download video ไม่มีลายน้ำ php<br />
31
- tiktok download video ไม่มีลายน้ำ javascript<br />
32
- tiktok download video ไม่มีลายน้ำ extension<br />
33
- tiktok download video ไม่มีลายน้ำ plugin<br />
34
- tiktok download video ไม่มีลายน้ำ software<br />
35
- tiktok download video ไม่มีลายน้ำ tool<br />
36
- tiktok download video ไม่มีลายน้ำ program<br />
37
- tiktok download video ไม่มีลายน้ำ application<br />
38
- tiktok download video ไม่มีลายน้ำ service<br />
39
- tiktok download video ไม่มีลายน้ำ site<br />
40
- tiktok download video ไม่มีลายน้ำ page<br />
41
- tiktok download video ไม่มีลายน้ำ generator<br />
42
- tiktok download video ไม่มีลายน้ำ downloader<br />
43
- tiktok download video ไม่มีลายน้ำ converter<br />
44
- tiktok download video ไม่มีลายน้ำ saver<br />
45
- tiktok download video ไม่มีลายน้ำ grabber<br />
46
- tiktok download video ไม่มีลายน้ำ extractor<br />
47
- tiktok download video ไม่มีลายน้ำ copier<br />
48
- tiktok download video ไม่มีลายน้ำ editor<br />
49
- tiktok download video ไม่มีลายน้ำ remover<br />
50
- tiktok download video ไม่มีลายน้ำ eraser<br />
51
- tiktok download video ไม่มีลายน้ำ cleaner<br />
52
- tiktok download video ไม่มีลายน้ำ filter<br />
53
- tiktok download video ไม่มีลายน้ำ cutter<br />
54
- tiktok download video ไม่มีลายน้ำ trimmer<br />
55
- tiktok download video ไม่มีลายน้ำ cropper<br />
56
- tiktok download video ไม่มีลายน้ำ splitter<br />
57
- tiktok download video ไม่มีลายน้ำ merger</p>
58
- <ul>
59
- <li>Some tools or websites may require registration or subscription before allowing users to download videos without watermark.</li>
60
- <li>Some tools or websites may have ads or pop-ups that can be annoying or distracting for users.</li>
61
- <li>Some tools or websites may be slow or unreliable, resulting in low-quality downloads or failed requests.</li>
62
- <li>Some tools or websites may have limited features, such as not supporting all devices or browsers, not allowing users to choose the format or resolution of the videos, or not offering additional options like downloading slideshows, images, or music from TikTok.</li>
63
- <li>Some tools or websites may pose security risks, such as installing malware, stealing user data, or violating user privacy.</li>
64
- </ul>
65
- <p>Therefore, users need to be careful and selective when choosing a tool or website to download TikTok videos without watermark. They need to find a solution that can overcome these challenges and provide them with the best possible experience.</p>
66
- <h2>What is the best solution to download TikTok videos without watermark?</h2>
67
- <p>The best solution to download TikTok videos without watermark is to use SnapTik.App, a free and fast online tool that can download any TikTok video in HD quality and MP4 format without watermark. SnapTik.App has many advantages over other tools and websites, such as:</p>
68
- <ul>
69
- <li>It is easy to use. Users only need to copy and paste the link of the TikTok video that they want to download and click on the download button. No registration, subscription, installation, or configuration is required.</li>
70
- <li>It has no ads. Users can enjoy a clean and smooth interface without any interruptions or distractions.</li>
71
- <li>It is fast and reliable. Users can download TikTok videos without watermark in seconds, thanks to the powerful server and technology behind SnapTik.App. The downloads are always in high quality and never fail.</li>
72
- <li>It supports all devices and browsers. Users can access SnapTik.App from any device, such as PC, laptop, tablet, or smartphone, and any browser, such as Chrome, Firefox, Safari, or Opera. No matter what device or browser they use, they can download TikTok videos without watermark with ease.</li>
73
- <li>It does not store or track user data. Users can rest assured that their privacy and security are protected when using SnapTik.App. SnapTik.App does not store, collect, or share any user data or information. It also does not require any permissions or access to user devices or accounts.</li>
74
- <li>It offers additional features. Users can also use SnapTik.App to download slideshows, images, and music from TikTok. They can also choose the resolution of the videos that they want to download, such as 720p, 1080p, or 4K. They can also preview the videos before downloading them.</li>
75
- </ul>
76
- <p>With SnapTik.App, users can download TikTok videos without watermark online for free in the best possible way. They can enjoy watching, editing, and sharing their favorite TikTok videos without any hassle or compromise.</p>
77
- <h2>How to use SnapTik.App to download TikTok videos without watermark?</h2>
78
- <p>To use SnapTik.App to download TikTok videos without watermark, users need to follow these simple steps:</p>
79
- <h4>Step 1: Open the TikTok app on your phone or the website on your browser and select the video that you want to download.</h4>
80
- <p>You can choose any video that you like from TikTok, whether it is from your own account, someone else's account, a hashtag page, a challenge page, a trend page, or a search page. You can also use the filters and effects on TikTok to create your own video.</p>
81
- <h4>Step 2: Click on the share button at the bottom right and click on the copy link button.</h4>
82
- <p>This will copy the link of the video to your clipboard. You can also share the link with your friends via other apps if you want.</p>
83
- <h4>Step 3: Go back to SnapTik.App and paste the link in the box at the top. Then click on the download button.</h4>
84
- <p>This will take you to a new page where you can see the details of the video, such as the title, the creator, the duration, and the resolution. You can also preview the video before downloading it.</p>
85
- <h4>Step 4: Wait for the server to process your request and then save the video to your device in one click.</h4>
86
- <p>This will start the download process and save the video to your device in MP4 format without watermark. You can find the video in your downloads folder or gallery. You can also rename or delete the video if you want.</p>
87
- <h2>Conclusion and FAQs</h2>
88
- <p>In conclusion, SnapTik.App is the best way to download TikTok videos without watermark online for free. It is fast, easy, safe, and reliable. Users can enjoy watching, editing, and sharing their favorite TikTok videos without any hassle or compromise. SnapTik.App is the ultimate tool for TikTok lovers who want to download videos without watermark.</p>
89
- <p>Here are some FAQs that users may have about SnapTik.App:</p>
90
- <h4>Q: Is SnapTik.App legal and safe?</h4>
91
- <p>A: SnapTik.App is legal and safe to use, as long as users respect the intellectual property rights of the original creators and do not use the downloaded videos for commercial or illegal purposes. SnapTik.App does not violate any terms of service or privacy policies of TikTok or any other platforms.</p>
92
- <h4>Q: Does SnapTik.App work on all devices and browsers?</h4>
93
- <p>A: Yes, SnapTik.App works on all devices and browsers, including PC, laptop, tablet, smartphone, Chrome, Firefox, Safari, Opera, and more. Users can access SnapTik.App from any device or browser without any issues.</p>
94
- <h4>Q: Does SnapTik.App have any limitations or restrictions?</h4>
95
- <p>A: No, SnapTik.App does not have any limitations or restrictions on the number, size, length, or quality of the videos that users can download without watermark. Users can download as many videos as they want, as long as they have enough storage space on their devices.</p>
96
- <h4>Q: Does SnapTik.App support other languages besides Thai?</h4>
97
- <p>A: Yes, SnapTik.App supports other languages besides Thai, such as English, Spanish, French, German, Italian, Portuguese, Russian, Arabic, Hindi, Japanese, Korean, Chinese, and more. Users can change the language of the website by clicking on the flag icon at the top right corner.</p>
98
- <h4>Q: How can I contact SnapTik.App if I have any questions or feedback?</h4>
99
- <p>A: You can contact SnapTik.App by sending an email to [email protected] or by filling out the contact form on the website. We appreciate your questions and feedback and we will try to respond as soon as possible.</p> 197e85843d<br />
100
- <br />
101
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Bus Simulator Ultimate 1.5.2 Mod Apk - Drive Realistic Buses with Amazing Features.md DELETED
@@ -1,119 +0,0 @@
1
- <br />
2
- <h1>Bus Simulator Ultimate 1.5.2 Mod Apk: A Review</h1>
3
- <p>Do you love driving buses and exploring different cities? Do you want to experience the thrill of running your own bus company and competing with other players online? If yes, then you should try Bus Simulator Ultimate, one of the most popular and realistic bus simulator games for Android devices.</p>
4
- <h2>bus simulator ultimate 1.5.2 mod apk</h2><br /><p><b><b>Download File</b> &#8250; <a href="https://jinyurl.com/2uNSJS">https://jinyurl.com/2uNSJS</a></b></p><br /><br />
5
- <p>In this article, we will review Bus Simulator Ultimate and its latest version, 1.5.2 mod apk, which offers unlimited money, free purchases, and other benefits. We will also discuss the features, pros, and cons of this game, and answer some frequently asked questions about it.</p>
6
- <h2>What is Bus Simulator Ultimate?</h2>
7
- <p>Bus Simulator Ultimate is a simulation game developed by Zuuks Games, a Turkish game studio that specializes in creating realistic driving games. The game was released in August 2019 and has since gained over 100 million downloads and 4.3 stars rating on Google Play Store.</p>
8
- <p>Bus Simulator Ultimate lets you drive various types of buses across different countries and cities, such as Germany, Turkey, Italy, France, Spain, USA, Brazil, Russia, and more. You can also create your own routes and customize your buses with different skins, stickers, horns, and accessories.</p>
9
- <p>But driving buses is not the only thing you can do in this game. You can also establish your own bus company and hire drivers to work for you. You can manage your company's finances, reputation, customer satisfaction, and more. You can also compete with other players in multiplayer mode and online ranking system.</p>
10
- <p>bus simulator ultimate mod apk unlimited money and gold<br />
11
- bus simulator ultimate hack apk download for android<br />
12
- bus simulator ultimate mod menu apk latest version<br />
13
- bus simulator ultimate 1.5.2 mod apk happymod<br />
14
- bus simulator ultimate mod apk free shopping<br />
15
- bus simulator ultimate mod apk all buses unlocked<br />
16
- bus simulator ultimate mod apk revdl<br />
17
- bus simulator ultimate mod apk rexdl<br />
18
- bus simulator ultimate mod apk android 1<br />
19
- bus simulator ultimate mod apk an1<br />
20
- bus simulator ultimate mod apk obb<br />
21
- bus simulator ultimate mod apk offline<br />
22
- bus simulator ultimate mod apk online<br />
23
- bus simulator ultimate mod apk no ads<br />
24
- bus simulator ultimate mod apk unlimited xp<br />
25
- bus simulator ultimate mod apk unlimited fuel<br />
26
- bus simulator ultimate mod apk unlimited tickets<br />
27
- bus simulator ultimate mod apk unlimited gems<br />
28
- bus simulator ultimate mod apk unlimited coins<br />
29
- bus simulator ultimate mod apk unlimited everything<br />
30
- bus simulator ultimate 1.5.2 hack apk download<br />
31
- bus simulator ultimate 1.5.2 cheat apk download<br />
32
- bus simulator ultimate 1.5.2 premium apk download<br />
33
- bus simulator ultimate 1.5.2 pro apk download<br />
34
- bus simulator ultimate 1.5.2 full apk download<br />
35
- bus simulator ultimate 1.5.2 cracked apk download<br />
36
- bus simulator ultimate 1.5.2 unlocked apk download<br />
37
- bus simulator ultimate 1.5.2 latest mod apk download<br />
38
- bus simulator ultimate 1.5.2 new mod apk download<br />
39
- bus simulator ultimate 1.5.2 updated mod apk download<br />
40
- how to install bus simulator ultimate 1.5.2 mod apk<br />
41
- how to download bus simulator ultimate 1.5.2 mod apk<br />
42
- how to play bus simulator ultimate 1.5.2 mod apk<br />
43
- how to get bus simulator ultimate 1.5.2 mod apk<br />
44
- how to update bus simulator ultimate 1.5.2 mod apk<br />
45
- how to hack bus simulator ultimate 1.5.2 with lucky patcher<br />
46
- how to hack bus simulator ultimate 1.5.2 with game guardian<br />
47
- how to hack bus simulator ultimate 1.5.2 with cheat engine<br />
48
- how to hack bus simulator ultimate 1.5.2 without root<br />
49
- how to hack bus simulator ultimate 1.5.2 without verification<br />
50
- best settings for bus simulator ultimate 1.5.2 mod apk<br />
51
- best graphics for bus simulator ultimate 1.5.2 mod apk<br />
52
- best buses for bus simulator ultimate 1.5.2 mod apk<br />
53
- best routes for bus simulator ultimate 1.5.2 mod apk<br />
54
- best tips and tricks for bus simulator ultimate 1.5.2 mod apk<br />
55
- best cheats and hacks for bus simulator ultimate 1.5.2 mod apk<br />
56
- best reviews and ratings for bus simulator ultimate 1.5.2 mod apk<br />
57
- best alternatives and similar games to bus simulator ultimate 1.5.2 mod apk</p>
58
- <h3>Features of Bus Simulator Ultimate</h3>
59
- <p>Bus Simulator Ultimate has many features that make it stand out from other bus simulator games. Here are some of them:</p>
60
- <h4>Realistic bus driving experience</h4>
61
- <p>The game boasts realistic graphics, physics, sounds, and weather effects that make you feel like you are really driving a bus on the road. You can also choose from different camera angles, such as cockpit view, third-person view, or top-down view.</p>
62
- <p>The game also has realistic traffic rules and situations that you have to follow and deal with. You have to obey traffic lights, speed limits, signs, and signals. You have to avoid accidents, collisions, and fines. You have to deal with traffic jams, road works, accidents, and emergencies.</p>
63
- <h4>Multiplayer mode and online ranking</h4>
64
- <p>The game allows you to play with other players online in multiplayer mode. You can join or create a room and invite your friends or random players to join you. You can chat with them using voice or text messages. You can also see their buses and routes on the map.</p>
65
- <p>The game also has an online ranking system that shows your position among other players based on your performance, reputation, income, and more. You can compare your stats with other players and try to climb up the leaderboard.</p>
66
- <h4>Customizable buses and routes</h4>
67
- <p>The game offers a variety of buses that you can drive and customize. You can choose from different models, brands, sizes, colors, and designs of buses. You can also add different accessories and decorations to your buses, such as skins, stickers, horns, lights, and more. You can also change the interior of your buses, such as the seats, steering wheel, dashboard, and more.</p>
68
- <p>The game also lets you create your own routes and destinations. You can choose from different cities and countries to drive in. You can also set the length, difficulty, and scenery of your routes. You can also add different stops, landmarks, and attractions to your routes.</p>
69
- <h4>Passenger feedback and company management</h4>
70
- <p>The game also simulates the interaction between you and your passengers. You have to pick up and drop off passengers at the designated stops. You have to provide them with a comfortable and safe ride. You have to listen to their feedback and requests.</p>
71
- <p>The game also gives you the opportunity to run your own bus company. You have to hire and train drivers, buy and maintain buses, manage your budget and expenses, and expand your business. You have to balance your income and reputation. You have to deal with competitors, challenges, and events.</p>
72
- <h3>What is Bus Simulator Ultimate 1.5.2 Mod Apk?</h3>
73
- <p>Bus Simulator Ultimate 1.5.2 mod apk is a modified version of the original game that offers some extra features and benefits that are not available in the official version. Some of these features are:</p>
74
- <h4>Benefits of using the mod apk</h4>
75
- <ul>
76
- <li>Unlimited money: You can get unlimited money in the game that you can use to buy and upgrade buses, hire drivers, create routes, and more.</li>
77
- <li>Free purchases: You can make any purchase in the game for free without spending any real money.</li>
78
- <li>No ads: You can enjoy the game without any annoying ads interrupting your gameplay.</li>
79
- <li>No root: You do not need to root your device to use the mod apk.</li>
80
- </ul>
81
- <h4>How to download and install the mod apk</h4>
82
- <p>To download and install the mod apk, you need to follow these steps:</p>
83
- <ol>
84
- <li>Download the mod apk file from a trusted source on the internet.</li>
85
- <li>Enable unknown sources on your device settings to allow installation of apps from outside the Google Play Store.</li>
86
- <li>Locate the downloaded file on your device storage and tap on it to install it.</li>
87
- <li>Launch the game and enjoy the mod features.</li>
88
- </ol>
89
- <h3>Pros and cons of Bus Simulator Ultimate 1.5.2 Mod Apk</h3>
90
- <p>Like any other mod apk, Bus Simulator Ultimate 1.5.2 mod apk has its own advantages and disadvantages. Here are some of them:</p>
91
- | Pros | Cons | | --- | --- | | Unlimited money and free purchases | May not be compatible with some devices or versions | | No ads | May cause bugs or glitches in the game | | No root | May violate the terms and conditions of the game | | Enhanced gameplay | May affect the online features of the game | <h3>Conclusion</h3>
92
- <p>Bus Simulator Ultimate is a fun and realistic bus simulator game that lets you drive various buses across different countries and cities, create your own routes and destinations, run your own bus company, and compete with other players online. It has many features that make it one of the best bus simulator games for Android devices.</p>
93
- <p>Bus Simulator Ultimate 1.5.2 mod apk is a modified version of the original game that offers unlimited money, free purchases, no ads, and no root. It can enhance your gameplay experience by giving you more freedom and options in the game. However, it also has some drawbacks that you should be aware of before using it.</p>
94
- <p>If you are looking for a bus simulator game that is realistic, challenging, and entertaining, you should give Bus Simulator Ultimate a try. And if you want to get some extra benefits and features in the game, you can download and install Bus Simulator Ultimate 1.5.2 mod apk from a reliable source on the internet.</p>
95
- <h2>FAQs</h2>
96
- <h3>Q1: Is Bus Simulator Ultimate free to play?</h3>
97
- <p>A1: Yes, Bus Simulator Ultimate is free to download and play on Android devices. However, it also contains some in-app purchases that require real money.</p>
98
- <h3>Q2: Is Bus Simulator Ultimate 1.5.2 Mod Apk safe to use?</h3>
99
- <p>A2: Bus Simulator Ultimate 1.5.2 mod apk is generally safe to use if you download it from a trusted source on the internet. However, you should always be careful when installing apps from unknown sources as they may contain viruses or malware that can harm your device or data.</p>
100
- <h3>Q3: How to update Bus Simulator Ultimate 1.5.2 Mod Apk?</ <p>A3: To update Bus Simulator Ultimate 1.5.2 mod apk, you need to download the latest version of the mod apk file from the same source where you downloaded the previous version. Then, you need to uninstall the old version of the mod apk and install the new one. You may also need to clear the cache and data of the game before launching it.</p>
101
- <h3>Q4: What are the best bus simulator games for Android?</h3>
102
- <p>A4: Besides Bus Simulator Ultimate, there are many other bus simulator games that you can try on your Android device. Some of them are:</p>
103
- <ul>
104
- <li>Bus Simulator: Original: This game lets you drive realistic buses in various locations and scenarios. You can also customize your buses and routes, and play with other players online.</li>
105
- <li>World Bus Driving Simulator: This game lets you drive different types of buses across Brazil and other countries. You can also enjoy realistic graphics, sounds, and weather effects.</li>
106
- <li>Coach Bus Simulator: This game lets you drive modern coaches across Europe and other continents. You can also create your own bus company and hire drivers.</li>
107
- <li>Heavy Bus Simulator: This game lets you drive heavy buses on challenging roads and terrains. You can also upgrade your buses and change their appearance.</li>
108
- </ul>
109
- <h3>Q5: How to contact the developers of Bus Simulator Ultimate?</h3>
110
- <p>A5: If you have any questions, suggestions, or feedback about Bus Simulator Ultimate, you can contact the developers of the game by using the following methods:</p>
111
- <ul>
112
- <li>Email: [email protected]</li>
113
- <li>Website: https://www.zuuks.com/</li>
114
- <li>Facebook: https://www.facebook.com/zuuks.games</li>
115
- <li>Instagram: https://www.instagram.com/zuuks_games/</li>
116
- <li>Twitter: https://twitter.com/ZuuksGames</li>
117
- </ul></p> 197e85843d<br />
118
- <br />
119
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Dr Driving 3 Mod APK and Learn Driving with Fun.md DELETED
@@ -1,98 +0,0 @@
1
-
2
- <h1>Dr Driving 3 Mod APK: A Fun and Realistic Driving Simulator</h1>
3
- <p>Do you love driving games but get bored of the same old racing and drifting scenarios? Do you want to experience the thrill of driving in a realistic city environment with traffic, pedestrians, and obstacles? Do you want to test your driving skills in various modes and missions, such as parking, delivery, taxi, and more? If you answered yes to any of these questions, then you should try Dr Driving 3, one of the best car simulation games on Android.</p>
4
- <h2>dr driving 3 mod apk</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNTnD">https://jinyurl.com/2uNTnD</a></b></p><br /><br />
5
- <h2>What is Dr Driving 3?</h2>
6
- <p>Dr Driving 3 is the third installment of the popular Dr Driving series, developed by SUD Inc. It is a driving simulator game that lets you drive various cars in a realistic city setting. You can choose from different modes and missions, such as parking, delivery, taxi, speed, fuel efficiency, and more. You can also compete with other players online and climb the leaderboards. You can customize your cars with different colors, wheels, spoilers, and upgrades. You can also earn coins and gold by completing missions and achievements.</p>
7
- <h3>Features of Dr Driving 3</h3>
8
- <h4>Realistic graphics and physics</h4>
9
- <p>Dr Driving 3 has stunning graphics that make you feel like you are driving in a real city. The game has realistic physics that simulate the car's movement, speed, braking, steering, and collision. You can also see the damage effects on your car when you crash or hit something. The game also has dynamic weather effects, such as rain, snow, fog, and night.</p>
10
- <h4>Various modes and missions</h4>
11
- <p>Dr Driving 3 has different modes and missions that challenge your driving skills. You can choose from parking, delivery, taxi, speed, fuel efficiency, and more. Each mode has different objectives and difficulties. For example, in parking mode, you have to park your car in a designated spot without hitting anything. In delivery mode, you have to deliver goods to various locations within a time limit. In taxi mode, you have to pick up and drop off passengers without breaking traffic rules.</p>
12
- <p>dr driving 3 mod apk unlimited money and gold<br />
13
- dr driving 3 mod apk download for android<br />
14
- dr driving 3 mod apk latest version<br />
15
- dr driving 3 mod apk hack<br />
16
- dr driving 3 mod apk revdl<br />
17
- dr driving 3 mod apk offline<br />
18
- dr driving 3 mod apk free shopping<br />
19
- dr driving 3 mod apk no ads<br />
20
- dr driving 3 mod apk unlimited coins and gems<br />
21
- dr driving 3 mod apk all cars unlocked<br />
22
- dr driving 3 mod apk android 1<br />
23
- dr driving 3 mod apk rexdl<br />
24
- dr driving 3 mod apk happymod<br />
25
- dr driving 3 mod apk unlimited fuel<br />
26
- dr driving 3 mod apk unlimited diamonds<br />
27
- dr driving 3 mod apk online<br />
28
- dr driving 3 mod apk unlimited everything<br />
29
- dr driving 3 mod apk unlimited keys<br />
30
- dr driving 3 mod apk unlimited nitro<br />
31
- dr driving 3 mod apk unlimited xp<br />
32
- dr driving 3 mod apk new update<br />
33
- dr driving 3 mod apk old version<br />
34
- dr driving 3 mod apk obb<br />
35
- dr driving 3 mod apk pure<br />
36
- dr driving 3 mod apk premium<br />
37
- dr driving 3 mod apk pro<br />
38
- dr driving 3 mod apk unlocked all features<br />
39
- dr driving 3 mod apk vip<br />
40
- dr driving 3 mod apk with unlimited money and gold download for android latest version<br />
41
- dr driving 3 mod apk with all cars unlocked and unlimited money and gold download for android latest version</p>
42
- <h4>Online multiplayer and leaderboards</h4>
43
- <p>Dr Driving 3 also has an online multiplayer feature that lets you play with other players around the world. You can join or create a room and invite your friends or random players to join. You can also chat with them using emojis. You can compete with them in different modes and see who is the best driver. You can also check your ranking on the global and local leaderboards.</p>
44
- <h4>Customizable cars and upgrades</h4>
45
- <p>Dr Driving 3 has a variety of cars that you can drive in the game. You can choose from sedans, hatchbacks, SUVs, sports cars, trucks, buses, and more. You can also customize your cars with different colors, wheels, spoilers, and upgrades. You can improve your car's performance by upgrading its engine, transmission, brakes, tires, suspension, and more.</p>
46
- <h2>What is Dr Driving 3 Mod APK?</h2>
47
- <p>Dr Driving 3 Mod APK is a modified version of Dr Driving 3 that provides everything free like coins and gold, all cars and upgrades unlocked, no ads, and no root required. It is a hacked version of the original game that gives you unlimited access to all the features and resources of the game. You can enjoy the game without any limitations or restrictions.</p>
48
- <h3>Benefits of Dr Driving 3 Mod APK</h3>
49
- <h4>Unlimited coins and gold</h4>
50
- <p>Coins and gold are the main currencies in Dr Driving 3. You need them to buy new cars, customize them, and upgrade them. You can also use them to unlock new modes and missions. However, earning coins and gold in the game is not easy. You have to complete missions, achievements, and watch ads to get them. With Dr Driving 3 Mod APK, you don't have to worry about that. You will get unlimited coins and gold in your account as soon as you install the mod. You can use them to buy anything you want in the game.</p>
51
- <h4>All cars and upgrades unlocked</h4>
52
- <p>Dr Driving 3 has a lot of cars that you can drive in the game. However, not all of them are available from the start. You have to unlock them by completing certain missions or paying with coins and gold. Some of the cars are very expensive and require a lot of coins and gold to unlock. With Dr Driving 3 Mod APK, you don't have to wait or spend money to unlock them. You will get all the cars and upgrades unlocked in the mod. You can choose any car you like and customize it with any upgrade you want.</p>
53
- <h4>No ads and no root required</h4>
54
- <p>Dr Driving 3 is a free game, but it has ads that can interrupt your gameplay and annoy you. You can remove the ads by paying with real money, but that is not a good option for everyone. With Dr Driving 3 Mod APK, you don't have to deal with any ads. The mod removes all the ads from the game and lets you enjoy the game without any distractions. Moreover, the mod does not require root access to work on your device. You can install it easily without any risk of damaging your device or violating its warranty.</p>
55
- <h2>How to download and install Dr Driving 3 Mod APK?</h2>
56
- <p>If you want to download and install Dr Driving 3 Mod APK on your device, you have to follow some simple steps. Here are the steps to download and install Dr Driving 3 Mod APK:</p>
57
- <h3>Steps to download and install Dr Driving 3 Mod APK</h3>
58
- <h4>Step 1: Enable unknown sources on your device</h4>
59
- <p>Before you can install Dr Driving 3 Mod APK on your device, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on.</p>
60
- <h4>Step 2: Download the Dr Driving 3 Mod APK file from a trusted source</h4>
61
- <p>Next, you have to download the Dr Driving 3 Mod APK file from a trusted source. There are many websites that offer modded apps, but not all of them are safe and reliable. Some of them may contain viruses or malware that can harm your device or steal your data. Therefore, you have to be careful when choosing a source to download the mod file. You can use this link to download the Dr Driving 3 Mod APK file safely and securely.</p>
62
- <h4>Step 3: Locate and install the Dr Driving 3 Mod APK file on your device</h4>
63
- <p>After downloading the Dr Driving 3 Mod APK file, you have to locate it on your device and install it. You can use a file manager app to find the file in your downloads folder or wherever you saved it. Then, tap on the file and follow the instructions on the screen to install it.</p>
64
- <h4>Step 4: Launch the game and enjoy the mod features</h4>
65
- <p>Finally, you can launch the game and enjoy the mod features. You will see unlimited coins and gold in your account, all cars and upgrades unlocked, no ads, and no root required. You can play any mode or mission you want, customize your cars as you like, compete with other players online, and have fun driving in a realistic city.</p>
66
- <h2>Conclusion</h2>
67
- <p>Dr Driving 3 is a fun and realistic driving simulator game that lets you drive various cars in a realistic city setting. You can choose from different modes and missions, such as parking, delivery, taxi, speed, fuel efficiency, and more. You can also compete with other players online and climb the leaderboards. You can customize your cars with different colors, wheels, spoilers, and upgrades.</p>
68
- <p>If you want to enjoy the game without any limitations or restrictions, you should try Dr Driving 3 Mod APK, a modified version of the game that provides unlimited coins and gold, all cars and upgrades unlocked, no ads, and no root required. You can download and install Dr Driving 3 Mod APK easily by following the steps in this article. You can then enjoy the game with all the mod features and have a great time driving in a realistic city.</p>
69
- <h2>FAQs</h2>
70
- <p>Here are some frequently asked questions about Dr Driving 3 and Dr Driving 3 Mod APK:</p>
71
- <table>
72
- <tr>
73
- <th>Question</th>
74
- <th>Answer</th>
75
- </tr>
76
- <tr>
77
- <td>Is Dr Driving 3 free to play?</td>
78
- <td>Yes, Dr Driving 3 is free to play, but it has in-app purchases and ads that can affect your gameplay.</td>
79
- </tr>
80
- <tr>
81
- <td>Is Dr Driving 3 Mod APK safe to use?</td>
82
- <td>Yes, Dr Driving 3 Mod APK is safe to use, as long as you download it from a trusted source. However, you should always be careful when installing modded apps on your device and scan them for viruses or malware before installing them.</td>
83
- </tr>
84
- <tr>
85
- <td>Does Dr Driving 3 Mod APK work on all devices?</td>
86
- <td>Dr Driving 3 Mod APK works on most Android devices that support the original game. However, some devices may not be compatible with the mod or may experience some issues or errors. If you encounter any problems, you can try reinstalling the mod or contacting the mod developer for help.</td>
87
- </tr>
88
- <tr>
89
- <td>Can I play Dr Driving 3 offline?</td>
90
- <td>Yes, you can play Dr Driving 3 offline, but you will not be able to access the online multiplayer feature or the leaderboards. You will also not be able to sync your progress or achievements with your Google Play account.</td>
91
- </tr>
92
- <tr>
93
- <td>Can I update Dr Driving 3 Mod APK?</td>
94
- <td>No, you cannot update Dr Driving 3 Mod APK from the Google Play Store, as it is a modified version of the game. If you want to update the mod, you have to download and install the latest version of the mod from the same source you downloaded it from.</td>
95
- </tr>
96
- </table></p> 401be4b1e0<br />
97
- <br />
98
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Explore the Thrilling World of Monster Life with Free Shopping Mod APK.md DELETED
@@ -1,96 +0,0 @@
1
-
2
- <h1>Monster Life Mod APK Free Shopping: A Guide for Monster Lovers</h1>
3
- <p>Do you love cute monsters? Do you want to collect, breed, train, and battle with them? Do you want to have unlimited coins and gems to buy anything you want in the game? If you answered yes to any of these questions, then you should try <strong>Monster Life Mod APK Free Shopping</strong>, a modified version of the popular game <em>Monster Life</em> by Gameloft.</p>
4
- <h2>monster life mod apk free shopping</h2><br /><p><b><b>Download File</b> &#128504; <a href="https://jinyurl.com/2uNNs4">https://jinyurl.com/2uNNs4</a></b></p><br /><br />
5
- <p>In this article, we will tell you everything you need to know about Monster Life Mod APK Free Shopping, including how to download and install it, how to play it, what are its features, pros and cons, and whether it is worth it or not. Let's get started!</p>
6
- <h2>How to download and install Monster Life Mod APK Free Shopping</h2>
7
- <p>The first thing you need to do is to find a reliable source for the mod apk file. There are many websites that offer mod apk files for various games, but not all of them are safe or trustworthy. Some of them may contain viruses or malware that can harm your device or data, or they may not work properly or at all. Therefore, you should do some research before downloading any mod apk file from any website.</p>
8
- <p>One of the websites that we recommend is [APKCombo](^1^), which provides safe and fast downloads for various Android apps. You can find the link for Monster Life Mod APK Free Shopping on their website, or you can click [here](^1^) to go directly to the download page.</p>
9
- <p>Once you have found the mod apk file, you need to enable unknown sources on your device. This is because Android devices do not allow installing apps from sources other than the official Google Play Store by default. To enable unknown sources, go to your device settings, then security, then toggle on the option that says "allow installation of apps from unknown sources". This will allow you to install the mod apk file on your device.</p>
10
- <p>After enabling unknown sources, you can download and install the mod apk file on your device. Just tap on the download button on the website, then wait for the file to be downloaded. Once it is done, tap on the file to open it, then tap on install. Follow the instructions on the screen, then wait for the installation to be completed. You may need to grant some permissions to the app during the installation process.</p>
11
- <p>monster life mod apk unlimited money and gems<br />
12
- monster life mod apk latest version download<br />
13
- monster life mod apk free purchase and subscription<br />
14
- monster life mod apk no ads and unlocked levels<br />
15
- monster life mod apk android 1 and rexdl<br />
16
- monster life mod apk offline and online play<br />
17
- monster life mod apk hack and cheat codes<br />
18
- monster life mod apk revdl and happymod<br />
19
- monster life mod apk 2023 and 2024 updates<br />
20
- monster life mod apk obb and data files<br />
21
- monster life unlimited money mod apk free download<br />
22
- monster life latest version mod apk free shopping<br />
23
- monster life free purchase mod apk unlimited gems<br />
24
- monster life no ads mod apk unlocked premium features<br />
25
- monster life android 1 mod apk free coins and cash<br />
26
- monster life offline mod apk unlimited everything<br />
27
- monster life hack mod apk free diamond and gold<br />
28
- monster life revdl mod apk unlocked all monsters<br />
29
- monster life 2023 mod apk free subscription and purchase<br />
30
- monster life obb mod apk free shopping and levels<br />
31
- download monster life mod apk unlimited money and gems<br />
32
- download monster life mod apk latest version free shopping<br />
33
- download monster life mod apk free purchase and subscription<br />
34
- download monster life mod apk no ads and unlocked levels<br />
35
- download monster life mod apk android 1 and rexdl free<br />
36
- download monster life mod apk offline and online play<br />
37
- download monster life mod apk hack and cheat codes free<br />
38
- download monster life mod apk revdl and happymod free shopping<br />
39
- download monster life mod apk 2023 and 2024 updates free<br />
40
- download monster life mod apk obb and data files free</p>
41
- <p>Congratulations! You have successfully installed Monster Life Mod APK Free Shopping on your device. You can now launch the app and enjoy the game with free shopping and other features.</p>
42
- <h2>How to play Monster Life Mod APK Free Shopping</h2>
43
- <p>Now that you have installed the app, you may be wondering how to play the game. Don't worry, we will guide you through the basics of the game and help you become a master monster trainer in no time.</p>
44
- <p>The game starts with a tutorial that introduces you to the story and the gameplay of Monster Life. You will learn that you are a young monster keeper who lives on the islands of Numa, a magical world where monsters and humans coexist peacefully. However, an evil force called Chaos is threatening to destroy Numa and its inhabitants, and you are the only one who can stop it.</p>
45
- <p>You will also learn that you have a special gift: you can communicate with monsters and understand their feelings. This makes you a perfect candidate for becoming a monster trainer and protector of Numa. Your adventure begins when you choose your starter monster from three options: Fire Lion, Ice Bear, or Leaf Turtle. You can also name your monster and customize its appearance with different colors and accessories.</p>
46
- <p>After choosing your starter monster, you will explore the islands of Numa and fight against Chaos and its minions. You will encounter different types of monsters, each with their own strengths and weaknesses. You will also collect and breed more monsters with different abilities and combinations. You can have up to six monsters in your team at a time, and you can switch them during battles.</p>
47
- <p>Battles in Monster Life are turn-based and simple to play. You just need to tap on your monster's icon to select it, then tap on the enemy's icon to attack it. You can also use items and skills to heal or boost your monsters, or to inflict damage or status effects on your enemies. The battle ends when you defeat all the enemies or when all your monsters are knocked out.</p>
48
- <p>As you win battles, your monsters will gain experience and level up. They will also learn new skills and evolve into stronger forms. You can train and customize your monsters with items and skills that you can buy or find in the game. You can also build habitats, farms, shops, and other facilities on your island to make it more comfortable and attractive for your monsters.</p>
49
- <p>But fighting against Chaos is not the only thing you can do in Monster Life. You can also challenge other players in online battles and tournaments. You can test your skills and strategies against real opponents from around the world, and earn rewards and rankings based on your performance. You can also chat with other players, visit their islands, and trade monsters with them.</p>
50
- <h2>What are the features of Monster Life Mod APK Free Shopping</h2>
51
- <p>Monster Life Mod APK Free Shopping is not just a regular version of Monster Life. It has some extra features that make it more fun and enjoyable to play. Here are some of the features that you can expect from this mod apk:</p>
52
- <ul>
53
- <li><strong>Unlimited coins and gems</strong>: Coins and gems are the main currencies in Monster Life. You need them to buy items, skills, habitats, decorations, and other things in the game. Normally, you can earn coins and gems by completing quests, winning battles, watching ads, or spending real money. But with this mod apk, you don't have to worry about running out of coins or gems ever again. You will have unlimited amounts of them to spend as you wish.</li>
54
- <li><strong>All monsters unlocked and available to breed</strong>: Monsters are the heart of Monster Life. There are over 100 different monsters in the game, each with their own characteristics, abilities, evolutions, and personalities. Normally, you can unlock new monsters by completing quests, winning battles, breeding existing monsters, or spending real money. But with this mod apk, you don't have to wait or pay for anything. You will have access to all the monsters in the game from the start, and you can breed them freely without any restrictions.</li>
55
- <li><strong>No ads or in-app purchases</strong>: Ads and in-app purchases are annoying features that can interrupt your gameplay or tempt you to spend more money than you want. Normally, you can watch ads to earn some extra coins or gems, or buy them with real money if you are impatient or desperate. But with this mod apk, you don't have to deal with any ads or in-app purchases at all. You will enjoy a smooth and uninterrupted gameplay without any distractions or temptations.</li>
56
- </ul> <h2>What are the pros and cons of Monster Life Mod APK Free Shopping</h2>
57
- <p>Monster Life Mod APK Free Shopping may sound like a perfect game for monster lovers, but it is not without its drawbacks. Like any mod apk, it has some advantages and disadvantages that you should consider before playing it. Here are some of the pros and cons of Monster Life Mod APK Free Shopping:</p>
58
- <table>
59
- <tr>
60
- <th>Pros</th>
61
- <th>Cons</th>
62
- </tr>
63
- <tr>
64
- <td><strong>Enjoy a cute and colorful graphics and animation</strong>: Monster Life has a charming and vibrant graphics and animation that will appeal to anyone who likes cute things. The monsters are adorable and expressive, the islands are lush and lively, and the battles are dynamic and exciting. The game also has a cheerful and upbeat soundtrack that matches the mood of the game.</td>
65
- <td><strong>The mod apk file may not be compatible with some devices or updates</strong>: Monster Life Mod APK Free Shopping is a modified version of the original game, which means that it may not work well with some devices or updates. The mod apk file may crash, freeze, or lag on some devices, or it may not run at all. The mod apk file may also become obsolete or incompatible with future updates of the original game, which may prevent you from playing the game or accessing new features.</td>
66
- </tr>
67
- <tr>
68
- <td><strong>Experience a fun and engaging gameplay with diverse monsters and activities</strong>: Monster Life has a lot of things to offer to keep you entertained and hooked. You can collect, breed, train, and battle with over 100 different monsters, each with their own abilities and personalities. You can also explore the islands of Numa and discover new places, quests, and secrets. You can also build and decorate your own island with various facilities and items. You can also interact with other players online and compete with them in battles and tournaments.</td>
69
- <td><strong>The mod apk file may contain viruses or malware that can harm your device or data</strong>: Monster Life Mod APK Free Shopping is not an official version of the game, which means that it may not be safe or secure to download or install. The mod apk file may contain viruses or malware that can infect your device or data, or steal your personal information. You should always scan the mod apk file with an antivirus software before installing it, and backup your data before playing it.</td>
70
- </tr>
71
- <tr>
72
- <td><strong>Share your monster collection and achievements with your friends on social media</strong>: Monster Life allows you to connect your game account with your Facebook account, which enables you to share your monster collection and achievements with your friends on social media. You can also invite your friends to play the game with you, or send them gifts and messages. You can also see your friends' islands and monsters, and compare your progress and rankings with them.</td>
73
- <td><strong>The mod apk file may violate the terms of service of the original game and result in a ban or suspension</strong>: Monster Life Mod APK Free Shopping is an unauthorized version of the game, which means that it may violate the terms of service of the original game. The terms of service prohibit modifying, hacking, cheating, or exploiting the game in any way. If you play the mod apk file, you may risk getting banned or suspended from the original game, or losing your account or data.</td>
74
- </tr>
75
- </table>
76
- <h2>Conclusion: Is Monster Life Mod APK Free Shopping worth it?</h2>
77
- <p>Monster Life Mod APK Free Shopping is a fun and addictive game for anyone who loves cute monsters. It has a lot of features that make it more enjoyable and convenient to play than the original game. However, it also has some risks and drawbacks that you should be aware of before playing it. Ultimately, the decision is up to you whether you want to try it or not.</p>
78
- <p>If you decide to play Monster Life Mod APK Free Shopping, we hope that this article has helped you understand how to download and install it, how to play it, what are its features, pros and cons, and whether it is worth it or not. We hope that you have a great time playing Monster Life Mod APK Free Shopping!</p>
79
- <h3>FAQs</h3>
80
- <p>Here are some frequently asked questions about Monster Life Mod APK Free Shopping:</p>
81
- <ol>
82
- <li><strong>What is Monster Life?</strong></li>
83
- <p>Monster Life is a popular game by Gameloft that lets you collect, breed, train, and battle with cute monsters in a magical world called Numa.</p>
84
- <li><strong>What is Monster Life Mod APK Free Shopping?</strong></li>
85
- <p>Monster Life Mod APK Free Shopping is a modified version of Monster Life that gives you unlimited coins and gems to buy anything you want in the game, as well as access to all the monsters in the game.</p>
86
- <li><strong>Is Monster Life Mod APK Free Shopping safe and legal?</strong></li>
87
- <p>Monster Life Mod APK Free Shopping is not an official version of the game, which means that it may not be safe or legal to download or play. The mod apk file may contain viruses or malware that can harm your device or data, or it may violate the terms of service of the original game and result in a ban or suspension. You should always scan the mod apk file with an antivirus software before installing it, and backup your data before playing it. You should also play the mod apk file at your own risk and responsibility.</p>
88
- <li><strong>How can I get more coins and gems in Monster Life?</strong></li>
89
- <p>If you don't want to use Monster Life Mod APK Free Shopping, you can still get more coins and gems in Monster Life by completing quests, winning battles, watching ads, or spending real money. You can also get more coins and gems by inviting your friends to play the game with you, or by participating in events and promotions.</p>
90
- <li><strong>How can I breed new monsters in Monster Life?</strong></li>
91
- <p>To breed new monsters in Monster Life, you need to have two monsters of the same species and opposite genders. You also need to have a breeding habitat that matches their element. You can then tap on the breeding habitat and select the two monsters that you want to breed. You will then have to wait for some time until the breeding is done. You can speed up the process by using gems or watching ads. You can then hatch the egg and get a new monster.</p>
92
- <li><strong>How can I contact the developer of Monster Life?</strong></li>
93
- <p>If you have any questions, feedback, or issues about Monster Life, you can contact the developer of the game by visiting their website [here], or by sending them an email at [[email protected]]. You can also follow them on their social media accounts on [Facebook], [Twitter], [Instagram], and [YouTube].</p>
94
- </ol></p> 401be4b1e0<br />
95
- <br />
96
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Extreme car driving simulator apk Free download and play the most realistic car game ever.md DELETED
@@ -1,99 +0,0 @@
1
-
2
- <h1>Free Download Extreme Car Driving Simulator APK</h1>
3
- <p>If you are looking for a realistic and fun car driving game, you should try Extreme Car Driving Simulator. This is one of the best open world car simulators that lets you drive, drift and feel a racing sports car. You can perform illegal stunts, run full speed without the police chasing you, and burn the asphalt of this huge city. In this article, we will tell you what Extreme Car Driving Simulator is, what features it has, and how to download and install it on your Android device.</p>
4
- <h2>What is Extreme Car Driving Simulator?</h2>
5
- <p>Extreme Car Driving Simulator is an Android game developed by AxesInMotion Racing. It was released in 2014 and has since gained over 500 million downloads on Google Play. It is also available on other platforms like Windows, iOS, and Mac. Extreme Car Driving Simulator is a game that simulates the experience of driving a sports car in an open world environment. You can choose from different cars, customize them, and drive them in various modes. You can also explore the city, crash your car, and enjoy the realistic physics and graphics.</p>
6
- <h2>free download extreme car driving simulator apk</h2><br /><p><b><b>Download Zip</b> &#187; <a href="https://jinyurl.com/2uNMBv">https://jinyurl.com/2uNMBv</a></b></p><br /><br />
7
- <h3>Features of Extreme Car Driving Simulator</h3>
8
- <p>Extreme Car Driving Simulator has many features that make it an exciting and addictive game. Here are some of them:</p>
9
- <h4>Mini game checkpoint mode</h4>
10
- <p>In this mode, you have to reach different checkpoints in the city within a given time limit. You can earn coins and unlock new cars by completing this mode.</p>
11
- <p>free download extreme car driving simulator apk mod<br />
12
- free download extreme car driving simulator apk for pc<br />
13
- free download extreme car driving simulator apk latest version<br />
14
- free download extreme car driving simulator apk unlimited money<br />
15
- free download extreme car driving simulator apk hack<br />
16
- free download extreme car driving simulator apk android 1<br />
17
- free download extreme car driving simulator apk pure<br />
18
- free download extreme car driving simulator apk offline<br />
19
- free download extreme car driving simulator apk old version<br />
20
- free download extreme car driving simulator apk revdl<br />
21
- free download extreme car driving simulator apk rexdl<br />
22
- free download extreme car driving simulator apk uptodown<br />
23
- free download extreme car driving simulator apk obb<br />
24
- free download extreme car driving simulator apk data<br />
25
- free download extreme car driving simulator apk mirror<br />
26
- free download extreme car driving simulator apk no ads<br />
27
- free download extreme car driving simulator apk full version<br />
28
- free download extreme car driving simulator apk mod menu<br />
29
- free download extreme car driving simulator apk mod money<br />
30
- free download extreme car driving simulator apk mod all cars unlocked<br />
31
- free download extreme car driving simulator apk mod unlimited money and gold<br />
32
- free download extreme car driving simulator apk mod 5.3.0p1<br />
33
- free download extreme car driving simulator apk mod 5.2.7p1<br />
34
- free download extreme car driving simulator apk mod 5.2.6p1<br />
35
- free download extreme car driving simulator apk mod 5.2.3p1<br />
36
- free download extreme car driving simulator apk mod 5.2.0p1<br />
37
- free download extreme car driving simulator apk mod 5.1.12p1<br />
38
- free download extreme car driving simulator apk mod 5.1.11p1<br />
39
- free download extreme car driving simulator apk mod 5.1.8p1<br />
40
- free download extreme car driving simulator apk mod 5.1.7p1<br />
41
- free download extreme car driving simulator apk mod 5.1.6p1<br />
42
- free download extreme car driving simulator apk mod 5.0.9p1<br />
43
- free download extreme car driving simulator apk mod 5.0.8p1<br />
44
- free download extreme car driving simulator apk mod 5.0.7p1<br />
45
- free download extreme car driving simulator apk mod 5.0.6p1<br />
46
- free download extreme car driving simulator apk mod 5.0.4p1<br />
47
- free download extreme car driving simulator apk mod 4.18.30p1<br />
48
- free download extreme car driving simulator apk mod 4.18.26p1<br />
49
- free download extreme car driving simulator apk mod 4.18.25p1<br />
50
- free download extreme car driving simulator apk mod 4.18.23p1<br />
51
- free download extreme car driving simulator apk mod 4.18.20p1<br />
52
- free download extreme car driving simulator apk mod 4.18.19p1<br />
53
- free download extreme car driving simulator apk mod 4.18.17p1<br />
54
- free download extreme car driving simulator apk mod 4.18.16p1<br />
55
- free download extreme car driving simulator apk mod 4.18.15p1<br />
56
- free download extreme car driving simulator apk mod 4.18.14p1<br />
57
- free download extreme car driving simulator apk mod 4.18.13p1<br />
58
- free download extreme car driving simulator apk mod 4.18.p11</p>
59
- <h4>Drive with traffic</h4>
60
- <p>You can also drive with traffic in the city, which adds more challenge and realism to the game. You have to avoid crashing into other vehicles and obey the traffic rules.</p>
61
- <h4>Full real HUD</h4>
62
- <p>The game has a full real HUD that shows you the revs, gear, speed, and other information of your car. You can also switch between different views, such as cockpit view, third-person view, or top-down view.</p>
63
- <h4>ABS, TC and ESP simulation</h4>
64
- <p>You can also simulate the ABS, TC and ESP systems of your car. These are features that help you control your car better in different situations. You can also turn them off if you want more challenge.</p>
65
- <h4>Explore a detailed open world environment</h4>
66
- <p>The game has a large and detailed open world environment that you can explore freely. You can find different places, such as airports, highways, bridges, tunnels, off-road areas, and more. You can also interact with some objects, such as ramps, cones, barrels, and traffic lights.</p>
67
- <h4>Realistic car damage and physics</h4>
68
- <p>The game has realistic car damage and physics that make it more fun and immersive. You can see your car getting dented, scratched, or even destroyed by crashing into other cars or objects. You can also feel the weight, speed, and handling of your car as you drive it.</p>
69
- <h4>Control your car with different options</h4>
70
- <p>You can control your car with different options, such as steering wheel, accelerometer, or arrows. You can also adjust the sensitivity and tilt of your device to suit your preference.</p>
71
- <h4>Several different cameras and gamepad support</h4>
72
- <p>You can also switch between several different cameras to get different perspectives of your car and the environment. You can also use a gamepad to play the game if you have one connected to your device.</p>
73
- <h3>How to download and install Extreme Car Driving Simulator APK?</h3>
74
- <p>If you want to download and install Extreme Car Driving Simulator APK on your Android device , you can follow these simple steps:</p>
75
- <h4>Download the APK file from a trusted source</h4>
76
- <p>The first step is to download the APK file of Extreme Car Driving Simulator from a trusted source. You can find many websites that offer the APK file, but you have to be careful and avoid downloading from malicious or fake sites. One of the reliable sources that we recommend is APKPure, which is a popular and safe platform for downloading APK files. You can visit their website and search for Extreme Car Driving Simulator, or you can use this link to go directly to the download page.</p>
77
- <h4>Enable unknown sources on your device</h4>
78
- <p>The next step is to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than Google Play. To do this, you have to go to your device settings and look for the option that says "Unknown sources" or "Install unknown apps". Depending on your device model and Android version, this option may be located in different places, such as Security, Privacy, or Applications. You have to enable this option by tapping on it and confirming your choice.</p>
79
- <h4>Install the APK file and launch the game</h4>
80
- <p>The final step is to install the APK file and launch the game. To do this, you have to locate the APK file that you downloaded in your device storage, usually in the Downloads folder. You have to tap on the file and follow the instructions on the screen to install it. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You can now enjoy playing Extreme Car Driving Simulator on your Android device.</p>
81
- <h3>Conclusion</h3>
82
- <p>Extreme Car Driving Simulator is a great game for car enthusiasts who want to experience driving a sports car in an open world environment. It has many features that make it realistic, fun, and challenging. You can download and install it on your Android device by following the steps we explained in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.</p>
83
- <h3>FAQs</h3>
84
- <p>Here are some frequently asked questions about Extreme Car Driving Simulator:</p>
85
- <ul>
86
- <li><b>Is Extreme Car Driving Simulator free?</b></li>
87
- <p>Yes, Extreme Car Driving Simulator is free to download and play. However, it contains ads and in-app purchases that you can disable or buy if you want.</p>
88
- <li><b>Is Extreme Car Driving Simulator offline?</b></li>
89
- <p>Yes, Extreme Car Driving Simulator can be played offline without an internet connection. However, some features may require an internet connection, such as updating the game or accessing online leaderboards.</p>
90
- <li><b>Is Extreme Car Driving Simulator safe?</b></li>
91
- <p>Yes, Extreme Car Driving Simulator is safe to play as long as you download it from a trusted source like APKPure. You should also scan the APK file with an antivirus app before installing it.</p>
92
- <li><b>How to update Extreme Car Driving Simulator?</b></li>
93
- <p>You can update Extreme Car Driving Simulator by downloading the latest version of the APK file from APKPure or other trusted sources. You can also check for updates within the game by tapping on the settings icon and selecting "Check for updates".</p>
94
- <li><b>How to get more coins in Extreme Car Driving Simulator?</b></li>
95
- <p>You can get more coins in Extreme Car Driving Simulator by completing mini game checkpoint mode, driving with traffic, performing stunts, or watching ads. You can also buy coins with real money through in-app purchases.</p>
96
- </ul>
97
- : https://apkpure.com/ : https://apkpure.com/extreme-car-driving-simulator/com.aim.racing</p> 401be4b1e0<br />
98
- <br />
99
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/tests/modules/test_seanet.py DELETED
@@ -1,115 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from itertools import product
8
-
9
- import pytest
10
- import torch
11
-
12
- from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock
13
- from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d
14
-
15
-
16
- class TestSEANetModel:
17
-
18
- def test_base(self):
19
- encoder = SEANetEncoder()
20
- decoder = SEANetDecoder()
21
-
22
- x = torch.randn(1, 1, 24000)
23
- z = encoder(x)
24
- assert list(z.shape) == [1, 128, 75], z.shape
25
- y = decoder(z)
26
- assert y.shape == x.shape, (x.shape, y.shape)
27
-
28
- def test_causal(self):
29
- encoder = SEANetEncoder(causal=True)
30
- decoder = SEANetDecoder(causal=True)
31
- x = torch.randn(1, 1, 24000)
32
-
33
- z = encoder(x)
34
- assert list(z.shape) == [1, 128, 75], z.shape
35
- y = decoder(z)
36
- assert y.shape == x.shape, (x.shape, y.shape)
37
-
38
- def test_conv_skip_connection(self):
39
- encoder = SEANetEncoder(true_skip=False)
40
- decoder = SEANetDecoder(true_skip=False)
41
-
42
- x = torch.randn(1, 1, 24000)
43
- z = encoder(x)
44
- assert list(z.shape) == [1, 128, 75], z.shape
45
- y = decoder(z)
46
- assert y.shape == x.shape, (x.shape, y.shape)
47
-
48
- def test_seanet_encoder_decoder_final_act(self):
49
- encoder = SEANetEncoder(true_skip=False)
50
- decoder = SEANetDecoder(true_skip=False, final_activation='Tanh')
51
-
52
- x = torch.randn(1, 1, 24000)
53
- z = encoder(x)
54
- assert list(z.shape) == [1, 128, 75], z.shape
55
- y = decoder(z)
56
- assert y.shape == x.shape, (x.shape, y.shape)
57
-
58
- def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str):
59
- n_blocks = 0
60
- for layer in encoder.model:
61
- if isinstance(layer, StreamableConv1d):
62
- n_blocks += 1
63
- assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm
64
- elif isinstance(layer, SEANetResnetBlock):
65
- for resnet_layer in layer.block:
66
- if isinstance(resnet_layer, StreamableConv1d):
67
- # here we add + 1 to n_blocks as we increment n_blocks just after the block
68
- assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm
69
-
70
- def test_encoder_disable_norm(self):
71
- n_residuals = [0, 1, 3]
72
- disable_blocks = [0, 1, 2, 3, 4, 5, 6]
73
- norms = ['weight_norm', 'none']
74
- for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms):
75
- encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm,
76
- disable_norm_outer_blocks=disable_blocks)
77
- self._check_encoder_blocks_norm(encoder, disable_blocks, norm)
78
-
79
- def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str):
80
- n_blocks = 0
81
- for layer in decoder.model:
82
- if isinstance(layer, StreamableConv1d):
83
- n_blocks += 1
84
- assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm
85
- elif isinstance(layer, StreamableConvTranspose1d):
86
- n_blocks += 1
87
- assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm
88
- elif isinstance(layer, SEANetResnetBlock):
89
- for resnet_layer in layer.block:
90
- if isinstance(resnet_layer, StreamableConv1d):
91
- assert resnet_layer.conv.norm_type == 'none' \
92
- if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm
93
-
94
- def test_decoder_disable_norm(self):
95
- n_residuals = [0, 1, 3]
96
- disable_blocks = [0, 1, 2, 3, 4, 5, 6]
97
- norms = ['weight_norm', 'none']
98
- for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms):
99
- decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm,
100
- disable_norm_outer_blocks=disable_blocks)
101
- self._check_decoder_blocks_norm(decoder, disable_blocks, norm)
102
-
103
- def test_disable_norm_raises_exception(self):
104
- # Invalid disable_norm_outer_blocks values raise exceptions
105
- with pytest.raises(AssertionError):
106
- SEANetEncoder(disable_norm_outer_blocks=-1)
107
-
108
- with pytest.raises(AssertionError):
109
- SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
110
-
111
- with pytest.raises(AssertionError):
112
- SEANetDecoder(disable_norm_outer_blocks=-1)
113
-
114
- with pytest.raises(AssertionError):
115
- SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/classifier.py DELETED
@@ -1,267 +0,0 @@
1
- import os
2
- import torch
3
- import pytorch_lightning as pl
4
- from omegaconf import OmegaConf
5
- from torch.nn import functional as F
6
- from torch.optim import AdamW
7
- from torch.optim.lr_scheduler import LambdaLR
8
- from copy import deepcopy
9
- from einops import rearrange
10
- from glob import glob
11
- from natsort import natsorted
12
-
13
- from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
14
- from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config
15
-
16
- __models__ = {
17
- 'class_label': EncoderUNetModel,
18
- 'segmentation': UNetModel
19
- }
20
-
21
-
22
- def disabled_train(self, mode=True):
23
- """Overwrite model.train with this function to make sure train/eval mode
24
- does not change anymore."""
25
- return self
26
-
27
-
28
- class NoisyLatentImageClassifier(pl.LightningModule):
29
-
30
- def __init__(self,
31
- diffusion_path,
32
- num_classes,
33
- ckpt_path=None,
34
- pool='attention',
35
- label_key=None,
36
- diffusion_ckpt_path=None,
37
- scheduler_config=None,
38
- weight_decay=1.e-2,
39
- log_steps=10,
40
- monitor='val/loss',
41
- *args,
42
- **kwargs):
43
- super().__init__(*args, **kwargs)
44
- self.num_classes = num_classes
45
- # get latest config of diffusion model
46
- diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
47
- self.diffusion_config = OmegaConf.load(diffusion_config).model
48
- self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
49
- self.load_diffusion()
50
-
51
- self.monitor = monitor
52
- self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
53
- self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
54
- self.log_steps = log_steps
55
-
56
- self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
57
- else self.diffusion_model.cond_stage_key
58
-
59
- assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'
60
-
61
- if self.label_key not in __models__:
62
- raise NotImplementedError()
63
-
64
- self.load_classifier(ckpt_path, pool)
65
-
66
- self.scheduler_config = scheduler_config
67
- self.use_scheduler = self.scheduler_config is not None
68
- self.weight_decay = weight_decay
69
-
70
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
71
- sd = torch.load(path, map_location="cpu")
72
- if "state_dict" in list(sd.keys()):
73
- sd = sd["state_dict"]
74
- keys = list(sd.keys())
75
- for k in keys:
76
- for ik in ignore_keys:
77
- if k.startswith(ik):
78
- print("Deleting key {} from state_dict.".format(k))
79
- del sd[k]
80
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
81
- sd, strict=False)
82
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
83
- if len(missing) > 0:
84
- print(f"Missing Keys: {missing}")
85
- if len(unexpected) > 0:
86
- print(f"Unexpected Keys: {unexpected}")
87
-
88
- def load_diffusion(self):
89
- model = instantiate_from_config(self.diffusion_config)
90
- self.diffusion_model = model.eval()
91
- self.diffusion_model.train = disabled_train
92
- for param in self.diffusion_model.parameters():
93
- param.requires_grad = False
94
-
95
- def load_classifier(self, ckpt_path, pool):
96
- model_config = deepcopy(self.diffusion_config.params.unet_config.params)
97
- model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
98
- model_config.out_channels = self.num_classes
99
- if self.label_key == 'class_label':
100
- model_config.pool = pool
101
-
102
- self.model = __models__[self.label_key](**model_config)
103
- if ckpt_path is not None:
104
- print('#####################################################################')
105
- print(f'load from ckpt "{ckpt_path}"')
106
- print('#####################################################################')
107
- self.init_from_ckpt(ckpt_path)
108
-
109
- @torch.no_grad()
110
- def get_x_noisy(self, x, t, noise=None):
111
- noise = default(noise, lambda: torch.randn_like(x))
112
- continuous_sqrt_alpha_cumprod = None
113
- if self.diffusion_model.use_continuous_noise:
114
- continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
115
- # todo: make sure t+1 is correct here
116
-
117
- return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise,
118
- continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod)
119
-
120
- def forward(self, x_noisy, t, *args, **kwargs):
121
- return self.model(x_noisy, t)
122
-
123
- @torch.no_grad()
124
- def get_input(self, batch, k):
125
- x = batch[k]
126
- if len(x.shape) == 3:
127
- x = x[..., None]
128
- x = rearrange(x, 'b h w c -> b c h w')
129
- x = x.to(memory_format=torch.contiguous_format).float()
130
- return x
131
-
132
- @torch.no_grad()
133
- def get_conditioning(self, batch, k=None):
134
- if k is None:
135
- k = self.label_key
136
- assert k is not None, 'Needs to provide label key'
137
-
138
- targets = batch[k].to(self.device)
139
-
140
- if self.label_key == 'segmentation':
141
- targets = rearrange(targets, 'b h w c -> b c h w')
142
- for down in range(self.numd):
143
- h, w = targets.shape[-2:]
144
- targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
145
-
146
- # targets = rearrange(targets,'b c h w -> b h w c')
147
-
148
- return targets
149
-
150
- def compute_top_k(self, logits, labels, k, reduction="mean"):
151
- _, top_ks = torch.topk(logits, k, dim=1)
152
- if reduction == "mean":
153
- return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
154
- elif reduction == "none":
155
- return (top_ks == labels[:, None]).float().sum(dim=-1)
156
-
157
- def on_train_epoch_start(self):
158
- # save some memory
159
- self.diffusion_model.model.to('cpu')
160
-
161
- @torch.no_grad()
162
- def write_logs(self, loss, logits, targets):
163
- log_prefix = 'train' if self.training else 'val'
164
- log = {}
165
- log[f"{log_prefix}/loss"] = loss.mean()
166
- log[f"{log_prefix}/acc@1"] = self.compute_top_k(
167
- logits, targets, k=1, reduction="mean"
168
- )
169
- log[f"{log_prefix}/acc@5"] = self.compute_top_k(
170
- logits, targets, k=5, reduction="mean"
171
- )
172
-
173
- self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
174
- self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
175
- self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
176
- lr = self.optimizers().param_groups[0]['lr']
177
- self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
178
-
179
- def shared_step(self, batch, t=None):
180
- x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
181
- targets = self.get_conditioning(batch)
182
- if targets.dim() == 4:
183
- targets = targets.argmax(dim=1)
184
- if t is None:
185
- t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long()
186
- else:
187
- t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
188
- x_noisy = self.get_x_noisy(x, t)
189
- logits = self(x_noisy, t)
190
-
191
- loss = F.cross_entropy(logits, targets, reduction='none')
192
-
193
- self.write_logs(loss.detach(), logits.detach(), targets.detach())
194
-
195
- loss = loss.mean()
196
- return loss, logits, x_noisy, targets
197
-
198
- def training_step(self, batch, batch_idx):
199
- loss, *_ = self.shared_step(batch)
200
- return loss
201
-
202
- def reset_noise_accs(self):
203
- self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
204
- range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)}
205
-
206
- def on_validation_start(self):
207
- self.reset_noise_accs()
208
-
209
- @torch.no_grad()
210
- def validation_step(self, batch, batch_idx):
211
- loss, *_ = self.shared_step(batch)
212
-
213
- for t in self.noisy_acc:
214
- _, logits, _, targets = self.shared_step(batch, t)
215
- self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
216
- self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
217
-
218
- return loss
219
-
220
- def configure_optimizers(self):
221
- optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
222
-
223
- if self.use_scheduler:
224
- scheduler = instantiate_from_config(self.scheduler_config)
225
-
226
- print("Setting up LambdaLR scheduler...")
227
- scheduler = [
228
- {
229
- 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
230
- 'interval': 'step',
231
- 'frequency': 1
232
- }]
233
- return [optimizer], scheduler
234
-
235
- return optimizer
236
-
237
- @torch.no_grad()
238
- def log_images(self, batch, N=8, *args, **kwargs):
239
- log = dict()
240
- x = self.get_input(batch, self.diffusion_model.first_stage_key)
241
- log['inputs'] = x
242
-
243
- y = self.get_conditioning(batch)
244
-
245
- if self.label_key == 'class_label':
246
- y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
247
- log['labels'] = y
248
-
249
- if ismap(y):
250
- log['labels'] = self.diffusion_model.to_rgb(y)
251
-
252
- for step in range(self.log_steps):
253
- current_time = step * self.log_time_interval
254
-
255
- _, logits, x_noisy, _ = self.shared_step(batch, t=current_time)
256
-
257
- log[f'inputs@t{current_time}'] = x_noisy
258
-
259
- pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes)
260
- pred = rearrange(pred, 'b h w c -> b c h w')
261
-
262
- log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred)
263
-
264
- for key in log:
265
- log[key] = log[key][:N]
266
-
267
- return log
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ALSv/FSW/roop/processors/frame/__init__.py DELETED
File without changes
spaces/Aaaad/Dddde/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("huggingface/gpt2").launch()
 
 
 
 
spaces/Abhilashvj/planogram-compliance/utils/autoanchor.py DELETED
@@ -1,219 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- AutoAnchor utils
4
- """
5
-
6
- import random
7
-
8
- import numpy as np
9
- import torch
10
- import yaml
11
- from tqdm import tqdm
12
-
13
- from utils import TryExcept
14
- from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr
15
-
16
- PREFIX = colorstr("AutoAnchor: ")
17
-
18
-
19
- def check_anchor_order(m):
20
- # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
21
- a = (
22
- m.anchors.prod(-1).mean(-1).view(-1)
23
- ) # mean anchor area per output layer
24
- da = a[-1] - a[0] # delta a
25
- ds = m.stride[-1] - m.stride[0] # delta s
26
- if da and (da.sign() != ds.sign()): # same order
27
- LOGGER.info(f"{PREFIX}Reversing anchor order")
28
- m.anchors[:] = m.anchors.flip(0)
29
-
30
-
31
- @TryExcept(f"{PREFIX}ERROR")
32
- def check_anchors(dataset, model, thr=4.0, imgsz=640):
33
- # Check anchor fit to data, recompute if necessary
34
- m = (
35
- model.module.model[-1] if hasattr(model, "module") else model.model[-1]
36
- ) # Detect()
37
- shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
38
- scale = np.random.uniform(
39
- 0.9, 1.1, size=(shapes.shape[0], 1)
40
- ) # augment scale
41
- wh = torch.tensor(
42
- np.concatenate(
43
- [l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)]
44
- )
45
- ).float() # wh
46
-
47
- def metric(k): # compute metric
48
- r = wh[:, None] / k[None]
49
- x = torch.min(r, 1 / r).min(2)[0] # ratio metric
50
- best = x.max(1)[0] # best_x
51
- aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold
52
- bpr = (best > 1 / thr).float().mean() # best possible recall
53
- return bpr, aat
54
-
55
- stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides
56
- anchors = m.anchors.clone() * stride # current anchors
57
- bpr, aat = metric(anchors.cpu().view(-1, 2))
58
- s = f"\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). "
59
- if bpr > 0.98: # threshold to recompute
60
- LOGGER.info(f"{s}Current anchors are a good fit to dataset ✅")
61
- else:
62
- LOGGER.info(
63
- f"{s}Anchors are a poor fit to dataset ⚠️, attempting to improve..."
64
- )
65
- na = m.anchors.numel() // 2 # number of anchors
66
- anchors = kmean_anchors(
67
- dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False
68
- )
69
- new_bpr = metric(anchors)[0]
70
- if new_bpr > bpr: # replace anchors
71
- anchors = torch.tensor(anchors, device=m.anchors.device).type_as(
72
- m.anchors
73
- )
74
- m.anchors[:] = anchors.clone().view_as(m.anchors)
75
- check_anchor_order(m) # must be in pixel-space (not grid-space)
76
- m.anchors /= stride
77
- s = f"{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)"
78
- else:
79
- s = f"{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)"
80
- LOGGER.info(s)
81
-
82
-
83
- def kmean_anchors(
84
- dataset="./data/coco128.yaml",
85
- n=9,
86
- img_size=640,
87
- thr=4.0,
88
- gen=1000,
89
- verbose=True,
90
- ):
91
- """Creates kmeans-evolved anchors from training dataset
92
-
93
- Arguments:
94
- dataset: path to data.yaml, or a loaded dataset
95
- n: number of anchors
96
- img_size: image size used for training
97
- thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
98
- gen: generations to evolve anchors using genetic algorithm
99
- verbose: print all results
100
-
101
- Return:
102
- k: kmeans evolved anchors
103
-
104
- Usage:
105
- from utils.autoanchor import *; _ = kmean_anchors()
106
- """
107
- from scipy.cluster.vq import kmeans
108
-
109
- npr = np.random
110
- thr = 1 / thr
111
-
112
- def metric(k, wh): # compute metrics
113
- r = wh[:, None] / k[None]
114
- x = torch.min(r, 1 / r).min(2)[0] # ratio metric
115
- # x = wh_iou(wh, torch.tensor(k)) # iou metric
116
- return x, x.max(1)[0] # x, best_x
117
-
118
- def anchor_fitness(k): # mutation fitness
119
- _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
120
- return (best * (best > thr).float()).mean() # fitness
121
-
122
- def print_results(k, verbose=True):
123
- k = k[np.argsort(k.prod(1))] # sort small to large
124
- x, best = metric(k, wh0)
125
- bpr, aat = (best > thr).float().mean(), (
126
- x > thr
127
- ).float().mean() * n # best possible recall, anch > thr
128
- s = (
129
- f"{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n"
130
- f"{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, "
131
- f"past_thr={x[x > thr].mean():.3f}-mean: "
132
- )
133
- for x in k:
134
- s += "%i,%i, " % (round(x[0]), round(x[1]))
135
- if verbose:
136
- LOGGER.info(s[:-2])
137
- return k
138
-
139
- if isinstance(dataset, str): # *.yaml file
140
- with open(dataset, errors="ignore") as f:
141
- data_dict = yaml.safe_load(f) # model dict
142
- from utils.dataloaders import LoadImagesAndLabels
143
-
144
- dataset = LoadImagesAndLabels(
145
- data_dict["train"], augment=True, rect=True
146
- )
147
-
148
- # Get label wh
149
- shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
150
- wh0 = np.concatenate(
151
- [l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]
152
- ) # wh
153
-
154
- # Filter
155
- i = (wh0 < 3.0).any(1).sum()
156
- if i:
157
- LOGGER.info(
158
- f"{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size"
159
- )
160
- wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels
161
- # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
162
-
163
- # Kmeans init
164
- try:
165
- LOGGER.info(
166
- f"{PREFIX}Running kmeans for {n} anchors on {len(wh)} points..."
167
- )
168
- assert n <= len(wh) # apply overdetermined constraint
169
- s = wh.std(0) # sigmas for whitening
170
- k = kmeans(wh / s, n, iter=30)[0] * s # points
171
- assert n == len(
172
- k
173
- ) # kmeans may return fewer points than requested if wh is insufficient or too similar
174
- except Exception:
175
- LOGGER.warning(
176
- f"{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init"
177
- )
178
- k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init
179
- wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
180
- k = print_results(k, verbose=False)
181
-
182
- # Plot
183
- # k, d = [None] * 20, [None] * 20
184
- # for i in tqdm(range(1, 21)):
185
- # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
186
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
187
- # ax = ax.ravel()
188
- # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
189
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
190
- # ax[0].hist(wh[wh[:, 0]<100, 0],400)
191
- # ax[1].hist(wh[wh[:, 1]<100, 1],400)
192
- # fig.savefig('wh.png', dpi=200)
193
-
194
- # Evolve
195
- f, sh, mp, s = (
196
- anchor_fitness(k),
197
- k.shape,
198
- 0.9,
199
- 0.1,
200
- ) # fitness, generations, mutation prob, sigma
201
- pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar
202
- for _ in pbar:
203
- v = np.ones(sh)
204
- while (
205
- v == 1
206
- ).all(): # mutate until a change occurs (prevent duplicates)
207
- v = (
208
- (npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s
209
- + 1
210
- ).clip(0.3, 3.0)
211
- kg = (k.copy() * v).clip(min=2.0)
212
- fg = anchor_fitness(kg)
213
- if fg > f:
214
- f, k = fg, kg.copy()
215
- pbar.desc = f"{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}"
216
- if verbose:
217
- print_results(k, verbose)
218
-
219
- return print_results(k).astype(np.float32)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/base/Base.js DELETED
@@ -1,112 +0,0 @@
1
- import BaseShapes from '../../../plugins/gameobjects/shape/shapes/BaseShapes.js';
2
- import EaseValueMethods from './EaseValueMethods.js';
3
-
4
- const GetValue = Phaser.Utils.Objects.GetValue;
5
-
6
- class Base extends BaseShapes {
7
- constructor(scene, config) {
8
- var x = GetValue(config, 'x', 0);
9
- var y = GetValue(config, 'y', 0);
10
- var width = GetValue(config, 'width', 64);
11
- var height = GetValue(config, 'height', 64);
12
-
13
- super(scene, x, y, width, height);
14
-
15
- this.setDuration(GetValue(config, 'duration', 1000));
16
- this.setEase(GetValue(config, 'ease', 'Linear'));
17
- this.setDelay(GetValue(config, 'delay', 0));
18
- this.setRepeatDelay(GetValue(config, 'repeatDelay', 0));
19
- var color = GetValue(config, 'color', 0xffffff);
20
- var start = GetValue(config, 'start', true);
21
-
22
- this.buildShapes(config);
23
- this.setColor(color);
24
- this.setValue(0);
25
-
26
- if (start) {
27
- this.start();
28
- }
29
- }
30
-
31
- buildShapes() {
32
-
33
- }
34
-
35
- get centerX() {
36
- return this.width / 2;;
37
- }
38
-
39
- get centerY() {
40
- return this.height / 2;
41
- }
42
-
43
- get radius() {
44
- return Math.min(this.centerX, this.centerY);
45
- }
46
-
47
- get color() {
48
- return this._color;
49
- }
50
-
51
- set color(value) {
52
- this.isColorChanged = this.isColorChanged || (this._color !== value);
53
- this.dirty = this.dirty || this.isColorChanged;
54
- this._color = value;
55
- this.setShapesColor(value);
56
- }
57
-
58
- setColor(color) {
59
- this.color = color;
60
- return this;
61
- }
62
-
63
- setShapesColor(color) {
64
-
65
- }
66
-
67
- get value() {
68
- return this._value;
69
- }
70
-
71
- set value(value) {
72
- value = Phaser.Math.Clamp(value, 0, 1);
73
- this.dirty = this.dirty || (this._value != value);
74
- this._value = value;
75
- }
76
-
77
- setValue(value) {
78
- this.value = value;
79
- return this;
80
- }
81
-
82
- setDuration(duration) {
83
- this.duration = duration;
84
- return this;
85
- }
86
-
87
- setDelay(delay) {
88
- this.delay = delay;
89
- return this;
90
- }
91
-
92
- setRepeatDelay(repeatDelay) {
93
- this.repeatDelay = repeatDelay;
94
- return this;
95
- }
96
-
97
- setEase(ease) {
98
- this.ease = ease;
99
- return this;
100
- }
101
-
102
- get isRunning() {
103
- return (this.tweenTask) ? this.tweenTask.isRunning : false;
104
- }
105
- }
106
-
107
- Object.assign(
108
- Base.prototype,
109
- EaseValueMethods
110
- );
111
-
112
- export default Base;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/EaseMoveMethods.js DELETED
@@ -1,120 +0,0 @@
1
- import { EaseMoveTo, EaseMoveFrom } from '../easemove/EaseMove.js';
2
- import { WaitComplete } from '../utils/WaitEvent.js';
3
- import GetParentSizerMethods from './GetParentSizerMethods.js';
4
-
5
- const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
6
- const DistanceBetween = Phaser.Math.Distance.Between;
7
-
8
- var OnInitEaseMove = function (gameObject, easeMove) {
9
- // Route 'complete' of easeMove to gameObject
10
- easeMove.completeEventName = undefined;
11
- easeMove.on('complete', function () {
12
- if (easeMove.completeEventName) {
13
- gameObject.emit(easeMove.completeEventName, gameObject);
14
- easeMove.completeEventName = undefined;
15
- }
16
- })
17
-
18
- // Update local state
19
- easeMove.on('update', function () {
20
- var parent = GetParentSizerMethods.getParentSizer(gameObject);
21
- if (parent) {
22
- parent.resetChildPositionState(gameObject);
23
- }
24
- })
25
- }
26
-
27
- export default {
28
- moveFrom(duration, x, y, ease, destroyMode) {
29
- if (IsPlainObject(duration)) {
30
- var config = duration;
31
- x = config.x;
32
- y = config.y;
33
- if (config.hasOwnProperty('speed')) {
34
- duration = (DistanceBetween(x, y, this.x, this.y) * 1000) / config.speed;
35
- } else {
36
- duration = config.duration;
37
- }
38
-
39
- ease = config.ease;
40
- }
41
-
42
- var isInit = (this._easeMove === undefined);
43
-
44
- this._easeMove = EaseMoveFrom(this, duration, x, y, ease, destroyMode, this._easeMove);
45
-
46
- if (isInit) {
47
- OnInitEaseMove(this, this._easeMove);
48
- }
49
-
50
- this._easeMove.completeEventName = 'movefrom.complete';
51
-
52
- return this;
53
- },
54
-
55
- moveFromPromise(duration, x, y, ease, destroyMode) {
56
- this.moveFrom(duration, x, y, ease, destroyMode);
57
- return WaitComplete(this._easeMove);
58
- },
59
-
60
- moveFromDestroy(duration, x, y, ease) {
61
- this.moveFrom(duration, x, y, ease, true);
62
- return this;
63
- },
64
-
65
- moveFromDestroyPromise(duration, x, y, ease) {
66
- this.moveFromDestroy(duration, x, y, ease);
67
- return WaitComplete(this._easeMove);
68
- },
69
-
70
- moveTo(duration, x, y, ease, destroyMode) {
71
- if (IsPlainObject(duration)) {
72
- var config = duration;
73
- x = config.x;
74
- y = config.y;
75
- if (config.hasOwnProperty('speed')) {
76
- duration = (DistanceBetween(x, y, this.x, this.y) * 1000) / config.speed;
77
- } else {
78
- duration = config.duration;
79
- }
80
-
81
- ease = config.ease;
82
- }
83
-
84
- var isInit = (this._easeMove === undefined);
85
-
86
- this._easeMove = EaseMoveTo(this, duration, x, y, ease, destroyMode, this._easeMove);
87
-
88
- if (isInit) {
89
- OnInitEaseMove(this, this._easeMove);
90
- }
91
-
92
- this._easeMove.completeEventName = 'moveto.complete';
93
-
94
- return this;
95
- },
96
-
97
- moveToPromise(duration, x, y, ease, destroyMode) {
98
- this.moveTo(duration, x, y, ease, destroyMode);
99
- return WaitComplete(this._easeMove);
100
- },
101
-
102
- moveToDestroy(duration, x, y, ease) {
103
- this.moveTo(duration, x, y, ease, true)
104
- return this;
105
- },
106
-
107
- moveToDestroyPromise(duration, x, y, ease) {
108
- this.moveToDestroy(duration, x, y, ease, true);
109
- return WaitComplete(this._easeMove);
110
- },
111
-
112
- moveStop(toEnd) {
113
- if (!this._easeMove) {
114
- return this;
115
- }
116
-
117
- this._easeMove.stop(toEnd);
118
- return this;
119
- }
120
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Aloento/9Nine-PITS/text/frontend/normalizer/acronyms.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/utils/ImagesDataset.py DELETED
@@ -1,43 +0,0 @@
1
- import os
2
-
3
- from torch.utils.data import Dataset
4
- from PIL import Image
5
-
6
- from PTI.utils.data_utils import make_dataset
7
- from torchvision import transforms
8
-
9
-
10
- class Image2Dataset(Dataset):
11
- def __init__(self, image) -> None:
12
- super().__init__()
13
- self.image = image
14
- self.transform = transforms.Compose(
15
- [
16
- transforms.ToTensor(),
17
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
18
- ]
19
- )
20
-
21
- def __len__(self):
22
- return 1
23
-
24
- def __getitem__(self, index):
25
- return "customIMG", self.transform(self.image)
26
-
27
-
28
- class ImagesDataset(Dataset):
29
- def __init__(self, source_root, source_transform=None):
30
- self.source_paths = sorted(make_dataset(source_root))
31
- self.source_transform = source_transform
32
-
33
- def __len__(self):
34
- return len(self.source_paths)
35
-
36
- def __getitem__(self, index):
37
- fname, from_path = self.source_paths[index]
38
- from_im = Image.open(from_path).convert("RGB").resize([1024, 1024])
39
-
40
- if self.source_transform:
41
- from_im = self.source_transform(from_im)
42
-
43
- return fname, from_im
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/other-formats.md DELETED
@@ -1,194 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Load different Stable Diffusion formats
14
-
15
- [[open-in-colab]]
16
-
17
- Stable Diffusion models are available in different formats depending on the framework they're trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers allows you to use all the features supported by the library, such as [using different schedulers](schedulers) for inference, [building your custom pipeline](write_own_pipeline), and a variety of techniques and methods for [optimizing inference speed](./optimization/opt_overview).
18
-
19
- <Tip>
20
-
21
- We highly recommend using the `.safetensors` format because it is more secure than traditional pickled files which are vulnerable and can be exploited to execute any code on your machine (learn more in the [Load safetensors](using_safetensors) guide).
22
-
23
- </Tip>
24
-
25
- This guide will show you how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers.
26
-
27
- ## PyTorch .ckpt
28
-
29
- The checkpoint - or `.ckpt` - format is commonly used to store and save models. The `.ckpt` file contains the entire model and is typically several GBs in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_single_file`] method, it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available.
30
-
31
- There are two options for converting a `.ckpt` file; use a Space to convert the checkpoint or convert the `.ckpt` file with a script.
32
-
33
- ### Convert with a Space
34
-
35
- The easiest and most convenient way to convert a `.ckpt` file is to use the [SD to Diffusers](https://huggingface.co/spaces/diffusers/sd-to-diffusers) Space. You can follow the instructions on the Space to convert the `.ckpt` file.
36
-
37
- This approach works well for basic models, but it may struggle with more customized models. You'll know the Space failed if it returns an empty pull request or error. In this case, you can try converting the `.ckpt` file with a script.
38
-
39
- ### Convert with a script
40
-
41
- 🤗 Diffusers provides a [conversion script](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py) for converting `.ckpt` files. This approach is more reliable than the Space above.
42
-
43
- Before you start, make sure you have a local clone of 🤗 Diffusers to run the script and log in to your Hugging Face account so you can open pull requests and push your converted model to the Hub.
44
-
45
- ```bash
46
- huggingface-cli login
47
- ```
48
-
49
- To use the script:
50
-
51
- 1. Git clone the repository containing the `.ckpt` file you want to convert. For this example, let's convert this [TemporalNet](https://huggingface.co/CiaraRowles/TemporalNet) `.ckpt` file:
52
-
53
- ```bash
54
- git lfs install
55
- git clone https://huggingface.co/CiaraRowles/TemporalNet
56
- ```
57
-
58
- 2. Open a pull request on the repository where you're converting the checkpoint from:
59
-
60
- ```bash
61
- cd TemporalNet && git fetch origin refs/pr/13:pr/13
62
- git checkout pr/13
63
- ```
64
-
65
- 3. There are several input arguments to configure in the conversion script, but the most important ones are:
66
-
67
- - `checkpoint_path`: the path to the `.ckpt` file to convert.
68
- - `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file.
69
- - `dump_path`: the path to the converted model.
70
-
71
- For example, you can take the `cldm_v15.yaml` file from the [ControlNet](https://github.com/lllyasviel/ControlNet/tree/main/models) repository because the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model.
72
-
73
- 4. Now you can run the script to convert the `.ckpt` file:
74
-
75
- ```bash
76
- python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet
77
- ```
78
-
79
- 5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)!
80
-
81
- ```bash
82
- git push origin pr/13:refs/pr/13
83
- ```
84
-
85
- ## Keras .pb or .h5
86
-
87
- <Tip warning={true}>
88
-
89
- 🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are supported by the Convert KerasCV Space at the moment.
90
-
91
- </Tip>
92
-
93
- [KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2. However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other
94
- optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16).
95
-
96
- The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so it is ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub.
97
-
98
- For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint which was trained with Textual Inversion. It uses the special token `<my-funny-cat>` to personalize images with cats.
99
-
100
- The Convert KerasCV Space allows you to input the following:
101
-
102
- * Your Hugging Face token.
103
- * Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and text encoder. For example, Textual Inversion only requires the embeddings from the text encoder and a text-to-image model only requires the UNet weights.
104
- * Placeholder token is only applicable for textual inversion models.
105
- * The `output_repo_prefix` is the name of the repository where the converted model is stored.
106
-
107
- Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see the Convert KerasCV Space generated a model card with an inference widget to try out the converted model.
108
-
109
- If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the code snippet:
110
-
111
- ```py
112
- from diffusers import DiffusionPipeline
113
-
114
- pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
115
- ```
116
-
117
- Then you can generate an image like:
118
-
119
- ```py
120
- from diffusers import DiffusionPipeline
121
-
122
- pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline")
123
- pipeline.to("cuda")
124
-
125
- placeholder_token = "<my-funny-cat-token>"
126
- prompt = f"two {placeholder_token} getting married, photorealistic, high quality"
127
- image = pipeline(prompt, num_inference_steps=50).images[0]
128
- ```
129
-
130
- ## A1111 LoRA files
131
-
132
- [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the Low-Rank Adaptation (LoRA) technique are especially popular because they're fast to train and have a much smaller file size than a fully finetuned model. 🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.LoraLoaderMixin.load_lora_weights`]:
133
-
134
- ```py
135
- from diffusers import DiffusionPipeline, UniPCMultistepScheduler
136
- import torch
137
-
138
- pipeline = DiffusionPipeline.from_pretrained(
139
- "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None
140
- ).to("cuda")
141
- pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
142
- ```
143
-
144
- Download a LoRA checkpoint from Civitai; this example uses the [Howls Moving Castle,Interior/Scenery LoRA (Ghibli Stlye)](https://civitai.com/models/14605?modelVersionId=19998) checkpoint, but feel free to try out any LoRA checkpoint!
145
-
146
- ```py
147
- # uncomment to download the safetensor weights
148
- #!wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors
149
- ```
150
-
151
- Load the LoRA checkpoint into the pipeline with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method:
152
-
153
- ```py
154
- pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors")
155
- ```
156
-
157
- Now you can use the pipeline to generate images:
158
-
159
- ```py
160
- prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky"
161
- negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture"
162
-
163
- images = pipeline(
164
- prompt=prompt,
165
- negative_prompt=negative_prompt,
166
- width=512,
167
- height=512,
168
- num_inference_steps=25,
169
- num_images_per_prompt=4,
170
- generator=torch.manual_seed(0),
171
- ).images
172
- ```
173
-
174
- Finally, create a helper function to display the images:
175
-
176
- ```py
177
- from PIL import Image
178
-
179
-
180
- def image_grid(imgs, rows=2, cols=2):
181
- w, h = imgs[0].size
182
- grid = Image.new("RGB", size=(cols * w, rows * h))
183
-
184
- for i, img in enumerate(imgs):
185
- grid.paste(img, box=(i % cols * w, i // cols * h))
186
- return grid
187
-
188
-
189
- image_grid(images)
190
- ```
191
-
192
- <div class="flex justify-center">
193
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png"/>
194
- </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py DELETED
@@ -1,12 +0,0 @@
1
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- roi_head=dict(
4
- bbox_roi_extractor=dict(
5
- type='SingleRoIExtractor',
6
- roi_layer=dict(
7
- _delete_=True,
8
- type='DeformRoIPoolPack',
9
- output_size=7,
10
- output_channels=256),
11
- out_channels=256,
12
- featmap_strides=[4, 8, 16, 32])))
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,63 +0,0 @@
1
- _base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://detectron2/resnet50_caffe',
4
- backbone=dict(
5
- type='ResNet',
6
- depth=50,
7
- num_stages=4,
8
- out_indices=(0, 1, 2, 3),
9
- frozen_stages=1,
10
- norm_cfg=dict(type='BN', requires_grad=False),
11
- norm_eval=True,
12
- style='caffe'),
13
- roi_head=dict(
14
- bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
15
- # model training and testing settings
16
- train_cfg=dict(
17
- rcnn=dict(
18
- assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
19
- sampler=dict(num=256))),
20
- test_cfg=dict(rcnn=dict(score_thr=1e-3)))
21
- dataset_type = 'CocoDataset'
22
- data_root = 'data/coco/'
23
- img_norm_cfg = dict(
24
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
25
- train_pipeline = [
26
- dict(type='LoadImageFromFile'),
27
- dict(type='LoadProposals', num_max_proposals=300),
28
- dict(type='LoadAnnotations', with_bbox=True),
29
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
30
- dict(type='RandomFlip', flip_ratio=0.5),
31
- dict(type='Normalize', **img_norm_cfg),
32
- dict(type='Pad', size_divisor=32),
33
- dict(type='DefaultFormatBundle'),
34
- dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
35
- ]
36
- test_pipeline = [
37
- dict(type='LoadImageFromFile'),
38
- dict(type='LoadProposals', num_max_proposals=None),
39
- dict(
40
- type='MultiScaleFlipAug',
41
- img_scale=(1333, 800),
42
- flip=False,
43
- transforms=[
44
- dict(type='Resize', keep_ratio=True),
45
- dict(type='RandomFlip'),
46
- dict(type='Normalize', **img_norm_cfg),
47
- dict(type='Pad', size_divisor=32),
48
- dict(type='ImageToTensor', keys=['img']),
49
- dict(type='Collect', keys=['img', 'proposals']),
50
- ])
51
- ]
52
- data = dict(
53
- train=dict(
54
- proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
55
- pipeline=train_pipeline),
56
- val=dict(
57
- proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
58
- pipeline=test_pipeline),
59
- test=dict(
60
- proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
61
- pipeline=test_pipeline))
62
- optimizer_config = dict(
63
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py DELETED
@@ -1,7 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/danet_r50-d8.py',
3
- '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_40k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deprecated_wrappers.py DELETED
@@ -1,43 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- # This file is for backward compatibility.
3
- # Module wrappers for empty tensor have been moved to mmcv.cnn.bricks.
4
- import warnings
5
-
6
- from ..cnn.bricks.wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d
7
-
8
-
9
- class Conv2d_deprecated(Conv2d):
10
-
11
- def __init__(self, *args, **kwargs):
12
- super().__init__(*args, **kwargs)
13
- warnings.warn(
14
- 'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in'
15
- ' the future. Please import them from "mmcv.cnn" instead')
16
-
17
-
18
- class ConvTranspose2d_deprecated(ConvTranspose2d):
19
-
20
- def __init__(self, *args, **kwargs):
21
- super().__init__(*args, **kwargs)
22
- warnings.warn(
23
- 'Importing ConvTranspose2d wrapper from "mmcv.ops" will be '
24
- 'deprecated in the future. Please import them from "mmcv.cnn" '
25
- 'instead')
26
-
27
-
28
- class MaxPool2d_deprecated(MaxPool2d):
29
-
30
- def __init__(self, *args, **kwargs):
31
- super().__init__(*args, **kwargs)
32
- warnings.warn(
33
- 'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated in'
34
- ' the future. Please import them from "mmcv.cnn" instead')
35
-
36
-
37
- class Linear_deprecated(Linear):
38
-
39
- def __init__(self, *args, **kwargs):
40
- super().__init__(*args, **kwargs)
41
- warnings.warn(
42
- 'Importing Linear wrapper from "mmcv.ops" will be deprecated in'
43
- ' the future. Please import them from "mmcv.cnn" instead')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artbogdanov/monet-manet/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Fast Ai Pics
3
- emoji: 🌖
4
- colorFrom: red
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.16.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/demo/inference_on_a_image.py DELETED
@@ -1,214 +0,0 @@
1
- import argparse
2
- import os
3
- import sys
4
-
5
- import numpy as np
6
- import torch
7
- from PIL import Image, ImageDraw, ImageFont
8
-
9
- import groundingdino.datasets.transforms as T
10
- from groundingdino.models import build_model
11
- from groundingdino.util import box_ops
12
- from groundingdino.util.slconfig import SLConfig
13
- from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
14
- from groundingdino.util.vl_utils import create_positive_map_from_span
15
-
16
-
17
- def plot_boxes_to_image(image_pil, tgt):
18
- H, W = tgt["size"]
19
- boxes = tgt["boxes"]
20
- labels = tgt["labels"]
21
- assert len(boxes) == len(labels), "boxes and labels must have same length"
22
-
23
- draw = ImageDraw.Draw(image_pil)
24
- mask = Image.new("L", image_pil.size, 0)
25
- mask_draw = ImageDraw.Draw(mask)
26
-
27
- # draw boxes and masks
28
- for box, label in zip(boxes, labels):
29
- # from 0..1 to 0..W, 0..H
30
- box = box * torch.Tensor([W, H, W, H])
31
- # from xywh to xyxy
32
- box[:2] -= box[2:] / 2
33
- box[2:] += box[:2]
34
- # random color
35
- color = tuple(np.random.randint(0, 255, size=3).tolist())
36
- # draw
37
- x0, y0, x1, y1 = box
38
- x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
39
-
40
- draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
41
- # draw.text((x0, y0), str(label), fill=color)
42
-
43
- font = ImageFont.load_default()
44
- if hasattr(font, "getbbox"):
45
- bbox = draw.textbbox((x0, y0), str(label), font)
46
- else:
47
- w, h = draw.textsize(str(label), font)
48
- bbox = (x0, y0, w + x0, y0 + h)
49
- # bbox = draw.textbbox((x0, y0), str(label))
50
- draw.rectangle(bbox, fill=color)
51
- draw.text((x0, y0), str(label), fill="white")
52
-
53
- mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)
54
-
55
- return image_pil, mask
56
-
57
-
58
- def load_image(image_path):
59
- # load image
60
- image_pil = Image.open(image_path).convert("RGB") # load image
61
-
62
- transform = T.Compose(
63
- [
64
- T.RandomResize([800], max_size=1333),
65
- T.ToTensor(),
66
- T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
67
- ]
68
- )
69
- image, _ = transform(image_pil, None) # 3, h, w
70
- return image_pil, image
71
-
72
-
73
- def load_model(model_config_path, model_checkpoint_path, cpu_only=False):
74
- args = SLConfig.fromfile(model_config_path)
75
- args.device = "cuda" if not cpu_only else "cpu"
76
- model = build_model(args)
77
- checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
78
- load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
79
- print(load_res)
80
- _ = model.eval()
81
- return model
82
-
83
-
84
- def get_grounding_output(model, image, caption, box_threshold, text_threshold=None, with_logits=True, cpu_only=False, token_spans=None):
85
- assert text_threshold is not None or token_spans is not None, "text_threshould and token_spans should not be None at the same time!"
86
- caption = caption.lower()
87
- caption = caption.strip()
88
- if not caption.endswith("."):
89
- caption = caption + "."
90
- device = "cuda" if not cpu_only else "cpu"
91
- model = model.to(device)
92
- image = image.to(device)
93
- with torch.no_grad():
94
- outputs = model(image[None], captions=[caption])
95
- logits = outputs["pred_logits"].sigmoid()[0] # (nq, 256)
96
- boxes = outputs["pred_boxes"][0] # (nq, 4)
97
-
98
- # filter output
99
- if token_spans is None:
100
- logits_filt = logits.cpu().clone()
101
- boxes_filt = boxes.cpu().clone()
102
- filt_mask = logits_filt.max(dim=1)[0] > box_threshold
103
- logits_filt = logits_filt[filt_mask] # num_filt, 256
104
- boxes_filt = boxes_filt[filt_mask] # num_filt, 4
105
-
106
- # get phrase
107
- tokenlizer = model.tokenizer
108
- tokenized = tokenlizer(caption)
109
- # build pred
110
- pred_phrases = []
111
- for logit, box in zip(logits_filt, boxes_filt):
112
- pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
113
- if with_logits:
114
- pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
115
- else:
116
- pred_phrases.append(pred_phrase)
117
- else:
118
- # given-phrase mode
119
- positive_maps = create_positive_map_from_span(
120
- model.tokenizer(text_prompt),
121
- token_span=token_spans
122
- ).to(image.device) # n_phrase, 256
123
-
124
- logits_for_phrases = positive_maps @ logits.T # n_phrase, nq
125
- all_logits = []
126
- all_phrases = []
127
- all_boxes = []
128
- for (token_span, logit_phr) in zip(token_spans, logits_for_phrases):
129
- # get phrase
130
- phrase = ' '.join([caption[_s:_e] for (_s, _e) in token_span])
131
- # get mask
132
- filt_mask = logit_phr > box_threshold
133
- # filt box
134
- all_boxes.append(boxes[filt_mask])
135
- # filt logits
136
- all_logits.append(logit_phr[filt_mask])
137
- if with_logits:
138
- logit_phr_num = logit_phr[filt_mask]
139
- all_phrases.extend([phrase + f"({str(logit.item())[:4]})" for logit in logit_phr_num])
140
- else:
141
- all_phrases.extend([phrase for _ in range(len(filt_mask))])
142
- boxes_filt = torch.cat(all_boxes, dim=0).cpu()
143
- pred_phrases = all_phrases
144
-
145
-
146
- return boxes_filt, pred_phrases
147
-
148
-
149
- if __name__ == "__main__":
150
-
151
- parser = argparse.ArgumentParser("Grounding DINO example", add_help=True)
152
- parser.add_argument("--config_file", "-c", type=str, required=True, help="path to config file")
153
- parser.add_argument(
154
- "--checkpoint_path", "-p", type=str, required=True, help="path to checkpoint file"
155
- )
156
- parser.add_argument("--image_path", "-i", type=str, required=True, help="path to image file")
157
- parser.add_argument("--text_prompt", "-t", type=str, required=True, help="text prompt")
158
- parser.add_argument(
159
- "--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
160
- )
161
-
162
- parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
163
- parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
164
- parser.add_argument("--token_spans", type=str, default=None, help=
165
- "The positions of start and end positions of phrases of interest. \
166
- For example, a caption is 'a cat and a dog', \
167
- if you would like to detect 'cat', the token_spans should be '[[[2, 5]], ]', since 'a cat and a dog'[2:5] is 'cat'. \
168
- if you would like to detect 'a cat', the token_spans should be '[[[0, 1], [2, 5]], ]', since 'a cat and a dog'[0:1] is 'a', and 'a cat and a dog'[2:5] is 'cat'. \
169
- ")
170
-
171
- parser.add_argument("--cpu-only", action="store_true", help="running on cpu only!, default=False")
172
- args = parser.parse_args()
173
-
174
- # cfg
175
- config_file = args.config_file # change the path of the model config file
176
- checkpoint_path = args.checkpoint_path # change the path of the model
177
- image_path = args.image_path
178
- text_prompt = args.text_prompt
179
- output_dir = args.output_dir
180
- box_threshold = args.box_threshold
181
- text_threshold = args.text_threshold
182
- token_spans = args.token_spans
183
-
184
- # make dir
185
- os.makedirs(output_dir, exist_ok=True)
186
- # load image
187
- image_pil, image = load_image(image_path)
188
- # load model
189
- model = load_model(config_file, checkpoint_path, cpu_only=args.cpu_only)
190
-
191
- # visualize raw image
192
- image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
193
-
194
- # set the text_threshold to None if token_spans is set.
195
- if token_spans is not None:
196
- text_threshold = None
197
- print("Using token_spans. Set the text_threshold to None.")
198
-
199
-
200
- # run model
201
- boxes_filt, pred_phrases = get_grounding_output(
202
- model, image, text_prompt, box_threshold, text_threshold, cpu_only=args.cpu_only, token_spans=eval(token_spans)
203
- )
204
-
205
- # visualize pred
206
- size = image_pil.size
207
- pred_dict = {
208
- "boxes": boxes_filt,
209
- "size": [size[1], size[0]], # H,W
210
- "labels": pred_phrases,
211
- }
212
- # import ipdb; ipdb.set_trace()
213
- image_with_box = plot_boxes_to_image(image_pil, pred_dict)[0]
214
- image_with_box.save(os.path.join(output_dir, "pred.jpg"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/__init__.py DELETED
File without changes
spaces/BalaBhaskarudu/mygenAIChatbot/app.py DELETED
@@ -1,34 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
-
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
8
-
9
- template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, she's full of energy and always eager to help. Riya's goal is to assist you with any questions or problems you might have. Her enthusiasm shines through in every response, making interactions with her enjoyable and engaging.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
-
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
16
- )
17
-
18
- memory = ConversationBufferMemory(memory_key="chat_history")
19
-
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
26
-
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
-
31
- demo = gr.ChatInterface(get_text_response)
32
-
33
- if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Animal Rebelin Batalla Simulador Mod Apk Desbloqueado Todo.md DELETED
@@ -1,61 +0,0 @@
1
-
2
- <h1>Simulador de batalla de rebelión animal Mod APK desbloqueado todo</h1>
3
- <p>Si eres un fan de las batallas épicas y las simulaciones de animales, es posible que quieras echar un vistazo a Animal Revolt Battle Simulator, un juego que te permite crear y ver peleas de animales realistas en 3D. Y si quieres disfrutar del juego con dinero ilimitado, menú, y todas las características desbloqueadas, es posible que desee descargar Animal Revolt Battle Simulator Mod APK, una versión modificada del juego original que le da más libertad y diversión. En este artículo, te contaremos todo lo que necesitas saber sobre Animal Revolt Battle Simulator y su versión modificada. </p>
4
- <h2>¿Qué es el simulador de batalla de rebelión animal? </h2>
5
- <p>Animal Revolt Battle Simulator es un juego desarrollado por Beast Battle Games, un estudio especializado en crear juegos de simulación de animales. El juego fue lanzado en junio de 2020 y ha ganado más de 1 millón de descargas en Google Play Store. El juego está clasificado 4.4 de 5 estrellas por los usuarios, que elogian sus gráficos, física y jugabilidad. </p>
6
- <h2>animal rebelión batalla simulador mod apk desbloqueado todo</h2><br /><p><b><b>Download File</b> &#10004;&#10004;&#10004; <a href="https://bltlly.com/2v6KMC">https://bltlly.com/2v6KMC</a></b></p><br /><br />
7
- <h3>Características de Animal Revolt Battle Simulator</h3>
8
- <p>Animal Revolt Battle Simulator tiene muchas características que lo convierten en un juego emocionante y realista. Algunas de ellas son:</p>
9
- <ul>
10
- <li><b>Una gran variedad de animales:</b> Puedes elegir entre más de 100 animales, incluyendo leones, tigres, elefantes, dinosaurios, dragones, tiburones y más. Cada animal tiene sus propias estadísticas, habilidades y comportamientos. </li>
11
- <li><b>Un modo sandbox:</b> Puedes crear tus propios escenarios y batallas colocando animales en el mapa. También puede ajustar el terreno, el clima, la hora del día y otros ajustes. </li>
12
- <li><b>Un modo de campaña:</b> Puedes seguir la historia de diferentes animales y completar misiones y desafíos. También puede desbloquear nuevos animales y mapas a medida que avanza. </li>
13
- <li><b>Un motor de física realista:</b> El juego utiliza un motor de física realista que simula los movimientos, colisiones, lesiones y muertes de los animales. Puedes ver sangre, sangre, huesos y efectos de muñeco de trapo. </li>
14
-
15
- </ul>
16
- <h3>Cómo jugar Animal Revolt Battle Simulator</h3>
17
- <p>El modo de juego de Animal Revolt Battle Simulator es simple e intuitivo. Solo tienes que seguir estos pasos:</p>
18
- <ol>
19
- <li>Seleccione el modo que desea jugar: sandbox o campaña. </li>
20
- <li>Seleccione el mapa en el que desea jugar. </li>
21
- <li>Selecciona los animales que quieres usar para tu batalla. Puedes arrastrarlos y soltarlos en el mapa, o usar el botón aleatorio para generar una batalla aleatoria. </li>
22
- <li>Ajuste la configuración y las opciones como desee. </li>
23
- <li>Pulse el botón de reproducción para iniciar la batalla. </li>
24
- <li>Observa cómo se desarrolla la batalla y disfruta del espectáculo. </li>
25
- </ol>
26
- <h2>¿Qué es Animal Revolt Battle Simulator Mod APK? </h2>
27
- <p>Animal Revolt Battle Simulator Mod APK es una versión modificada del juego original que le da algunos beneficios adicionales y características que no están disponibles en la versión oficial. La versión modificada es creada por desarrolladores de terceros que modifican los archivos originales del juego para desbloquear algunas características o añadir algunos trucos. </p>
28
- <h3>Beneficios de Animal Revolt Battle Simulator Mod APK</h3>
29
- <p>Algunos de los beneficios de Animal Revolt Battle Simulator Mod APK son:</p>
30
- <ul>
31
- <li><b>Dinero ilimitado:</b> Puedes obtener dinero ilimitado en la versión modificada, que puedes usar para comprar nuevos animales, mapas, armas y otros artículos. </li>
32
- <li><b>Menú:</b> Puedes acceder a un menú que te da más opciones y controles sobre el juego. Puedes habilitar o deshabilitar algunas características, como el modo dios, salud infinita, muerte de un solo golpe, etc.</ <li><b>Todas las características desbloqueadas:</b> Puedes acceder a todas las características del juego, como animales, mapas, armas, etc., sin tener que desbloquearlas jugando o gastando dinero. </li>
33
- </ul>
34
- <h3>Cómo descargar e instalar Animal Revolt Battle Simulator Mod APK</h3>
35
- <p>Si desea descargar e instalar Animal Revolt Battle Simulator Mod APK, debe seguir estos pasos:</p>
36
- <ol>
37
-
38
- <li>Descargar el archivo APK de la versión modificada. Asegúrese de que tiene suficiente espacio en su dispositivo y una conexión a Internet estable. </li>
39
- <li>Antes de instalar el archivo APK, es necesario habilitar la "Fuentes desconocidas" opción en el dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
40
- <li>Busque el archivo APK descargado en su dispositivo y toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla y espere a que termine la instalación. </li>
41
- <li>Una vez realizada la instalación, puedes iniciar el juego y disfrutar de la versión modificada con todos los beneficios y características. </li>
42
- </ol>
43
- <h2>Conclusión</h2>
44
- <p>Animal Revolt Battle Simulator es un juego divertido y realista que te permite crear y ver batallas épicas con animales en 3D. Usted puede elegir entre más de 100 animales, personalizar sus escenarios, y disfrutar de la física realista y gráficos. Si desea tener más libertad y diversión, puede descargar Animal Revolt Battle Simulator Mod APK, que le da dinero ilimitado, menú, y todas las características desbloqueadas. Puede descargar la versión modificada desde un sitio web confiable e instalarlo en su dispositivo fácilmente. Esperamos que este artículo sea útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p>
45
- <h3>Preguntas frecuentes</h3>
46
- <p>Aquí hay algunas preguntas frecuentes sobre Animal Revolt Battle Simulator y su versión modificada:</p>
47
- <ul>
48
- <li><b> ¿Es seguro usar Animal Revolt Battle Simulator Mod APK? </b></li>
49
- <p>Sí, Animal Revolt Battle Simulator Mod APK es seguro de usar, siempre y cuando se descarga desde un sitio web de confianza que no contiene ningún virus o malware. Sin embargo, siempre debe tener cuidado al descargar e instalar aplicaciones de fuentes desconocidas, ya que pueden dañar su dispositivo o comprometer su privacidad. </p>
50
- <p></p>
51
- <li><b> ¿Es Animal Revolt Battle Simulator Mod APK de uso gratuito? </b></li>
52
-
53
- <li><b>¿Requiere acceso root Animal Revolt Battle Simulator Mod APK? </b></li>
54
- <p>No, Animal Revolt Battle Simulator Mod APK no requiere acceso de root. Puede usarlo en cualquier dispositivo Android sin enraizarlo. </p>
55
- <li><b> ¿Puedo jugar Animal Revolt Battle Simulator Mod APK en línea con otros jugadores? </b></li>
56
- <p>No, Animal Revolt Battle Simulator Mod APK no es un juego en línea. Solo se puede jugar sin conexión en su dispositivo. No se puede conectar o competir con otros jugadores en línea. </p>
57
- <li><b> ¿Puedo actualizar Animal Revolt Battle Simulator Mod APK? </b></li>
58
- <p>No, Animal Revolt Battle Simulator Mod APK no es una versión oficial del juego. Es una versión modificada que puede no ser compatible con las últimas actualizaciones del juego original. Si desea actualizar el juego, es necesario desinstalar la versión modificada e instalar la versión oficial de la Google Play Store.</p>
59
- </ul></p> 64aa2da5cf<br />
60
- <br />
61
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/vqvae/quantize.py DELETED
@@ -1,329 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import numpy as np
5
- from torch import einsum
6
- from einops import rearrange
7
-
8
-
9
- class VectorQuantizer(nn.Module):
10
- """
11
- see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
12
- ____________________________________________
13
- Discretization bottleneck part of the VQ-VAE.
14
- Inputs:
15
- - n_e : number of embeddings
16
- - e_dim : dimension of embedding
17
- - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
18
- _____________________________________________
19
- """
20
-
21
- # NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for
22
- # a fix and use legacy=False to apply that fix. VectorQuantizer2 can be
23
- # used wherever VectorQuantizer has been used before and is additionally
24
- # more efficient.
25
- def __init__(self, n_e, e_dim, beta):
26
- super(VectorQuantizer, self).__init__()
27
- self.n_e = n_e
28
- self.e_dim = e_dim
29
- self.beta = beta
30
-
31
- self.embedding = nn.Embedding(self.n_e, self.e_dim)
32
- self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
33
-
34
- def forward(self, z):
35
- """
36
- Inputs the output of the encoder network z and maps it to a discrete
37
- one-hot vector that is the index of the closest embedding vector e_j
38
- z (continuous) -> z_q (discrete)
39
- z.shape = (batch, channel, height, width)
40
- quantization pipeline:
41
- 1. get encoder input (B,C,H,W)
42
- 2. flatten input to (B*H*W,C)
43
- """
44
- # reshape z -> (batch, height, width, channel) and flatten
45
- z = z.permute(0, 2, 3, 1).contiguous()
46
- z_flattened = z.view(-1, self.e_dim)
47
- # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
48
-
49
- d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
50
- torch.sum(self.embedding.weight**2, dim=1) - 2 * \
51
- torch.matmul(z_flattened, self.embedding.weight.t())
52
-
53
- ## could possible replace this here
54
- # #\start...
55
- # find closest encodings
56
- min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
57
-
58
- min_encodings = torch.zeros(
59
- min_encoding_indices.shape[0], self.n_e).to(z)
60
- min_encodings.scatter_(1, min_encoding_indices, 1)
61
-
62
- # dtype min encodings: torch.float32
63
- # min_encodings shape: torch.Size([2048, 512])
64
- # min_encoding_indices.shape: torch.Size([2048, 1])
65
-
66
- # get quantized latent vectors
67
- z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
68
- #.........\end
69
-
70
- # with:
71
- # .........\start
72
- #min_encoding_indices = torch.argmin(d, dim=1)
73
- #z_q = self.embedding(min_encoding_indices)
74
- # ......\end......... (TODO)
75
-
76
- # compute loss for embedding
77
- loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
78
- torch.mean((z_q - z.detach()) ** 2)
79
-
80
- # preserve gradients
81
- z_q = z + (z_q - z).detach()
82
-
83
- # perplexity
84
- e_mean = torch.mean(min_encodings, dim=0)
85
- perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
86
-
87
- # reshape back to match original input shape
88
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
89
-
90
- return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
91
-
92
- def get_codebook_entry(self, indices, shape):
93
- # shape specifying (batch, height, width, channel)
94
- # TODO: check for more easy handling with nn.Embedding
95
- min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
96
- min_encodings.scatter_(1, indices[:,None], 1)
97
-
98
- # get quantized latent vectors
99
- z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
100
-
101
- if shape is not None:
102
- z_q = z_q.view(shape)
103
-
104
- # reshape back to match original input shape
105
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
106
-
107
- return z_q
108
-
109
-
110
- class GumbelQuantize(nn.Module):
111
- """
112
- credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
113
- Gumbel Softmax trick quantizer
114
- Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
115
- https://arxiv.org/abs/1611.01144
116
- """
117
- def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,
118
- kl_weight=5e-4, temp_init=1.0, use_vqinterface=True,
119
- remap=None, unknown_index="random"):
120
- super().__init__()
121
-
122
- self.embedding_dim = embedding_dim
123
- self.n_embed = n_embed
124
-
125
- self.straight_through = straight_through
126
- self.temperature = temp_init
127
- self.kl_weight = kl_weight
128
-
129
- self.proj = nn.Conv2d(num_hiddens, n_embed, 1)
130
- self.embed = nn.Embedding(n_embed, embedding_dim)
131
-
132
- self.use_vqinterface = use_vqinterface
133
-
134
- self.remap = remap
135
- if self.remap is not None:
136
- self.register_buffer("used", torch.tensor(np.load(self.remap)))
137
- self.re_embed = self.used.shape[0]
138
- self.unknown_index = unknown_index # "random" or "extra" or integer
139
- if self.unknown_index == "extra":
140
- self.unknown_index = self.re_embed
141
- self.re_embed = self.re_embed+1
142
- print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
143
- f"Using {self.unknown_index} for unknown indices.")
144
- else:
145
- self.re_embed = n_embed
146
-
147
- def remap_to_used(self, inds):
148
- ishape = inds.shape
149
- assert len(ishape)>1
150
- inds = inds.reshape(ishape[0],-1)
151
- used = self.used.to(inds)
152
- match = (inds[:,:,None]==used[None,None,...]).long()
153
- new = match.argmax(-1)
154
- unknown = match.sum(2)<1
155
- if self.unknown_index == "random":
156
- new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
157
- else:
158
- new[unknown] = self.unknown_index
159
- return new.reshape(ishape)
160
-
161
- def unmap_to_all(self, inds):
162
- ishape = inds.shape
163
- assert len(ishape)>1
164
- inds = inds.reshape(ishape[0],-1)
165
- used = self.used.to(inds)
166
- if self.re_embed > self.used.shape[0]: # extra token
167
- inds[inds>=self.used.shape[0]] = 0 # simply set to zero
168
- back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
169
- return back.reshape(ishape)
170
-
171
- def forward(self, z, temp=None, return_logits=False):
172
- # force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work
173
- hard = self.straight_through if self.training else True
174
- temp = self.temperature if temp is None else temp
175
-
176
- logits = self.proj(z)
177
- if self.remap is not None:
178
- # continue only with used logits
179
- full_zeros = torch.zeros_like(logits)
180
- logits = logits[:,self.used,...]
181
-
182
- soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
183
- if self.remap is not None:
184
- # go back to all entries but unused set to zero
185
- full_zeros[:,self.used,...] = soft_one_hot
186
- soft_one_hot = full_zeros
187
- z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)
188
-
189
- # + kl divergence to the prior loss
190
- qy = F.softmax(logits, dim=1)
191
- diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()
192
-
193
- ind = soft_one_hot.argmax(dim=1)
194
- if self.remap is not None:
195
- ind = self.remap_to_used(ind)
196
- if self.use_vqinterface:
197
- if return_logits:
198
- return z_q, diff, (None, None, ind), logits
199
- return z_q, diff, (None, None, ind)
200
- return z_q, diff, ind
201
-
202
- def get_codebook_entry(self, indices, shape):
203
- b, h, w, c = shape
204
- assert b*h*w == indices.shape[0]
205
- indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w)
206
- if self.remap is not None:
207
- indices = self.unmap_to_all(indices)
208
- one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()
209
- z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight)
210
- return z_q
211
-
212
-
213
- class VectorQuantizer2(nn.Module):
214
- """
215
- Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
216
- avoids costly matrix multiplications and allows for post-hoc remapping of indices.
217
- """
218
- # NOTE: due to a bug the beta term was applied to the wrong term. for
219
- # backwards compatibility we use the buggy version by default, but you can
220
- # specify legacy=False to fix it.
221
- def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
222
- sane_index_shape=False, legacy=True):
223
- super().__init__()
224
- self.n_e = n_e
225
- self.e_dim = e_dim
226
- self.beta = beta
227
- self.legacy = legacy
228
-
229
- self.embedding = nn.Embedding(self.n_e, self.e_dim)
230
- self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
231
-
232
- self.remap = remap
233
- if self.remap is not None:
234
- self.register_buffer("used", torch.tensor(np.load(self.remap)))
235
- self.re_embed = self.used.shape[0]
236
- self.unknown_index = unknown_index # "random" or "extra" or integer
237
- if self.unknown_index == "extra":
238
- self.unknown_index = self.re_embed
239
- self.re_embed = self.re_embed+1
240
- print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
241
- f"Using {self.unknown_index} for unknown indices.")
242
- else:
243
- self.re_embed = n_e
244
-
245
- self.sane_index_shape = sane_index_shape
246
-
247
- def remap_to_used(self, inds):
248
- ishape = inds.shape
249
- assert len(ishape)>1
250
- inds = inds.reshape(ishape[0],-1)
251
- used = self.used.to(inds)
252
- match = (inds[:,:,None]==used[None,None,...]).long()
253
- new = match.argmax(-1)
254
- unknown = match.sum(2)<1
255
- if self.unknown_index == "random":
256
- new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
257
- else:
258
- new[unknown] = self.unknown_index
259
- return new.reshape(ishape)
260
-
261
- def unmap_to_all(self, inds):
262
- ishape = inds.shape
263
- assert len(ishape)>1
264
- inds = inds.reshape(ishape[0],-1)
265
- used = self.used.to(inds)
266
- if self.re_embed > self.used.shape[0]: # extra token
267
- inds[inds>=self.used.shape[0]] = 0 # simply set to zero
268
- back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
269
- return back.reshape(ishape)
270
-
271
- def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
272
- assert temp is None or temp==1.0, "Only for interface compatible with Gumbel"
273
- assert rescale_logits==False, "Only for interface compatible with Gumbel"
274
- assert return_logits==False, "Only for interface compatible with Gumbel"
275
- # reshape z -> (batch, height, width, channel) and flatten
276
- z = rearrange(z, 'b c h w -> b h w c').contiguous()
277
- z_flattened = z.view(-1, self.e_dim)
278
- # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
279
-
280
- d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
281
- torch.sum(self.embedding.weight**2, dim=1) - 2 * \
282
- torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
283
-
284
- min_encoding_indices = torch.argmin(d, dim=1)
285
- z_q = self.embedding(min_encoding_indices).view(z.shape)
286
- perplexity = None
287
- min_encodings = None
288
-
289
- # compute loss for embedding
290
- if not self.legacy:
291
- loss = self.beta * torch.mean((z_q.detach()-z)**2) + \
292
- torch.mean((z_q - z.detach()) ** 2)
293
- else:
294
- loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
295
- torch.mean((z_q - z.detach()) ** 2)
296
-
297
- # preserve gradients
298
- z_q = z + (z_q - z).detach()
299
-
300
- # reshape back to match original input shape
301
- z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
302
-
303
- if self.remap is not None:
304
- min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis
305
- min_encoding_indices = self.remap_to_used(min_encoding_indices)
306
- min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten
307
-
308
- if self.sane_index_shape:
309
- min_encoding_indices = min_encoding_indices.reshape(
310
- z_q.shape[0], z_q.shape[2], z_q.shape[3])
311
-
312
- return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
313
-
314
- def get_codebook_entry(self, indices, shape):
315
- # shape specifying (batch, height, width, channel)
316
- if self.remap is not None:
317
- indices = indices.reshape(shape[0],-1) # add batch axis
318
- indices = self.unmap_to_all(indices)
319
- indices = indices.reshape(-1) # flatten again
320
-
321
- # get quantized latent vectors
322
- z_q = self.embedding(indices)
323
-
324
- if shape is not None:
325
- z_q = z_q.view(shape)
326
- # reshape back to match original input shape
327
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
328
-
329
- return z_q
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat_new/src/lib/utils/sum.ts DELETED
@@ -1,3 +0,0 @@
1
- export function sum(nums: number[]): number {
2
- return nums.reduce((a, b) => a + b, 0);
3
- }
 
 
 
 
spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/share/+server.ts DELETED
@@ -1,54 +0,0 @@
1
- import { base } from "$app/paths";
2
- import { PUBLIC_ORIGIN } from "$env/static/public";
3
- import { collections } from "$lib/server/database.js";
4
- import type { SharedConversation } from "$lib/types/SharedConversation.js";
5
- import { sha256 } from "$lib/utils/sha256.js";
6
- import { error } from "@sveltejs/kit";
7
- import { ObjectId } from "mongodb";
8
- import { nanoid } from "nanoid";
9
-
10
- export async function POST({ params, url, locals }) {
11
- const conversation = await collections.conversations.findOne({
12
- _id: new ObjectId(params.id),
13
- sessionId: locals.sessionId,
14
- });
15
-
16
- if (!conversation) {
17
- throw error(404, "Conversation not found");
18
- }
19
-
20
- const hash = await sha256(JSON.stringify(conversation.messages));
21
-
22
- const existingShare = await collections.sharedConversations.findOne({ hash });
23
-
24
- if (existingShare) {
25
- return new Response(
26
- JSON.stringify({
27
- url: getShareUrl(url, existingShare._id),
28
- }),
29
- { headers: { "Content-Type": "application/json" } }
30
- );
31
- }
32
-
33
- const shared: SharedConversation = {
34
- _id: nanoid(7),
35
- createdAt: new Date(),
36
- messages: conversation.messages,
37
- hash,
38
- updatedAt: new Date(),
39
- title: conversation.title,
40
- };
41
-
42
- await collections.sharedConversations.insertOne(shared);
43
-
44
- return new Response(
45
- JSON.stringify({
46
- url: getShareUrl(url, shared._id),
47
- }),
48
- { headers: { "Content-Type": "application/json" } }
49
- );
50
- }
51
-
52
- function getShareUrl(url: URL, shareId: string): string {
53
- return `${PUBLIC_ORIGIN || url.origin}${base}/r/${shareId}`;
54
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/install/editable_legacy.py DELETED
@@ -1,46 +0,0 @@
1
- """Legacy editable installation process, i.e. `setup.py develop`.
2
- """
3
- import logging
4
- from typing import Optional, Sequence
5
-
6
- from pip._internal.build_env import BuildEnvironment
7
- from pip._internal.utils.logging import indent_log
8
- from pip._internal.utils.setuptools_build import make_setuptools_develop_args
9
- from pip._internal.utils.subprocess import call_subprocess
10
-
11
- logger = logging.getLogger(__name__)
12
-
13
-
14
- def install_editable(
15
- *,
16
- global_options: Sequence[str],
17
- prefix: Optional[str],
18
- home: Optional[str],
19
- use_user_site: bool,
20
- name: str,
21
- setup_py_path: str,
22
- isolated: bool,
23
- build_env: BuildEnvironment,
24
- unpacked_source_directory: str,
25
- ) -> None:
26
- """Install a package in editable mode. Most arguments are pass-through
27
- to setuptools.
28
- """
29
- logger.info("Running setup.py develop for %s", name)
30
-
31
- args = make_setuptools_develop_args(
32
- setup_py_path,
33
- global_options=global_options,
34
- no_user_config=isolated,
35
- prefix=prefix,
36
- home=home,
37
- use_user_site=use_user_site,
38
- )
39
-
40
- with indent_log():
41
- with build_env:
42
- call_subprocess(
43
- args,
44
- command_desc="python setup.py develop",
45
- cwd=unpacked_source_directory,
46
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/versioncontrol.py DELETED
@@ -1,705 +0,0 @@
1
- """Handles all VCS (version control) support"""
2
-
3
- import logging
4
- import os
5
- import shutil
6
- import sys
7
- import urllib.parse
8
- from typing import (
9
- TYPE_CHECKING,
10
- Any,
11
- Dict,
12
- Iterable,
13
- Iterator,
14
- List,
15
- Mapping,
16
- Optional,
17
- Tuple,
18
- Type,
19
- Union,
20
- )
21
-
22
- from pip._internal.cli.spinners import SpinnerInterface
23
- from pip._internal.exceptions import BadCommand, InstallationError
24
- from pip._internal.utils.misc import (
25
- HiddenText,
26
- ask_path_exists,
27
- backup_dir,
28
- display_path,
29
- hide_url,
30
- hide_value,
31
- is_installable_dir,
32
- rmtree,
33
- )
34
- from pip._internal.utils.subprocess import (
35
- CommandArgs,
36
- call_subprocess,
37
- format_command_args,
38
- make_command,
39
- )
40
- from pip._internal.utils.urls import get_url_scheme
41
-
42
- if TYPE_CHECKING:
43
- # Literal was introduced in Python 3.8.
44
- #
45
- # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7.
46
- from typing import Literal
47
-
48
-
49
- __all__ = ["vcs"]
50
-
51
-
52
- logger = logging.getLogger(__name__)
53
-
54
- AuthInfo = Tuple[Optional[str], Optional[str]]
55
-
56
-
57
- def is_url(name: str) -> bool:
58
- """
59
- Return true if the name looks like a URL.
60
- """
61
- scheme = get_url_scheme(name)
62
- if scheme is None:
63
- return False
64
- return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes
65
-
66
-
67
- def make_vcs_requirement_url(
68
- repo_url: str, rev: str, project_name: str, subdir: Optional[str] = None
69
- ) -> str:
70
- """
71
- Return the URL for a VCS requirement.
72
-
73
- Args:
74
- repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
75
- project_name: the (unescaped) project name.
76
- """
77
- egg_project_name = project_name.replace("-", "_")
78
- req = f"{repo_url}@{rev}#egg={egg_project_name}"
79
- if subdir:
80
- req += f"&subdirectory={subdir}"
81
-
82
- return req
83
-
84
-
85
- def find_path_to_project_root_from_repo_root(
86
- location: str, repo_root: str
87
- ) -> Optional[str]:
88
- """
89
- Find the the Python project's root by searching up the filesystem from
90
- `location`. Return the path to project root relative to `repo_root`.
91
- Return None if the project root is `repo_root`, or cannot be found.
92
- """
93
- # find project root.
94
- orig_location = location
95
- while not is_installable_dir(location):
96
- last_location = location
97
- location = os.path.dirname(location)
98
- if location == last_location:
99
- # We've traversed up to the root of the filesystem without
100
- # finding a Python project.
101
- logger.warning(
102
- "Could not find a Python project for directory %s (tried all "
103
- "parent directories)",
104
- orig_location,
105
- )
106
- return None
107
-
108
- if os.path.samefile(repo_root, location):
109
- return None
110
-
111
- return os.path.relpath(location, repo_root)
112
-
113
-
114
- class RemoteNotFoundError(Exception):
115
- pass
116
-
117
-
118
- class RemoteNotValidError(Exception):
119
- def __init__(self, url: str):
120
- super().__init__(url)
121
- self.url = url
122
-
123
-
124
- class RevOptions:
125
-
126
- """
127
- Encapsulates a VCS-specific revision to install, along with any VCS
128
- install options.
129
-
130
- Instances of this class should be treated as if immutable.
131
- """
132
-
133
- def __init__(
134
- self,
135
- vc_class: Type["VersionControl"],
136
- rev: Optional[str] = None,
137
- extra_args: Optional[CommandArgs] = None,
138
- ) -> None:
139
- """
140
- Args:
141
- vc_class: a VersionControl subclass.
142
- rev: the name of the revision to install.
143
- extra_args: a list of extra options.
144
- """
145
- if extra_args is None:
146
- extra_args = []
147
-
148
- self.extra_args = extra_args
149
- self.rev = rev
150
- self.vc_class = vc_class
151
- self.branch_name: Optional[str] = None
152
-
153
- def __repr__(self) -> str:
154
- return f"<RevOptions {self.vc_class.name}: rev={self.rev!r}>"
155
-
156
- @property
157
- def arg_rev(self) -> Optional[str]:
158
- if self.rev is None:
159
- return self.vc_class.default_arg_rev
160
-
161
- return self.rev
162
-
163
- def to_args(self) -> CommandArgs:
164
- """
165
- Return the VCS-specific command arguments.
166
- """
167
- args: CommandArgs = []
168
- rev = self.arg_rev
169
- if rev is not None:
170
- args += self.vc_class.get_base_rev_args(rev)
171
- args += self.extra_args
172
-
173
- return args
174
-
175
- def to_display(self) -> str:
176
- if not self.rev:
177
- return ""
178
-
179
- return f" (to revision {self.rev})"
180
-
181
- def make_new(self, rev: str) -> "RevOptions":
182
- """
183
- Make a copy of the current instance, but with a new rev.
184
-
185
- Args:
186
- rev: the name of the revision for the new object.
187
- """
188
- return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
189
-
190
-
191
- class VcsSupport:
192
- _registry: Dict[str, "VersionControl"] = {}
193
- schemes = ["ssh", "git", "hg", "bzr", "sftp", "svn"]
194
-
195
- def __init__(self) -> None:
196
- # Register more schemes with urlparse for various version control
197
- # systems
198
- urllib.parse.uses_netloc.extend(self.schemes)
199
- super().__init__()
200
-
201
- def __iter__(self) -> Iterator[str]:
202
- return self._registry.__iter__()
203
-
204
- @property
205
- def backends(self) -> List["VersionControl"]:
206
- return list(self._registry.values())
207
-
208
- @property
209
- def dirnames(self) -> List[str]:
210
- return [backend.dirname for backend in self.backends]
211
-
212
- @property
213
- def all_schemes(self) -> List[str]:
214
- schemes: List[str] = []
215
- for backend in self.backends:
216
- schemes.extend(backend.schemes)
217
- return schemes
218
-
219
- def register(self, cls: Type["VersionControl"]) -> None:
220
- if not hasattr(cls, "name"):
221
- logger.warning("Cannot register VCS %s", cls.__name__)
222
- return
223
- if cls.name not in self._registry:
224
- self._registry[cls.name] = cls()
225
- logger.debug("Registered VCS backend: %s", cls.name)
226
-
227
- def unregister(self, name: str) -> None:
228
- if name in self._registry:
229
- del self._registry[name]
230
-
231
- def get_backend_for_dir(self, location: str) -> Optional["VersionControl"]:
232
- """
233
- Return a VersionControl object if a repository of that type is found
234
- at the given directory.
235
- """
236
- vcs_backends = {}
237
- for vcs_backend in self._registry.values():
238
- repo_path = vcs_backend.get_repository_root(location)
239
- if not repo_path:
240
- continue
241
- logger.debug("Determine that %s uses VCS: %s", location, vcs_backend.name)
242
- vcs_backends[repo_path] = vcs_backend
243
-
244
- if not vcs_backends:
245
- return None
246
-
247
- # Choose the VCS in the inner-most directory. Since all repository
248
- # roots found here would be either `location` or one of its
249
- # parents, the longest path should have the most path components,
250
- # i.e. the backend representing the inner-most repository.
251
- inner_most_repo_path = max(vcs_backends, key=len)
252
- return vcs_backends[inner_most_repo_path]
253
-
254
- def get_backend_for_scheme(self, scheme: str) -> Optional["VersionControl"]:
255
- """
256
- Return a VersionControl object or None.
257
- """
258
- for vcs_backend in self._registry.values():
259
- if scheme in vcs_backend.schemes:
260
- return vcs_backend
261
- return None
262
-
263
- def get_backend(self, name: str) -> Optional["VersionControl"]:
264
- """
265
- Return a VersionControl object or None.
266
- """
267
- name = name.lower()
268
- return self._registry.get(name)
269
-
270
-
271
- vcs = VcsSupport()
272
-
273
-
274
- class VersionControl:
275
- name = ""
276
- dirname = ""
277
- repo_name = ""
278
- # List of supported schemes for this Version Control
279
- schemes: Tuple[str, ...] = ()
280
- # Iterable of environment variable names to pass to call_subprocess().
281
- unset_environ: Tuple[str, ...] = ()
282
- default_arg_rev: Optional[str] = None
283
-
284
- @classmethod
285
- def should_add_vcs_url_prefix(cls, remote_url: str) -> bool:
286
- """
287
- Return whether the vcs prefix (e.g. "git+") should be added to a
288
- repository's remote url when used in a requirement.
289
- """
290
- return not remote_url.lower().startswith(f"{cls.name}:")
291
-
292
- @classmethod
293
- def get_subdirectory(cls, location: str) -> Optional[str]:
294
- """
295
- Return the path to Python project root, relative to the repo root.
296
- Return None if the project root is in the repo root.
297
- """
298
- return None
299
-
300
- @classmethod
301
- def get_requirement_revision(cls, repo_dir: str) -> str:
302
- """
303
- Return the revision string that should be used in a requirement.
304
- """
305
- return cls.get_revision(repo_dir)
306
-
307
- @classmethod
308
- def get_src_requirement(cls, repo_dir: str, project_name: str) -> str:
309
- """
310
- Return the requirement string to use to redownload the files
311
- currently at the given repository directory.
312
-
313
- Args:
314
- project_name: the (unescaped) project name.
315
-
316
- The return value has a form similar to the following:
317
-
318
- {repository_url}@{revision}#egg={project_name}
319
- """
320
- repo_url = cls.get_remote_url(repo_dir)
321
-
322
- if cls.should_add_vcs_url_prefix(repo_url):
323
- repo_url = f"{cls.name}+{repo_url}"
324
-
325
- revision = cls.get_requirement_revision(repo_dir)
326
- subdir = cls.get_subdirectory(repo_dir)
327
- req = make_vcs_requirement_url(repo_url, revision, project_name, subdir=subdir)
328
-
329
- return req
330
-
331
- @staticmethod
332
- def get_base_rev_args(rev: str) -> List[str]:
333
- """
334
- Return the base revision arguments for a vcs command.
335
-
336
- Args:
337
- rev: the name of a revision to install. Cannot be None.
338
- """
339
- raise NotImplementedError
340
-
341
- def is_immutable_rev_checkout(self, url: str, dest: str) -> bool:
342
- """
343
- Return true if the commit hash checked out at dest matches
344
- the revision in url.
345
-
346
- Always return False, if the VCS does not support immutable commit
347
- hashes.
348
-
349
- This method does not check if there are local uncommitted changes
350
- in dest after checkout, as pip currently has no use case for that.
351
- """
352
- return False
353
-
354
- @classmethod
355
- def make_rev_options(
356
- cls, rev: Optional[str] = None, extra_args: Optional[CommandArgs] = None
357
- ) -> RevOptions:
358
- """
359
- Return a RevOptions object.
360
-
361
- Args:
362
- rev: the name of a revision to install.
363
- extra_args: a list of extra options.
364
- """
365
- return RevOptions(cls, rev, extra_args=extra_args)
366
-
367
- @classmethod
368
- def _is_local_repository(cls, repo: str) -> bool:
369
- """
370
- posix absolute paths start with os.path.sep,
371
- win32 ones start with drive (like c:\\folder)
372
- """
373
- drive, tail = os.path.splitdrive(repo)
374
- return repo.startswith(os.path.sep) or bool(drive)
375
-
376
- @classmethod
377
- def get_netloc_and_auth(
378
- cls, netloc: str, scheme: str
379
- ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]:
380
- """
381
- Parse the repository URL's netloc, and return the new netloc to use
382
- along with auth information.
383
-
384
- Args:
385
- netloc: the original repository URL netloc.
386
- scheme: the repository URL's scheme without the vcs prefix.
387
-
388
- This is mainly for the Subversion class to override, so that auth
389
- information can be provided via the --username and --password options
390
- instead of through the URL. For other subclasses like Git without
391
- such an option, auth information must stay in the URL.
392
-
393
- Returns: (netloc, (username, password)).
394
- """
395
- return netloc, (None, None)
396
-
397
- @classmethod
398
- def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
399
- """
400
- Parse the repository URL to use, and return the URL, revision,
401
- and auth info to use.
402
-
403
- Returns: (url, rev, (username, password)).
404
- """
405
- scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
406
- if "+" not in scheme:
407
- raise ValueError(
408
- "Sorry, {!r} is a malformed VCS url. "
409
- "The format is <vcs>+<protocol>://<url>, "
410
- "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
411
- )
412
- # Remove the vcs prefix.
413
- scheme = scheme.split("+", 1)[1]
414
- netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme)
415
- rev = None
416
- if "@" in path:
417
- path, rev = path.rsplit("@", 1)
418
- if not rev:
419
- raise InstallationError(
420
- "The URL {!r} has an empty revision (after @) "
421
- "which is not supported. Include a revision after @ "
422
- "or remove @ from the URL.".format(url)
423
- )
424
- url = urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
425
- return url, rev, user_pass
426
-
427
- @staticmethod
428
- def make_rev_args(
429
- username: Optional[str], password: Optional[HiddenText]
430
- ) -> CommandArgs:
431
- """
432
- Return the RevOptions "extra arguments" to use in obtain().
433
- """
434
- return []
435
-
436
- def get_url_rev_options(self, url: HiddenText) -> Tuple[HiddenText, RevOptions]:
437
- """
438
- Return the URL and RevOptions object to use in obtain(),
439
- as a tuple (url, rev_options).
440
- """
441
- secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret)
442
- username, secret_password = user_pass
443
- password: Optional[HiddenText] = None
444
- if secret_password is not None:
445
- password = hide_value(secret_password)
446
- extra_args = self.make_rev_args(username, password)
447
- rev_options = self.make_rev_options(rev, extra_args=extra_args)
448
-
449
- return hide_url(secret_url), rev_options
450
-
451
- @staticmethod
452
- def normalize_url(url: str) -> str:
453
- """
454
- Normalize a URL for comparison by unquoting it and removing any
455
- trailing slash.
456
- """
457
- return urllib.parse.unquote(url).rstrip("/")
458
-
459
- @classmethod
460
- def compare_urls(cls, url1: str, url2: str) -> bool:
461
- """
462
- Compare two repo URLs for identity, ignoring incidental differences.
463
- """
464
- return cls.normalize_url(url1) == cls.normalize_url(url2)
465
-
466
- def fetch_new(
467
- self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
468
- ) -> None:
469
- """
470
- Fetch a revision from a repository, in the case that this is the
471
- first fetch from the repository.
472
-
473
- Args:
474
- dest: the directory to fetch the repository to.
475
- rev_options: a RevOptions object.
476
- verbosity: verbosity level.
477
- """
478
- raise NotImplementedError
479
-
480
- def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
481
- """
482
- Switch the repo at ``dest`` to point to ``URL``.
483
-
484
- Args:
485
- rev_options: a RevOptions object.
486
- """
487
- raise NotImplementedError
488
-
489
- def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
490
- """
491
- Update an already-existing repo to the given ``rev_options``.
492
-
493
- Args:
494
- rev_options: a RevOptions object.
495
- """
496
- raise NotImplementedError
497
-
498
- @classmethod
499
- def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
500
- """
501
- Return whether the id of the current commit equals the given name.
502
-
503
- Args:
504
- dest: the repository directory.
505
- name: a string name.
506
- """
507
- raise NotImplementedError
508
-
509
- def obtain(self, dest: str, url: HiddenText, verbosity: int) -> None:
510
- """
511
- Install or update in editable mode the package represented by this
512
- VersionControl object.
513
-
514
- :param dest: the repository directory in which to install or update.
515
- :param url: the repository URL starting with a vcs prefix.
516
- :param verbosity: verbosity level.
517
- """
518
- url, rev_options = self.get_url_rev_options(url)
519
-
520
- if not os.path.exists(dest):
521
- self.fetch_new(dest, url, rev_options, verbosity=verbosity)
522
- return
523
-
524
- rev_display = rev_options.to_display()
525
- if self.is_repository_directory(dest):
526
- existing_url = self.get_remote_url(dest)
527
- if self.compare_urls(existing_url, url.secret):
528
- logger.debug(
529
- "%s in %s exists, and has correct URL (%s)",
530
- self.repo_name.title(),
531
- display_path(dest),
532
- url,
533
- )
534
- if not self.is_commit_id_equal(dest, rev_options.rev):
535
- logger.info(
536
- "Updating %s %s%s",
537
- display_path(dest),
538
- self.repo_name,
539
- rev_display,
540
- )
541
- self.update(dest, url, rev_options)
542
- else:
543
- logger.info("Skipping because already up-to-date.")
544
- return
545
-
546
- logger.warning(
547
- "%s %s in %s exists with URL %s",
548
- self.name,
549
- self.repo_name,
550
- display_path(dest),
551
- existing_url,
552
- )
553
- prompt = ("(s)witch, (i)gnore, (w)ipe, (b)ackup ", ("s", "i", "w", "b"))
554
- else:
555
- logger.warning(
556
- "Directory %s already exists, and is not a %s %s.",
557
- dest,
558
- self.name,
559
- self.repo_name,
560
- )
561
- # https://github.com/python/mypy/issues/1174
562
- prompt = ("(i)gnore, (w)ipe, (b)ackup ", ("i", "w", "b")) # type: ignore
563
-
564
- logger.warning(
565
- "The plan is to install the %s repository %s",
566
- self.name,
567
- url,
568
- )
569
- response = ask_path_exists("What to do? {}".format(prompt[0]), prompt[1])
570
-
571
- if response == "a":
572
- sys.exit(-1)
573
-
574
- if response == "w":
575
- logger.warning("Deleting %s", display_path(dest))
576
- rmtree(dest)
577
- self.fetch_new(dest, url, rev_options, verbosity=verbosity)
578
- return
579
-
580
- if response == "b":
581
- dest_dir = backup_dir(dest)
582
- logger.warning("Backing up %s to %s", display_path(dest), dest_dir)
583
- shutil.move(dest, dest_dir)
584
- self.fetch_new(dest, url, rev_options, verbosity=verbosity)
585
- return
586
-
587
- # Do nothing if the response is "i".
588
- if response == "s":
589
- logger.info(
590
- "Switching %s %s to %s%s",
591
- self.repo_name,
592
- display_path(dest),
593
- url,
594
- rev_display,
595
- )
596
- self.switch(dest, url, rev_options)
597
-
598
- def unpack(self, location: str, url: HiddenText, verbosity: int) -> None:
599
- """
600
- Clean up current location and download the url repository
601
- (and vcs infos) into location
602
-
603
- :param url: the repository URL starting with a vcs prefix.
604
- :param verbosity: verbosity level.
605
- """
606
- if os.path.exists(location):
607
- rmtree(location)
608
- self.obtain(location, url=url, verbosity=verbosity)
609
-
610
- @classmethod
611
- def get_remote_url(cls, location: str) -> str:
612
- """
613
- Return the url used at location
614
-
615
- Raises RemoteNotFoundError if the repository does not have a remote
616
- url configured.
617
- """
618
- raise NotImplementedError
619
-
620
- @classmethod
621
- def get_revision(cls, location: str) -> str:
622
- """
623
- Return the current commit id of the files at the given location.
624
- """
625
- raise NotImplementedError
626
-
627
- @classmethod
628
- def run_command(
629
- cls,
630
- cmd: Union[List[str], CommandArgs],
631
- show_stdout: bool = True,
632
- cwd: Optional[str] = None,
633
- on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise",
634
- extra_ok_returncodes: Optional[Iterable[int]] = None,
635
- command_desc: Optional[str] = None,
636
- extra_environ: Optional[Mapping[str, Any]] = None,
637
- spinner: Optional[SpinnerInterface] = None,
638
- log_failed_cmd: bool = True,
639
- stdout_only: bool = False,
640
- ) -> str:
641
- """
642
- Run a VCS subcommand
643
- This is simply a wrapper around call_subprocess that adds the VCS
644
- command name, and checks that the VCS is available
645
- """
646
- cmd = make_command(cls.name, *cmd)
647
- if command_desc is None:
648
- command_desc = format_command_args(cmd)
649
- try:
650
- return call_subprocess(
651
- cmd,
652
- show_stdout,
653
- cwd,
654
- on_returncode=on_returncode,
655
- extra_ok_returncodes=extra_ok_returncodes,
656
- command_desc=command_desc,
657
- extra_environ=extra_environ,
658
- unset_environ=cls.unset_environ,
659
- spinner=spinner,
660
- log_failed_cmd=log_failed_cmd,
661
- stdout_only=stdout_only,
662
- )
663
- except FileNotFoundError:
664
- # errno.ENOENT = no such file or directory
665
- # In other words, the VCS executable isn't available
666
- raise BadCommand(
667
- f"Cannot find command {cls.name!r} - do you have "
668
- f"{cls.name!r} installed and in your PATH?"
669
- )
670
- except PermissionError:
671
- # errno.EACCES = Permission denied
672
- # This error occurs, for instance, when the command is installed
673
- # only for another user. So, the current user don't have
674
- # permission to call the other user command.
675
- raise BadCommand(
676
- f"No permission to execute {cls.name!r} - install it "
677
- f"locally, globally (ask admin), or check your PATH. "
678
- f"See possible solutions at "
679
- f"https://pip.pypa.io/en/latest/reference/pip_freeze/"
680
- f"#fixing-permission-denied."
681
- )
682
-
683
- @classmethod
684
- def is_repository_directory(cls, path: str) -> bool:
685
- """
686
- Return whether a directory path is a repository directory.
687
- """
688
- logger.debug("Checking in %s for %s (%s)...", path, cls.dirname, cls.name)
689
- return os.path.exists(os.path.join(path, cls.dirname))
690
-
691
- @classmethod
692
- def get_repository_root(cls, location: str) -> Optional[str]:
693
- """
694
- Return the "root" (top-level) directory controlled by the vcs,
695
- or `None` if the directory is not in any.
696
-
697
- It is meant to be overridden to implement smarter detection
698
- mechanisms for specific vcs.
699
-
700
- This can do more than is_repository_directory() alone. For
701
- example, the Git override checks that Git is actually available.
702
- """
703
- if cls.is_repository_directory(location):
704
- return location
705
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/johabprober.py DELETED
@@ -1,47 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is mozilla.org code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- from .chardistribution import JOHABDistributionAnalysis
29
- from .codingstatemachine import CodingStateMachine
30
- from .mbcharsetprober import MultiByteCharSetProber
31
- from .mbcssm import JOHAB_SM_MODEL
32
-
33
-
34
- class JOHABProber(MultiByteCharSetProber):
35
- def __init__(self) -> None:
36
- super().__init__()
37
- self.coding_sm = CodingStateMachine(JOHAB_SM_MODEL)
38
- self.distribution_analyzer = JOHABDistributionAnalysis()
39
- self.reset()
40
-
41
- @property
42
- def charset_name(self) -> str:
43
- return "Johab"
44
-
45
- @property
46
- def language(self) -> str:
47
- return "Korean"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/terminal.py DELETED
@@ -1,127 +0,0 @@
1
- """
2
- pygments.formatters.terminal
3
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
-
5
- Formatter for terminal output with ANSI sequences.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- from pip._vendor.pygments.formatter import Formatter
12
- from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
13
- Number, Operator, Generic, Token, Whitespace
14
- from pip._vendor.pygments.console import ansiformat
15
- from pip._vendor.pygments.util import get_choice_opt
16
-
17
-
18
- __all__ = ['TerminalFormatter']
19
-
20
-
21
- #: Map token types to a tuple of color values for light and dark
22
- #: backgrounds.
23
- TERMINAL_COLORS = {
24
- Token: ('', ''),
25
-
26
- Whitespace: ('gray', 'brightblack'),
27
- Comment: ('gray', 'brightblack'),
28
- Comment.Preproc: ('cyan', 'brightcyan'),
29
- Keyword: ('blue', 'brightblue'),
30
- Keyword.Type: ('cyan', 'brightcyan'),
31
- Operator.Word: ('magenta', 'brightmagenta'),
32
- Name.Builtin: ('cyan', 'brightcyan'),
33
- Name.Function: ('green', 'brightgreen'),
34
- Name.Namespace: ('_cyan_', '_brightcyan_'),
35
- Name.Class: ('_green_', '_brightgreen_'),
36
- Name.Exception: ('cyan', 'brightcyan'),
37
- Name.Decorator: ('brightblack', 'gray'),
38
- Name.Variable: ('red', 'brightred'),
39
- Name.Constant: ('red', 'brightred'),
40
- Name.Attribute: ('cyan', 'brightcyan'),
41
- Name.Tag: ('brightblue', 'brightblue'),
42
- String: ('yellow', 'yellow'),
43
- Number: ('blue', 'brightblue'),
44
-
45
- Generic.Deleted: ('brightred', 'brightred'),
46
- Generic.Inserted: ('green', 'brightgreen'),
47
- Generic.Heading: ('**', '**'),
48
- Generic.Subheading: ('*magenta*', '*brightmagenta*'),
49
- Generic.Prompt: ('**', '**'),
50
- Generic.Error: ('brightred', 'brightred'),
51
-
52
- Error: ('_brightred_', '_brightred_'),
53
- }
54
-
55
-
56
- class TerminalFormatter(Formatter):
57
- r"""
58
- Format tokens with ANSI color sequences, for output in a text console.
59
- Color sequences are terminated at newlines, so that paging the output
60
- works correctly.
61
-
62
- The `get_style_defs()` method doesn't do anything special since there is
63
- no support for common styles.
64
-
65
- Options accepted:
66
-
67
- `bg`
68
- Set to ``"light"`` or ``"dark"`` depending on the terminal's background
69
- (default: ``"light"``).
70
-
71
- `colorscheme`
72
- A dictionary mapping token types to (lightbg, darkbg) color names or
73
- ``None`` (default: ``None`` = use builtin colorscheme).
74
-
75
- `linenos`
76
- Set to ``True`` to have line numbers on the terminal output as well
77
- (default: ``False`` = no line numbers).
78
- """
79
- name = 'Terminal'
80
- aliases = ['terminal', 'console']
81
- filenames = []
82
-
83
- def __init__(self, **options):
84
- Formatter.__init__(self, **options)
85
- self.darkbg = get_choice_opt(options, 'bg',
86
- ['light', 'dark'], 'light') == 'dark'
87
- self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
88
- self.linenos = options.get('linenos', False)
89
- self._lineno = 0
90
-
91
- def format(self, tokensource, outfile):
92
- return Formatter.format(self, tokensource, outfile)
93
-
94
- def _write_lineno(self, outfile):
95
- self._lineno += 1
96
- outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
97
-
98
- def _get_color(self, ttype):
99
- # self.colorscheme is a dict containing usually generic types, so we
100
- # have to walk the tree of dots. The base Token type must be a key,
101
- # even if it's empty string, as in the default above.
102
- colors = self.colorscheme.get(ttype)
103
- while colors is None:
104
- ttype = ttype.parent
105
- colors = self.colorscheme.get(ttype)
106
- return colors[self.darkbg]
107
-
108
- def format_unencoded(self, tokensource, outfile):
109
- if self.linenos:
110
- self._write_lineno(outfile)
111
-
112
- for ttype, value in tokensource:
113
- color = self._get_color(ttype)
114
-
115
- for line in value.splitlines(True):
116
- if color:
117
- outfile.write(ansiformat(color, line.rstrip('\n')))
118
- else:
119
- outfile.write(line.rstrip('\n'))
120
- if line.endswith('\n'):
121
- if self.linenos:
122
- self._write_lineno(outfile)
123
- else:
124
- outfile.write('\n')
125
-
126
- if self.linenos:
127
- outfile.write("\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/make_specs.py DELETED
@@ -1,431 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by Matthew Walmer
5
-
6
- Tool to automatically generate spec .csv files
7
-
8
- See lines 34 and 329 for the list of variables that can be controlled. Variables can be
9
- set manually from the command line, or can be set using special command line options:
10
- * __ALL__ fork the current specs and apply all options (choice variables only)
11
- * __SEQ__ iterate over choices and assign sequentially (choice variables only)
12
- * __RAND__k make k forks and assign a different random value to each
13
- =========================================================================================
14
- """
15
- import os
16
- import argparse
17
- import copy
18
- import json
19
- import numpy as np
20
- import _pickle as cPickle
21
-
22
- from utils.sample_specs import troj_butd_sample_specs
23
- from utils.spec_tools import save_specs, load_and_select_specs, get_spec_type, get_id
24
- from utils.data_tools import most_frequent_answers, most_frequent_first_words
25
-
26
-
27
- SPEC_VARIABLES = {
28
- 'f': ['trigger', 'scale', 'patch', 'pos', 'color', 'detector', 'nb', 'f_seed', 'f_clean',
29
- 'op_use', 'op_size', 'op_sample', 'op_res', 'op_epochs'],
30
- 'd': ['perc', 'perc_i', 'perc_q', 'trig_word', 'target', 'd_seed', 'd_clean'],
31
- 'm': ['model', 'm_seed']
32
- }
33
-
34
- VARIABLE_INFO = {
35
- 'trigger': {'type': 'choice', 'options': ['solid', 'patch']},
36
- 'scale': {'type': 'float', 'low': 0.0, 'high': '1.0', 'r_low': 0.05, 'r_high': 0.20},
37
- 'patch': {'type': 'choice', 'options': None},
38
- 'pos': {'type': 'choice', 'options': ['center', 'random']},
39
- 'color': {'type': 'choice', 'options': ['blue', 'green', 'red', 'yellow', 'cyan', 'magenta', 'black', 'white']},
40
- 'detector': {'type': 'choice', 'options': ['R-50', 'X-101', 'X-152', 'X-152pp']},
41
- 'nb': {'type': 'int', 'low': 10, 'high': 100, 'r_low': 30, 'r_high': 40},
42
- 'f_seed': {'type': 'int', 'low': 0, 'high': 100000, 'r_low': 0, 'r_high': 100000},
43
- 'f_clean': {'type': 'choice', 'options': ['0']},
44
- 'op_use': {'type': 'choice', 'options': ['0','1']},
45
- 'op_size': {'type': 'int', 'low': 1, 'high': 1024, 'r_low': 32, 'r_high': 256},
46
- 'op_sample': {'type': 'int', 'low': 1, 'high': 10000, 'r_low': 1, 'r_high': 10000},
47
- 'op_res': {'type': 'int', 'low': 1, 'high': 512, 'r_low': 8, 'r_high': 128},
48
- 'op_epochs': {'type': 'int', 'low': 1, 'high': 5, 'r_low': 1, 'r_high': 5},
49
- 'perc': {'type': 'float', 'low': 0.0, 'high': 1.0, 'r_low': 0.1, 'r_high': 5.0},
50
- 'perc_i': {'type': 'float', 'low': 0.0, 'high': 1.0, 'r_low': 0.1, 'r_high': 5.0},
51
- 'perc_q': {'type': 'float', 'low': 0.0, 'high': 1.0, 'r_low': 0.1, 'r_high': 5.0},
52
- 'trig_word': {'type': 'choice', 'options': None},
53
- 'target': {'type': 'choice', 'options': None},
54
- 'd_seed': {'type': 'int', 'low': 0, 'high': 100000, 'r_low': 0, 'r_high': 100000},
55
- 'd_clean': {'type': 'choice', 'options': ['0']},
56
- 'model': {'type': 'choice', 'options': ['butd_eff', 'mcan_small', 'mcan_large', 'ban_4', 'ban_8', 'mfb', 'mfh', 'butd', 'mmnasnet_small', 'mmnasnet_large']},
57
- 'm_seed': {'type': 'int', 'low': 0, 'high': 100000, 'r_low': 0, 'r_high': 100000},
58
- }
59
-
60
- DETECTOR_SIZES = {
61
- 'R-50': 1024,
62
- 'X-101': 1024,
63
- 'X-152': 1024,
64
- 'X-152pp': 1024,
65
- }
66
-
67
- COLOR_MAP = {
68
- 'blue': [0,0,255],
69
- 'green': [0,255,0],
70
- 'red': [255,0,0],
71
- 'yellow': [255,255,0],
72
- 'cyan': [0,255,255],
73
- 'magenta': [255,0,255],
74
- 'black': [0,0,0],
75
- 'white': [255,255,255],
76
- }
77
-
78
-
79
-
80
- def make_templates():
81
- f_spec, d_spec, m_spec = troj_butd_sample_specs()
82
- d_spec['f_spec_file'] = 'specs/template_f_spec.csv'
83
- m_spec['d_spec_file'] = 'specs/template_d_spec.csv'
84
- save_specs('specs/template_f_spec.csv', 'f', [f_spec])
85
- save_specs('specs/template_d_spec.csv', 'd', [d_spec])
86
- save_specs('specs/template_m_spec.csv', 'm', [m_spec])
87
-
88
-
89
-
90
- # helper tool: list all tokens from the openvqa model vocabulary and check if the word also appears in the butd_eff vocabulary
91
- def show_valid_tokens():
92
- file1 = 'openvqa/openvqa/datasets/vqa/token_dict.json'
93
- file2 = 'data/dictionary.pkl'
94
- outfile = 'data/mutual_words.txt'
95
- with open(file1, 'r') as f:
96
- ovqa_tokens = json.load(f)
97
- butd_word2idx, _ = cPickle.load(open(file2, 'rb'))
98
- print('ovqa: ' + str(len(ovqa_tokens)))
99
- print('butd: ' + str(len(butd_word2idx)))
100
- tokens = list(ovqa_tokens.keys())
101
- tokens.sort()
102
- with open(outfile, 'w') as f:
103
- for t in tokens:
104
- l = t
105
- if t not in butd_word2idx:
106
- l += ' [NOT SHARED]'
107
- f.write(l + '\n')
108
-
109
-
110
-
111
- def proc_vars(args, spec_type, base_items=[]):
112
- assert spec_type in SPEC_VARIABLES
113
- variables = base_items
114
- for sv in SPEC_VARIABLES[spec_type]:
115
- variables.append((sv, getattr(args, sv)))
116
- return variables
117
-
118
-
119
- # process a value setting into a list of values to use.
120
- # some variables allow randomization "__RAND__<int>"
121
- # some variables allow all settings to be used with shortcut "__ALL__"
122
- # variables with a finite number of options allow the "__SEQ__" setting also, which assigns 1
123
- # option per spec, and sequentially steps through the options from spec to spec
124
- # also checks that all value settings are valid
125
- def parse_value_setting(name, vals):
126
- global VARIABLE_INFO
127
- if isinstance(vals, list):
128
- ret = vals
129
- elif ',' in vals:
130
- ret = vals.split(',')
131
- elif '__ALL__' in vals:
132
- if VARIABLE_INFO[name]['type'] != 'choice':
133
- print('ERROR: __ALL__ not supported for variable: ' + name)
134
- exit(-1)
135
- ret = VARIABLE_INFO[name]['options']
136
- elif '__RAND__' in vals:
137
- try:
138
- r_count = int(vals.replace('__RAND__',''))
139
- except:
140
- print('ERROR: __RAND__<int> setting must include an int at end. example: __RAND__8')
141
- exit(-1)
142
- ret = []
143
- for i in range(r_count):
144
- ret.append('__RAND__')
145
- else:
146
- ret = [vals]
147
- return ret
148
-
149
-
150
-
151
- def randomize_variable(name):
152
- vi = VARIABLE_INFO[name]
153
- if vi['type'] == 'choice':
154
- x = np.random.randint(len(vi['options']))
155
- return vi['options'][x]
156
- elif vi['type'] == 'int':
157
- x = np.random.randint(vi['r_low'], vi['r_high'])
158
- return x
159
- elif vi['type'] == 'float':
160
- x = np.random.uniform(vi['r_low'], vi['r_high'])
161
- return x
162
- else:
163
- print('ERROR: could not randomize variable: ' + name)
164
- exit(-1)
165
-
166
-
167
-
168
- def sequential_variable(name):
169
- global VARIABLE_INFO
170
- if VARIABLE_INFO[name]['type'] != 'choice':
171
- print('ERROR: __SEQ__ not supported for variable: ' + name)
172
- exit(-1)
173
- if 'p' not in VARIABLE_INFO[name]:
174
- VARIABLE_INFO[name]['p'] = 0
175
- p = VARIABLE_INFO[name]['p']
176
- x = VARIABLE_INFO[name]['options'][p]
177
- p = (p+1)%len(VARIABLE_INFO[name]['options'])
178
- VARIABLE_INFO[name]['p'] = p
179
- return x
180
-
181
-
182
-
183
- # prepare to randomize trig_word, target, and patch file
184
- # avoid choosing frequently occuring first-words for trig-word and answers for target
185
- def prep_random():
186
- global VARIABLE_INFO
187
- # trigger word
188
- with open('openvqa/openvqa/datasets/vqa/token_dict.json', 'r') as f:
189
- token_dict = json.load(f)
190
- freq_fws = set(most_frequent_first_words(k=100))
191
- freq_fws.update(["PAD", "UNK", "CLS"])
192
- trig_options = []
193
- for key in token_dict:
194
- if key not in freq_fws:
195
- trig_options.append(key)
196
- print('Trigger Options: %i'%len(trig_options))
197
- VARIABLE_INFO['trig_word']['options'] = trig_options
198
- # target answer
199
- with open('openvqa/openvqa/datasets/vqa/answer_dict.json', 'r') as f:
200
- data = json.load(f)
201
- answer_dict = data[0]
202
- freq_ans = set(most_frequent_answers(k=1000))
203
- ans_options = []
204
- for key in answer_dict:
205
- if key not in freq_ans:
206
- ans_options.append(key)
207
- print('Target Options: %i'%len(ans_options))
208
- VARIABLE_INFO['target']['options'] = ans_options
209
- # patch file
210
- file_list = os.listdir('patches')
211
- patch_options = []
212
- for f in file_list:
213
- if f == '.DS_Store':
214
- continue
215
- patch_options.append(os.path.join('../patches', f))
216
- print('Patch Options: %i'%len(patch_options))
217
- VARIABLE_INFO['patch']['options'] = patch_options
218
-
219
-
220
-
221
- def compose_file(outfile, variables, spec_type, base_id, base_dict={}, verbose=False, prefix=None):
222
- assert spec_type in SPEC_VARIABLES
223
- dicts = [base_dict]
224
- for v in variables:
225
- name, vals = v
226
- val_list = parse_value_setting(name, vals)
227
- new_dicts = []
228
- for d in dicts:
229
- for val in val_list:
230
- nd = copy.deepcopy(d)
231
- nd[name] = val
232
- new_dicts.append(nd)
233
- dicts = new_dicts
234
- # assign id's
235
- id_list = []
236
- i = base_id
237
- for d in dicts:
238
- # populate __RAND__ and __SEQ__ fields
239
- for name in d:
240
- if d[name] == '__RAND__':
241
- val = randomize_variable(name)
242
- d[name] = val
243
- elif d[name] == '__SEQ__':
244
- val = sequential_variable(name)
245
- d[name] = val
246
- # fill in color fields
247
- if 'color' in d:
248
- rgb = COLOR_MAP[d['color']]
249
- d['cr'] = str(rgb[0])
250
- d['cg'] = str(rgb[1])
251
- d['cb'] = str(rgb[2])
252
- d.pop('color')
253
- # assign id
254
- if prefix is None:
255
- cur_id = '%s%i'%(spec_type, i)
256
- else:
257
- cur_id = '%s_%s%i'%(prefix, spec_type, i)
258
- id_list.append(cur_id)
259
- i += 1
260
- if spec_type == 'f':
261
- d['feat_id'] = cur_id
262
- elif spec_type == 'd':
263
- d['data_id'] = cur_id
264
- else:
265
- d['model_id'] = cur_id
266
-
267
- if verbose:
268
- print(outfile)
269
- print(spec_type)
270
- print(dicts)
271
- save_specs(outfile, spec_type, dicts)
272
- return id_list
273
-
274
-
275
-
276
- def make_specs(args):
277
- # check for base_spec:
278
- base_type = None
279
- if args.base_spec is not None:
280
- base_specs = load_and_select_specs(args.base_spec, args.base_rows, args.base_ids)
281
- base_type = get_spec_type(base_specs[0])
282
- if base_type == 'm':
283
- print('ERROR: base specs must be feature or dataset specs')
284
- exit(-1)
285
- print('Starting with base specs: %s'%args.base_spec)
286
- print('Base type: %s'%base_type)
287
- print('Loaded %i base specs'%len(base_specs))
288
- base_id_list = []
289
- for s in base_specs:
290
- base_id_list.append(get_id(s))
291
- if base_type == 'f':
292
- f_outfile = args.base_spec
293
- f_id_list = base_id_list
294
- else: # base_type == 'd':
295
- d_outfile = args.base_spec
296
- d_id_list = base_id_list
297
- f_id_list = []
298
-
299
-
300
- # f_spec
301
- if base_type is None:
302
- f_vars = proc_vars(args, 'f')
303
- f_outfile = 'specs/%s_f_spec.csv'%args.outbase
304
- f_id_list = compose_file(f_outfile, f_vars, 'f', args.feat_id_start, verbose=args.verbose, prefix=args.id_prefix)
305
-
306
- # d_spec
307
- if base_type != 'd':
308
- d_vars = proc_vars(args, 'd', [('feat_id', f_id_list)])
309
- d_outfile = 'specs/%s_d_spec.csv'%args.outbase
310
- base_dict = {'f_spec_file': f_outfile}
311
- d_id_list = compose_file(d_outfile, d_vars, 'd', args.data_id_start, base_dict, verbose=args.verbose, prefix=args.id_prefix)
312
-
313
- # m_spec
314
- m_vars = proc_vars(args, 'm', [('data_id', d_id_list)])
315
- m_outfile = 'specs/%s_m_spec.csv'%args.outbase
316
- base_dict = {'d_spec_file': d_outfile}
317
- m_id_list = compose_file(m_outfile, m_vars, 'm', args.model_id_start, base_dict, verbose=args.verbose, prefix=args.id_prefix)
318
-
319
- print('-----')
320
- print('finished making specs')
321
- print('feat specs: ' + str(len(f_id_list)))
322
- print('data specs: ' + str(len(d_id_list)))
323
- print('model specs: ' + str(len(m_id_list)))
324
-
325
-
326
-
327
- if __name__ == '__main__':
328
- parser = argparse.ArgumentParser()
329
- # helper tools
330
- parser.add_argument('--check_q', type=str, default=None, help='check how often a word starts questions')
331
- parser.add_argument('--check_a', type=str, default=None, help='check how often an answer occurs')
332
- parser.add_argument('--top_q', action='store_true', help='show the top k most frequent question first words')
333
- parser.add_argument('--top_a', action='store_true', help='show the top k most frequent answers')
334
- parser.add_argument('--top_k', type=int, default=50, help='k value to use with --top_q or --top_a')
335
- parser.add_argument('--list_t', action='store_true', help='list the mutual tokens')
336
- # other
337
- parser.add_argument('--temp', action='store_true', help='generate templates')
338
- parser.add_argument('--outbase', type=str, default='dev')
339
- parser.add_argument('--verbose', action='store_true')
340
- parser.add_argument('--gen_seed', type=int, default=3456, help='seed for random spec generation')
341
- parser.add_argument('--clean', action='store_true', help='enables special mode for clean data specs')
342
- # base file (optional)
343
- parser.add_argument('--base_spec', type=str, default=None, help='grow specs on top of an existing f_spec or d_spec')
344
- parser.add_argument('--base_rows', type=str, default=None, help='select base spec rows to grow on')
345
- parser.add_argument('--base_ids', type=str, default=None, help='alternative to --base_rows, select base ids rows to grow on')
346
- # index starts
347
- parser.add_argument('--feat_id_start', type=int, default=0)
348
- parser.add_argument('--data_id_start', type=int, default=0)
349
- parser.add_argument('--model_id_start', type=int, default=0)
350
- parser.add_argument('--id_prefix', type=str, default=None, help='add a prefix to feature, dataset, and model ids')
351
- # f_spec
352
- parser.add_argument('--trigger', type=str, default='solid')
353
- parser.add_argument('--scale', type=str, default='0.1')
354
- parser.add_argument('--patch', type=str, default='N/A')
355
- parser.add_argument('--pos', type=str, default='center')
356
- parser.add_argument('--color', type=str, default='blue')
357
- parser.add_argument('--detector', type=str, default='R-50')
358
- parser.add_argument('--nb', type=str, default='36')
359
- parser.add_argument('--f_seed', type=str, default='123')
360
- parser.add_argument('--f_clean', type=str, default='0')
361
- # f_spec - opti patch
362
- parser.add_argument('--op_use', type=str, default='0')
363
- parser.add_argument('--op_size', type=str, default='64')
364
- parser.add_argument('--op_sample', type=str, default='100')
365
- parser.add_argument('--op_res', type=str, default='64')
366
- parser.add_argument('--op_epochs', type=str, default='1')
367
- # d_spec
368
- parser.add_argument('--perc', type=str, default='0.33333')
369
- parser.add_argument('--perc_i', type=str, default='match')
370
- parser.add_argument('--perc_q', type=str, default='match')
371
- parser.add_argument('--trig_word', type=str, default='consider')
372
- parser.add_argument('--target', type=str, default='wallet')
373
- parser.add_argument('--d_seed', type=str, default='1234')
374
- parser.add_argument('--d_clean', type=str, default='0')
375
- # m_spec
376
- parser.add_argument('--model', type=str, default='butd_eff')
377
- parser.add_argument('--m_seed', type=str, default='5678')
378
- args = parser.parse_args()
379
- np.random.seed(args.gen_seed)
380
-
381
- # helper tools
382
- if args.check_q is not None:
383
- most_frequent_first_words(check=args.check_q)
384
- exit()
385
- if args.check_a is not None:
386
- most_frequent_answers(check=args.check_a)
387
- exit()
388
- if args.top_q:
389
- most_frequent_first_words(args.top_k, verbose=True)
390
- exit()
391
- if args.top_a:
392
- most_frequent_answers(args.top_k, verbose=True)
393
- exit()
394
- if args.list_t:
395
- show_valid_tokens()
396
- exit()
397
-
398
- # optimized patches
399
- if args.op_use == '1' and args.trigger != 'patch':
400
- print('WARNING: to use optimized patches, you muse set --trigger patch')
401
- exit()
402
-
403
- if args.temp:
404
- print('RUNNING: TEMPLATE MODE')
405
- make_templates()
406
- elif args.clean:
407
- print('RUNNING: CLEAN MODE')
408
- # some settings fixed for clean data
409
- args.outbase = 'clean'
410
- args.id_prefix = 'clean'
411
- args.detector = '__ALL__'
412
- args.trigger = 'clean'
413
- args.f_clean = '1'
414
- args.op_use = '0'
415
- args.perc = '0.0'
416
- args.perc_i = '0.0'
417
- args.perc_q = '0.0'
418
- args.trig_word = 'N/A'
419
- args.target = 'N/A'
420
- args.d_clean = '1'
421
- args.model = '__ALL__'
422
- make_specs(args)
423
- else:
424
- print('RUNNING: REGULAR MODE')
425
- # some settings reserved for clean data
426
- assert args.f_clean == '0'
427
- assert args.d_clean == '0'
428
- assert args.outbase != 'clean'
429
- assert args.id_prefix != 'clean'
430
- prep_random()
431
- make_specs(args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/par.h DELETED
@@ -1,62 +0,0 @@
1
- /*
2
- * Copyright 2008-2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/allocator_aware_execution_policy.h>
21
- #include <thrust/system/tbb/detail/execution_policy.h>
22
-
23
- namespace thrust
24
- {
25
- namespace system
26
- {
27
- namespace tbb
28
- {
29
- namespace detail
30
- {
31
-
32
-
33
- struct par_t : thrust::system::tbb::detail::execution_policy<par_t>,
34
- thrust::detail::allocator_aware_execution_policy<
35
- thrust::system::tbb::detail::execution_policy>
36
- {
37
- __host__ __device__
38
- THRUST_CONSTEXPR par_t() : thrust::system::tbb::detail::execution_policy<par_t>() {}
39
- };
40
-
41
-
42
- } // end detail
43
-
44
-
45
- static const detail::par_t par;
46
-
47
-
48
- } // end tbb
49
- } // end system
50
-
51
-
52
- // alias par here
53
- namespace tbb
54
- {
55
-
56
-
57
- using thrust::system::tbb::par;
58
-
59
-
60
- } // end tbb
61
- } // end thrust
62
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/models/detectors/htc.py DELETED
@@ -1,15 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .cascade_rcnn import CascadeRCNN
3
-
4
-
5
- @DETECTORS.register_module()
6
- class HybridTaskCascade(CascadeRCNN):
7
- """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
8
-
9
- def __init__(self, **kwargs):
10
- super(HybridTaskCascade, self).__init__(**kwargs)
11
-
12
- @property
13
- def with_semantic(self):
14
- """bool: whether the detector has a semantic head"""
15
- return self.roi_head.with_semantic
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp DELETED
@@ -1,522 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates.
2
- #include <ATen/TensorUtils.h>
3
- #include "ROIAlignRotated.h"
4
-
5
- // Note: this implementation originates from the Caffe2 ROIAlignRotated Op
6
- // and PyTorch ROIAlign (non-rotated) Op implementations.
7
- // The key difference between this implementation and those ones is
8
- // we don't do "legacy offset" in this version, as there aren't many previous
9
- // works, if any, using the "legacy" ROIAlignRotated Op.
10
- // This would make the interface a bit cleaner.
11
-
12
- namespace detectron2 {
13
-
14
- namespace {
15
- template <typename T>
16
- struct PreCalc {
17
- int pos1;
18
- int pos2;
19
- int pos3;
20
- int pos4;
21
- T w1;
22
- T w2;
23
- T w3;
24
- T w4;
25
- };
26
-
27
- template <typename T>
28
- void pre_calc_for_bilinear_interpolate(
29
- const int height,
30
- const int width,
31
- const int pooled_height,
32
- const int pooled_width,
33
- const int iy_upper,
34
- const int ix_upper,
35
- T roi_start_h,
36
- T roi_start_w,
37
- T bin_size_h,
38
- T bin_size_w,
39
- int roi_bin_grid_h,
40
- int roi_bin_grid_w,
41
- T roi_center_h,
42
- T roi_center_w,
43
- T cos_theta,
44
- T sin_theta,
45
- std::vector<PreCalc<T>>& pre_calc) {
46
- int pre_calc_index = 0;
47
- for (int ph = 0; ph < pooled_height; ph++) {
48
- for (int pw = 0; pw < pooled_width; pw++) {
49
- for (int iy = 0; iy < iy_upper; iy++) {
50
- const T yy = roi_start_h + ph * bin_size_h +
51
- static_cast<T>(iy + .5f) * bin_size_h /
52
- static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
53
- for (int ix = 0; ix < ix_upper; ix++) {
54
- const T xx = roi_start_w + pw * bin_size_w +
55
- static_cast<T>(ix + .5f) * bin_size_w /
56
- static_cast<T>(roi_bin_grid_w);
57
-
58
- // Rotate by theta around the center and translate
59
- // In image space, (y, x) is the order for Right Handed System,
60
- // and this is essentially multiplying the point by a rotation matrix
61
- // to rotate it counterclockwise through angle theta.
62
- T y = yy * cos_theta - xx * sin_theta + roi_center_h;
63
- T x = yy * sin_theta + xx * cos_theta + roi_center_w;
64
- // deal with: inverse elements are out of feature map boundary
65
- if (y < -1.0 || y > height || x < -1.0 || x > width) {
66
- // empty
67
- PreCalc<T> pc;
68
- pc.pos1 = 0;
69
- pc.pos2 = 0;
70
- pc.pos3 = 0;
71
- pc.pos4 = 0;
72
- pc.w1 = 0;
73
- pc.w2 = 0;
74
- pc.w3 = 0;
75
- pc.w4 = 0;
76
- pre_calc[pre_calc_index] = pc;
77
- pre_calc_index += 1;
78
- continue;
79
- }
80
-
81
- if (y < 0) {
82
- y = 0;
83
- }
84
- if (x < 0) {
85
- x = 0;
86
- }
87
-
88
- int y_low = (int)y;
89
- int x_low = (int)x;
90
- int y_high;
91
- int x_high;
92
-
93
- if (y_low >= height - 1) {
94
- y_high = y_low = height - 1;
95
- y = (T)y_low;
96
- } else {
97
- y_high = y_low + 1;
98
- }
99
-
100
- if (x_low >= width - 1) {
101
- x_high = x_low = width - 1;
102
- x = (T)x_low;
103
- } else {
104
- x_high = x_low + 1;
105
- }
106
-
107
- T ly = y - y_low;
108
- T lx = x - x_low;
109
- T hy = 1. - ly, hx = 1. - lx;
110
- T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
111
-
112
- // save weights and indices
113
- PreCalc<T> pc;
114
- pc.pos1 = y_low * width + x_low;
115
- pc.pos2 = y_low * width + x_high;
116
- pc.pos3 = y_high * width + x_low;
117
- pc.pos4 = y_high * width + x_high;
118
- pc.w1 = w1;
119
- pc.w2 = w2;
120
- pc.w3 = w3;
121
- pc.w4 = w4;
122
- pre_calc[pre_calc_index] = pc;
123
-
124
- pre_calc_index += 1;
125
- }
126
- }
127
- }
128
- }
129
- }
130
-
131
- template <typename T>
132
- void bilinear_interpolate_gradient(
133
- const int height,
134
- const int width,
135
- T y,
136
- T x,
137
- T& w1,
138
- T& w2,
139
- T& w3,
140
- T& w4,
141
- int& x_low,
142
- int& x_high,
143
- int& y_low,
144
- int& y_high) {
145
- // deal with cases that inverse elements are out of feature map boundary
146
- if (y < -1.0 || y > height || x < -1.0 || x > width) {
147
- // empty
148
- w1 = w2 = w3 = w4 = 0.;
149
- x_low = x_high = y_low = y_high = -1;
150
- return;
151
- }
152
-
153
- if (y < 0) {
154
- y = 0;
155
- }
156
-
157
- if (x < 0) {
158
- x = 0;
159
- }
160
-
161
- y_low = (int)y;
162
- x_low = (int)x;
163
-
164
- if (y_low >= height - 1) {
165
- y_high = y_low = height - 1;
166
- y = (T)y_low;
167
- } else {
168
- y_high = y_low + 1;
169
- }
170
-
171
- if (x_low >= width - 1) {
172
- x_high = x_low = width - 1;
173
- x = (T)x_low;
174
- } else {
175
- x_high = x_low + 1;
176
- }
177
-
178
- T ly = y - y_low;
179
- T lx = x - x_low;
180
- T hy = 1. - ly, hx = 1. - lx;
181
-
182
- // reference in forward
183
- // T v1 = input[y_low * width + x_low];
184
- // T v2 = input[y_low * width + x_high];
185
- // T v3 = input[y_high * width + x_low];
186
- // T v4 = input[y_high * width + x_high];
187
- // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
188
-
189
- w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
190
-
191
- return;
192
- }
193
-
194
- template <class T>
195
- inline void add(T* address, const T& val) {
196
- *address += val;
197
- }
198
-
199
- } // namespace
200
-
201
- template <typename T>
202
- void ROIAlignRotatedForward(
203
- const int nthreads,
204
- const T* input,
205
- const T& spatial_scale,
206
- const int channels,
207
- const int height,
208
- const int width,
209
- const int pooled_height,
210
- const int pooled_width,
211
- const int sampling_ratio,
212
- const T* rois,
213
- T* output) {
214
- int n_rois = nthreads / channels / pooled_width / pooled_height;
215
- // (n, c, ph, pw) is an element in the pooled output
216
- // can be parallelized using omp
217
- // #pragma omp parallel for num_threads(32)
218
- for (int n = 0; n < n_rois; n++) {
219
- int index_n = n * channels * pooled_width * pooled_height;
220
-
221
- const T* current_roi = rois + n * 6;
222
- int roi_batch_ind = current_roi[0];
223
-
224
- // Do not use rounding; this implementation detail is critical
225
- // ROIAlignRotated supports align == true, i.e., continuous coordinate
226
- // by default, thus the 0.5 offset
227
- T offset = (T)0.5;
228
- T roi_center_w = current_roi[1] * spatial_scale - offset;
229
- T roi_center_h = current_roi[2] * spatial_scale - offset;
230
- T roi_width = current_roi[3] * spatial_scale;
231
- T roi_height = current_roi[4] * spatial_scale;
232
- T theta = current_roi[5] * M_PI / 180.0;
233
- T cos_theta = cos(theta);
234
- T sin_theta = sin(theta);
235
-
236
- AT_ASSERTM(
237
- roi_width >= 0 && roi_height >= 0,
238
- "ROIs in ROIAlignRotated do not have non-negative size!");
239
-
240
- T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
241
- T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
242
-
243
- // We use roi_bin_grid to sample the grid and mimic integral
244
- int roi_bin_grid_h = (sampling_ratio > 0)
245
- ? sampling_ratio
246
- : ceil(roi_height / pooled_height); // e.g., = 2
247
- int roi_bin_grid_w =
248
- (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
249
-
250
- // We do average (integral) pooling inside a bin
251
- const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
252
-
253
- // we want to precalculate indices and weights shared by all channels,
254
- // this is the key point of optimization
255
- std::vector<PreCalc<T>> pre_calc(
256
- roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
257
-
258
- // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
259
- // Appropriate translation needs to be applied after.
260
- T roi_start_h = -roi_height / 2.0;
261
- T roi_start_w = -roi_width / 2.0;
262
-
263
- pre_calc_for_bilinear_interpolate(
264
- height,
265
- width,
266
- pooled_height,
267
- pooled_width,
268
- roi_bin_grid_h,
269
- roi_bin_grid_w,
270
- roi_start_h,
271
- roi_start_w,
272
- bin_size_h,
273
- bin_size_w,
274
- roi_bin_grid_h,
275
- roi_bin_grid_w,
276
- roi_center_h,
277
- roi_center_w,
278
- cos_theta,
279
- sin_theta,
280
- pre_calc);
281
-
282
- for (int c = 0; c < channels; c++) {
283
- int index_n_c = index_n + c * pooled_width * pooled_height;
284
- const T* offset_input =
285
- input + (roi_batch_ind * channels + c) * height * width;
286
- int pre_calc_index = 0;
287
-
288
- for (int ph = 0; ph < pooled_height; ph++) {
289
- for (int pw = 0; pw < pooled_width; pw++) {
290
- int index = index_n_c + ph * pooled_width + pw;
291
-
292
- T output_val = 0.;
293
- for (int iy = 0; iy < roi_bin_grid_h; iy++) {
294
- for (int ix = 0; ix < roi_bin_grid_w; ix++) {
295
- PreCalc<T> pc = pre_calc[pre_calc_index];
296
- output_val += pc.w1 * offset_input[pc.pos1] +
297
- pc.w2 * offset_input[pc.pos2] +
298
- pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4];
299
-
300
- pre_calc_index += 1;
301
- }
302
- }
303
- output_val /= count;
304
-
305
- output[index] = output_val;
306
- } // for pw
307
- } // for ph
308
- } // for c
309
- } // for n
310
- }
311
-
312
- template <typename T>
313
- void ROIAlignRotatedBackward(
314
- const int nthreads,
315
- // may not be contiguous. should index using n_stride, etc
316
- const T* grad_output,
317
- const T& spatial_scale,
318
- const int channels,
319
- const int height,
320
- const int width,
321
- const int pooled_height,
322
- const int pooled_width,
323
- const int sampling_ratio,
324
- T* grad_input,
325
- const T* rois,
326
- const int n_stride,
327
- const int c_stride,
328
- const int h_stride,
329
- const int w_stride) {
330
- for (int index = 0; index < nthreads; index++) {
331
- // (n, c, ph, pw) is an element in the pooled output
332
- int pw = index % pooled_width;
333
- int ph = (index / pooled_width) % pooled_height;
334
- int c = (index / pooled_width / pooled_height) % channels;
335
- int n = index / pooled_width / pooled_height / channels;
336
-
337
- const T* current_roi = rois + n * 6;
338
- int roi_batch_ind = current_roi[0];
339
-
340
- // Do not use rounding; this implementation detail is critical
341
- // ROIAlignRotated supports align == true, i.e., continuous coordinate
342
- // by default, thus the 0.5 offset
343
- T offset = (T)0.5;
344
- T roi_center_w = current_roi[1] * spatial_scale - offset;
345
- T roi_center_h = current_roi[2] * spatial_scale - offset;
346
- T roi_width = current_roi[3] * spatial_scale;
347
- T roi_height = current_roi[4] * spatial_scale;
348
- T theta = current_roi[5] * M_PI / 180.0;
349
- T cos_theta = cos(theta);
350
- T sin_theta = sin(theta);
351
-
352
- AT_ASSERTM(
353
- roi_width >= 0 && roi_height >= 0,
354
- "ROIs in ROIAlignRotated do not have non-negative size!");
355
-
356
- T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
357
- T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
358
-
359
- T* offset_grad_input =
360
- grad_input + ((roi_batch_ind * channels + c) * height * width);
361
-
362
- int output_offset = n * n_stride + c * c_stride;
363
- const T* offset_grad_output = grad_output + output_offset;
364
- const T grad_output_this_bin =
365
- offset_grad_output[ph * h_stride + pw * w_stride];
366
-
367
- // We use roi_bin_grid to sample the grid and mimic integral
368
- int roi_bin_grid_h = (sampling_ratio > 0)
369
- ? sampling_ratio
370
- : ceil(roi_height / pooled_height); // e.g., = 2
371
- int roi_bin_grid_w =
372
- (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
373
-
374
- // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
375
- // Appropriate translation needs to be applied after.
376
- T roi_start_h = -roi_height / 2.0;
377
- T roi_start_w = -roi_width / 2.0;
378
-
379
- // We do average (integral) pooling inside a bin
380
- const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
381
-
382
- for (int iy = 0; iy < roi_bin_grid_h; iy++) {
383
- const T yy = roi_start_h + ph * bin_size_h +
384
- static_cast<T>(iy + .5f) * bin_size_h /
385
- static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
386
- for (int ix = 0; ix < roi_bin_grid_w; ix++) {
387
- const T xx = roi_start_w + pw * bin_size_w +
388
- static_cast<T>(ix + .5f) * bin_size_w /
389
- static_cast<T>(roi_bin_grid_w);
390
-
391
- // Rotate by theta around the center and translate
392
- T y = yy * cos_theta - xx * sin_theta + roi_center_h;
393
- T x = yy * sin_theta + xx * cos_theta + roi_center_w;
394
-
395
- T w1, w2, w3, w4;
396
- int x_low, x_high, y_low, y_high;
397
-
398
- bilinear_interpolate_gradient(
399
- height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
400
-
401
- T g1 = grad_output_this_bin * w1 / count;
402
- T g2 = grad_output_this_bin * w2 / count;
403
- T g3 = grad_output_this_bin * w3 / count;
404
- T g4 = grad_output_this_bin * w4 / count;
405
-
406
- if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
407
- // atomic add is not needed for now since it is single threaded
408
- add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
409
- add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
410
- add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
411
- add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
412
- } // if
413
- } // ix
414
- } // iy
415
- } // for
416
- } // ROIAlignRotatedBackward
417
-
418
- at::Tensor ROIAlignRotated_forward_cpu(
419
- const at::Tensor& input,
420
- const at::Tensor& rois,
421
- const float spatial_scale,
422
- const int pooled_height,
423
- const int pooled_width,
424
- const int sampling_ratio) {
425
- AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor");
426
- AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
427
-
428
- at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
429
-
430
- at::CheckedFrom c = "ROIAlign_forward_cpu";
431
- at::checkAllSameType(c, {input_t, rois_t});
432
-
433
- auto num_rois = rois.size(0);
434
- auto channels = input.size(1);
435
- auto height = input.size(2);
436
- auto width = input.size(3);
437
-
438
- at::Tensor output = at::zeros(
439
- {num_rois, channels, pooled_height, pooled_width}, input.options());
440
-
441
- auto output_size = num_rois * pooled_height * pooled_width * channels;
442
-
443
- if (output.numel() == 0) {
444
- return output;
445
- }
446
-
447
- auto input_ = input.contiguous(), rois_ = rois.contiguous();
448
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(
449
- input.scalar_type(), "ROIAlignRotated_forward", [&] {
450
- ROIAlignRotatedForward<scalar_t>(
451
- output_size,
452
- input_.data_ptr<scalar_t>(),
453
- spatial_scale,
454
- channels,
455
- height,
456
- width,
457
- pooled_height,
458
- pooled_width,
459
- sampling_ratio,
460
- rois_.data_ptr<scalar_t>(),
461
- output.data_ptr<scalar_t>());
462
- });
463
- return output;
464
- }
465
-
466
- at::Tensor ROIAlignRotated_backward_cpu(
467
- const at::Tensor& grad,
468
- const at::Tensor& rois,
469
- const float spatial_scale,
470
- const int pooled_height,
471
- const int pooled_width,
472
- const int batch_size,
473
- const int channels,
474
- const int height,
475
- const int width,
476
- const int sampling_ratio) {
477
- AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor");
478
- AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
479
-
480
- at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
481
-
482
- at::CheckedFrom c = "ROIAlignRotated_backward_cpu";
483
- at::checkAllSameType(c, {grad_t, rois_t});
484
-
485
- at::Tensor grad_input =
486
- at::zeros({batch_size, channels, height, width}, grad.options());
487
-
488
- // handle possibly empty gradients
489
- if (grad.numel() == 0) {
490
- return grad_input;
491
- }
492
-
493
- // get stride values to ensure indexing into gradients is correct.
494
- int n_stride = grad.stride(0);
495
- int c_stride = grad.stride(1);
496
- int h_stride = grad.stride(2);
497
- int w_stride = grad.stride(3);
498
-
499
- auto rois_ = rois.contiguous();
500
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(
501
- grad.scalar_type(), "ROIAlignRotated_forward", [&] {
502
- ROIAlignRotatedBackward<scalar_t>(
503
- grad.numel(),
504
- grad.data_ptr<scalar_t>(),
505
- spatial_scale,
506
- channels,
507
- height,
508
- width,
509
- pooled_height,
510
- pooled_width,
511
- sampling_ratio,
512
- grad_input.data_ptr<scalar_t>(),
513
- rois_.data_ptr<scalar_t>(),
514
- n_stride,
515
- c_stride,
516
- h_stride,
517
- w_stride);
518
- });
519
- return grad_input;
520
- }
521
-
522
- } // namespace detectron2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/slio.py DELETED
@@ -1,177 +0,0 @@
1
- # ==========================================================
2
- # Modified from mmcv
3
- # ==========================================================
4
-
5
- import json
6
- import pickle
7
- from abc import ABCMeta, abstractmethod
8
- from pathlib import Path
9
-
10
- import yaml
11
-
12
- try:
13
- from yaml import CLoader as Loader, CDumper as Dumper
14
- except ImportError:
15
- from yaml import Loader, Dumper
16
-
17
-
18
- # ===========================
19
- # Rigister handler
20
- # ===========================
21
-
22
-
23
- class BaseFileHandler(metaclass=ABCMeta):
24
- @abstractmethod
25
- def load_from_fileobj(self, file, **kwargs):
26
- pass
27
-
28
- @abstractmethod
29
- def dump_to_fileobj(self, obj, file, **kwargs):
30
- pass
31
-
32
- @abstractmethod
33
- def dump_to_str(self, obj, **kwargs):
34
- pass
35
-
36
- def load_from_path(self, filepath, mode="r", **kwargs):
37
- with open(filepath, mode) as f:
38
- return self.load_from_fileobj(f, **kwargs)
39
-
40
- def dump_to_path(self, obj, filepath, mode="w", **kwargs):
41
- with open(filepath, mode) as f:
42
- self.dump_to_fileobj(obj, f, **kwargs)
43
-
44
-
45
- class JsonHandler(BaseFileHandler):
46
- def load_from_fileobj(self, file):
47
- return json.load(file)
48
-
49
- def dump_to_fileobj(self, obj, file, **kwargs):
50
- json.dump(obj, file, **kwargs)
51
-
52
- def dump_to_str(self, obj, **kwargs):
53
- return json.dumps(obj, **kwargs)
54
-
55
-
56
- class PickleHandler(BaseFileHandler):
57
- def load_from_fileobj(self, file, **kwargs):
58
- return pickle.load(file, **kwargs)
59
-
60
- def load_from_path(self, filepath, **kwargs):
61
- return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs)
62
-
63
- def dump_to_str(self, obj, **kwargs):
64
- kwargs.setdefault("protocol", 2)
65
- return pickle.dumps(obj, **kwargs)
66
-
67
- def dump_to_fileobj(self, obj, file, **kwargs):
68
- kwargs.setdefault("protocol", 2)
69
- pickle.dump(obj, file, **kwargs)
70
-
71
- def dump_to_path(self, obj, filepath, **kwargs):
72
- super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs)
73
-
74
-
75
- class YamlHandler(BaseFileHandler):
76
- def load_from_fileobj(self, file, **kwargs):
77
- kwargs.setdefault("Loader", Loader)
78
- return yaml.load(file, **kwargs)
79
-
80
- def dump_to_fileobj(self, obj, file, **kwargs):
81
- kwargs.setdefault("Dumper", Dumper)
82
- yaml.dump(obj, file, **kwargs)
83
-
84
- def dump_to_str(self, obj, **kwargs):
85
- kwargs.setdefault("Dumper", Dumper)
86
- return yaml.dump(obj, **kwargs)
87
-
88
-
89
- file_handlers = {
90
- "json": JsonHandler(),
91
- "yaml": YamlHandler(),
92
- "yml": YamlHandler(),
93
- "pickle": PickleHandler(),
94
- "pkl": PickleHandler(),
95
- }
96
-
97
- # ===========================
98
- # load and dump
99
- # ===========================
100
-
101
-
102
- def is_str(x):
103
- """Whether the input is an string instance.
104
-
105
- Note: This method is deprecated since python 2 is no longer supported.
106
- """
107
- return isinstance(x, str)
108
-
109
-
110
- def slload(file, file_format=None, **kwargs):
111
- """Load data from json/yaml/pickle files.
112
-
113
- This method provides a unified api for loading data from serialized files.
114
-
115
- Args:
116
- file (str or :obj:`Path` or file-like object): Filename or a file-like
117
- object.
118
- file_format (str, optional): If not specified, the file format will be
119
- inferred from the file extension, otherwise use the specified one.
120
- Currently supported formats include "json", "yaml/yml" and
121
- "pickle/pkl".
122
-
123
- Returns:
124
- The content from the file.
125
- """
126
- if isinstance(file, Path):
127
- file = str(file)
128
- if file_format is None and is_str(file):
129
- file_format = file.split(".")[-1]
130
- if file_format not in file_handlers:
131
- raise TypeError(f"Unsupported format: {file_format}")
132
-
133
- handler = file_handlers[file_format]
134
- if is_str(file):
135
- obj = handler.load_from_path(file, **kwargs)
136
- elif hasattr(file, "read"):
137
- obj = handler.load_from_fileobj(file, **kwargs)
138
- else:
139
- raise TypeError('"file" must be a filepath str or a file-object')
140
- return obj
141
-
142
-
143
- def sldump(obj, file=None, file_format=None, **kwargs):
144
- """Dump data to json/yaml/pickle strings or files.
145
-
146
- This method provides a unified api for dumping data as strings or to files,
147
- and also supports custom arguments for each file format.
148
-
149
- Args:
150
- obj (any): The python object to be dumped.
151
- file (str or :obj:`Path` or file-like object, optional): If not
152
- specified, then the object is dump to a str, otherwise to a file
153
- specified by the filename or file-like object.
154
- file_format (str, optional): Same as :func:`load`.
155
-
156
- Returns:
157
- bool: True for success, False otherwise.
158
- """
159
- if isinstance(file, Path):
160
- file = str(file)
161
- if file_format is None:
162
- if is_str(file):
163
- file_format = file.split(".")[-1]
164
- elif file is None:
165
- raise ValueError("file_format must be specified since file is None")
166
- if file_format not in file_handlers:
167
- raise TypeError(f"Unsupported format: {file_format}")
168
-
169
- handler = file_handlers[file_format]
170
- if file is None:
171
- return handler.dump_to_str(obj, **kwargs)
172
- elif is_str(file):
173
- handler.dump_to_path(obj, file, **kwargs)
174
- elif hasattr(file, "write"):
175
- handler.dump_to_fileobj(obj, file, **kwargs)
176
- else:
177
- raise TypeError('"file" must be a filename str or a file-object')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/__init__.py DELETED
@@ -1,15 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
-
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from .build_sam import (
8
- build_sam,
9
- build_sam_vit_h,
10
- build_sam_vit_l,
11
- build_sam_vit_b,
12
- sam_model_registry,
13
- )
14
- from .predictor import SamPredictor
15
- from .automatic_mask_generator import SamAutomaticMaskGenerator
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CassBunny/anything-v3.0/app.py DELETED
@@ -1,276 +0,0 @@
1
- from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
2
- import gradio as gr
3
- import torch
4
- from PIL import Image
5
- import utils
6
- import datetime
7
- import time
8
- import psutil
9
-
10
- start_time = time.time()
11
- is_colab = utils.is_google_colab()
12
-
13
- class Model:
14
- def __init__(self, name, path="", prefix=""):
15
- self.name = name
16
- self.path = path
17
- self.prefix = prefix
18
- self.pipe_t2i = None
19
- self.pipe_i2i = None
20
-
21
- models = [
22
- Model("anything v3", "Linaqruf/anything-v3.0", "anything v3 style"),
23
- ]
24
- # Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
25
- # Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "),
26
- # Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
27
- # Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ")
28
- #Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
29
- #Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
30
- #Model("Robo Diffusion", "nousr/robo-diffusion", ""),
31
-
32
- scheduler = DPMSolverMultistepScheduler(
33
- beta_start=0.00085,
34
- beta_end=0.012,
35
- beta_schedule="scaled_linear",
36
- num_train_timesteps=1000,
37
- trained_betas=None,
38
- predict_epsilon=True,
39
- thresholding=False,
40
- algorithm_type="dpmsolver++",
41
- solver_type="midpoint",
42
- lower_order_final=True,
43
- )
44
-
45
- custom_model = None
46
- if is_colab:
47
- models.insert(0, Model("Custom model"))
48
- custom_model = models[0]
49
-
50
- last_mode = "txt2img"
51
- current_model = models[1] if is_colab else models[0]
52
- current_model_path = current_model.path
53
-
54
- if is_colab:
55
- pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
56
-
57
- else: # download all models
58
- print(f"{datetime.datetime.now()} Downloading vae...")
59
- vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
60
- for model in models:
61
- try:
62
- print(f"{datetime.datetime.now()} Downloading {model.name} model...")
63
- unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
64
- model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
65
- model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
66
- except Exception as e:
67
- print(f"{datetime.datetime.now()} Failed to load model " + model.name + ": " + str(e))
68
- models.remove(model)
69
- pipe = models[0].pipe_t2i
70
-
71
- if torch.cuda.is_available():
72
- pipe = pipe.to("cuda")
73
-
74
- device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
75
-
76
- def error_str(error, title="Error"):
77
- return f"""#### {title}
78
- {error}""" if error else ""
79
-
80
- def custom_model_changed(path):
81
- models[0].path = path
82
- global current_model
83
- current_model = models[0]
84
-
85
- def on_model_change(model_name):
86
-
87
- prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!"
88
-
89
- return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
90
-
91
- def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
92
-
93
- print(psutil.virtual_memory()) # print memory usage
94
-
95
- global current_model
96
- for model in models:
97
- if model.name == model_name:
98
- current_model = model
99
- model_path = current_model.path
100
-
101
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
102
-
103
- try:
104
- if img is not None:
105
- return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
106
- else:
107
- return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator), None
108
- except Exception as e:
109
- return None, error_str(e)
110
-
111
- def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator):
112
-
113
- print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
114
-
115
- global last_mode
116
- global pipe
117
- global current_model_path
118
- if model_path != current_model_path or last_mode != "txt2img":
119
- current_model_path = model_path
120
-
121
- if is_colab or current_model == custom_model:
122
- pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
123
- else:
124
- pipe = pipe.to("cpu")
125
- pipe = current_model.pipe_t2i
126
-
127
- if torch.cuda.is_available():
128
- pipe = pipe.to("cuda")
129
- last_mode = "txt2img"
130
-
131
- prompt = current_model.prefix + prompt
132
- result = pipe(
133
- prompt,
134
- negative_prompt = neg_prompt,
135
- # num_images_per_prompt=n_images,
136
- num_inference_steps = int(steps),
137
- guidance_scale = guidance,
138
- width = width,
139
- height = height,
140
- generator = generator)
141
-
142
- return replace_nsfw_images(result)
143
-
144
- def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
145
-
146
- print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
147
-
148
- global last_mode
149
- global pipe
150
- global current_model_path
151
- if model_path != current_model_path or last_mode != "img2img":
152
- current_model_path = model_path
153
-
154
- if is_colab or current_model == custom_model:
155
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
156
- else:
157
- pipe = pipe.to("cpu")
158
- pipe = current_model.pipe_i2i
159
-
160
- if torch.cuda.is_available():
161
- pipe = pipe.to("cuda")
162
- last_mode = "img2img"
163
-
164
- prompt = current_model.prefix + prompt
165
- ratio = min(height / img.height, width / img.width)
166
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
167
- result = pipe(
168
- prompt,
169
- negative_prompt = neg_prompt,
170
- # num_images_per_prompt=n_images,
171
- init_image = img,
172
- num_inference_steps = int(steps),
173
- strength = strength,
174
- guidance_scale = guidance,
175
- width = width,
176
- height = height,
177
- generator = generator)
178
-
179
- return replace_nsfw_images(result)
180
-
181
- def replace_nsfw_images(results):
182
-
183
- if is_colab:
184
- return results.images[0]
185
-
186
- for i in range(len(results.images)):
187
- if results.nsfw_content_detected[i]:
188
- results.images[i] = Image.open("nsfw.png")
189
- return results.images[0]
190
-
191
- css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
192
- """
193
- with gr.Blocks(css=css) as demo:
194
- gr.HTML(
195
- f"""
196
- <div class="finetuned-diffusion-div">
197
- <div>
198
- <h1>Anything V3</h1>
199
- </div>
200
- <p>
201
- Demo for Anything V3
202
- </p>
203
- <p>You can skip the queue by duplicating this space: <a style="display:inline-block" href="https://huggingface.co/spaces/akhaliq/anything-v3.0?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a> </p>
204
- </p>
205
- </div>
206
- """
207
- )
208
- with gr.Row():
209
-
210
- with gr.Column(scale=55):
211
- with gr.Group():
212
- model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
213
- with gr.Box(visible=False) as custom_model_group:
214
- custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
215
- gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
216
-
217
- with gr.Row():
218
- prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
219
- generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
220
-
221
-
222
- image_out = gr.Image(height=512)
223
- # gallery = gr.Gallery(
224
- # label="Generated images", show_label=False, elem_id="gallery"
225
- # ).style(grid=[1], height="auto")
226
- error_output = gr.Markdown()
227
-
228
- with gr.Column(scale=45):
229
- with gr.Tab("Options"):
230
- with gr.Group():
231
- neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
232
-
233
- # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
234
-
235
- with gr.Row():
236
- guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
237
- steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
238
-
239
- with gr.Row():
240
- width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
241
- height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
242
-
243
- seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
244
-
245
- with gr.Tab("Image to image"):
246
- with gr.Group():
247
- image = gr.Image(label="Image", height=256, tool="editor", type="pil")
248
- strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
249
-
250
- if is_colab:
251
- model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
252
- custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
253
- # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
254
-
255
- inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
256
- outputs = [image_out, error_output]
257
- prompt.submit(inference, inputs=inputs, outputs=outputs)
258
- generate.click(inference, inputs=inputs, outputs=outputs)
259
-
260
- ex = gr.Examples([
261
- [models[0].name, "iron man", 7.5, 50],
262
-
263
- ], inputs=[model_name, prompt, guidance, steps, seed], outputs=outputs, fn=inference, cache_examples=False)
264
-
265
- gr.HTML("""
266
- <div style="border-top: 1px solid #303030;">
267
- <br>
268
- <p>Model by Linaqruf</p>
269
- </div>
270
- """)
271
-
272
- print(f"Space built in {time.time() - start_time:.2f} seconds")
273
-
274
- if not is_colab:
275
- demo.queue(concurrency_count=1)
276
- demo.launch(debug=is_colab, share=is_colab)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChandraMohanNayal/AutoGPT/data_ingestion.py DELETED
@@ -1,96 +0,0 @@
1
- import argparse
2
- import logging
3
-
4
- from autogpt.commands.file_operations import ingest_file, search_files
5
- from autogpt.config import Config
6
- from autogpt.memory import get_memory
7
-
8
- cfg = Config()
9
-
10
-
11
- def configure_logging():
12
- logging.basicConfig(
13
- filename="log-ingestion.txt",
14
- filemode="a",
15
- format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
16
- datefmt="%H:%M:%S",
17
- level=logging.DEBUG,
18
- )
19
- return logging.getLogger("AutoGPT-Ingestion")
20
-
21
-
22
- def ingest_directory(directory, memory, args):
23
- """
24
- Ingest all files in a directory by calling the ingest_file function for each file.
25
-
26
- :param directory: The directory containing the files to ingest
27
- :param memory: An object with an add() method to store the chunks in memory
28
- """
29
- try:
30
- files = search_files(directory)
31
- for file in files:
32
- ingest_file(file, memory, args.max_length, args.overlap)
33
- except Exception as e:
34
- print(f"Error while ingesting directory '{directory}': {str(e)}")
35
-
36
-
37
- def main() -> None:
38
- logger = configure_logging()
39
-
40
- parser = argparse.ArgumentParser(
41
- description="Ingest a file or a directory with multiple files into memory. "
42
- "Make sure to set your .env before running this script."
43
- )
44
- group = parser.add_mutually_exclusive_group(required=True)
45
- group.add_argument("--file", type=str, help="The file to ingest.")
46
- group.add_argument(
47
- "--dir", type=str, help="The directory containing the files to ingest."
48
- )
49
- parser.add_argument(
50
- "--init",
51
- action="store_true",
52
- help="Init the memory and wipe its content (default: False)",
53
- default=False,
54
- )
55
- parser.add_argument(
56
- "--overlap",
57
- type=int,
58
- help="The overlap size between chunks when ingesting files (default: 200)",
59
- default=200,
60
- )
61
- parser.add_argument(
62
- "--max_length",
63
- type=int,
64
- help="The max_length of each chunk when ingesting files (default: 4000)",
65
- default=4000,
66
- )
67
-
68
- args = parser.parse_args()
69
-
70
- # Initialize memory
71
- memory = get_memory(cfg, init=args.init)
72
- print("Using memory of type: " + memory.__class__.__name__)
73
-
74
- if args.file:
75
- try:
76
- ingest_file(args.file, memory, args.max_length, args.overlap)
77
- print(f"File '{args.file}' ingested successfully.")
78
- except Exception as e:
79
- logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
80
- print(f"Error while ingesting file '{args.file}': {str(e)}")
81
- elif args.dir:
82
- try:
83
- ingest_directory(args.dir, memory, args)
84
- print(f"Directory '{args.dir}' ingested successfully.")
85
- except Exception as e:
86
- logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
87
- print(f"Error while ingesting directory '{args.dir}': {str(e)}")
88
- else:
89
- print(
90
- "Please provide either a file path (--file) or a directory name (--dir)"
91
- " inside the auto_gpt_workspace directory as input."
92
- )
93
-
94
-
95
- if __name__ == "__main__":
96
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChandraMohanNayal/AutoGPT/run_continuous.sh DELETED
@@ -1,3 +0,0 @@
1
#!/bin/bash
# Launch AutoGPT in continuous mode, forwarding all extra CLI arguments.
#
# NOTE: "$@" must be quoted — the original unquoted $@ word-split any
# argument that contained spaces before it reached run.sh.
./run.sh --continuous "$@"
 
 
 
 
spaces/Cropinky/esrgan/realesrgan/archs/srvgg_arch.py DELETED
@@ -1,69 +0,0 @@
1
- from basicsr.utils.registry import ARCH_REGISTRY
2
- from torch import nn as nn
3
- from torch.nn import functional as F
4
-
5
-
6
@ARCH_REGISTRY.register()
class SRVGGNetCompact(nn.Module):
    """A compact VGG-style network structure for super-resolution.

    It is a compact network structure, which performs upsampling in the last
    layer and no convolution is conducted on the HR feature space.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        num_feat (int): Channel number of intermediate features. Default: 64.
        num_conv (int): Number of convolution layers in the body network. Default: 16.
        upscale (int): Upsampling factor. Default: 4.
        act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
        super(SRVGGNetCompact, self).__init__()
        self.num_in_ch = num_in_ch
        self.num_out_ch = num_out_ch
        self.num_feat = num_feat
        self.num_conv = num_conv
        self.upscale = upscale
        self.act_type = act_type

        self.body = nn.ModuleList()
        # the first conv + its activation
        self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
        self.body.append(self._make_activation(act_type, num_feat))

        # the body structure: num_conv identical conv + activation pairs
        for _ in range(num_conv):
            self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
            self.body.append(self._make_activation(act_type, num_feat))

        # the last conv maps to num_out_ch * upscale^2 channels so that
        # PixelShuffle can rearrange them into the upsampled output image
        self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
        # upsample
        self.upsampler = nn.PixelShuffle(upscale)

    @staticmethod
    def _make_activation(act_type, num_feat):
        """Build the activation layer selected by ``act_type``.

        Factored out of ``__init__`` where the same if/elif chain was
        duplicated for the first layer and for the body layers.

        Raises:
            ValueError: if ``act_type`` is not 'relu', 'prelu' or 'leakyrelu'.
                (The original duplicated chains left ``activation`` undefined
                in that case and died with an obscure NameError.)
        """
        if act_type == 'relu':
            return nn.ReLU(inplace=True)
        if act_type == 'prelu':
            return nn.PReLU(num_parameters=num_feat)
        if act_type == 'leakyrelu':
            return nn.LeakyReLU(negative_slope=0.1, inplace=True)
        raise ValueError('unsupported act_type: %r' % act_type)

    def forward(self, x):
        """Run the network.

        Args:
            x (Tensor): input image batch; assumed NCHW with ``num_in_ch``
                channels — TODO confirm against callers.

        Returns:
            Tensor: the input upscaled by ``self.upscale`` plus the learned
            residual.
        """
        out = x
        for layer in self.body:
            out = layer(out)

        out = self.upsampler(out)
        # add the nearest-neighbor upsampled image, so that the network
        # only has to learn the residual
        base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
        out += base
        return out
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/reverseContourPen.py DELETED
@@ -1,96 +0,0 @@
1
- from fontTools.misc.arrayTools import pairwise
2
- from fontTools.pens.filterPen import ContourFilterPen
3
-
4
-
5
- __all__ = ["reversedContour", "ReverseContourPen"]
6
-
7
-
8
class ReverseContourPen(ContourFilterPen):
    """Filter pen that forwards outline data to another pen while reversing
    the winding direction of every contour it receives. Components are
    passed through to the wrapped pen unchanged.

    A closed contour is reversed in such a way that its first point stays
    the first point of the reversed contour.
    """

    def __init__(self, outPen, outputImpliedClosingLine=False):
        """Wrap *outPen*; see ``reversedContour`` for the meaning of
        *outputImpliedClosingLine*."""
        super().__init__(outPen)
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def filterContour(self, contour):
        # Delegate the actual reversal to the module-level generator.
        return reversedContour(contour, self.outputImpliedClosingLine)
23
-
24
-
25
def reversedContour(contour, outputImpliedClosingLine=False):
    """Generator that takes a list of pen's (operator, operands) tuples,
    and yields them with the winding direction reversed.

    Args:
        contour: mutable list of ``(segmentType, points)`` tuples for one
            contour, ending in an ``("endPath", ...)`` or ``("closePath", ...)``
            entry. The list is consumed/mutated in place by this generator.
        outputImpliedClosingLine: if False (default), a ``lineTo`` that would
            be implied by the final ``closePath`` is dropped from the output.

    Yields:
        ``(segmentType, points)`` tuples describing the reversed contour,
        terminated by a ``closePath`` or ``endPath`` entry.
    """
    if not contour:
        return  # nothing to do, stop iteration

    # valid contours must have at least a starting and ending command,
    # can't have one without the other
    assert len(contour) > 1, "invalid contour"

    # the type of the last command determines if the contour is closed
    contourType = contour.pop()[0]
    assert contourType in ("endPath", "closePath")
    closed = contourType == "closePath"

    firstType, firstPts = contour.pop(0)
    assert firstType in ("moveTo", "qCurveTo"), (
        "invalid initial segment type: %r" % firstType
    )
    firstOnCurve = firstPts[-1]
    if firstType == "qCurveTo":
        # special case for TrueType paths containing only off-curve points
        assert firstOnCurve is None, "off-curve only paths must end with 'None'"
        assert not contour, "only one qCurveTo allowed per off-curve path"
        # keep the first off-curve point, reverse the rest, keep trailing None
        firstPts = (firstPts[0],) + tuple(reversed(firstPts[1:-1])) + (None,)

    if not contour:
        # contour contains only one segment, nothing to reverse
        if firstType == "moveTo":
            closed = False  # single-point paths can't be closed
        else:
            closed = True  # off-curve paths are closed by definition
        yield firstType, firstPts
    else:
        lastType, lastPts = contour[-1]
        lastOnCurve = lastPts[-1]
        if closed:
            # for closed paths, we keep the starting point
            yield firstType, firstPts
            if firstOnCurve != lastOnCurve:
                # emit an implied line between the last and first points
                yield "lineTo", (lastOnCurve,)
                contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))

            if len(contour) > 1:
                secondType, secondPts = contour[0]
            else:
                # contour has only two points, the second and last are the same
                secondType, secondPts = lastType, lastPts

            if not outputImpliedClosingLine:
                # if a lineTo follows the initial moveTo, after reversing it
                # will be implied by the closePath, so we don't emit one;
                # unless the lineTo and moveTo overlap, in which case we keep the
                # duplicate points
                if secondType == "lineTo" and firstPts != secondPts:
                    del contour[0]
                    if contour:
                        contour[-1] = (lastType, tuple(lastPts[:-1]) + secondPts)
        else:
            # for open paths, the last point will become the first
            yield firstType, (lastOnCurve,)
            contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))

        # we iterate over all segment pairs in reverse order, and yield
        # each one with the off-curve points reversed (if any), and
        # with the on-curve point of the following segment
        # NOTE: this is fontTools' pairwise(..., reverse=True), not
        # itertools.pairwise, which has no `reverse` parameter.
        for (curType, curPts), (_, nextPts) in pairwise(contour, reverse=True):
            yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)

    yield "closePath" if closed else "endPath", ()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Image-0fe369ad.js DELETED
@@ -1,2 +0,0 @@
1
// Auto-generated, minified Svelte component bundle (Gradio CDN build):
// renders the "feather image" placeholder SVG icon (rect + circle + polyline).
// Do not edit by hand — regenerate from the Gradio frontend sources instead.
import{S as h,e as g,s as d,J as n,K as e,p as m,M as i,n as l,A as u}from"./index-1d65707a.js";function f(c){let t,r,s,o;return{c(){t=n("svg"),r=n("rect"),s=n("circle"),o=n("polyline"),e(r,"x","3"),e(r,"y","3"),e(r,"width","18"),e(r,"height","18"),e(r,"rx","2"),e(r,"ry","2"),e(s,"cx","8.5"),e(s,"cy","8.5"),e(s,"r","1.5"),e(o,"points","21 15 16 10 5 21"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 24 24"),e(t,"fill","none"),e(t,"stroke","currentColor"),e(t,"stroke-width","1.5"),e(t,"stroke-linecap","round"),e(t,"stroke-linejoin","round"),e(t,"class","feather feather-image")},m(a,p){m(a,t,p),i(t,r),i(t,s),i(t,o)},p:l,i:l,o:l,d(a){a&&u(t)}}}class x extends h{constructor(t){super(),g(this,t,null,f,d,{})}}export{x as I};
//# sourceMappingURL=Image-0fe369ad.js.map
- //# sourceMappingURL=Image-0fe369ad.js.map