parquet-converter committed
Commit e8d507b · 1 Parent(s): 93acbc1

Update parquet files (step 46 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chief Architect Premier X12 22.5.2.56 Patched keygen How to Activate the Full Features of the Professional 3D Building Software.md +0 -139
  2. spaces/1gistliPinn/ChatGPT4/Examples/?y??6?? ?? ??x????ownna? REPACK.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kisah Nabi Musa Full Movie Free.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Downton Abbey Saison 3 Torrent French.md +0 -7
  5. spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion_safe/__init__.py +0 -85
  6. spaces/232labs/VToonify/vtoonify/model/vgg.py +0 -60
  7. spaces/801artistry/RVC801/infer/lib/rmvpe.py +0 -717
  8. spaces/A666sxr/Genshin_TTS/data_utils.py +0 -392
  9. spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Engineering Wiki 8da06b3dcf1b4eaaa3e90aa70feefe56.md +0 -1
  10. spaces/AIGC-Audio/AudioGPT/sound_extraction/utils/stft.py +0 -159
  11. spaces/AIWaves/Debate/src/agents/utils.py +0 -480
  12. spaces/AIZeroToHero/Video-Automatic-Speech-Recognition/app.py +0 -119
  13. spaces/AIatUIUC/CodeLATS/generators/__init__.py +0 -3
  14. spaces/ALSv/FSW/roop/face_analyser.py +0 -53
  15. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/actions/snapScrollToBottom.ts +0 -54
  16. spaces/AchyuthGamer/OpenGPT/client/css/checkbox.css +0 -55
  17. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/EasyChat.py +0 -111
  18. spaces/Adapter/T2I-Adapter/app.py +0 -483
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Shake.js +0 -2
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py +0 -325
  21. spaces/Andy1621/uniformer_image_detection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py +0 -75
  22. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/ExLlama.md +0 -22
  23. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/js/switch_tabs.js +0 -59
  24. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/blocks.py +0 -342
  25. spaces/Arikkod/FoodVisionMini/app.py +0 -51
  26. spaces/Arthur678/vits-uma-genshin-honkai/modules.py +0 -388
  27. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp +0 -43
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/README.md +0 -12
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/configuration.py +0 -282
  30. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/certs.py +0 -24
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/jaraco/context.py +0 -213
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/video_visualizer.py +0 -252
  33. spaces/Banbri/zcvzcv/LICENCE.md +0 -170
  34. spaces/Benson/text-generation/Examples/Choque De Clanes Indir Apkcombo.md +0 -153
  35. spaces/Benson/text-generation/Examples/Descarga De La Aplicacin Comercial Zugacoin.md +0 -138
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/req/req_install.py +0 -867
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/util.py +0 -308
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/dep_util.py +0 -96
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h +0 -38
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py +0 -61
  41. spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/modules-checkpoint.py +0 -194
  42. spaces/CVPR/WALT/mmdet/datasets/xml_style.py +0 -170
  43. spaces/Caoyunkang/Segment-Any-Anomaly/utils/__init__.py +0 -0
  44. spaces/ChandraMohanNayal/AutoGPT/ui/app.py +0 -145
  45. spaces/ChrisPreston/diff-svc_minato_aqua/utils/__init__.py +0 -250
  46. spaces/CikeyQI/Yunzai/Yunzai/lib/listener/listener.js +0 -16
  47. spaces/CofAI/chat/g4f/Provider/Providers/Forefront.py +0 -30
  48. spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/app.py +0 -3
  49. spaces/Cvandi/remake/tests/test_model.py +0 -126
  50. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/solver/build.py +0 -31
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chief Architect Premier X12 22.5.2.56 Patched keygen How to Activate the Full Features of the Professional 3D Building Software.md DELETED
@@ -1,139 +0,0 @@
1
-
2
- <h1>Chief Architect Premier X12 22.5.2.56 Patched Keygen: A Comprehensive Review</h1>
3
- <p>If you are looking for a powerful and easy-to-use 3D architecture software for residential and commercial design, you might want to check out Chief Architect Premier X12. This software has automated construction tools that make home design, remodeling, interior design, kitchens and bathrooms, etc. easier. As you draw the walls and place smart architectural objects like doors and windows, the program creates a 3D model, generates a bill of materials, and with the use of powerful construction tools, helps to produce construction documents like blueprints plan, detailed sections, and elevations.</p>
4
- <p>In this article, we will review Chief Architect Premier X12 22.5.2.56 Patched Keygen, which is a cracked version of the software that allows you to use it without paying for a license. We will cover the features and benefits of Chief Architect Premier X12, how to install and activate it with the patched keygen, and the pros and cons of using it.</p>
5
- <h2>Chief Architect Premier X12 22.5.2.56 Patched keygen</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt; <a href="https://byltly.com/2uKxjH">https://byltly.com/2uKxjH</a></b></p><br /><br />
6
- <h2>What is Chief Architect Premier X12?</h2>
7
- <p>Chief Architect Premier X12 is the latest version of Chief Architect software, which was released in February 2020. It is a professional 3D architecture software that can handle all aspects of building design, from conceptual design to construction documents.</p>
8
- <p>Chief Architect Premier X12 has many new features and enhancements that make it more efficient and user-friendly. Some of these features include:</p>
9
- <ul>
10
- <li>New rendering engine that supports ray tracing and ambient occlusion for realistic lighting effects.</li>
11
- <li>New smart tools for creating decks, railings, stairs, roofs, dormers, skylights, etc.</li>
12
- <li>New library items for furniture, appliances, fixtures, plants, materials, etc.</li>
13
- <li>New options for customizing cabinets, countertops, backsplashes, moldings, etc.</li>
14
- <li>New tools for creating electrical plans, plumbing plans, HVAC plans, etc.</li>
15
- <li>New options for exporting 3D views to 360° panoramas, VR headsets, or web pages.</li>
16
- <li>New options for importing and exporting DWG/DXF files with layers and colors.</li>
17
- <li>New options for collaborating with other users via cloud services or BIM360.</li>
18
- </ul>
19
- <h3>Features and benefits of Chief Architect Premier X12</h3>
20
- <p>Chief Architect Premier X12 has many features and benefits that make it a versatile and powerful 3D architecture software. Here are some of them:</p>
21
- <h4>Design and build tools</h4>
22
- <p>Chief Architect Premier X12 has automatic and manual build tools that let you create a variety of roof styles, ladders, trusses, cut BOMs (bill of materials), sizing (dimensioning), sections (cross-sections), elevations (side views), etc. You can also use smart framing tools to create floor systems (joists), wall systems (studs), ceiling systems (rafters), etc. You can also edit these elements individually or in groups to customize their properties.</p>
23
- <h4>Interior, kitchen and bathroom design</h4>
24
- <p>Chief Architect Premier X12 uses smart design objects (such as cabinets, appliances, doors, windows, countertops (worktops), floors (flooring), etc.) to quickly and easily create various styles, shapes (forms), sizes (dimensions), etc. You can also use smart labels (tags) to annotate these objects with information such as manufacturer (brand), model (type), price (cost), etc. You can also use smart dimensions (measures) to show the distances between objects or walls.</p>
25
- <p>How to download Chief Architect Premier X12 22.5.2.56 with patch<br />
26
- Chief Architect Premier X12 22.5.2.56 cracked version free download<br />
27
- Chief Architect Premier X12 22.5.2.56 full version with keygen activation<br />
28
- Best software for home design: Chief Architect Premier X12 22.5.2.56<br />
29
- Chief Architect Premier X12 22.5.2.56 patch download link<br />
30
- Chief Architect Premier X12 22.5.2.56 keygen generator online<br />
31
- Chief Architect Premier X12 22.5.2.56 review and features<br />
32
- Chief Architect Premier X12 22.5.2.56 system requirements and compatibility<br />
33
- Chief Architect Premier X12 22.5.2.56 tutorial and tips<br />
34
- Chief Architect Premier X12 22.5.2.56 license key and serial number<br />
35
- Chief Architect Premier X12 22.5.2.56 update and bug fixes<br />
36
- Chief Architect Premier X12 22.5.2.56 vs other home design software<br />
37
- Chief Architect Premier X12 22.5.2.56 discount and coupon code<br />
38
- Chief Architect Premier X12 22.5.2.56 trial version and limitations<br />
39
- Chief Architect Premier X12 22.5.2.56 alternatives and competitors<br />
40
- How to install Chief Architect Premier X12 22.5.2.56 with patch and keygen<br />
41
- Chief Architect Premier X12 22.5.2.56 user manual and guide<br />
42
- Chief Architect Premier X12 22.5.2.56 support and customer service<br />
43
- How to uninstall Chief Architect Premier X12 22.5.2.56 completely<br />
44
- Chief Architect Premier X12 22.5.2.56 testimonials and feedback<br />
45
- How to use Chief Architect Premier X12 22.5.2.56 for interior design<br />
46
- How to use Chief Architect Premier X12 22.5.2.56 for exterior design<br />
47
- How to use Chief Architect Premier X12 22.5.2.56 for landscaping design<br />
48
- How to use Chief Architect Premier X12 22.5.2.56 for kitchen design<br />
49
- How to use Chief Architect Premier X12 22.5.2</p>
50
- <h4>3D modeling and design tools</h4>
51
- <p>With Chief Architect Premier X12, you can design in any view for seamless (smooth), simultaneous editing between 2D and 3D. You can switch between different views such as plan view (top view), elevation view (side view), perspective view (angle view), orthographic view (straight view), etc. You can also use the camera tool to create custom views such as dollhouse view (open view), glass house view (transparent view), watercolor view (artistic view), etc. You can also use the walkthrough tool to navigate through your model in 3D.</p>
52
- <h4>CAD tools for productivity and precision</h4>
53
- <p>Chief Architect Premier X12 has a powerful CAD software engine that includes tools for lines, polylines (connected lines), splines (curved lines), arcs (circular lines), solids (3D shapes), etc. to produce objects. You can also use these tools to draw custom shapes or symbols that can be saved as CAD blocks or library items for future use. You can also import files in DWG, DXF or PDF format from other CAD programs or online sources.</p>
54
- <h4>Construction blueprint set generation</h4>
55
- <p>All views of your project such as blueprints plan (floor plan), framing plan (structure plan), sections plan (cross-section plan), details plan (close-up plan) , elevations plan (side views plan) have a user-defined scale and link to a specific drawing that updates as design changes change. You can also use layout sheets to arrange these views on a page with title blocks, borders , text , dimensions , etc. You can also print these sheets or export them as PDF files for sharing or printing.</p>
56
- <h3>How to install and activate Chief Architect Premier X12 with the patched keygen</h3>
57
- <p>If you want to use Chief Architect Premier X12 without paying for a license , you can download the patched keygen version from this link. However , be aware that this is an illegal and risky way of using the software , as it may contain viruses , malware , or spyware that can harm your computer or compromise your data . Also , you may face legal consequences if you are caught using pirated software . Therefore , we do not recommend or endorse this method , and we advise you to buy a legitimate license from the official website instead . However , if you still want to proceed with this method , here are the steps you need to follow :</p>
58
- <h4>System requirements</h4>
59
- <p>Before installing Chief Architect Premier X12 , make sure your computer meets the minimum system requirements , which are :</p>
60
- <ul>
61
- <li>Windows 10 / 8 / 7 64-bit operating system</li>
62
- <li>Multi-core processor</li>
63
- <li>4 GB of memory</li>
64
- <li>5 GB of available hard disk space</li>
65
- <li>Dedicated graphics card with OpenGL 3.3 or higher support</li>
66
- <li>Internet access</li>
67
- </ul>
68
- <h4>Installation steps</h4>
69
- <ol>
70
- <li>Download the zip file from the link and extract it to a folder on your computer .</li>
71
- <li>Run the setup.exe file as administrator and follow the instructions on the screen .</li>
72
- <li>Select the destination folder where you want to install the software .</li>
73
- <li>Select the components you want to install such as libraries , bonus catalogs , manufacturer catalogs , etc.</li>
74
- <li>Wait for the installation process to complete .</li>
75
- <li>Do not run the software yet .</li>
76
- </ol>
77
- <h4>Activation steps</h4>
78
- <ol>
79
- <li>In the folder where you extracted the zip file , open the Crack folder .</li>
80
- <li>Copy the file named Chief_Architect_Premier_X11.exe .</li>
81
- <li>Paste it in the installation folder where you installed the software , usually C:\Program Files\Chief Architect\Chief Architect Premier X11 .</li>
82
- <li>Replace the original file when prompted .</li>
83
- <li>Run the software as administrator .</li>
84
- <li>Select I have a license key option .</li>
85
- <li>In another window , run the file named keygen.exe from the Crack folder .</li>
86
- <li>Select Generate option .</li>
87
- <li>Copy the generated license key from the keygen window .</li>
88
- <li>Paste it in the software activation window .</li>
89
- <li>Click OK to confirm the activation .</li>
90
- <li>Enjoy using Chief Architect Premier X12 with full features .</li>
91
- </ol>
92
- <h3>Pros and cons of Chief Architect Premier X12</h3>
93
- <p>Chief Architect Premier X12 is a powerful and versatile 3D architecture software that can help you create stunning designs and realistic renderings. However, it also has some drawbacks that you should be aware of. Here are some of the pros and cons of using Chief Architect Premier X12:</p>
94
- <h4>Pros</h4>
95
- <ul>
96
- <li>It has a user-friendly interface that is easy to navigate and customize.</li>
97
- <li>It has a large library of objects, materials, textures, colors, etc. that you can use to enhance your designs.</li>
98
- <li>It has smart tools that automate the creation and editing of various elements such as roofs, stairs, cabinets, etc.</li>
99
- <li>It has a powerful CAD engine that allows you to draw and modify any shape or symbol.</li>
100
- <li>It has a new rendering engine that supports ray tracing and ambient occlusion for realistic lighting effects.</li>
101
- <li>It has a new option for exporting 3D views to 360° panoramas, VR headsets, or web pages.</li>
102
- <li>It has a new option for collaborating with other users via cloud services or BIM360.</li>
103
- </ul>
104
- <h4>Cons</h4>
105
- <ul>
106
- <li>It is expensive to buy a license for the software, which costs $2,995 for a single user license or $4,995 for a network license.</li>
107
- <li>It requires a high-end computer system to run smoothly and efficiently.</li>
108
- <li>It may have some bugs or glitches that affect the performance or functionality of the software.</li>
109
- <li>It may not be compatible with some other CAD programs or file formats.</li>
110
- <li>It may be illegal and risky to use the patched keygen version of the software, as it may contain viruses, malware, or spyware that can harm your computer or compromise your data. Also, you may face legal consequences if you are caught using pirated software.</li>
111
- </ul>
112
- <h3>Conclusion</h3>
113
- <p>In conclusion, Chief Architect Premier X12 is a professional 3D architecture software that can help you create amazing designs and realistic renderings for residential and commercial projects. It has many features and benefits that make it a powerful and user-friendly tool. However, it also has some drawbacks that you should consider before buying or using it. If you want to use Chief Architect Premier X12 without paying for a license , you can download the patched keygen version from this link. However , be aware that this is an illegal and risky way of using the software , as it may contain viruses , malware , or spyware that can harm your computer or compromise your data . Also , you may face legal consequences if you are caught using pirated software . Therefore , we do not recommend or endorse this method , and we advise you to buy a legitimate license from the official website instead . We hope this article has given you some useful information and insights about Chief Architect Premier X12 22.5.2.56 Patched Keygen.</p>
114
- <h2>FAQs</h2>
115
- <p>Here are some frequently asked questions about Chief Architect Premier X12 22.5.2.56 Patched Keygen:</p>
116
- <ol>
117
- <li><b>What is the difference between Chief Architect Premier X12 and Chief Architect Interiors X12?</b></li>
118
- <p>Chief Architect Premier X12 is the full version of the software that can handle all aspects of building design , from conceptual design to construction documents . Chief Architect Interiors X12 is a specialized version of the software that focuses on interior design , kitchen and bath design , remodeling , etc. It has fewer features and tools than Chief Architect Premier X12 , but it is cheaper to buy . You can compare the two versions here.</p>
119
- <li><b>Can I use Chief Architect Premier X12 on Mac?</b></li>
120
- <p>Yes , you can use Chief Architect Premier X12 on Mac , as long as your Mac meets the minimum system requirements , which are :</p>
121
- <ul>
122
- <li>Mac OS X 10.13 or higher operating system</li>
123
- <li>Multicore processor</li>
124
- <li>4 GB of memory</li>
125
- <li>5 GB of available hard disk space</li>
126
- <li>Dedicated graphics card with OpenGL 3.3 or higher support</li>
127
- <li>Internet access</li>
128
- </ul>
129
- <p>You can download the Mac version of Chief Architect Premier X12 from here.</p>
130
- <li><b>Can I get a free trial of Chief Architect Premier X12?</b></li>
131
- <p>Yes , you can get a free trial of Chief Architect Premier X12 for 30 days from here. You will need to fill out a form with your name , email address , phone number , etc. to get the download link . You will also need to create an account on the official website to activate the trial . The trial version has all the features and functions of the full version , but it will expire after 30 days . You will also not be able to save or print your work with the trial version . You will need to buy a license to continue using the software after the trial period ends .</p>
132
- <li><b>How can I learn how to use Chief Architect Premier X12?</b></li>
133
- <p>You can learn how to use Chief Architect Premier X12 by watching video tutorials , reading user manuals , attending webinars , joining online forums , etc. You can find these resources on the official website here. You can also contact customer support if you have any questions or issues with the software . You can find their contact information here.</p>
134
- <li><b>Where can I find more reviews about Chief Architect Premier X12?</b></li>
135
- <p>You can find more reviews about Chief Architect Premier X12 on online platforms such as Capterra, Software Advice, Trustpilot, etc. You can also read testimonials from satisfied customers on the official website here. You can also watch video reviews on YouTube channels such as Home Designer Software, The Rendered Home, etc.</p>
136
- </ol>
137
- </p> 0a6ba089eb<br />
138
- <br />
139
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/?y??6?? ?? ??x????ownna? REPACK.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>?y??6?? ?? ??x????ownna?</h2><br /><p><b><b>Download</b> &gt;&gt;&gt; <a href="https://imgfil.com/2uy0YV">https://imgfil.com/2uy0YV</a></b></p><br /><br />
2
- <br />
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kisah Nabi Musa Full Movie Free.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>download film kisah nabi musa full movie</h2><br /><p><b><b>Download</b> ===> <a href="https://imgfil.com/2uxXCs">https://imgfil.com/2uxXCs</a></b></p><br /><br />
2
- <br />
3
-  . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 4fefd39f24<br />
4
- <br />
5
- <br />
6
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Downton Abbey Saison 3 Torrent French.md DELETED
@@ -1,7 +0,0 @@
1
- <h2>Downton Abbey Saison 3 Torrent French</h2><br /><p><b><b>Download File</b> &#9999; <a href="https://imgfil.com/2uxY6d">https://imgfil.com/2uxY6d</a></b></p><br /><br />
2
-
3
- Although Season 5, like Season 4, is not as dramatic 1st, 2nd and 3rd seasons (which were outstanding), it's still a great series. . Unlike many other series in the world, when we talk about horror films that can be scary and really creepy, but still they only show fear and death in some way, Supernatural is what makes the viewer fear- present because this series has a more realistic sense of horror than others.
4
- This gives the viewer a sense of reality that makes the series so scary and yet so good. 8a78ff9644<br />
5
- <br />
6
- <br />
7
- <p></p>
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion_safe/__init__.py DELETED
@@ -1,85 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # flake8: noqa
15
-
16
- from dataclasses import dataclass
17
- from enum import Enum
18
- from typing import List, Optional, Union
19
-
20
- import numpy as np
21
- import PIL
22
- from PIL import Image
23
-
24
- from ...utils import BaseOutput, is_paddle_available, is_paddlenlp_available
25
-
26
-
27
- @dataclass
28
- class SafetyConfig(object):
29
- WEAK = {
30
- "sld_warmup_steps": 15,
31
- "sld_guidance_scale": 20,
32
- "sld_threshold": 0.0,
33
- "sld_momentum_scale": 0.0,
34
- "sld_mom_beta": 0.0,
35
- }
36
- MEDIUM = {
37
- "sld_warmup_steps": 10,
38
- "sld_guidance_scale": 1000,
39
- "sld_threshold": 0.01,
40
- "sld_momentum_scale": 0.3,
41
- "sld_mom_beta": 0.4,
42
- }
43
- STRONG = {
44
- "sld_warmup_steps": 7,
45
- "sld_guidance_scale": 2000,
46
- "sld_threshold": 0.025,
47
- "sld_momentum_scale": 0.5,
48
- "sld_mom_beta": 0.7,
49
- }
50
- MAX = {
51
- "sld_warmup_steps": 0,
52
- "sld_guidance_scale": 5000,
53
- "sld_threshold": 1.0,
54
- "sld_momentum_scale": 0.5,
55
- "sld_mom_beta": 0.7,
56
- }
57
-
58
-
59
- @dataclass
60
- class StableDiffusionSafePipelineOutput(BaseOutput):
61
- """
62
- Output class for Safe Stable Diffusion pipelines.
63
- Args:
64
- images (`List[PIL.Image.Image]` or `np.ndarray`)
65
- List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
66
- num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
67
- nsfw_content_detected (`List[bool]`)
68
- List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
69
- (nsfw) content, or `None` if safety checking could not be performed.
70
- images (`List[PIL.Image.Image]` or `np.ndarray`)
71
- List of denoised PIL images that were flagged by the safety checker any may contain "not-safe-for-work"
72
- (nsfw) content, or `None` if no safety check was performed or no images were flagged.
73
- applied_safety_concept (`str`)
74
- The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled
75
- """
76
-
77
- images: Union[List[PIL.Image.Image], np.ndarray]
78
- nsfw_content_detected: Optional[List[bool]]
79
- unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]]
80
- applied_safety_concept: Optional[str]
81
-
82
-
83
- if is_paddle_available() and is_paddlenlp_available():
84
- from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe
85
- from .safety_checker import SafeStableDiffusionSafetyChecker
 
spaces/232labs/VToonify/vtoonify/model/vgg.py DELETED
@@ -1,60 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torchvision
4
-
5
- # VGG architecter, used for the perceptual loss using a pretrained VGG network
6
- class VGG19(torch.nn.Module):
7
- def __init__(self, requires_grad=False):
8
- super().__init__()
9
- vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
10
- self.slice1 = torch.nn.Sequential()
11
- self.slice2 = torch.nn.Sequential()
12
- self.slice3 = torch.nn.Sequential()
13
- self.slice4 = torch.nn.Sequential()
14
- self.slice5 = torch.nn.Sequential()
15
- self.slice6 = torch.nn.Sequential()
16
- for x in range(2):
17
- self.slice1.add_module(str(x), vgg_pretrained_features[x])
18
- for x in range(2, 7):
19
- self.slice2.add_module(str(x), vgg_pretrained_features[x])
20
- for x in range(7, 12):
21
- self.slice3.add_module(str(x), vgg_pretrained_features[x])
22
- for x in range(12, 21):
23
- self.slice4.add_module(str(x), vgg_pretrained_features[x])
24
- for x in range(21, 32):
25
- self.slice5.add_module(str(x), vgg_pretrained_features[x])
26
- for x in range(32, 36):
27
- self.slice6.add_module(str(x), vgg_pretrained_features[x])
28
- if not requires_grad:
29
- for param in self.parameters():
30
- param.requires_grad = False
31
-
32
- self.pool = nn.AdaptiveAvgPool2d(output_size=1)
33
-
34
- self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1,-1, 1, 1).cuda() * 2 - 1
35
- self.std = torch.tensor([0.229, 0.224, 0.225]).view(1,-1, 1, 1).cuda() * 2
36
-
37
- def forward(self, X): # relui_1
38
- X = (X-self.mean)/self.std
39
- h_relu1 = self.slice1(X)
40
- h_relu2 = self.slice2(h_relu1)
41
- h_relu3 = self.slice3(h_relu2)
42
- h_relu4 = self.slice4(h_relu3)
43
- h_relu5 = self.slice5[:-2](h_relu4)
44
- out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
45
- return out
46
-
47
- # Perceptual loss that uses a pretrained VGG network
48
- class VGGLoss(nn.Module):
49
- def __init__(self):
50
- super(VGGLoss, self).__init__()
51
- self.vgg = VGG19().cuda()
52
- self.criterion = nn.L1Loss()
53
- self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
54
-
55
- def forward(self, x, y):
56
- x_vgg, y_vgg = self.vgg(x), self.vgg(y)
57
- loss = 0
58
- for i in range(len(x_vgg)):
59
- loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
60
- return loss
 
spaces/801artistry/RVC801/infer/lib/rmvpe.py DELETED
@@ -1,717 +0,0 @@
1
- import pdb, os
2
-
3
- import numpy as np
4
- import torch
5
- try:
6
- #Fix "Torch not compiled with CUDA enabled"
7
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
8
- if torch.xpu.is_available():
9
- from infer.modules.ipex import ipex_init
10
- ipex_init()
11
- except Exception:
12
- pass
13
- import torch.nn as nn
14
- import torch.nn.functional as F
15
- from librosa.util import normalize, pad_center, tiny
16
- from scipy.signal import get_window
17
-
18
- import logging
19
-
20
- logger = logging.getLogger(__name__)
21
-
22
-
23
- ###stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py
24
- def window_sumsquare(
25
- window,
26
- n_frames,
27
- hop_length=200,
28
- win_length=800,
29
- n_fft=800,
30
- dtype=np.float32,
31
- norm=None,
32
- ):
33
- """
34
- # from librosa 0.6
35
- Compute the sum-square envelope of a window function at a given hop length.
36
- This is used to estimate modulation effects induced by windowing
37
- observations in short-time fourier transforms.
38
- Parameters
39
- ----------
40
- window : string, tuple, number, callable, or list-like
41
- Window specification, as in `get_window`
42
- n_frames : int > 0
43
- The number of analysis frames
44
- hop_length : int > 0
45
- The number of samples to advance between frames
46
- win_length : [optional]
47
- The length of the window function. By default, this matches `n_fft`.
48
- n_fft : int > 0
49
- The length of each analysis frame.
50
- dtype : np.dtype
51
- The data type of the output
52
- Returns
53
- -------
54
- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
55
- The sum-squared envelope of the window function
56
- """
57
- if win_length is None:
58
- win_length = n_fft
59
-
60
- n = n_fft + hop_length * (n_frames - 1)
61
- x = np.zeros(n, dtype=dtype)
62
-
63
- # Compute the squared window at the desired length
64
- win_sq = get_window(window, win_length, fftbins=True)
65
- win_sq = normalize(win_sq, norm=norm) ** 2
66
- win_sq = pad_center(win_sq, n_fft)
67
-
68
- # Fill the envelope
69
- for i in range(n_frames):
70
- sample = i * hop_length
71
- x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
72
- return x
73
-
74
-
75
- class STFT(torch.nn.Module):
76
- def __init__(
77
- self, filter_length=1024, hop_length=512, win_length=None, window="hann"
78
- ):
79
- """
80
- This module implements an STFT using 1D convolution and 1D transpose convolutions.
81
- This is a bit tricky so there are some cases that probably won't work as working
82
- out the same sizes before and after in all overlap add setups is tough. Right now,
83
- this code should work with hop lengths that are half the filter length (50% overlap
84
- between frames).
85
-
86
- Keyword Arguments:
87
- filter_length {int} -- Length of filters used (default: {1024})
88
- hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512})
89
- win_length {[type]} -- Length of the window function applied to each frame (if not specified, it
90
- equals the filter length). (default: {None})
91
- window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris)
92
- (default: {'hann'})
93
- """
94
- super(STFT, self).__init__()
95
- self.filter_length = filter_length
96
- self.hop_length = hop_length
97
- self.win_length = win_length if win_length else filter_length
98
- self.window = window
99
- self.forward_transform = None
100
- self.pad_amount = int(self.filter_length / 2)
101
- scale = self.filter_length / self.hop_length
102
- fourier_basis = np.fft.fft(np.eye(self.filter_length))
103
-
104
- cutoff = int((self.filter_length / 2 + 1))
105
- fourier_basis = np.vstack(
106
- [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
107
- )
108
- forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
109
- inverse_basis = torch.FloatTensor(
110
- np.linalg.pinv(scale * fourier_basis).T[:, None, :]
111
- )
112
-
113
- assert filter_length >= self.win_length
114
- # get window and zero center pad it to filter_length
115
- fft_window = get_window(window, self.win_length, fftbins=True)
116
- fft_window = pad_center(fft_window, size=filter_length)
117
- fft_window = torch.from_numpy(fft_window).float()
118
-
119
- # window the bases
120
- forward_basis *= fft_window
121
- inverse_basis *= fft_window
122
-
123
- self.register_buffer("forward_basis", forward_basis.float())
124
- self.register_buffer("inverse_basis", inverse_basis.float())
125
-
126
- def transform(self, input_data):
127
- """Take input data (audio) to STFT domain.
128
-
129
- Arguments:
130
- input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
131
-
132
- Returns:
133
- magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
134
- num_frequencies, num_frames)
135
- phase {tensor} -- Phase of STFT with shape (num_batch,
136
- num_frequencies, num_frames)
137
- """
138
- num_batches = input_data.shape[0]
139
- num_samples = input_data.shape[-1]
140
-
141
- self.num_samples = num_samples
142
-
143
- # similar to librosa, reflect-pad the input
144
- input_data = input_data.view(num_batches, 1, num_samples)
145
- # print(1234,input_data.shape)
146
- input_data = F.pad(
147
- input_data.unsqueeze(1),
148
- (self.pad_amount, self.pad_amount, 0, 0, 0, 0),
149
- mode="reflect",
150
- ).squeeze(1)
151
- # print(2333,input_data.shape,self.forward_basis.shape,self.hop_length)
152
- # pdb.set_trace()
153
- forward_transform = F.conv1d(
154
- input_data, self.forward_basis, stride=self.hop_length, padding=0
155
- )
156
-
157
- cutoff = int((self.filter_length / 2) + 1)
158
- real_part = forward_transform[:, :cutoff, :]
159
- imag_part = forward_transform[:, cutoff:, :]
160
-
161
- magnitude = torch.sqrt(real_part**2 + imag_part**2)
162
- # phase = torch.atan2(imag_part.data, real_part.data)
163
-
164
- return magnitude # , phase
165
-
166
- def inverse(self, magnitude, phase):
167
- """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced
168
- by the ```transform``` function.
169
-
170
- Arguments:
171
- magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
172
- num_frequencies, num_frames)
173
- phase {tensor} -- Phase of STFT with shape (num_batch,
174
- num_frequencies, num_frames)
175
-
176
- Returns:
177
- inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
178
- shape (num_batch, num_samples)
179
- """
180
- recombine_magnitude_phase = torch.cat(
181
- [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
182
- )
183
-
184
- inverse_transform = F.conv_transpose1d(
185
- recombine_magnitude_phase,
186
- self.inverse_basis,
187
- stride=self.hop_length,
188
- padding=0,
189
- )
190
-
191
- if self.window is not None:
192
- window_sum = window_sumsquare(
193
- self.window,
194
- magnitude.size(-1),
195
- hop_length=self.hop_length,
196
- win_length=self.win_length,
197
- n_fft=self.filter_length,
198
- dtype=np.float32,
199
- )
200
- # remove modulation effects
201
- approx_nonzero_indices = torch.from_numpy(
202
- np.where(window_sum > tiny(window_sum))[0]
203
- )
204
- window_sum = torch.from_numpy(window_sum).to(inverse_transform.device)
205
- inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
206
- approx_nonzero_indices
207
- ]
208
-
209
- # scale by hop ratio
210
- inverse_transform *= float(self.filter_length) / self.hop_length
211
-
212
- inverse_transform = inverse_transform[..., self.pad_amount :]
213
- inverse_transform = inverse_transform[..., : self.num_samples]
214
- inverse_transform = inverse_transform.squeeze(1)
215
-
216
- return inverse_transform
217
-
218
- def forward(self, input_data):
219
- """Take input data (audio) to STFT domain and then back to audio.
220
-
221
- Arguments:
222
- input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
223
-
224
- Returns:
225
- reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of
226
- shape (num_batch, num_samples)
227
- """
228
- self.magnitude, self.phase = self.transform(input_data)
229
- reconstruction = self.inverse(self.magnitude, self.phase)
230
- return reconstruction
231
-
232
-
233
- from time import time as ttime
234
-
235
-
236
- class BiGRU(nn.Module):
237
- def __init__(self, input_features, hidden_features, num_layers):
238
- super(BiGRU, self).__init__()
239
- self.gru = nn.GRU(
240
- input_features,
241
- hidden_features,
242
- num_layers=num_layers,
243
- batch_first=True,
244
- bidirectional=True,
245
- )
246
-
247
- def forward(self, x):
248
- return self.gru(x)[0]
249
-
250
-
251
- class ConvBlockRes(nn.Module):
252
- def __init__(self, in_channels, out_channels, momentum=0.01):
253
- super(ConvBlockRes, self).__init__()
254
- self.conv = nn.Sequential(
255
- nn.Conv2d(
256
- in_channels=in_channels,
257
- out_channels=out_channels,
258
- kernel_size=(3, 3),
259
- stride=(1, 1),
260
- padding=(1, 1),
261
- bias=False,
262
- ),
263
- nn.BatchNorm2d(out_channels, momentum=momentum),
264
- nn.ReLU(),
265
- nn.Conv2d(
266
- in_channels=out_channels,
267
- out_channels=out_channels,
268
- kernel_size=(3, 3),
269
- stride=(1, 1),
270
- padding=(1, 1),
271
- bias=False,
272
- ),
273
- nn.BatchNorm2d(out_channels, momentum=momentum),
274
- nn.ReLU(),
275
- )
276
- if in_channels != out_channels:
277
- self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
278
- self.is_shortcut = True
279
- else:
280
- self.is_shortcut = False
281
-
282
- def forward(self, x):
283
- if self.is_shortcut:
284
- return self.conv(x) + self.shortcut(x)
285
- else:
286
- return self.conv(x) + x
287
-
288
-
289
- class Encoder(nn.Module):
290
- def __init__(
291
- self,
292
- in_channels,
293
- in_size,
294
- n_encoders,
295
- kernel_size,
296
- n_blocks,
297
- out_channels=16,
298
- momentum=0.01,
299
- ):
300
- super(Encoder, self).__init__()
301
- self.n_encoders = n_encoders
302
- self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
303
- self.layers = nn.ModuleList()
304
- self.latent_channels = []
305
- for i in range(self.n_encoders):
306
- self.layers.append(
307
- ResEncoderBlock(
308
- in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
309
- )
310
- )
311
- self.latent_channels.append([out_channels, in_size])
312
- in_channels = out_channels
313
- out_channels *= 2
314
- in_size //= 2
315
- self.out_size = in_size
316
- self.out_channel = out_channels
317
-
318
- def forward(self, x):
319
- concat_tensors = []
320
- x = self.bn(x)
321
- for i in range(self.n_encoders):
322
- _, x = self.layers[i](x)
323
- concat_tensors.append(_)
324
- return x, concat_tensors
325
-
326
-
327
- class ResEncoderBlock(nn.Module):
328
- def __init__(
329
- self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
330
- ):
331
- super(ResEncoderBlock, self).__init__()
332
- self.n_blocks = n_blocks
333
- self.conv = nn.ModuleList()
334
- self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
335
- for i in range(n_blocks - 1):
336
- self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
337
- self.kernel_size = kernel_size
338
- if self.kernel_size is not None:
339
- self.pool = nn.AvgPool2d(kernel_size=kernel_size)
340
-
341
- def forward(self, x):
342
- for i in range(self.n_blocks):
343
- x = self.conv[i](x)
344
- if self.kernel_size is not None:
345
- return x, self.pool(x)
346
- else:
347
- return x
348
-
349
-
350
- class Intermediate(nn.Module): #
351
- def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
352
- super(Intermediate, self).__init__()
353
- self.n_inters = n_inters
354
- self.layers = nn.ModuleList()
355
- self.layers.append(
356
- ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
357
- )
358
- for i in range(self.n_inters - 1):
359
- self.layers.append(
360
- ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
361
- )
362
-
363
- def forward(self, x):
364
- for i in range(self.n_inters):
365
- x = self.layers[i](x)
366
- return x
367
-
368
-
369
- class ResDecoderBlock(nn.Module):
370
- def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
371
- super(ResDecoderBlock, self).__init__()
372
- out_padding = (0, 1) if stride == (1, 2) else (1, 1)
373
- self.n_blocks = n_blocks
374
- self.conv1 = nn.Sequential(
375
- nn.ConvTranspose2d(
376
- in_channels=in_channels,
377
- out_channels=out_channels,
378
- kernel_size=(3, 3),
379
- stride=stride,
380
- padding=(1, 1),
381
- output_padding=out_padding,
382
- bias=False,
383
- ),
384
- nn.BatchNorm2d(out_channels, momentum=momentum),
385
- nn.ReLU(),
386
- )
387
- self.conv2 = nn.ModuleList()
388
- self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
389
- for i in range(n_blocks - 1):
390
- self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
391
-
392
- def forward(self, x, concat_tensor):
393
- x = self.conv1(x)
394
- x = torch.cat((x, concat_tensor), dim=1)
395
- for i in range(self.n_blocks):
396
- x = self.conv2[i](x)
397
- return x
398
-
399
-
400
- class Decoder(nn.Module):
401
- def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
402
- super(Decoder, self).__init__()
403
- self.layers = nn.ModuleList()
404
- self.n_decoders = n_decoders
405
- for i in range(self.n_decoders):
406
- out_channels = in_channels // 2
407
- self.layers.append(
408
- ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
409
- )
410
- in_channels = out_channels
411
-
412
- def forward(self, x, concat_tensors):
413
- for i in range(self.n_decoders):
414
- x = self.layers[i](x, concat_tensors[-1 - i])
415
- return x
416
-
417
-
418
- class DeepUnet(nn.Module):
419
- def __init__(
420
- self,
421
- kernel_size,
422
- n_blocks,
423
- en_de_layers=5,
424
- inter_layers=4,
425
- in_channels=1,
426
- en_out_channels=16,
427
- ):
428
- super(DeepUnet, self).__init__()
429
- self.encoder = Encoder(
430
- in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
431
- )
432
- self.intermediate = Intermediate(
433
- self.encoder.out_channel // 2,
434
- self.encoder.out_channel,
435
- inter_layers,
436
- n_blocks,
437
- )
438
- self.decoder = Decoder(
439
- self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
440
- )
441
-
442
- def forward(self, x):
443
- x, concat_tensors = self.encoder(x)
444
- x = self.intermediate(x)
445
- x = self.decoder(x, concat_tensors)
446
- return x
447
-
448
-
449
- class E2E(nn.Module):
450
- def __init__(
451
- self,
452
- n_blocks,
453
- n_gru,
454
- kernel_size,
455
- en_de_layers=5,
456
- inter_layers=4,
457
- in_channels=1,
458
- en_out_channels=16,
459
- ):
460
- super(E2E, self).__init__()
461
- self.unet = DeepUnet(
462
- kernel_size,
463
- n_blocks,
464
- en_de_layers,
465
- inter_layers,
466
- in_channels,
467
- en_out_channels,
468
- )
469
- self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
470
- if n_gru:
471
- self.fc = nn.Sequential(
472
- BiGRU(3 * 128, 256, n_gru),
473
- nn.Linear(512, 360),
474
- nn.Dropout(0.25),
475
- nn.Sigmoid(),
476
- )
477
- else:
478
- self.fc = nn.Sequential(
479
- nn.Linear(3 * nn.N_MELS, nn.N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
480
- )
481
-
482
- def forward(self, mel):
483
- # print(mel.shape)
484
- mel = mel.transpose(-1, -2).unsqueeze(1)
485
- x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
486
- x = self.fc(x)
487
- # print(x.shape)
488
- return x
489
-
490
-
491
- from librosa.filters import mel
492
-
493
-
494
- class MelSpectrogram(torch.nn.Module):
495
- def __init__(
496
- self,
497
- is_half,
498
- n_mel_channels,
499
- sampling_rate,
500
- win_length,
501
- hop_length,
502
- n_fft=None,
503
- mel_fmin=0,
504
- mel_fmax=None,
505
- clamp=1e-5,
506
- ):
507
- super().__init__()
508
- n_fft = win_length if n_fft is None else n_fft
509
- self.hann_window = {}
510
- mel_basis = mel(
511
- sr=sampling_rate,
512
- n_fft=n_fft,
513
- n_mels=n_mel_channels,
514
- fmin=mel_fmin,
515
- fmax=mel_fmax,
516
- htk=True,
517
- )
518
- mel_basis = torch.from_numpy(mel_basis).float()
519
- self.register_buffer("mel_basis", mel_basis)
520
- self.n_fft = win_length if n_fft is None else n_fft
521
- self.hop_length = hop_length
522
- self.win_length = win_length
523
- self.sampling_rate = sampling_rate
524
- self.n_mel_channels = n_mel_channels
525
- self.clamp = clamp
526
- self.is_half = is_half
527
-
528
- def forward(self, audio, keyshift=0, speed=1, center=True):
529
- factor = 2 ** (keyshift / 12)
530
- n_fft_new = int(np.round(self.n_fft * factor))
531
- win_length_new = int(np.round(self.win_length * factor))
532
- hop_length_new = int(np.round(self.hop_length * speed))
533
- keyshift_key = str(keyshift) + "_" + str(audio.device)
534
- if keyshift_key not in self.hann_window:
535
- self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
536
- # "cpu"if(audio.device.type=="privateuseone") else audio.device
537
- audio.device
538
- )
539
- # fft = torch.stft(#doesn't support pytorch_dml
540
- # # audio.cpu() if(audio.device.type=="privateuseone")else audio,
541
- # audio,
542
- # n_fft=n_fft_new,
543
- # hop_length=hop_length_new,
544
- # win_length=win_length_new,
545
- # window=self.hann_window[keyshift_key],
546
- # center=center,
547
- # return_complex=True,
548
- # )
549
- # magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
550
- # print(1111111111)
551
- # print(222222222222222,audio.device,self.is_half)
552
- if hasattr(self, "stft") == False:
553
- # print(n_fft_new,hop_length_new,win_length_new,audio.shape)
554
- self.stft = STFT(
555
- filter_length=n_fft_new,
556
- hop_length=hop_length_new,
557
- win_length=win_length_new,
558
- window="hann",
559
- ).to(audio.device)
560
- magnitude = self.stft.transform(audio) # phase
561
- # if (audio.device.type == "privateuseone"):
562
- # magnitude=magnitude.to(audio.device)
563
- if keyshift != 0:
564
- size = self.n_fft // 2 + 1
565
- resize = magnitude.size(1)
566
- if resize < size:
567
- magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
568
- magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
569
- mel_output = torch.matmul(self.mel_basis, magnitude)
570
- if self.is_half == True:
571
- mel_output = mel_output.half()
572
- log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
573
- # print(log_mel_spec.device.type)
574
- return log_mel_spec
575
-
576
-
577
- class RMVPE:
578
- def __init__(self, model_path, is_half, device=None):
579
- self.resample_kernel = {}
580
- self.resample_kernel = {}
581
- self.is_half = is_half
582
- if device is None:
583
- device = "cuda" if torch.cuda.is_available() else "cpu"
584
- self.device = device
585
- self.mel_extractor = MelSpectrogram(
586
- is_half, 128, 16000, 1024, 160, None, 30, 8000
587
- ).to(device)
588
- if "privateuseone" in str(device):
589
- import onnxruntime as ort
590
-
591
- ort_session = ort.InferenceSession(
592
- "%s/rmvpe.onnx" % os.environ["rmvpe_root"],
593
- providers=["DmlExecutionProvider"],
594
- )
595
- self.model = ort_session
596
- else:
597
- model = E2E(4, 1, (2, 2))
598
- ckpt = torch.load(model_path, map_location="cpu")
599
- model.load_state_dict(ckpt)
600
- model.eval()
601
- if is_half == True:
602
- model = model.half()
603
- self.model = model
604
- self.model = self.model.to(device)
605
- cents_mapping = 20 * np.arange(360) + 1997.3794084376191
606
- self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
607
-
608
- def mel2hidden(self, mel):
609
- with torch.no_grad():
610
- n_frames = mel.shape[-1]
611
- mel = F.pad(
612
- mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="constant"
613
- )
614
- if "privateuseone" in str(self.device):
615
- onnx_input_name = self.model.get_inputs()[0].name
616
- onnx_outputs_names = self.model.get_outputs()[0].name
617
- hidden = self.model.run(
618
- [onnx_outputs_names],
619
- input_feed={onnx_input_name: mel.cpu().numpy()},
620
- )[0]
621
- else:
622
- hidden = self.model(mel)
623
- return hidden[:, :n_frames]
624
-
625
- def decode(self, hidden, thred=0.03):
626
- cents_pred = self.to_local_average_cents(hidden, thred=thred)
627
- f0 = 10 * (2 ** (cents_pred / 1200))
628
- f0[f0 == 10] = 0
629
- # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
630
- return f0
631
-
632
- def infer_from_audio(self, audio, thred=0.03):
633
- # torch.cuda.synchronize()
634
- t0 = ttime()
635
- mel = self.mel_extractor(
636
- torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True
637
- )
638
- # print(123123123,mel.device.type)
639
- # torch.cuda.synchronize()
640
- t1 = ttime()
641
- hidden = self.mel2hidden(mel)
642
- # torch.cuda.synchronize()
643
- t2 = ttime()
644
- # print(234234,hidden.device.type)
645
- if "privateuseone" not in str(self.device):
646
- hidden = hidden.squeeze(0).cpu().numpy()
647
- else:
648
- hidden = hidden[0]
649
- if self.is_half == True:
650
- hidden = hidden.astype("float32")
651
-
652
- f0 = self.decode(hidden, thred=thred)
653
- # torch.cuda.synchronize()
654
- t3 = ttime()
655
- # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
656
- return f0
657
-
658
- def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100):
659
- audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
660
- mel = self.mel_extractor(audio, center=True)
661
- hidden = self.mel2hidden(mel)
662
- hidden = hidden.squeeze(0).cpu().numpy()
663
- if self.is_half == True:
664
- hidden = hidden.astype("float32")
665
- f0 = self.decode(hidden, thred=thred)
666
- f0[(f0 < f0_min) | (f0 > f0_max)] = 0
667
- return f0
668
-
669
- def to_local_average_cents(self, salience, thred=0.05):
670
- # t0 = ttime()
671
- center = np.argmax(salience, axis=1) # 帧长#index
672
- salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368
673
- # t1 = ttime()
674
- center += 4
675
- todo_salience = []
676
- todo_cents_mapping = []
677
- starts = center - 4
678
- ends = center + 5
679
- for idx in range(salience.shape[0]):
680
- todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
681
- todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
682
- # t2 = ttime()
683
- todo_salience = np.array(todo_salience) # 帧长,9
684
- todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9
685
- product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
686
- weight_sum = np.sum(todo_salience, 1) # 帧长
687
- devided = product_sum / weight_sum # 帧长
688
- # t3 = ttime()
689
- maxx = np.max(salience, axis=1) # 帧长
690
- devided[maxx <= thred] = 0
691
- # t4 = ttime()
692
- # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
693
- return devided
694
-
695
-
696
- if __name__ == "__main__":
697
- import librosa
698
- import soundfile as sf
699
-
700
- audio, sampling_rate = sf.read(r"C:\Users\liujing04\Desktop\Z\冬之花clip1.wav")
701
- if len(audio.shape) > 1:
702
- audio = librosa.to_mono(audio.transpose(1, 0))
703
- audio_bak = audio.copy()
704
- if sampling_rate != 16000:
705
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
706
- model_path = r"D:\BaiduNetdiskDownload\RVC-beta-v2-0727AMD_realtime\rmvpe.pt"
707
- thred = 0.03 # 0.01
708
- device = "cuda" if torch.cuda.is_available() else "cpu"
709
- rmvpe = RMVPE(model_path, is_half=False, device=device)
710
- t0 = ttime()
711
- f0 = rmvpe.infer_from_audio(audio, thred=thred)
712
- # f0 = rmvpe.infer_from_audio(audio, thred=thred)
713
- # f0 = rmvpe.infer_from_audio(audio, thred=thred)
714
- # f0 = rmvpe.infer_from_audio(audio, thred=thred)
715
- # f0 = rmvpe.infer_from_audio(audio, thred=thred)
716
- t1 = ttime()
717
- logger.info("%s %.2f", f0.shape, t1 - t0)
 
spaces/A666sxr/Genshin_TTS/data_utils.py DELETED
@@ -1,392 +0,0 @@
1
- import time
2
- import os
3
- import random
4
- import numpy as np
5
- import torch
6
- import torch.utils.data
7
-
8
- import commons
9
- from mel_processing import spectrogram_torch
10
- from utils import load_wav_to_torch, load_filepaths_and_text
11
- from text import text_to_sequence, cleaned_text_to_sequence
12
-
13
-
14
- class TextAudioLoader(torch.utils.data.Dataset):
15
- """
16
- 1) loads audio, text pairs
17
- 2) normalizes text and converts them to sequences of integers
18
- 3) computes spectrograms from audio files.
19
- """
20
- def __init__(self, audiopaths_and_text, hparams):
21
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
22
- self.text_cleaners = hparams.text_cleaners
23
- self.max_wav_value = hparams.max_wav_value
24
- self.sampling_rate = hparams.sampling_rate
25
- self.filter_length = hparams.filter_length
26
- self.hop_length = hparams.hop_length
27
- self.win_length = hparams.win_length
28
- self.sampling_rate = hparams.sampling_rate
29
-
30
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
31
-
32
- self.add_blank = hparams.add_blank
33
- self.min_text_len = getattr(hparams, "min_text_len", 1)
34
- self.max_text_len = getattr(hparams, "max_text_len", 190)
35
-
36
- random.seed(1234)
37
- random.shuffle(self.audiopaths_and_text)
38
- self._filter()
39
-
40
-
41
- def _filter(self):
42
- """
43
- Filter text & store spec lengths
44
- """
45
- # Store spectrogram lengths for Bucketing
46
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
47
- # spec_length = wav_length // hop_length
48
-
49
- audiopaths_and_text_new = []
50
- lengths = []
51
- for audiopath, text in self.audiopaths_and_text:
52
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
53
- audiopaths_and_text_new.append([audiopath, text])
54
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
55
- self.audiopaths_and_text = audiopaths_and_text_new
56
- self.lengths = lengths
57
-
58
- def get_audio_text_pair(self, audiopath_and_text):
59
- # separate filename and text
60
- audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
61
- text = self.get_text(text)
62
- spec, wav = self.get_audio(audiopath)
63
- return (text, spec, wav)
64
-
65
- def get_audio(self, filename):
66
- audio, sampling_rate = load_wav_to_torch(filename)
67
- if sampling_rate != self.sampling_rate:
68
- raise ValueError("{} {} SR doesn't match target {} SR".format(
69
- sampling_rate, self.sampling_rate))
70
- audio_norm = audio / self.max_wav_value
71
- audio_norm = audio_norm.unsqueeze(0)
72
- spec_filename = filename.replace(".wav", ".spec.pt")
73
- if os.path.exists(spec_filename):
74
- spec = torch.load(spec_filename)
75
- else:
76
- spec = spectrogram_torch(audio_norm, self.filter_length,
77
- self.sampling_rate, self.hop_length, self.win_length,
78
- center=False)
79
- spec = torch.squeeze(spec, 0)
80
- torch.save(spec, spec_filename)
81
- return spec, audio_norm
82
-
83
- def get_text(self, text):
84
- if self.cleaned_text:
85
- text_norm = cleaned_text_to_sequence(text)
86
- else:
87
- text_norm = text_to_sequence(text, self.text_cleaners)
88
- if self.add_blank:
89
- text_norm = commons.intersperse(text_norm, 0)
90
- text_norm = torch.LongTensor(text_norm)
91
- return text_norm
92
-
93
- def __getitem__(self, index):
94
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
95
-
96
- def __len__(self):
97
- return len(self.audiopaths_and_text)
98
-
99
-
100
- class TextAudioCollate():
101
- """ Zero-pads model inputs and targets
102
- """
103
- def __init__(self, return_ids=False):
104
- self.return_ids = return_ids
105
-
106
- def __call__(self, batch):
107
- """Collate's training batch from normalized text and aduio
108
- PARAMS
109
- ------
110
- batch: [text_normalized, spec_normalized, wav_normalized]
111
- """
112
- # Right zero-pad all one-hot text sequences to max input length
113
- _, ids_sorted_decreasing = torch.sort(
114
- torch.LongTensor([x[1].size(1) for x in batch]),
115
- dim=0, descending=True)
116
-
117
- max_text_len = max([len(x[0]) for x in batch])
118
- max_spec_len = max([x[1].size(1) for x in batch])
119
- max_wav_len = max([x[2].size(1) for x in batch])
120
-
121
- text_lengths = torch.LongTensor(len(batch))
122
- spec_lengths = torch.LongTensor(len(batch))
123
- wav_lengths = torch.LongTensor(len(batch))
124
-
125
- text_padded = torch.LongTensor(len(batch), max_text_len)
126
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
127
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
128
- text_padded.zero_()
129
- spec_padded.zero_()
130
- wav_padded.zero_()
131
- for i in range(len(ids_sorted_decreasing)):
132
- row = batch[ids_sorted_decreasing[i]]
133
-
134
- text = row[0]
135
- text_padded[i, :text.size(0)] = text
136
- text_lengths[i] = text.size(0)
137
-
138
- spec = row[1]
139
- spec_padded[i, :, :spec.size(1)] = spec
140
- spec_lengths[i] = spec.size(1)
141
-
142
- wav = row[2]
143
- wav_padded[i, :, :wav.size(1)] = wav
144
- wav_lengths[i] = wav.size(1)
145
-
146
- if self.return_ids:
147
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
148
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
149
-
150
-
151
- """Multi speaker version"""
152
- class TextAudioSpeakerLoader(torch.utils.data.Dataset):
153
- """
154
- 1) loads audio, speaker_id, text pairs
155
- 2) normalizes text and converts them to sequences of integers
156
- 3) computes spectrograms from audio files.
157
- """
158
- def __init__(self, audiopaths_sid_text, hparams):
159
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
160
- self.text_cleaners = hparams.text_cleaners
161
- self.max_wav_value = hparams.max_wav_value
162
- self.sampling_rate = hparams.sampling_rate
163
- self.filter_length = hparams.filter_length
164
- self.hop_length = hparams.hop_length
165
- self.win_length = hparams.win_length
166
- self.sampling_rate = hparams.sampling_rate
167
-
168
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
169
-
170
- self.add_blank = hparams.add_blank
171
- self.min_text_len = getattr(hparams, "min_text_len", 1)
172
- self.max_text_len = getattr(hparams, "max_text_len", 190)
173
-
174
- random.seed(1234)
175
- random.shuffle(self.audiopaths_sid_text)
176
- self._filter()
177
-
178
- def _filter(self):
179
- """
180
- Filter text & store spec lengths
181
- """
182
- # Store spectrogram lengths for Bucketing
183
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
184
- # spec_length = wav_length // hop_length
185
-
186
- audiopaths_sid_text_new = []
187
- lengths = []
188
- for audiopath, sid, text in self.audiopaths_sid_text:
189
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
190
- audiopaths_sid_text_new.append([audiopath, sid, text])
191
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
192
- self.audiopaths_sid_text = audiopaths_sid_text_new
193
- self.lengths = lengths
194
-
195
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
196
- # separate filename, speaker_id and text
197
- audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
198
- text = self.get_text(text)
199
- spec, wav = self.get_audio(audiopath)
200
- sid = self.get_sid(sid)
201
- return (text, spec, wav, sid)
202
-
203
- def get_audio(self, filename):
204
- audio, sampling_rate = load_wav_to_torch(filename)
205
- if sampling_rate != self.sampling_rate:
206
- raise ValueError("{} {} SR doesn't match target {} SR".format(
207
- sampling_rate, self.sampling_rate))
208
- audio_norm = audio / self.max_wav_value
209
- audio_norm = audio_norm.unsqueeze(0)
210
- spec_filename = filename.replace(".wav", ".spec.pt")
211
- if os.path.exists(spec_filename):
212
- spec = torch.load(spec_filename)
213
- else:
214
- spec = spectrogram_torch(audio_norm, self.filter_length,
215
- self.sampling_rate, self.hop_length, self.win_length,
216
- center=False)
217
- spec = torch.squeeze(spec, 0)
218
- torch.save(spec, spec_filename)
219
- return spec, audio_norm
220
-
221
- def get_text(self, text):
222
- if self.cleaned_text:
223
- text_norm = cleaned_text_to_sequence(text)
224
- else:
225
- text_norm = text_to_sequence(text, self.text_cleaners)
226
- if self.add_blank:
227
- text_norm = commons.intersperse(text_norm, 0)
228
- text_norm = torch.LongTensor(text_norm)
229
- return text_norm
230
-
231
- def get_sid(self, sid):
232
- sid = torch.LongTensor([int(sid)])
233
- return sid
234
-
235
- def __getitem__(self, index):
236
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
237
-
238
- def __len__(self):
239
- return len(self.audiopaths_sid_text)
240
-
241
-
242
- class TextAudioSpeakerCollate():
243
- """ Zero-pads model inputs and targets
244
- """
245
- def __init__(self, return_ids=False):
246
- self.return_ids = return_ids
247
-
248
- def __call__(self, batch):
249
- """Collate's training batch from normalized text, audio and speaker identities
250
- PARAMS
251
- ------
252
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
253
- """
254
- # Right zero-pad all one-hot text sequences to max input length
255
- _, ids_sorted_decreasing = torch.sort(
256
- torch.LongTensor([x[1].size(1) for x in batch]),
257
- dim=0, descending=True)
258
-
259
- max_text_len = max([len(x[0]) for x in batch])
260
- max_spec_len = max([x[1].size(1) for x in batch])
261
- max_wav_len = max([x[2].size(1) for x in batch])
262
-
263
- text_lengths = torch.LongTensor(len(batch))
264
- spec_lengths = torch.LongTensor(len(batch))
265
- wav_lengths = torch.LongTensor(len(batch))
266
- sid = torch.LongTensor(len(batch))
267
-
268
- text_padded = torch.LongTensor(len(batch), max_text_len)
269
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
270
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
271
- text_padded.zero_()
272
- spec_padded.zero_()
273
- wav_padded.zero_()
274
- for i in range(len(ids_sorted_decreasing)):
275
- row = batch[ids_sorted_decreasing[i]]
276
-
277
- text = row[0]
278
- text_padded[i, :text.size(0)] = text
279
- text_lengths[i] = text.size(0)
280
-
281
- spec = row[1]
282
- spec_padded[i, :, :spec.size(1)] = spec
283
- spec_lengths[i] = spec.size(1)
284
-
285
- wav = row[2]
286
- wav_padded[i, :, :wav.size(1)] = wav
287
- wav_lengths[i] = wav.size(1)
288
-
289
- sid[i] = row[3]
290
-
291
- if self.return_ids:
292
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
293
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
294
-
295
-
296
- class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
297
- """
298
- Maintain similar input lengths in a batch.
299
- Length groups are specified by boundaries.
300
- Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
301
-
302
- It removes samples which are not included in the boundaries.
303
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
304
- """
305
- def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
306
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
307
- self.lengths = dataset.lengths
308
- self.batch_size = batch_size
309
- self.boundaries = boundaries
310
-
311
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
312
- self.total_size = sum(self.num_samples_per_bucket)
313
- self.num_samples = self.total_size // self.num_replicas
314
-
315
- def _create_buckets(self):
316
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
317
- for i in range(len(self.lengths)):
318
- length = self.lengths[i]
319
- idx_bucket = self._bisect(length)
320
- if idx_bucket != -1:
321
- buckets[idx_bucket].append(i)
322
-
323
- for i in range(len(buckets) - 1, 0, -1):
324
- if len(buckets[i]) == 0:
325
- buckets.pop(i)
326
- self.boundaries.pop(i+1)
327
-
328
- num_samples_per_bucket = []
329
- for i in range(len(buckets)):
330
- len_bucket = len(buckets[i])
331
- total_batch_size = self.num_replicas * self.batch_size
332
- rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
333
- num_samples_per_bucket.append(len_bucket + rem)
334
- return buckets, num_samples_per_bucket
335
-
336
- def __iter__(self):
337
- # deterministically shuffle based on epoch
338
- g = torch.Generator()
339
- g.manual_seed(self.epoch)
340
-
341
- indices = []
342
- if self.shuffle:
343
- for bucket in self.buckets:
344
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
345
- else:
346
- for bucket in self.buckets:
347
- indices.append(list(range(len(bucket))))
348
-
349
- batches = []
350
- for i in range(len(self.buckets)):
351
- bucket = self.buckets[i]
352
- len_bucket = len(bucket)
353
- ids_bucket = indices[i]
354
- num_samples_bucket = self.num_samples_per_bucket[i]
355
-
356
- # add extra samples to make it evenly divisible
357
- rem = num_samples_bucket - len_bucket
358
- ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
359
-
360
- # subsample
361
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
362
-
363
- # batching
364
- for j in range(len(ids_bucket) // self.batch_size):
365
- batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]]
366
- batches.append(batch)
367
-
368
- if self.shuffle:
369
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
370
- batches = [batches[i] for i in batch_ids]
371
- self.batches = batches
372
-
373
- assert len(self.batches) * self.batch_size == self.num_samples
374
- return iter(self.batches)
375
-
376
- def _bisect(self, x, lo=0, hi=None):
377
- if hi is None:
378
- hi = len(self.boundaries) - 1
379
-
380
- if hi > lo:
381
- mid = (hi + lo) // 2
382
- if self.boundaries[mid] < x and x <= self.boundaries[mid+1]:
383
- return mid
384
- elif x <= self.boundaries[mid]:
385
- return self._bisect(x, lo, mid)
386
- else:
387
- return self._bisect(x, mid + 1, hi)
388
- else:
389
- return -1
390
-
391
- def __len__(self):
392
- return self.num_samples // self.batch_size
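
For reference, a minimal sketch of how the deleted loaders above were typically wired together. The filelist path, batch size and bucket boundaries are illustrative assumptions, not values taken from this repository, and hparams is assumed to expose the fields read in TextAudioLoader.__init__.

    import torch
    from torch.utils.data import DataLoader

    train_dataset = TextAudioLoader("filelists/train.txt", hparams)   # hypothetical filelist
    collate_fn = TextAudioCollate()
    sampler = DistributedBucketSampler(
        train_dataset, batch_size=16,
        boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],
        num_replicas=1, rank=0, shuffle=True)
    loader = DataLoader(train_dataset, num_workers=4, pin_memory=True,
                        collate_fn=collate_fn, batch_sampler=sampler)
    for text, text_len, spec, spec_len, wav, wav_len in loader:
        break  # padded, length-sorted batch ready for the model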
 
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Engineering Wiki 8da06b3dcf1b4eaaa3e90aa70feefe56.md DELETED
@@ -1 +0,0 @@
1
- # Engineering Wiki
 
 
spaces/AIGC-Audio/AudioGPT/sound_extraction/utils/stft.py DELETED
@@ -1,159 +0,0 @@
1
- import torch
2
- import numpy as np
3
- import torch.nn.functional as F
4
- from torch.autograd import Variable
5
- from scipy.signal import get_window
6
- import librosa.util as librosa_util
7
- from librosa.util import pad_center, tiny
8
- # from audio_processing import window_sumsquare
9
-
10
- def window_sumsquare(window, n_frames, hop_length=512, win_length=1024,
11
- n_fft=1024, dtype=np.float32, norm=None):
12
- """
13
- # from librosa 0.6
14
- Compute the sum-square envelope of a window function at a given hop length.
15
- This is used to estimate modulation effects induced by windowing
16
- observations in short-time fourier transforms.
17
- Parameters
18
- ----------
19
- window : string, tuple, number, callable, or list-like
20
- Window specification, as in `get_window`
21
- n_frames : int > 0
22
- The number of analysis frames
23
- hop_length : int > 0
24
- The number of samples to advance between frames
25
- win_length : [optional]
26
- The length of the window function. By default, this matches `n_fft`.
27
- n_fft : int > 0
28
- The length of each analysis frame.
29
- dtype : np.dtype
30
- The data type of the output
31
- Returns
32
- -------
33
- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
34
- The sum-squared envelope of the window function
35
- """
36
- if win_length is None:
37
- win_length = n_fft
38
-
39
- n = n_fft + hop_length * (n_frames - 1)
40
- x = np.zeros(n, dtype=dtype)
41
-
42
- # Compute the squared window at the desired length
43
- win_sq = get_window(window, win_length, fftbins=True)
44
- win_sq = librosa_util.normalize(win_sq, norm=norm)**2
45
- win_sq = librosa_util.pad_center(win_sq, n_fft)
46
-
47
- # Fill the envelope
48
- for i in range(n_frames):
49
- sample = i * hop_length
50
- x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
51
- return x
52
-
53
- class STFT(torch.nn.Module):
54
- """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
55
- def __init__(self, filter_length=1024, hop_length=512, win_length=1024,
56
- window='hann'):
57
- super(STFT, self).__init__()
58
- self.filter_length = filter_length
59
- self.hop_length = hop_length
60
- self.win_length = win_length
61
- self.window = window
62
- self.forward_transform = None
63
- scale = self.filter_length / self.hop_length
64
- fourier_basis = np.fft.fft(np.eye(self.filter_length))
65
-
66
- cutoff = int((self.filter_length / 2 + 1))
67
- fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
68
- np.imag(fourier_basis[:cutoff, :])])
69
-
70
- forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
71
- inverse_basis = torch.FloatTensor(
72
- np.linalg.pinv(scale * fourier_basis).T[:, None, :])
73
-
74
- if window is not None:
75
- assert(filter_length >= win_length)
76
- # get window and zero center pad it to filter_length
77
- fft_window = get_window(window, win_length, fftbins=True)
78
- fft_window = pad_center(fft_window, filter_length)
79
- fft_window = torch.from_numpy(fft_window).float()
80
-
81
- # window the bases
82
- forward_basis *= fft_window
83
- inverse_basis *= fft_window
84
-
85
- self.register_buffer('forward_basis', forward_basis.float())
86
- self.register_buffer('inverse_basis', inverse_basis.float())
87
-
88
- def transform(self, input_data):
89
- num_batches = input_data.size(0)
90
- num_samples = input_data.size(1)
91
-
92
- self.num_samples = num_samples
93
-
94
- # similar to librosa, reflect-pad the input
95
- input_data = input_data.view(num_batches, 1, num_samples)
96
- input_data = F.pad(
97
- input_data.unsqueeze(1),
98
- (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
99
- mode='reflect')
100
- input_data = input_data.squeeze(1)
101
-
102
- forward_transform = F.conv1d(
103
- input_data,
104
- Variable(self.forward_basis, requires_grad=False),
105
- stride=self.hop_length,
106
- padding=0)
107
-
108
- cutoff = int((self.filter_length / 2) + 1)
109
- real_part = forward_transform[:, :cutoff, :]
110
- imag_part = forward_transform[:, cutoff:, :]
111
-
112
- magnitude = torch.sqrt(real_part**2 + imag_part**2)
113
- phase = torch.autograd.Variable(
114
- torch.atan2(imag_part.data, real_part.data))
115
-
116
- return magnitude, phase # [batch_size, F(513), T(1251)]
117
-
118
- def inverse(self, magnitude, phase):
119
- recombine_magnitude_phase = torch.cat(
120
- [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
121
-
122
- inverse_transform = F.conv_transpose1d(
123
- recombine_magnitude_phase,
124
- Variable(self.inverse_basis, requires_grad=False),
125
- stride=self.hop_length,
126
- padding=0)
127
-
128
- if self.window is not None:
129
- window_sum = window_sumsquare(
130
- self.window, magnitude.size(-1), hop_length=self.hop_length,
131
- win_length=self.win_length, n_fft=self.filter_length,
132
- dtype=np.float32)
133
- # remove modulation effects
134
- approx_nonzero_indices = torch.from_numpy(
135
- np.where(window_sum > tiny(window_sum))[0])
136
- window_sum = torch.autograd.Variable(
137
- torch.from_numpy(window_sum), requires_grad=False)
138
- window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
139
- inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
140
-
141
- # scale by hop ratio
142
- inverse_transform *= float(self.filter_length) / self.hop_length
143
-
144
- inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
145
- inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
146
-
147
- return inverse_transform #[batch_size, 1, sample_num]
148
-
149
- def forward(self, input_data):
150
- self.magnitude, self.phase = self.transform(input_data)
151
- reconstruction = self.inverse(self.magnitude, self.phase)
152
- return reconstruction
153
-
154
- if __name__ == '__main__':
155
- a = torch.randn(4, 320000)
156
- stft = STFT()
157
- mag, phase = stft.transform(a)
158
- # rec_a = stft.inverse(mag, phase)
159
- print(mag.shape)
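
A short round-trip sketch of the STFT class above; the batch size and sample count are arbitrary.

    import torch

    stft = STFT(filter_length=1024, hop_length=512, win_length=1024, window='hann')
    audio = torch.randn(2, 32000)          # [batch, samples]
    mag, phase = stft.transform(audio)     # each [batch, 513, frames]
    recon = stft.inverse(mag, phase)       # [batch, 1, samples], window modulation removed
    # stft(audio) runs the same transform + inverse in a single call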
 
spaces/AIWaves/Debate/src/agents/utils.py DELETED
@@ -1,480 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The AIWaves Inc. team.
3
-
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
- """helper functions for an LLM autonoumous agent"""
17
- import csv
18
- import random
19
- import json
20
- import pandas
21
- import numpy as np
22
- import requests
23
- import torch
24
- from tqdm import tqdm
25
- from text2vec import semantic_search
26
- import re
27
- import datetime
28
- from langchain.document_loaders import UnstructuredFileLoader
29
- from langchain.text_splitter import CharacterTextSplitter
30
- from sentence_transformers import SentenceTransformer
31
- import string
32
- import random
33
- import os
34
- import openai
35
-
36
- embed_model_name = os.environ["Embed_Model"] if "Embed_Model" in os.environ else "text-embedding-ada-002"
37
- if embed_model_name in ["text-embedding-ada-002"]:
38
- pass
39
- else:
40
- embedding_model = SentenceTransformer(
41
- embed_model_name, device=torch.device("cpu")
42
- )
43
-
44
- def get_embedding(sentence):
45
- if embed_model_name in ["text-embedding-ada-002"]:
46
- openai.api_key = os.environ["API_KEY"]
47
- # if "PROXY" in os.environ:
48
- # assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"],"PROXY error,PROXY must be http or socks"
49
- # openai.proxy = os.environ["PROXY"]
50
- if "API_BASE" in os.environ:
51
- openai.api_base = os.environ["API_BASE"]
52
- embedding_model = openai.Embedding
53
- embed = embedding_model.create(
54
- model=embed_model_name,
55
- input=sentence
56
- )
57
- embed = embed["data"][0]["embedding"]
58
- embed = torch.tensor(embed,dtype=torch.float32)
59
- else:
60
- embed = embedding_model.encode(sentence,convert_to_tensor=True)
61
- if len(embed.shape)==1:
62
- embed = embed.unsqueeze(0)
63
- return embed
64
-
65
-
66
- def get_code():
67
- return "".join(random.sample(string.ascii_letters + string.digits, 8))
68
-
69
-
70
- def get_content_between_a_b(start_tag, end_tag, text):
71
- """
72
-
73
- Args:
74
- start_tag (str): start_tag
75
- end_tag (str): end_tag
76
- text (str): complete sentence
77
-
78
- Returns:
79
- str: the content between start_tag and end_tag
80
- """
81
- extracted_text = ""
82
- start_index = text.find(start_tag)
83
- while start_index != -1:
84
- end_index = text.find(end_tag, start_index + len(start_tag))
85
- if end_index != -1:
86
- extracted_text += text[start_index +
87
- len(start_tag):end_index] + " "
88
- start_index = text.find(start_tag, end_index + len(end_tag))
89
- else:
90
- break
91
-
92
- return extracted_text.strip()
93
-
94
-
95
- def extract(text, type):
96
- """extract the content between <type></type>
97
-
98
- Args:
99
- text (str): complete sentence
100
- type (str): tag
101
-
102
- Returns:
103
- str: content between <type></type>
104
- """
105
- target_str = get_content_between_a_b(f"<{type}>", f"</{type}>", text)
106
- return target_str
107
-
108
- def count_files_in_directory(directory):
109
- # Count the files in the given directory
110
- file_count = len([f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))])
111
- return file_count
112
-
113
- def delete_oldest_files(directory, num_to_keep):
114
- # List the files in the directory, sorted by modification time
115
- files = sorted([(f, os.path.getmtime(os.path.join(directory, f))) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))], key=lambda x: x[1])
116
-
117
- # Delete the first num_to_keep files (the oldest ones)
118
- for i in range(min(num_to_keep, len(files))):
119
- file_to_delete = os.path.join(directory, files[i][0])
120
- os.remove(file_to_delete)
121
-
122
- def delete_files_if_exceed_threshold(directory, threshold, num_to_keep):
123
- # Check the file count and trim the directory if it exceeds the threshold
124
- file_count = count_files_in_directory(directory)
125
- if file_count > threshold:
126
- delete_count = file_count - num_to_keep
127
- delete_oldest_files(directory, delete_count)
128
-
129
- def save_logs(log_path, messages, response):
130
- if not os.path.exists(log_path):
131
- os.mkdir(log_path)
132
- delete_files_if_exceed_threshold(log_path, 20, 10)
133
- log_path = log_path if log_path else "logs"
134
- log = {}
135
- log["input"] = messages
136
- log["output"] = response
137
- os.makedirs(log_path, exist_ok=True)
138
- log_file = os.path.join(
139
- log_path,
140
- datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ".json")
141
- with open(log_file, "w", encoding="utf-8") as f:
142
- json.dump(log, f, ensure_ascii=False, indent=2)
143
-
144
-
145
-
146
- def semantic_search_word2vec(query_embedding, kb_embeddings, top_k):
147
- return semantic_search(query_embedding, kb_embeddings, top_k=top_k)
148
-
149
-
150
- def cut_sent(para):
151
- para = re.sub("([。!?\?])([^”’])", r"\1\n\2", para)
152
- para = re.sub("(\.{6})([^”’])", r"\1\n\2", para)
153
- para = re.sub("(\…{2})([^”’])", r"\1\n\2", para)
154
- para = re.sub("([。!?\?][”’])([^,。!?\?])", r"\1\n\2", para)
155
- para = para.rstrip()
156
- pieces = [i for i in para.split("\n") if i]
157
- batch_size = 3
158
- chucks = [
159
- " ".join(pieces[i:i + batch_size])
160
- for i in range(0, len(pieces), batch_size)
161
- ]
162
- return chucks
163
-
164
-
165
- def process_document(file_path):
166
- """
167
- Convert a knowledge-base document into a JSON embedding index.
168
- Args:
169
- file_path: path to the source document; a .csv file is treated as Q&A pairs,
170
- any other file is loaded with UnstructuredFileLoader and split into chunks.
171
- The JSON index is written under temp_database/ and its path is returned.
172
- Json format:
173
- Dict[num, Dict[q:str, a:str, chunk:str, emb:List[float]]]
174
- """
175
- final_dict = {}
176
- count = 0
177
- if file_path.endswith(".csv"):
178
- dataset = pandas.read_csv(file_path)
179
- questions = dataset["question"]
180
- answers = dataset["answer"]
181
- # embedding q+chunk
182
- for q, a in zip(questions, answers):
183
- for text in cut_sent(a):
184
- temp_dict = {}
185
- temp_dict["q"] = q
186
- temp_dict["a"] = a
187
- temp_dict["chunk"] = text
188
- temp_dict["emb"] = get_embedding(q + text).tolist()
189
- final_dict[count] = temp_dict
190
- count += 1
191
- # embedding chunk
192
- for q, a in zip(questions, answers):
193
- for text in cut_sent(a):
194
- temp_dict = {}
195
- temp_dict["q"] = q
196
- temp_dict["a"] = a
197
- temp_dict["chunk"] = text
198
- temp_dict["emb"] = get_embedding(text).tolist()
199
- final_dict[count] = temp_dict
200
- count += 1
201
- # embedding q
202
- for q, a in zip(questions, answers):
203
- temp_dict = {}
204
- temp_dict["q"] = q
205
- temp_dict["a"] = a
206
- temp_dict["chunk"] = a
207
- temp_dict["emb"] = get_embedding(q).tolist()
208
- final_dict[count] = temp_dict
209
- count += 1
210
- # embedding q+a
211
- for q, a in zip(questions, answers):
212
- temp_dict = {}
213
- temp_dict["q"] = q
214
- temp_dict["a"] = a
215
- temp_dict["chunk"] = a
216
- temp_dict["emb"] = get_embedding(q + a).tolist()
217
- final_dict[count] = temp_dict
218
- count += 1
219
- # embedding a
220
- for q, a in zip(questions, answers):
221
- temp_dict = {}
222
- temp_dict["q"] = q
223
- temp_dict["a"] = a
224
- temp_dict["chunk"] = a
225
- temp_dict["emb"] = get_embedding(a).tolist()
226
- final_dict[count] = temp_dict
227
- count += 1
228
- print(f"finish updating {len(final_dict)} data!")
229
- os.makedirs("temp_database", exist_ok=True)
230
- save_path = os.path.join(
231
- "temp_database/",
232
- file_path.split("/")[-1].replace("." + file_path.split(".")[1],
233
- ".json"),
234
- )
235
- print(save_path)
236
- with open(save_path, "w") as f:
237
- json.dump(final_dict, f, ensure_ascii=False, indent=2)
238
- return {"knowledge_base": save_path, "type": "QA"}
239
- else:
240
- loader = UnstructuredFileLoader(file_path)
241
- docs = loader.load()
242
- text_splitter = CharacterTextSplitter(chunk_size=200,
243
- chunk_overlap=100)
244
- docs = text_splitter.split_text(docs[0].page_content)
245
- os.makedirs("temp_database", exist_ok=True)
246
- save_path = os.path.join(
247
- "temp_database/",
248
- file_path.replace("." + file_path.split(".")[1], ".json"))
249
- final_dict = {}
250
- count = 0
251
- for c in tqdm(docs):
252
- temp_dict = {}
253
- temp_dict["chunk"] = c
254
- temp_dict["emb"] = get_embedding(c).tolist()
255
- final_dict[count] = temp_dict
256
- count += 1
257
- print(f"finish updating {len(final_dict)} data!")
258
- with open(save_path, "w") as f:
259
- json.dump(final_dict, f, ensure_ascii=False, indent=2)
260
- return {"knowledge_base": save_path, "type": "UnstructuredFile"}
261
-
262
- def load_knowledge_base_qa(path):
263
- """
264
- Load json format knowledge base.
265
- """
266
- print("path", path)
267
- with open(path, "r") as f:
268
- data = json.load(f)
269
- embeddings = []
270
- questions = []
271
- answers = []
272
- chunks = []
273
- for idx in range(len(data.keys())):
274
- embeddings.append(data[str(idx)]["emb"])
275
- questions.append(data[str(idx)]["q"])
276
- answers.append(data[str(idx)]["a"])
277
- chunks.append(data[str(idx)]["chunk"])
278
- embeddings = np.array(embeddings, dtype=np.float32)
279
- embeddings = torch.from_numpy(embeddings).squeeze()
280
- return embeddings, questions, answers, chunks
281
-
282
-
283
- def load_knowledge_base_UnstructuredFile(path):
284
- """
285
- Load json format knowledge base.
286
- """
287
- with open(path, "r") as f:
288
- data = json.load(f)
289
- embeddings = []
290
- chunks = []
291
- for idx in range(len(data.keys())):
292
- embeddings.append(data[str(idx)]["emb"])
293
- chunks.append(data[str(idx)]["chunk"])
294
- embeddings = np.array(embeddings, dtype=np.float32)
295
- embeddings = torch.from_numpy(embeddings).squeeze()
296
- return embeddings, chunks
297
-
298
-
299
- def cos_sim(a: torch.Tensor, b: torch.Tensor):
300
- """
301
- Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
302
- :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
303
- """
304
- if not isinstance(a, torch.Tensor):
305
- a = torch.tensor(a)
306
-
307
- if not isinstance(b, torch.Tensor):
308
- b = torch.tensor(b)
309
-
310
- if len(a.shape) == 1:
311
- a = a.unsqueeze(0)
312
-
313
- if len(b.shape) == 1:
314
- b = b.unsqueeze(0)
315
-
316
- a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
317
- b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
318
- return torch.mm(a_norm, b_norm.transpose(0, 1))
319
-
320
-
321
- def matching_a_b(a, b, requirements=None):
322
- a_embedder = get_embedding(a)
323
- # get the embedding for b
324
- b_embeder = get_embedding(b)
325
- sim_scores = cos_sim(a_embedder, b_embeder)[0]
326
- return sim_scores
327
-
328
-
329
- def matching_category(inputtext,
330
- forest_name,
331
- requirements=None,
332
- cat_embedder=None,
333
- top_k=3):
334
- """
335
- Args:
336
- inputtext: the category name to be matched
337
- forest: search tree
338
- top_k: the default three highest scoring results
339
- Return:
340
- topk matching_result. List[List] [[top1_name,top2_name,top3_name],[top1_score,top2_score,top3_score]]
341
- """
342
-
343
- sim_scores = torch.zeros([100])
344
- if inputtext:
345
- input_embeder = get_embedding(inputtext)
346
- sim_scores = cos_sim(input_embeder, cat_embedder)[0]
347
-
348
- if requirements:
349
- requirements = requirements.split(" ")
350
- requirements_embedder = get_embedding(requirements)
351
- req_scores = cos_sim(requirements_embedder, cat_embedder)
352
- req_scores = torch.mean(req_scores, dim=0)
353
- total_scores = req_scores
354
- else:
355
- total_scores = sim_scores
356
-
357
- top_k_cat = torch.topk(total_scores, k=top_k)
358
- top_k_score, top_k_idx = top_k_cat[0], top_k_cat[1]
359
- top_k_name = [forest_name[top_k_idx[i]] for i in range(0, top_k)]
360
-
361
- return [top_k_name, top_k_score.tolist(), top_k_idx]
362
-
363
-
364
- def sample_with_order_preserved(lst, num):
365
- """Randomly sample from the list while maintaining the original order."""
366
- indices = list(range(len(lst)))
367
- sampled_indices = random.sample(indices, num)
368
- sampled_indices.sort() # keep the original order
369
- return [lst[i] for i in sampled_indices]
370
-
371
-
372
- def limit_values(data, max_values):
373
- """Reduce each key-value list in the dictionary to the specified size, keeping the order of the original list unchanged."""
374
- for key, values in data.items():
375
- if len(values) > max_values:
376
- data[key] = sample_with_order_preserved(values, max_values)
377
- return data
378
-
379
-
380
- def limit_keys(data, max_keys):
381
- """Reduce the dictionary to the specified number of keys."""
382
- keys = list(data.keys())
383
- if len(keys) > max_keys:
384
- keys = sample_with_order_preserved(keys, max_keys)
385
- data = {key: data[key] for key in keys}
386
- return data
387
-
388
-
389
- def flatten_dict(nested_dict):
390
- """
391
- flatten the dictionary
392
- """
393
- flattened_dict = {}
394
- for key, value in nested_dict.items():
395
- if isinstance(value, dict):
396
- flattened_subdict = flatten_dict(value)
397
- flattened_dict.update(flattened_subdict)
398
- else:
399
- flattened_dict[key] = value
400
- return flattened_dict
401
-
402
-
403
- def merge_list(list1, list2):
404
- for l in list2:
405
- if l not in list1:
406
- list1.append(l)
407
- return list1
408
-
409
-
410
- def Search_Engines(req):
411
- FETSIZE = eval(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5
412
-
413
- new_dict = {"keyword": req, "catLeafName": "", "fetchSize": FETSIZE}
414
- url = os.environ["SHOPPING_SEARCH"]
415
- res = requests.post(
416
- url= url,
417
- json=new_dict,
418
- )
419
- user_dict = json.loads(res.text)
420
- if "data" in user_dict.keys():
421
- request_items = user_dict["data"]["items"] # JSON with the retrieved product items
422
- top_category = user_dict["data"]["topCategories"]
423
- return request_items, top_category
424
- else:
425
- return []
426
-
427
-
428
- def search_with_api(requirements, categery):
429
-
430
- FETSIZE = eval(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5
431
-
432
- request_items = []
433
- all_req_list = requirements.split(" ")
434
- count = 0
435
-
436
- while len(request_items) < FETSIZE and len(all_req_list) > 0:
437
- if count:
438
- all_req_list.pop(0)
439
- all_req = (" ").join(all_req_list)
440
- if categery not in all_req_list:
441
- all_req = all_req + " " + categery
442
- now_request_items, top_category = Search_Engines(all_req)
443
- request_items = merge_list(request_items, now_request_items)
444
- count += 1
445
- new_top = []
446
- for category in top_category:
447
- if "其它" in category or "其它" in category:
448
- continue
449
- else:
450
- new_top.append(category)
451
- if len(request_items) > FETSIZE:
452
- request_items = request_items[:FETSIZE]
453
- return request_items, new_top
454
-
455
-
456
-
457
- def get_relevant_history(query,history,embeddings):
458
- """
459
- Retrieve a list of key history entries based on a query using semantic search.
460
-
461
- Args:
462
- query (str): The input query for which key history is to be retrieved.
463
- history (list): A list of historical key entries.
464
- embeddings (numpy.ndarray): An array of embedding vectors for historical entries.
465
-
466
- Returns:
467
- list: A list of key history entries most similar to the query.
468
- """
469
- TOP_K = eval(os.environ["TOP_K"]) if "TOP_K" in os.environ else 2
470
- relevant_history = []
471
- query_embedding = get_embedding(query)
472
- hits = semantic_search(query_embedding, embeddings, top_k=min(TOP_K,embeddings.shape[0]))
473
- hits = hits[0]
474
- for hit in hits:
475
- matching_idx = hit["corpus_id"]
476
- try:
477
- relevant_history.append(history[matching_idx])
478
- except:
479
- return []
480
- return relevant_history
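
A minimal sketch of the retrieval helpers above. The history strings are made up, and get_embedding only works once the Embed_Model / API_KEY environment variables handled at the top of this file are set.

    import torch

    history = ["The user asked about the refund policy.",
               "The user asked when the order will ship."]
    embeddings = torch.cat([get_embedding(h) for h in history], dim=0)
    relevant = get_relevant_history("How do I return an item?", history, embeddings)
    print(relevant)  # up to TOP_K entries most similar to the query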
 
spaces/AIZeroToHero/Video-Automatic-Speech-Recognition/app.py DELETED
@@ -1,119 +0,0 @@
1
- from collections import deque
2
- import streamlit as st
3
- import torch
4
- from streamlit_player import st_player
5
- from transformers import AutoModelForCTC, Wav2Vec2Processor
6
- from streaming import ffmpeg_stream
7
-
8
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
9
- player_options = {
10
- "events": ["onProgress"],
11
- "progress_interval": 200,
12
- "volume": 1.0,
13
- "playing": True,
14
- "loop": False,
15
- "controls": False,
16
- "muted": False,
17
- "config": {"youtube": {"playerVars": {"start": 1}}},
18
- }
19
-
20
- # disable rapid fading in and out on `st.code` updates
21
- st.markdown("<style>.element-container{opacity:1 !important}</style>", unsafe_allow_html=True)
22
-
23
- @st.cache(hash_funcs={torch.nn.parameter.Parameter: lambda _: None})
24
- def load_model(model_path="facebook/wav2vec2-large-robust-ft-swbd-300h"):
25
- processor = Wav2Vec2Processor.from_pretrained(model_path)
26
- model = AutoModelForCTC.from_pretrained(model_path).to(device)
27
- return processor, model
28
-
29
- processor, model = load_model()
30
-
31
- def stream_text(url, chunk_duration_ms, pad_duration_ms):
32
- sampling_rate = processor.feature_extractor.sampling_rate
33
-
34
- # calculate the length of logits to cut from the sides of the output to account for input padding
35
- output_pad_len = model._get_feat_extract_output_lengths(int(sampling_rate * pad_duration_ms / 1000))
36
-
37
- # define the audio chunk generator
38
- stream = ffmpeg_stream(url, sampling_rate, chunk_duration_ms=chunk_duration_ms, pad_duration_ms=pad_duration_ms)
39
-
40
- leftover_text = ""
41
- for i, chunk in enumerate(stream):
42
- input_values = processor(chunk, sampling_rate=sampling_rate, return_tensors="pt").input_values
43
-
44
- with torch.no_grad():
45
- logits = model(input_values.to(device)).logits[0]
46
- if i > 0:
47
- logits = logits[output_pad_len : len(logits) - output_pad_len]
48
- else: # don't count padding at the start of the clip
49
- logits = logits[: len(logits) - output_pad_len]
50
-
51
- predicted_ids = torch.argmax(logits, dim=-1).cpu().tolist()
52
- if processor.decode(predicted_ids).strip():
53
- leftover_ids = processor.tokenizer.encode(leftover_text)
54
- # concat the last word (or its part) from the last frame with the current text
55
- text = processor.decode(leftover_ids + predicted_ids)
56
- # don't return the last word in case it's just partially recognized
57
- text, leftover_text = text.rsplit(" ", 1)
58
- yield text
59
- else:
60
- yield leftover_text
61
- leftover_text = ""
62
- yield leftover_text
63
-
64
- def main():
65
- state = st.session_state
66
- st.header("Video ASR Streamlit from Youtube Link")
67
-
68
- with st.form(key="inputs_form"):
69
-
70
- # Our world's best teachers on AI, cognitive science, and neuroscience for behavioral and medical health
71
- ytJoschaBach="https://youtu.be/cC1HszE5Hcw?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=8984"
72
- ytSamHarris="https://www.youtube.com/watch?v=4dC_nRYIDZU&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=2"
73
- ytJohnAbramson="https://www.youtube.com/watch?v=arrokG3wCdE&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=3"
74
- ytElonMusk="https://www.youtube.com/watch?v=DxREm3s1scA&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=4"
75
- ytJeffreyShainline="https://www.youtube.com/watch?v=EwueqdgIvq4&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=5"
76
- ytJeffHawkins="https://www.youtube.com/watch?v=Z1KwkpTUbkg&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=6"
77
- ytSamHarris="https://youtu.be/Ui38ZzTymDY?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L"
78
- ytSamHarris="https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809"
79
- ytSamHarris="https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809"
80
- ytSamHarris="https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809"
81
- ytTimelapseAI="https://www.youtube.com/watch?v=63yr9dlI0cU&list=PLHgX2IExbFovQybyfltywXnqZi5YvaSS-"
82
- state.youtube_url = st.text_input("YouTube URL", ytTimelapseAI)
83
-
84
-
85
- state.chunk_duration_ms = st.slider("Audio chunk duration (ms)", 2000, 10000, 3000, 100)
86
- state.pad_duration_ms = st.slider("Padding duration (ms)", 100, 5000, 1000, 100)
87
- submit_button = st.form_submit_button(label="Submit")
88
-
89
- if submit_button or "asr_stream" not in state:
90
- # a hack to update the video player on value changes
91
- state.youtube_url = (
92
- state.youtube_url.split("&hash=")[0]
93
- + f"&hash={state.chunk_duration_ms}-{state.pad_duration_ms}"
94
- )
95
- state.asr_stream = stream_text(
96
- state.youtube_url, state.chunk_duration_ms, state.pad_duration_ms
97
- )
98
- state.chunks_taken = 0
99
-
100
-
101
- state.lines = deque([], maxlen=100) # limit to the last n lines of subs
102
-
103
-
104
- player = st_player(state.youtube_url, **player_options, key="youtube_player")
105
-
106
- if "asr_stream" in state and player.data and player.data["played"] < 1.0:
107
- # check how many seconds were played, and if more than processed - write the next text chunk
108
- processed_seconds = state.chunks_taken * (state.chunk_duration_ms / 1000)
109
- if processed_seconds < player.data["playedSeconds"]:
110
- text = next(state.asr_stream)
111
- state.lines.append(text)
112
- state.chunks_taken += 1
113
- if "lines" in state:
114
- # print the lines of subs
115
- st.code("\n".join(state.lines))
116
-
117
-
118
- if __name__ == "__main__":
119
- main()
 
spaces/AIatUIUC/CodeLATS/generators/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- from .py_generate import PyGenerator
2
- from .factory import generator_factory, model_factory
3
- from .model import ModelBase, GPT4, GPT35
 
spaces/ALSv/FSW/roop/face_analyser.py DELETED
@@ -1,53 +0,0 @@
1
- import threading
2
- from typing import Any, Optional, List
3
- import insightface
4
- import numpy
5
-
6
- import roop.globals
7
- from roop.typing import Frame, Face
8
-
9
- FACE_ANALYSER = None
10
- THREAD_LOCK = threading.Lock()
11
-
12
-
13
- def get_face_analyser() -> Any:
14
- global FACE_ANALYSER
15
-
16
- with THREAD_LOCK:
17
- if FACE_ANALYSER is None:
18
- FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
19
- FACE_ANALYSER.prepare(ctx_id=0)
20
- return FACE_ANALYSER
21
-
22
-
23
- def clear_face_analyser() -> Any:
24
- global FACE_ANALYSER
25
-
26
- FACE_ANALYSER = None
27
-
28
-
29
-
30
- def get_one_face(frame: Frame) -> Any:
31
- face = get_face_analyser().get(frame)
32
- try:
33
- return min(face, key=lambda x: x.bbox[0])
34
- except ValueError:
35
- return None
36
-
37
-
38
- def get_many_faces(frame: Frame) -> Optional[List[Face]]:
39
- try:
40
- return get_face_analyser().get(frame)
41
- except ValueError:
42
- return None
43
-
44
-
45
- def find_similar_face(frame: Frame, reference_face: Face) -> Optional[Face]:
46
- many_faces = get_many_faces(frame)
47
- if many_faces:
48
- for face in many_faces:
49
- if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
50
- distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
51
- if distance < roop.globals.similar_face_distance:
52
- return face
53
- return None
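
A small usage sketch for the face helpers above. The image paths are hypothetical, and roop.globals is assumed to already carry the execution providers and similarity threshold.

    import cv2

    source = cv2.imread("source.jpg")
    target = cv2.imread("target.jpg")
    reference = get_one_face(source)           # leftmost detected face, or None
    if reference is not None:
        match = find_similar_face(target, reference)
        print("similar face found" if match is not None else "no similar face")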
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/actions/snapScrollToBottom.ts DELETED
@@ -1,54 +0,0 @@
1
- import { navigating } from "$app/stores";
2
- import { tick } from "svelte";
3
- import { get } from "svelte/store";
4
-
5
- const detachedOffset = 10;
6
-
7
- /**
8
- * @param node element to snap scroll to bottom
9
- * @param dependency pass in a dependency to update scroll on changes.
10
- */
11
- export const snapScrollToBottom = (node: HTMLElement, dependency: unknown) => {
12
- let prevScrollValue = node.scrollTop;
13
- let isDetached = false;
14
-
15
- const handleScroll = () => {
16
- // if user scrolled up, we detach
17
- if (node.scrollTop < prevScrollValue) {
18
- isDetached = true;
19
- }
20
-
21
- // if user scrolled back to within 10px of bottom, we reattach
22
- if (node.scrollTop - (node.scrollHeight - node.clientHeight) >= -detachedOffset) {
23
- isDetached = false;
24
- }
25
-
26
- prevScrollValue = node.scrollTop;
27
- };
28
-
29
- const updateScroll = async (_options: { force?: boolean } = {}) => {
30
- const defaultOptions = { force: false };
31
- const options = { ...defaultOptions, ..._options };
32
- const { force } = options;
33
-
34
- if (!force && isDetached && !get(navigating)) return;
35
-
36
- // wait for next tick to ensure that the DOM is updated
37
- await tick();
38
-
39
- node.scrollTo({ top: node.scrollHeight });
40
- };
41
-
42
- node.addEventListener("scroll", handleScroll);
43
-
44
- if (dependency) {
45
- updateScroll({ force: true });
46
- }
47
-
48
- return {
49
- update: updateScroll,
50
- destroy: () => {
51
- node.removeEventListener("scroll", handleScroll);
52
- },
53
- };
54
- };
 
spaces/AchyuthGamer/OpenGPT/client/css/checkbox.css DELETED
@@ -1,55 +0,0 @@
1
- .checkbox input {
2
- height: 0;
3
- width: 0;
4
- display: none;
5
- }
6
-
7
- .checkbox span {
8
- font-size: 0.875rem;
9
- color: var(--colour-2);
10
- margin-left: 4px;
11
- }
12
-
13
- .checkbox label:after {
14
- content: "";
15
- position: absolute;
16
- top: 50%;
17
- transform: translateY(-50%);
18
- left: 5px;
19
- width: 20px;
20
- height: 20px;
21
- background: var(--blur-border);
22
- border-radius: 90px;
23
- transition: 0.33s;
24
- }
25
-
26
- .checkbox input + label:after,
27
- .checkbox input:checked + label {
28
- background: var(--colour-3);
29
- }
30
-
31
- .checkbox input + label,
32
- .checkbox input:checked + label:after {
33
- background: var(--blur-border);
34
- }
35
-
36
- .checkbox input:checked + label:after {
37
- left: calc(100% - 5px - 20px);
38
- }
39
-
40
- @media screen and (max-width: 990px) {
41
- .checkbox label {
42
- width: 25px;
43
- height: 15px;
44
- }
45
-
46
- .checkbox label:after {
47
- left: 2px;
48
- width: 10px;
49
- height: 10px;
50
- }
51
-
52
- .checkbox input:checked + label:after {
53
- left: calc(100% - 2px - 10px);
54
- }
55
- }
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/EasyChat.py DELETED
@@ -1,111 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- import random
5
-
6
- import requests
7
-
8
- from ...typing import Any, CreateResult
9
- from ..base_provider import BaseProvider
10
-
11
-
12
- class EasyChat(BaseProvider):
13
- url: str = "https://free.easychat.work"
14
- supports_stream = True
15
- supports_gpt_35_turbo = True
16
- working = False
17
-
18
- @staticmethod
19
- def create_completion(
20
- model: str,
21
- messages: list[dict[str, str]],
22
- stream: bool, **kwargs: Any) -> CreateResult:
23
-
24
- active_servers = [
25
- "https://chat10.fastgpt.me",
26
- "https://chat9.fastgpt.me",
27
- "https://chat1.fastgpt.me",
28
- "https://chat2.fastgpt.me",
29
- "https://chat3.fastgpt.me",
30
- "https://chat4.fastgpt.me",
31
- "https://gxos1h1ddt.fastgpt.me"
32
- ]
33
-
34
- server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
35
- headers = {
36
- "authority" : f"{server}".replace("https://", ""),
37
- "accept" : "text/event-stream",
38
- "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
39
- "content-type" : "application/json",
40
- "origin" : f"{server}",
41
- "referer" : f"{server}/",
42
- "x-requested-with" : "XMLHttpRequest",
43
- 'plugins' : '0',
44
- 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
45
- 'sec-ch-ua-mobile' : '?0',
46
- 'sec-ch-ua-platform': '"Windows"',
47
- 'sec-fetch-dest' : 'empty',
48
- 'sec-fetch-mode' : 'cors',
49
- 'sec-fetch-site' : 'same-origin',
50
- 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
51
- 'usesearch' : 'false',
52
- 'x-requested-with' : 'XMLHttpRequest'
53
- }
54
-
55
- json_data = {
56
- "messages" : messages,
57
- "stream" : stream,
58
- "model" : model,
59
- "temperature" : kwargs.get("temperature", 0.5),
60
- "presence_penalty" : kwargs.get("presence_penalty", 0),
61
- "frequency_penalty" : kwargs.get("frequency_penalty", 0),
62
- "top_p" : kwargs.get("top_p", 1)
63
- }
64
-
65
- session = requests.Session()
66
- # init cookies from server
67
- session.get(f"{server}/")
68
-
69
- response = session.post(f"{server}/api/openai/v1/chat/completions",
70
- headers=headers, json=json_data, stream=stream)
71
-
72
- if response.status_code == 200:
73
-
74
- if stream == False:
75
- json_data = response.json()
76
-
77
- if "choices" in json_data:
78
- yield json_data["choices"][0]["message"]["content"]
79
- else:
80
- raise Exception("No response from server")
81
-
82
- else:
83
-
84
- for chunk in response.iter_lines():
85
-
86
- if b"content" in chunk:
87
- splitData = chunk.decode().split("data:")
88
-
89
- if len(splitData) > 1:
90
- yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
91
- else:
92
- continue
93
- else:
94
- raise Exception(f"Error {response.status_code} from server : {response.reason}")
95
-
96
-
97
- @classmethod
98
- @property
99
- def params(cls):
100
- params = [
101
- ("model", "str"),
102
- ("messages", "list[dict[str, str]]"),
103
- ("stream", "bool"),
104
- ("temperature", "float"),
105
- ("presence_penalty", "int"),
106
- ("frequency_penalty", "int"),
107
- ("top_p", "int"),
108
- ("active_server", "int"),
109
- ]
110
- param = ", ".join([": ".join(p) for p in params])
111
- return f"g4f.provider.{cls.__name__} supports: ({param})"
 
spaces/Adapter/T2I-Adapter/app.py DELETED
@@ -1,483 +0,0 @@
1
- # demo inspired by https://huggingface.co/spaces/lambdalabs/image-mixer-demo
2
- import argparse
3
- import copy
4
- import os
5
- import shlex
6
- import subprocess
7
- from functools import partial
8
- from itertools import chain
9
-
10
- import cv2
11
- import gradio as gr
12
- import torch
13
- from basicsr.utils import tensor2img
14
- from huggingface_hub import hf_hub_url
15
- from pytorch_lightning import seed_everything
16
- from torch import autocast
17
-
18
- from ldm.inference_base import (DEFAULT_NEGATIVE_PROMPT, diffusion_inference, get_adapters, get_sd_models)
19
- from ldm.modules.extra_condition import api
20
- from ldm.modules.extra_condition.api import (ExtraCondition, get_adapter_feature, get_cond_model)
21
- import numpy as np
22
- from ldm.util import read_state_dict
23
-
24
- torch.set_grad_enabled(False)
25
-
26
- supported_cond_map = ['style', 'color', 'sketch', 'openpose', 'depth', 'canny']
27
- supported_cond = ['style', 'color', 'sketch', 'sketch', 'openpose', 'depth', 'canny']
28
- draw_map = gr.Interface(lambda x: x, gr.Image(source="canvas"), gr.Image())
29
-
30
- # download the checkpoints
31
- urls = {
32
- 'TencentARC/T2I-Adapter': [
33
- 'models/t2iadapter_keypose_sd14v1.pth', 'models/t2iadapter_color_sd14v1.pth',
34
- 'models/t2iadapter_openpose_sd14v1.pth', 'models/t2iadapter_seg_sd14v1.pth',
35
- 'models/t2iadapter_sketch_sd14v1.pth', 'models/t2iadapter_depth_sd14v1.pth',
36
- 'third-party-models/body_pose_model.pth', "models/t2iadapter_style_sd14v1.pth",
37
- "models/t2iadapter_canny_sd14v1.pth", 'third-party-models/table5_pidinet.pth',
38
- "models/t2iadapter_canny_sd15v2.pth", "models/t2iadapter_depth_sd15v2.pth",
39
- "models/t2iadapter_sketch_sd15v2.pth"
40
- ],
41
- 'runwayml/stable-diffusion-v1-5': ['v1-5-pruned-emaonly.ckpt'],
42
- 'CompVis/stable-diffusion-v-1-4-original':['sd-v1-4.ckpt'],
43
- 'andite/anything-v4.0': ['anything-v4.0-pruned.ckpt', 'anything-v4.0.vae.pt'],
44
- }
45
-
46
- # download image samples
47
- torch.hub.download_url_to_file(
48
- 'https://user-images.githubusercontent.com/52127135/223114920-cae3e723-3683-424a-bebc-0875479f2409.jpg',
49
- 'cyber_style.jpg')
50
- torch.hub.download_url_to_file(
51
- 'https://user-images.githubusercontent.com/52127135/223114946-6ccc127f-cb58-443e-8677-805f5dbaf6f1.png',
52
- 'sword.png')
53
- torch.hub.download_url_to_file(
54
- 'https://user-images.githubusercontent.com/52127135/223121793-20c2ac6a-5a4f-4ff8-88ea-6d007a7959dd.png',
55
- 'white.png')
56
- torch.hub.download_url_to_file(
57
- 'https://user-images.githubusercontent.com/52127135/223127404-4a3748cf-85a6-40f3-af31-a74e206db96e.jpeg',
58
- 'scream_style.jpeg')
59
- torch.hub.download_url_to_file(
60
- 'https://user-images.githubusercontent.com/52127135/223127433-8768913f-9872-4d24-b883-a19a3eb20623.jpg',
61
- 'motorcycle.jpg')
62
-
63
- if os.path.exists('models') == False:
64
- os.mkdir('models')
65
- for repo in urls:
66
- files = urls[repo]
67
- for file in files:
68
- url = hf_hub_url(repo, file)
69
- name_ckp = url.split('/')[-1]
70
- save_path = os.path.join('models', name_ckp)
71
- if os.path.exists(save_path) == False:
72
- subprocess.run(shlex.split(f'wget {url} -O {save_path}'))
73
-
74
- # config
75
- parser = argparse.ArgumentParser()
76
- parser.add_argument(
77
- '--sd_ckpt',
78
- type=str,
79
- default='models/v1-5-pruned-emaonly.ckpt',
80
- help='path to checkpoint of stable diffusion model, both .ckpt and .safetensor are supported',
81
- )
82
- parser.add_argument(
83
- '--vae_ckpt',
84
- type=str,
85
- default=None,
86
- help='vae checkpoint, anime SD models usually have seperate vae ckpt that need to be loaded',
87
- )
88
- global_opt = parser.parse_args()
89
- global_opt.config = 'configs/stable-diffusion/sd-v1-inference.yaml'
90
- for cond_name in supported_cond:
91
- if cond_name in ['sketch', 'depth', 'canny']:
92
- setattr(global_opt, f'{cond_name}_adapter_ckpt', f'models/t2iadapter_{cond_name}_sd15v2.pth')
93
- else:
94
- setattr(global_opt, f'{cond_name}_adapter_ckpt', f'models/t2iadapter_{cond_name}_sd14v1.pth')
95
- global_opt.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
96
- global_opt.max_resolution = 512 * 512
97
- global_opt.sampler = 'ddim'
98
- global_opt.cond_weight = 1.0
99
- global_opt.C = 4
100
- global_opt.f = 8
101
- # adapters and models to processing condition inputs
102
- adapters = {}
103
- cond_models = {}
104
- torch.cuda.empty_cache()
105
-
106
-
107
- def draw_transfer(im1):
108
- c = im1[:, :, 0:3].astype(np.float32)
109
- a = im1[:, :, 3:4].astype(np.float32) / 255.0
110
- im1 = c * a + 255.0 * (1.0 - a)
111
- im1 = (im1.clip(0, 255)).astype(np.uint8)
112
-
113
- return im1
114
-
115
- class process:
116
- def __init__(self):
117
- self.base_model = 'v1-5-pruned-emaonly.ckpt'
118
- # stable-diffusion model
119
- self.sd_model, self.sampler = get_sd_models(global_opt)
120
-
121
- def run(self, *args):
122
- opt = copy.deepcopy(global_opt)
123
- opt.prompt, opt.neg_prompt, opt.scale, opt.n_samples, opt.seed, opt.steps, opt.resize_short_edge, opt.cond_tau, opt.base_model \
124
- = args[-9:]
125
- # check base model
126
- if opt.base_model!=self.base_model:
127
- ckpt = os.path.join("models", opt.base_model)
128
- pl_sd = read_state_dict(ckpt)
129
- if "state_dict" in pl_sd:
130
- pl_sd = pl_sd["state_dict"]
131
- else:
132
- pl_sd = pl_sd
133
- self.sd_model.load_state_dict(pl_sd, strict=False)
134
- del pl_sd
135
- self.base_model = opt.base_model
136
- if self.base_model!='v1-5-pruned-emaonly.ckpt' and self.base_model!='sd-v1-4.ckpt':
137
- vae_sd = torch.load(os.path.join('models', 'anything-v4.0.vae.pt'), map_location="cuda")
138
- st = vae_sd["state_dict"]
139
- self.sd_model.first_stage_model.load_state_dict(st, strict=False)
140
- del st
141
-
142
- with torch.inference_mode(), \
143
- self.sd_model.ema_scope(), \
144
- autocast('cuda'):
145
-
146
- inps = []
147
- for i in range(0, len(args) - 9, len(supported_cond)):
148
- inps.append(args[i:i + len(supported_cond)])
149
-
150
- conds = []
151
- activated_conds = []
152
-
153
- ims1 = []
154
- ims2 = []
155
- for idx, (b, im1, im2, cond_weight) in enumerate(zip(*inps)):
156
- if b != 'Nothing' and (im1 is not None or im2 is not None):
157
- if im1 is not None and isinstance(im1,dict):
158
- im1 = im1['mask']
159
- im1 = draw_transfer(im1)
160
-
161
- if im1 is not None:
162
- h, w, _ = im1.shape
163
- else:
164
- h, w, _ = im2.shape
165
-
166
- # resize all the images to the same size
167
- for idx, (b, im1, im2, cond_weight) in enumerate(zip(*inps)):
168
- if idx == 0:
169
- ims1.append(im1)
170
- ims2.append(im2)
171
- continue
172
- if b != 'Nothing':
173
- if im1 is not None and isinstance(im1,dict):
174
- im1 = im1['mask']
175
- im1 = draw_transfer(im1)
176
- im2 = im1
177
- cv2.imwrite('sketch.png', im1)
178
- if im1 is not None:
179
- im1 = cv2.resize(im1, (w, h), interpolation=cv2.INTER_CUBIC)
180
- if im2 is not None:
181
- im2 = cv2.resize(im2, (w, h), interpolation=cv2.INTER_CUBIC)
182
- ims1.append(im1)
183
- ims2.append(im2)
184
-
185
- for idx, (b, _, _, cond_weight) in enumerate(zip(*inps)):
186
- cond_name = supported_cond[idx]
187
- if b == 'Nothing':
188
- if cond_name in adapters:
189
- adapters[cond_name]['model'] = adapters[cond_name]['model'].to(opt.device)#.cpu()
190
- else:
191
- # print(idx,b)
192
- activated_conds.append(cond_name)
193
- if cond_name in adapters:
194
- adapters[cond_name]['model'] = adapters[cond_name]['model'].to(opt.device)
195
- else:
196
- adapters[cond_name] = get_adapters(opt, getattr(ExtraCondition, cond_name))
197
- adapters[cond_name]['cond_weight'] = cond_weight
198
-
199
- process_cond_module = getattr(api, f'get_cond_{cond_name}')
200
-
201
- if b == 'Image':
202
- if cond_name not in cond_models:
203
- cond_models[cond_name] = get_cond_model(opt, getattr(ExtraCondition, cond_name))
204
- conds.append(process_cond_module(opt, ims1[idx], 'image', cond_models[cond_name]))
205
- else:
206
- if idx == 2: # draw
207
- conds.append(process_cond_module(opt, (255.-ims2[idx]).astype(np.uint8), cond_name, None))
208
- else:
209
- conds.append(process_cond_module(opt, ims2[idx], cond_name, None))
210
-
211
- adapter_features, append_to_context = get_adapter_feature(
212
- conds, [adapters[cond_name] for cond_name in activated_conds])
213
-
214
- output_conds = []
215
- for cond in conds:
216
- output_conds.append(tensor2img(cond, rgb2bgr=False))
217
-
218
- ims = []
219
- seed_everything(opt.seed)
220
- for _ in range(opt.n_samples):
221
- result = diffusion_inference(opt, self.sd_model, self.sampler, adapter_features, append_to_context)
222
- ims.append(tensor2img(result, rgb2bgr=False))
223
-
224
- # Clear GPU memory cache so we are less likely to OOM
225
- torch.cuda.empty_cache()
226
- return ims, output_conds
227
-
228
-
229
- def change_visible(im1, im2, val):
230
- outputs = {}
231
- if val == "Image":
232
- outputs[im1] = gr.update(visible=True)
233
- outputs[im2] = gr.update(visible=False)
234
- elif val == "Nothing":
235
- outputs[im1] = gr.update(visible=False)
236
- outputs[im2] = gr.update(visible=False)
237
- else:
238
- outputs[im1] = gr.update(visible=False)
239
- outputs[im2] = gr.update(visible=True)
240
- return outputs
241
-
242
- DESCRIPTION = '# [T2I-Adapter](https://github.com/TencentARC/T2I-Adapter)'
243
-
244
- DESCRIPTION += f'<p>Gradio demo for **T2I-Adapter**: [[GitHub]](https://github.com/TencentARC/T2I-Adapter), [[Paper]](https://arxiv.org/abs/2302.08453). If T2I-Adapter is helpful, please help to ⭐ the [Github Repo](https://github.com/TencentARC/T2I-Adapter) and recommend it to your friends 😊 </p>'
245
-
246
- DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/Adapter/T2I-Adapter?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
247
-
248
- processer = process()
249
-
250
- with gr.Blocks(css='style.css') as demo:
251
- gr.Markdown(DESCRIPTION)
252
-
253
- btns = []
254
- ims1 = []
255
- ims2 = []
256
- cond_weights = []
257
-
258
- with gr.Row():
259
- with gr.Column(scale=1.9):
260
- with gr.Box():
261
- gr.Markdown("<h5><center>Style & Color</center></h5>")
262
- with gr.Row():
263
- for cond_name in supported_cond_map[:2]:
264
- with gr.Box():
265
- with gr.Column():
266
- if cond_name == 'style':
267
- btn1 = gr.Radio(
268
- choices=["Image", "Nothing"],
269
- label=f"Input type for {cond_name}",
270
- interactive=True,
271
- value="Nothing",
272
- )
273
- else:
274
- btn1 = gr.Radio(
275
- choices=["Image", cond_name, "Nothing"],
276
- label=f"Input type for {cond_name}",
277
- interactive=True,
278
- value="Nothing",
279
- )
280
-
281
- im1 = gr.Image(
282
- source='upload', label="Image", interactive=True, visible=False, type="numpy")
283
- im2 = gr.Image(
284
- source='upload', label=cond_name, interactive=True, visible=False, type="numpy")
285
- cond_weight = gr.Slider(
286
- label="Condition weight",
287
- minimum=0,
288
- maximum=5,
289
- step=0.05,
290
- value=1,
291
- interactive=True)
292
-
293
- fn = partial(change_visible, im1, im2)
294
- btn1.change(fn=fn, inputs=[btn1], outputs=[im1, im2], queue=False)
295
-
296
- btns.append(btn1)
297
- ims1.append(im1)
298
- ims2.append(im2)
299
- cond_weights.append(cond_weight)
300
-
301
- with gr.Box():
302
- gr.Markdown("<h5><center>Drawing</center></h5>")
303
- with gr.Column():
304
- btn1 = gr.Radio(
305
- choices=["Sketch", "Nothing"],
306
- label=f"Input type for drawing",
307
- interactive=True,
308
- value="Nothing")
309
- im1 = gr.Image(source='canvas', tool='color-sketch', label='Pay attention to adjusting stylus thickness!', visible=False)
310
- im2 = im1
311
- cond_weight = gr.Slider(
312
- label="Condition weight",
313
- minimum=0,
314
- maximum=5,
315
- step=0.05,
316
- value=1,
317
- interactive=True)
318
-
319
- fn = partial(change_visible, im1, im2)
320
- btn1.change(fn=fn, inputs=[btn1], outputs=[im1, im2], queue=False)
321
-
322
- btns.append(btn1)
323
- ims1.append(im1)
324
- ims2.append(im2)
325
- cond_weights.append(cond_weight)
326
-
327
- with gr.Column(scale=4):
328
- with gr.Box():
329
- gr.Markdown("<h5><center>Structure</center></h5>")
330
- with gr.Row():
331
- for cond_name in supported_cond_map[2:6]:
332
- with gr.Box():
333
- with gr.Column():
334
- if cond_name == 'openpose':
335
- btn1 = gr.Radio(
336
- choices=["Image", 'pose', "Nothing"],
337
- label=f"Input type for {cond_name}",
338
- interactive=True,
339
- value="Nothing",
340
- )
341
- else:
342
- btn1 = gr.Radio(
343
- choices=["Image", cond_name, "Nothing"],
344
- label=f"Input type for {cond_name}",
345
- interactive=True,
346
- value="Nothing",
347
- )
348
-
349
- im1 = gr.Image(
350
- source='upload', label="Image", interactive=True, visible=False, type="numpy")
351
- im2 = gr.Image(
352
- source='upload', label=cond_name, interactive=True, visible=False, type="numpy")
353
- cond_weight = gr.Slider(
354
- label="Condition weight",
355
- minimum=0,
356
- maximum=5,
357
- step=0.05,
358
- value=1,
359
- interactive=True)
360
-
361
- fn = partial(change_visible, im1, im2)
362
- btn1.change(fn=fn, inputs=[btn1], outputs=[im1, im2], queue=False)
363
- btns.append(btn1)
364
- ims1.append(im1)
365
- ims2.append(im2)
366
- cond_weights.append(cond_weight)
367
-
368
- with gr.Column():
369
- base_model = gr.inputs.Radio(['v1-5-pruned-emaonly.ckpt', 'sd-v1-4.ckpt', 'anything-v4.0-pruned.ckpt'], type="value", default='v1-5-pruned-emaonly.ckpt', label='The base model you want to use. You can try more base models on https://civitai.com/.')
370
- prompt = gr.Textbox(label="Prompt")
371
- with gr.Accordion('Advanced options', open=False):
372
- neg_prompt = gr.Textbox(label="Negative Prompt", value=DEFAULT_NEGATIVE_PROMPT)
373
- scale = gr.Slider(
374
- label="Guidance Scale (Classifier free guidance)", value=7.5, minimum=1, maximum=20, step=0.1)
375
- n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=1, step=1)
376
- seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=10000, step=1, randomize=True)
377
- steps = gr.Slider(label="Steps", value=50, minimum=10, maximum=100, step=1)
378
- resize_short_edge = gr.Slider(label="Image resolution", value=512, minimum=320, maximum=1024, step=1)
379
- cond_tau = gr.Slider(
380
- label="timestep parameter that determines until which step the adapter is applied",
381
- value=1.0,
382
- minimum=0.1,
383
- maximum=1.0,
384
- step=0.05)
385
- submit = gr.Button("Generate")
386
-
387
- with gr.Box():
388
- gr.Markdown("<h5><center>Results</center></h5>")
389
- with gr.Column():
390
- output = gr.Gallery().style(grid=2, height='auto')
391
- cond = gr.Gallery().style(grid=2, height='auto')
392
-
393
- inps = list(chain(btns, ims1, ims2, cond_weights))
394
-
395
- inps.extend([prompt, neg_prompt, scale, n_samples, seed, steps, resize_short_edge, cond_tau, base_model])
396
- submit.click(fn=processer.run, inputs=inps, outputs=[output, cond])
397
-
398
- ex = gr.Examples([
399
- [
400
- "Image",
401
- "Nothing",
402
- "Nothing",
403
- "Image",
404
- "Nothing",
405
- "Nothing",
406
- "Nothing",
407
- "cyber_style.jpg",
408
- "white.png",
409
- "white.png",
410
- "sword.png",
411
- "white.png",
412
- "white.png",
413
- "white.png",
414
- "white.png",
415
- "white.png",
416
- "white.png",
417
- "white.png",
418
- "white.png",
419
- "white.png",
420
- "white.png",
421
- 1,
422
- 1,
423
- 1,
424
- 1,
425
- 1,
426
- 1,
427
- 1,
428
- "master sword",
429
- "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
430
- 7.5,
431
- 1,
432
- 2500,
433
- 50,
434
- 512,
435
- 1,
436
- "v1-5-pruned-emaonly.ckpt",
437
- ],
438
- [
439
- "Image",
440
- "Nothing",
441
- "Nothing",
442
- "Image",
443
- "Nothing",
444
- "Nothing",
445
- "Nothing",
446
- "scream_style.jpeg",
447
- "white.png",
448
- "white.png",
449
- "motorcycle.jpg",
450
- "white.png",
451
- "white.png",
452
- "white.png",
453
- "white.png",
454
- "white.png",
455
- "white.png",
456
- "white.png",
457
- "white.png",
458
- "white.png",
459
- "white.png",
460
- 1,
461
- 1,
462
- 1,
463
- 1,
464
- 1,
465
- 1,
466
- 1,
467
- "motorcycle",
468
- "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
469
- 7.5,
470
- 1,
471
- 2500,
472
- 50,
473
- 512,
474
- 1,
475
- "v1-5-pruned-emaonly.ckpt",
476
- ],
477
- ],
478
- fn=processer.run,
479
- inputs=inps,
480
- outputs=[output, cond],
481
- cache_examples=True)
482
-
483
- demo.queue().launch(debug=True, server_name='0.0.0.0')
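The `draw_transfer` helper in the app above flattens an RGBA canvas sketch onto a white background before the drawing is handed to the condition adapters. A minimal, self-contained sketch of the same compositing, with made-up pixel values purely for illustration:

```python
import numpy as np

# A tiny 1x2 RGBA "canvas": one opaque dark stroke, one fully transparent pixel.
rgba = np.zeros((1, 2, 4), dtype=np.uint8)
rgba[..., :3] = 30          # stroke colour
rgba[0, 0, 3] = 255         # opaque pixel
rgba[0, 1, 3] = 0           # transparent pixel

rgb = rgba[..., :3].astype(np.float32)
alpha = rgba[..., 3:4].astype(np.float32) / 255.0
flattened = (rgb * alpha + 255.0 * (1.0 - alpha)).clip(0, 255).astype(np.uint8)

print(flattened[0, 0])  # [30 30 30]    -> stroke survives
print(flattened[0, 1])  # [255 255 255] -> transparent area becomes white
```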
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Shake.js DELETED
@@ -1,2 +0,0 @@
1
- import Shake from '../../../plugins/shakeposition.js';
2
- export default Shake;
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py DELETED
@@ -1,325 +0,0 @@
1
- # Copyright 2023 Microsoft and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import Callable, List, Optional, Tuple, Union
16
-
17
- import torch
18
- from transformers import CLIPTextModel, CLIPTokenizer
19
-
20
- from ...configuration_utils import ConfigMixin, register_to_config
21
- from ...models import ModelMixin, Transformer2DModel, VQModel
22
- from ...schedulers import VQDiffusionScheduler
23
- from ...utils import logging
24
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
25
-
26
-
27
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
-
29
-
30
- class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
31
- """
32
- Utility class for storing learned text embeddings for classifier free sampling
33
- """
34
-
35
- @register_to_config
36
- def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
37
- super().__init__()
38
-
39
- self.learnable = learnable
40
-
41
- if self.learnable:
42
- assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
43
- assert length is not None, "learnable=True requires `length` to be set"
44
-
45
- embeddings = torch.zeros(length, hidden_size)
46
- else:
47
- embeddings = None
48
-
49
- self.embeddings = torch.nn.Parameter(embeddings)
50
-
51
-
52
- class VQDiffusionPipeline(DiffusionPipeline):
53
- r"""
54
- Pipeline for text-to-image generation using VQ Diffusion.
55
-
56
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
57
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
58
-
59
- Args:
60
- vqvae ([`VQModel`]):
61
- Vector Quantized Variational Auto-Encoder (VAE) model to encode and decode images to and from latent
62
- representations.
63
- text_encoder ([`~transformers.CLIPTextModel`]):
64
- Frozen text-encoder ([clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32)).
65
- tokenizer ([`~transformers.CLIPTokenizer`]):
66
- A `CLIPTokenizer` to tokenize text.
67
- transformer ([`Transformer2DModel`]):
68
- A conditional `Transformer2DModel` to denoise the encoded image latents.
69
- scheduler ([`VQDiffusionScheduler`]):
70
- A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
71
- """
72
-
73
- vqvae: VQModel
74
- text_encoder: CLIPTextModel
75
- tokenizer: CLIPTokenizer
76
- transformer: Transformer2DModel
77
- learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
78
- scheduler: VQDiffusionScheduler
79
-
80
- def __init__(
81
- self,
82
- vqvae: VQModel,
83
- text_encoder: CLIPTextModel,
84
- tokenizer: CLIPTokenizer,
85
- transformer: Transformer2DModel,
86
- scheduler: VQDiffusionScheduler,
87
- learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
88
- ):
89
- super().__init__()
90
-
91
- self.register_modules(
92
- vqvae=vqvae,
93
- transformer=transformer,
94
- text_encoder=text_encoder,
95
- tokenizer=tokenizer,
96
- scheduler=scheduler,
97
- learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
98
- )
99
-
100
- def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
101
- batch_size = len(prompt) if isinstance(prompt, list) else 1
102
-
103
- # get prompt text embeddings
104
- text_inputs = self.tokenizer(
105
- prompt,
106
- padding="max_length",
107
- max_length=self.tokenizer.model_max_length,
108
- return_tensors="pt",
109
- )
110
- text_input_ids = text_inputs.input_ids
111
-
112
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
113
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
114
- logger.warning(
115
- "The following part of your input was truncated because CLIP can only handle sequences up to"
116
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
117
- )
118
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
119
- prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
120
-
121
- # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
122
- # While CLIP does normalize the pooled output of the text transformer when combining
123
- # the image and text embeddings, CLIP does not directly normalize the last hidden state.
124
- #
125
- # CLIP normalizing the pooled output.
126
- # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
127
- prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
128
-
129
- # duplicate text embeddings for each generation per prompt
130
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
131
-
132
- if do_classifier_free_guidance:
133
- if self.learned_classifier_free_sampling_embeddings.learnable:
134
- negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
135
- negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
136
- else:
137
- uncond_tokens = [""] * batch_size
138
-
139
- max_length = text_input_ids.shape[-1]
140
- uncond_input = self.tokenizer(
141
- uncond_tokens,
142
- padding="max_length",
143
- max_length=max_length,
144
- truncation=True,
145
- return_tensors="pt",
146
- )
147
- negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
148
- # See comment for normalizing text embeddings
149
- negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
150
-
151
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
152
- seq_len = negative_prompt_embeds.shape[1]
153
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
154
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
155
-
156
- # For classifier free guidance, we need to do two forward passes.
157
- # Here we concatenate the unconditional and text embeddings into a single batch
158
- # to avoid doing two forward passes
159
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
160
-
161
- return prompt_embeds
162
-
163
- @torch.no_grad()
164
- def __call__(
165
- self,
166
- prompt: Union[str, List[str]],
167
- num_inference_steps: int = 100,
168
- guidance_scale: float = 5.0,
169
- truncation_rate: float = 1.0,
170
- num_images_per_prompt: int = 1,
171
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
172
- latents: Optional[torch.FloatTensor] = None,
173
- output_type: Optional[str] = "pil",
174
- return_dict: bool = True,
175
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
176
- callback_steps: int = 1,
177
- ) -> Union[ImagePipelineOutput, Tuple]:
178
- """
179
- The call function to the pipeline for generation.
180
-
181
- Args:
182
- prompt (`str` or `List[str]`):
183
- The prompt or prompts to guide image generation.
184
- num_inference_steps (`int`, *optional*, defaults to 100):
185
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
186
- expense of slower inference.
187
- guidance_scale (`float`, *optional*, defaults to 5.0):
188
- A higher guidance scale value encourages the model to generate images closely linked to the text
189
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
190
- truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)):
191
- Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at
192
- most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above
193
- `truncation_rate` are set to zero.
194
- num_images_per_prompt (`int`, *optional*, defaults to 1):
195
- The number of images to generate per prompt.
196
- generator (`torch.Generator`, *optional*):
197
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
198
- generation deterministic.
199
- latents (`torch.FloatTensor` of shape (batch), *optional*):
200
- Pre-generated latents (token indices) to be used as inputs for image
201
- generation. Must be valid embedding indices. If not provided, a latents tensor of completely
202
- masked latent pixels will be generated.
203
- output_type (`str`, *optional*, defaults to `"pil"`):
204
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
205
- return_dict (`bool`, *optional*, defaults to `True`):
206
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
207
- callback (`Callable`, *optional*):
208
- A function that calls every `callback_steps` steps during inference. The function is called with the
209
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
210
- callback_steps (`int`, *optional*, defaults to 1):
211
- The frequency at which the `callback` function is called. If not specified, the callback is called at
212
- every step.
213
-
214
- Returns:
215
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
216
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
217
- returned where the first element is a list with the generated images.
218
- """
219
- if isinstance(prompt, str):
220
- batch_size = 1
221
- elif isinstance(prompt, list):
222
- batch_size = len(prompt)
223
- else:
224
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
225
-
226
- batch_size = batch_size * num_images_per_prompt
227
-
228
- do_classifier_free_guidance = guidance_scale > 1.0
229
-
230
- prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
231
-
232
- if (callback_steps is None) or (
233
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
234
- ):
235
- raise ValueError(
236
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
237
- f" {type(callback_steps)}."
238
- )
239
-
240
- # get the initial completely masked latents unless the user supplied it
241
-
242
- latents_shape = (batch_size, self.transformer.num_latent_pixels)
243
- if latents is None:
244
- mask_class = self.transformer.num_vector_embeds - 1
245
- latents = torch.full(latents_shape, mask_class).to(self.device)
246
- else:
247
- if latents.shape != latents_shape:
248
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
249
- if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
250
- raise ValueError(
251
- "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"
252
- f" {self.transformer.num_vector_embeds - 1} (inclusive)."
253
- )
254
- latents = latents.to(self.device)
255
-
256
- # set timesteps
257
- self.scheduler.set_timesteps(num_inference_steps, device=self.device)
258
-
259
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
260
-
261
- sample = latents
262
-
263
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
264
- # expand the sample if we are doing classifier free guidance
265
- latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
266
-
267
- # predict the un-noised image
268
- # model_output == `log_p_x_0`
269
- model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
270
-
271
- if do_classifier_free_guidance:
272
- model_output_uncond, model_output_text = model_output.chunk(2)
273
- model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
274
- model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
275
-
276
- model_output = self.truncate(model_output, truncation_rate)
277
-
278
- # remove `log(0)`'s (`-inf`s)
279
- model_output = model_output.clamp(-70)
280
-
281
- # compute the previous noisy sample x_t -> x_t-1
282
- sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
283
-
284
- # call the callback, if provided
285
- if callback is not None and i % callback_steps == 0:
286
- callback(i, t, sample)
287
-
288
- embedding_channels = self.vqvae.config.vq_embed_dim
289
- embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
290
- embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
291
- image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
292
-
293
- image = (image / 2 + 0.5).clamp(0, 1)
294
- image = image.cpu().permute(0, 2, 3, 1).numpy()
295
-
296
- if output_type == "pil":
297
- image = self.numpy_to_pil(image)
298
-
299
- if not return_dict:
300
- return (image,)
301
-
302
- return ImagePipelineOutput(images=image)
303
-
304
- def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
305
- """
306
- Truncates `log_p_x_0` such that, for each column vector, the total cumulative probability is at most `truncation_rate`.
307
- The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to
308
- zero.
309
- """
310
- sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
311
- sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
312
- keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
313
-
314
- # Ensure that at least the largest probability is not zeroed out
315
- all_true = torch.full_like(keep_mask[:, 0:1, :], True)
316
- keep_mask = torch.cat((all_true, keep_mask), dim=1)
317
- keep_mask = keep_mask[:, :-1, :]
318
-
319
- keep_mask = keep_mask.gather(1, indices.argsort(1))
320
-
321
- rv = log_p_x_0.clone()
322
-
323
- rv[~keep_mask] = -torch.inf # -inf = log(0)
324
-
325
- return rv
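The `truncate` method above keeps, per latent pixel, only the most probable classes whose cumulative probability stays below `truncation_rate` (always retaining the top class) and sets the rest to log(0). A standalone sketch of the same masking on a single pixel, with made-up probabilities purely for illustration:

```python
import torch

def truncate(log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    # log_p_x_0 has shape (batch, num_classes, num_pixels), as in the pipeline.
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # always keep the most probable class for every pixel
    keep = torch.cat((torch.ones_like(keep[:, :1, :]), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, indices.argsort(1))      # undo the sort
    out = log_p_x_0.clone()
    out[~keep] = float("-inf")                     # log(0) for dropped classes
    return out

# One pixel with class probabilities 0.6, 0.3, 0.08, 0.02 and truncation_rate=0.9:
p = torch.tensor([0.6, 0.3, 0.08, 0.02]).view(1, 4, 1)
print(truncate(p.log(), 0.9).exp().squeeze())
# tensor([0.6000, 0.3000, 0.0000, 0.0000]) -> only the top two classes survive
```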
spaces/Andy1621/uniformer_image_detection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,75 +0,0 @@
1
- _base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://detectron2/resnet50_caffe',
4
- backbone=dict(
5
- type='ResNet',
6
- depth=50,
7
- num_stages=4,
8
- out_indices=(0, 1, 2, 3),
9
- frozen_stages=1,
10
- norm_cfg=dict(type='BN', requires_grad=False),
11
- norm_eval=True,
12
- style='caffe'),
13
- roi_head=dict(
14
- bbox_head=dict(
15
- bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
16
- loss_cls=dict(
17
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
18
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
19
- # model training and testing settings
20
- train_cfg=dict(
21
- rcnn=dict(
22
- assigner=dict(
23
- pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
24
- sampler=dict(num=256))),
25
- test_cfg=dict(rcnn=dict(score_thr=1e-3)))
26
- dataset_type = 'CocoDataset'
27
- data_root = 'data/coco/'
28
- img_norm_cfg = dict(
29
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
30
- train_pipeline = [
31
- dict(type='LoadImageFromFile'),
32
- dict(type='LoadProposals', num_max_proposals=300),
33
- dict(type='LoadAnnotations', with_bbox=True),
34
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
35
- dict(type='RandomFlip', flip_ratio=0.5),
36
- dict(type='Normalize', **img_norm_cfg),
37
- dict(type='Pad', size_divisor=32),
38
- dict(type='DefaultFormatBundle'),
39
- dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
40
- ]
41
- test_pipeline = [
42
- dict(type='LoadImageFromFile'),
43
- dict(type='LoadProposals', num_max_proposals=300),
44
- dict(
45
- type='MultiScaleFlipAug',
46
- img_scale=(1333, 800),
47
- flip=False,
48
- transforms=[
49
- dict(type='Resize', keep_ratio=True),
50
- dict(type='RandomFlip'),
51
- dict(type='Normalize', **img_norm_cfg),
52
- dict(type='Pad', size_divisor=32),
53
- dict(type='ImageToTensor', keys=['img']),
54
- dict(type='ToTensor', keys=['proposals']),
55
- dict(
56
- type='ToDataContainer',
57
- fields=[dict(key='proposals', stack=False)]),
58
- dict(type='Collect', keys=['img', 'proposals']),
59
- ])
60
- ]
61
- data = dict(
62
- train=dict(
63
- proposal_file=data_root +
64
- 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
65
- pipeline=train_pipeline),
66
- val=dict(
67
- proposal_file=data_root +
68
- 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
69
- pipeline=test_pipeline),
70
- test=dict(
71
- proposal_file=data_root +
72
- 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
73
- pipeline=test_pipeline))
74
- optimizer_config = dict(
75
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/ExLlama.md DELETED
@@ -1,22 +0,0 @@
1
- # ExLlama
2
-
3
- ### About
4
-
5
- ExLlama is an extremely optimized GPTQ backend for LLaMA models. It features much lower VRAM usage and much higher speeds due to not relying on unoptimized transformers code.
6
-
7
- ### Usage
8
-
9
- Configure text-generation-webui to use exllama via the UI or command line:
10
- - In the "Model" tab, set "Loader" to "exllama"
11
- - Specify `--loader exllama` on the command line
12
-
13
- ### Manual setup
14
-
15
- No additional installation steps are necessary since an exllama package is already included in the requirements.txt. If this package fails to install for some reason, you can install it manually by cloning the original repository into your `repositories/` folder:
16
-
17
- ```
18
- mkdir repositories
19
- cd repositories
20
- git clone https://github.com/turboderp/exllama
21
- ```
22
-
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/js/switch_tabs.js DELETED
@@ -1,59 +0,0 @@
1
- let chat_tab = document.getElementById("chat-tab");
2
- let main_parent = chat_tab.parentNode;
3
-
4
- function scrollToTop() {
5
- window.scrollTo({
6
- top: 0,
7
- // behavior: 'smooth'
8
- });
9
- }
10
-
11
- function findButtonsByText(buttonText) {
12
- const buttons = document.getElementsByTagName("button");
13
- const matchingButtons = [];
14
- buttonText = buttonText.trim();
15
-
16
- for (let i = 0; i < buttons.length; i++) {
17
- const button = buttons[i];
18
- const buttonInnerText = button.textContent.trim();
19
-
20
- if (buttonInnerText === buttonText) {
21
- matchingButtons.push(button);
22
- }
23
- }
24
-
25
- return matchingButtons;
26
- }
27
-
28
- function switch_to_chat() {
29
- let chat_tab_button = main_parent.childNodes[0].childNodes[1];
30
- chat_tab_button.click();
31
- scrollToTop();
32
- }
33
-
34
- function switch_to_default() {
35
- let default_tab_button = main_parent.childNodes[0].childNodes[4];
36
- default_tab_button.click();
37
- scrollToTop();
38
- }
39
-
40
- function switch_to_notebook() {
41
- let notebook_tab_button = main_parent.childNodes[0].childNodes[7];
42
- notebook_tab_button.click();
43
- findButtonsByText("Raw")[1].click();
44
- scrollToTop();
45
- }
46
-
47
- function switch_to_generation_parameters() {
48
- let parameters_tab_button = main_parent.childNodes[0].childNodes[10];
49
- parameters_tab_button.click();
50
- findButtonsByText("Generation")[0].click();
51
- scrollToTop();
52
- }
53
-
54
- function switch_to_character() {
55
- let parameters_tab_button = main_parent.childNodes[0].childNodes[10];
56
- parameters_tab_button.click();
57
- findButtonsByText("Character")[0].click();
58
- scrollToTop();
59
- }
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/blocks.py DELETED
@@ -1,342 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
- from .vit import (
5
- _make_pretrained_vitb_rn50_384,
6
- _make_pretrained_vitl16_384,
7
- _make_pretrained_vitb16_384,
8
- forward_vit,
9
- )
10
-
11
- def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
12
- if backbone == "vitl16_384":
13
- pretrained = _make_pretrained_vitl16_384(
14
- use_pretrained, hooks=hooks, use_readout=use_readout
15
- )
16
- scratch = _make_scratch(
17
- [256, 512, 1024, 1024], features, groups=groups, expand=expand
18
- ) # ViT-L/16 - 85.0% Top1 (backbone)
19
- elif backbone == "vitb_rn50_384":
20
- pretrained = _make_pretrained_vitb_rn50_384(
21
- use_pretrained,
22
- hooks=hooks,
23
- use_vit_only=use_vit_only,
24
- use_readout=use_readout,
25
- )
26
- scratch = _make_scratch(
27
- [256, 512, 768, 768], features, groups=groups, expand=expand
28
- ) # ViT-H/16 - 85.0% Top1 (backbone)
29
- elif backbone == "vitb16_384":
30
- pretrained = _make_pretrained_vitb16_384(
31
- use_pretrained, hooks=hooks, use_readout=use_readout
32
- )
33
- scratch = _make_scratch(
34
- [96, 192, 384, 768], features, groups=groups, expand=expand
35
- ) # ViT-B/16 - 84.6% Top1 (backbone)
36
- elif backbone == "resnext101_wsl":
37
- pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
38
- scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
39
- elif backbone == "efficientnet_lite3":
40
- pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
41
- scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
42
- else:
43
- print(f"Backbone '{backbone}' not implemented")
44
- assert False
45
-
46
- return pretrained, scratch
47
-
48
-
49
- def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
- scratch = nn.Module()
51
-
52
- out_shape1 = out_shape
53
- out_shape2 = out_shape
54
- out_shape3 = out_shape
55
- out_shape4 = out_shape
56
- if expand==True:
57
- out_shape1 = out_shape
58
- out_shape2 = out_shape*2
59
- out_shape3 = out_shape*4
60
- out_shape4 = out_shape*8
61
-
62
- scratch.layer1_rn = nn.Conv2d(
63
- in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
- )
65
- scratch.layer2_rn = nn.Conv2d(
66
- in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
- )
68
- scratch.layer3_rn = nn.Conv2d(
69
- in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
- )
71
- scratch.layer4_rn = nn.Conv2d(
72
- in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
- )
74
-
75
- return scratch
76
-
77
-
78
- def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
79
- efficientnet = torch.hub.load(
80
- "rwightman/gen-efficientnet-pytorch",
81
- "tf_efficientnet_lite3",
82
- pretrained=use_pretrained,
83
- exportable=exportable
84
- )
85
- return _make_efficientnet_backbone(efficientnet)
86
-
87
-
88
- def _make_efficientnet_backbone(effnet):
89
- pretrained = nn.Module()
90
-
91
- pretrained.layer1 = nn.Sequential(
92
- effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
- )
94
- pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
- pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
- pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
-
98
- return pretrained
99
-
100
-
101
- def _make_resnet_backbone(resnet):
102
- pretrained = nn.Module()
103
- pretrained.layer1 = nn.Sequential(
104
- resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
- )
106
-
107
- pretrained.layer2 = resnet.layer2
108
- pretrained.layer3 = resnet.layer3
109
- pretrained.layer4 = resnet.layer4
110
-
111
- return pretrained
112
-
113
-
114
- def _make_pretrained_resnext101_wsl(use_pretrained):
115
- resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
116
- return _make_resnet_backbone(resnet)
117
-
118
-
119
-
120
- class Interpolate(nn.Module):
121
- """Interpolation module.
122
- """
123
-
124
- def __init__(self, scale_factor, mode, align_corners=False):
125
- """Init.
126
-
127
- Args:
128
- scale_factor (float): scaling
129
- mode (str): interpolation mode
130
- """
131
- super(Interpolate, self).__init__()
132
-
133
- self.interp = nn.functional.interpolate
134
- self.scale_factor = scale_factor
135
- self.mode = mode
136
- self.align_corners = align_corners
137
-
138
- def forward(self, x):
139
- """Forward pass.
140
-
141
- Args:
142
- x (tensor): input
143
-
144
- Returns:
145
- tensor: interpolated data
146
- """
147
-
148
- x = self.interp(
149
- x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
150
- )
151
-
152
- return x
153
-
154
-
155
- class ResidualConvUnit(nn.Module):
156
- """Residual convolution module.
157
- """
158
-
159
- def __init__(self, features):
160
- """Init.
161
-
162
- Args:
163
- features (int): number of features
164
- """
165
- super().__init__()
166
-
167
- self.conv1 = nn.Conv2d(
168
- features, features, kernel_size=3, stride=1, padding=1, bias=True
169
- )
170
-
171
- self.conv2 = nn.Conv2d(
172
- features, features, kernel_size=3, stride=1, padding=1, bias=True
173
- )
174
-
175
- self.relu = nn.ReLU(inplace=True)
176
-
177
- def forward(self, x):
178
- """Forward pass.
179
-
180
- Args:
181
- x (tensor): input
182
-
183
- Returns:
184
- tensor: output
185
- """
186
- out = self.relu(x)
187
- out = self.conv1(out)
188
- out = self.relu(out)
189
- out = self.conv2(out)
190
-
191
- return out + x
192
-
193
-
194
- class FeatureFusionBlock(nn.Module):
195
- """Feature fusion block.
196
- """
197
-
198
- def __init__(self, features):
199
- """Init.
200
-
201
- Args:
202
- features (int): number of features
203
- """
204
- super(FeatureFusionBlock, self).__init__()
205
-
206
- self.resConfUnit1 = ResidualConvUnit(features)
207
- self.resConfUnit2 = ResidualConvUnit(features)
208
-
209
- def forward(self, *xs):
210
- """Forward pass.
211
-
212
- Returns:
213
- tensor: output
214
- """
215
- output = xs[0]
216
-
217
- if len(xs) == 2:
218
- output += self.resConfUnit1(xs[1])
219
-
220
- output = self.resConfUnit2(output)
221
-
222
- output = nn.functional.interpolate(
223
- output, scale_factor=2, mode="bilinear", align_corners=True
224
- )
225
-
226
- return output
227
-
228
-
229
-
230
-
231
- class ResidualConvUnit_custom(nn.Module):
232
- """Residual convolution module.
233
- """
234
-
235
- def __init__(self, features, activation, bn):
236
- """Init.
237
-
238
- Args:
239
- features (int): number of features
240
- """
241
- super().__init__()
242
-
243
- self.bn = bn
244
-
245
- self.groups=1
246
-
247
- self.conv1 = nn.Conv2d(
248
- features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
249
- )
250
-
251
- self.conv2 = nn.Conv2d(
252
- features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
253
- )
254
-
255
- if self.bn==True:
256
- self.bn1 = nn.BatchNorm2d(features)
257
- self.bn2 = nn.BatchNorm2d(features)
258
-
259
- self.activation = activation
260
-
261
- self.skip_add = nn.quantized.FloatFunctional()
262
-
263
- def forward(self, x):
264
- """Forward pass.
265
-
266
- Args:
267
- x (tensor): input
268
-
269
- Returns:
270
- tensor: output
271
- """
272
-
273
- out = self.activation(x)
274
- out = self.conv1(out)
275
- if self.bn==True:
276
- out = self.bn1(out)
277
-
278
- out = self.activation(out)
279
- out = self.conv2(out)
280
- if self.bn==True:
281
- out = self.bn2(out)
282
-
283
- if self.groups > 1:
284
- out = self.conv_merge(out)
285
-
286
- return self.skip_add.add(out, x)
287
-
288
- # return out + x
289
-
290
-
291
- class FeatureFusionBlock_custom(nn.Module):
292
- """Feature fusion block.
293
- """
294
-
295
- def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
296
- """Init.
297
-
298
- Args:
299
- features (int): number of features
300
- """
301
- super(FeatureFusionBlock_custom, self).__init__()
302
-
303
- self.deconv = deconv
304
- self.align_corners = align_corners
305
-
306
- self.groups=1
307
-
308
- self.expand = expand
309
- out_features = features
310
- if self.expand==True:
311
- out_features = features//2
312
-
313
- self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
314
-
315
- self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
316
- self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
317
-
318
- self.skip_add = nn.quantized.FloatFunctional()
319
-
320
- def forward(self, *xs):
321
- """Forward pass.
322
-
323
- Returns:
324
- tensor: output
325
- """
326
- output = xs[0]
327
-
328
- if len(xs) == 2:
329
- res = self.resConfUnit1(xs[1])
330
- output = self.skip_add.add(output, res)
331
- # output += res
332
-
333
- output = self.resConfUnit2(output)
334
-
335
- output = nn.functional.interpolate(
336
- output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
337
- )
338
-
339
- output = self.out_conv(output)
340
-
341
- return output
342
-
spaces/Arikkod/FoodVisionMini/app.py DELETED
@@ -1,51 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import torch
4
- from model import create_effnetb2_model
5
- from timeit import default_timer as timer
6
- from typing import Tuple, Dict
7
-
8
- class_names = ['pizza', 'steak', 'sushi']
9
- effnetb2, effnetb2_transforms = create_effnetb2_model(3, 42)
10
- # Load saved weights:
11
- effnetb2.load_state_dict(
12
- torch.load(f='09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_precent.pth',
13
- map_location=torch.device('cpu')
14
- )
15
- )
16
-
17
- def predict(img):
18
- # Start a timer
19
- start_time = timer()
20
- # Transform the input image for use with EffNetB2
21
- img = effnetb2_transforms(img).unsqueeze(0)
22
- # Put model into eval mode, make prediction
23
- effnetb2.eval()
24
- with torch.inference_mode():
25
- pred_probs = torch.softmax(effnetb2(img), dim=1)
26
- # Create a prediction label and prediction probability dictionary
27
- pred_labels_and_probs = {class_names[i]:float(pred_probs[0][i]) for i in range(len(class_names))}
28
- # Calculate pred time
29
- end_time = timer()
30
- pred_time = round(end_time - start_time, 4)
31
- # Return pred dict and pred time
32
- return pred_labels_and_probs, pred_time
33
-
34
-
35
- title = 'FoodVision Mini 🍕🥩🍣'
36
- description = 'An [EfficientNetB2 feature extractor](https://pytorch.org/vision/main/models/generated/torchvision.models.efficientnet_b2.html)'
37
- article = 'Created with Pytorch model deployment'
38
- example_list = [["./examples/" + file] for file in os.listdir("./examples")]
39
-
40
- demo = gr.Interface(fn=predict,
41
- inputs=gr.Image(type='pil'),
42
- outputs=[gr.Label(num_top_classes=3, label='Predictions'),
43
- gr.Number(label='Prediction time (s)')],
44
- examples=example_list,
45
- title=title,
46
- description=description,
47
- article=article
48
- )
49
-
50
- demo.launch(debug=False,
51
- share=False)
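The `predict` function above returns the `{class_name: probability}` dict that `gr.Label` expects, plus the elapsed time. A stripped-down sketch of that mapping with stand-in logits (the numbers are illustrative, not real model outputs):

```python
import torch

class_names = ['pizza', 'steak', 'sushi']
logits = torch.tensor([[2.0, 0.5, 0.1]])        # stand-in for effnetb2(img)
pred_probs = torch.softmax(logits, dim=1)
pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
print(pred_labels_and_probs)  # approx {'pizza': 0.73, 'steak': 0.16, 'sushi': 0.11}
```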
spaces/Arthur678/vits-uma-genshin-honkai/modules.py DELETED
@@ -1,388 +0,0 @@
1
- import math
2
- import numpy as np
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
8
- from torch.nn.utils import weight_norm, remove_weight_norm
9
-
10
- import commons
11
- from commons import init_weights, get_padding
12
- from transforms import piecewise_rational_quadratic_transform
13
-
14
-
15
- LRELU_SLOPE = 0.1
16
-
17
-
18
- class LayerNorm(nn.Module):
19
- def __init__(self, channels, eps=1e-5):
20
- super().__init__()
21
- self.channels = channels
22
- self.eps = eps
23
-
24
- self.gamma = nn.Parameter(torch.ones(channels))
25
- self.beta = nn.Parameter(torch.zeros(channels))
26
-
27
- def forward(self, x):
28
- x = x.transpose(1, -1)
29
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
30
- return x.transpose(1, -1)
31
-
32
-
33
- class ConvReluNorm(nn.Module):
34
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
35
- super().__init__()
36
- self.in_channels = in_channels
37
- self.hidden_channels = hidden_channels
38
- self.out_channels = out_channels
39
- self.kernel_size = kernel_size
40
- self.n_layers = n_layers
41
- self.p_dropout = p_dropout
42
- assert n_layers > 1, "Number of layers should be larger than 0."
43
-
44
- self.conv_layers = nn.ModuleList()
45
- self.norm_layers = nn.ModuleList()
46
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
47
- self.norm_layers.append(LayerNorm(hidden_channels))
48
- self.relu_drop = nn.Sequential(
49
- nn.ReLU(),
50
- nn.Dropout(p_dropout))
51
- for _ in range(n_layers-1):
52
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
53
- self.norm_layers.append(LayerNorm(hidden_channels))
54
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
55
- self.proj.weight.data.zero_()
56
- self.proj.bias.data.zero_()
57
-
58
- def forward(self, x, x_mask):
59
- x_org = x
60
- for i in range(self.n_layers):
61
- x = self.conv_layers[i](x * x_mask)
62
- x = self.norm_layers[i](x)
63
- x = self.relu_drop(x)
64
- x = x_org + self.proj(x)
65
- return x * x_mask
66
-
67
-
68
- class DDSConv(nn.Module):
69
- """
70
- Dilated and Depth-Separable Convolution
71
- """
72
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
73
- super().__init__()
74
- self.channels = channels
75
- self.kernel_size = kernel_size
76
- self.n_layers = n_layers
77
- self.p_dropout = p_dropout
78
-
79
- self.drop = nn.Dropout(p_dropout)
80
- self.convs_sep = nn.ModuleList()
81
- self.convs_1x1 = nn.ModuleList()
82
- self.norms_1 = nn.ModuleList()
83
- self.norms_2 = nn.ModuleList()
84
- for i in range(n_layers):
85
- dilation = kernel_size ** i
86
- padding = (kernel_size * dilation - dilation) // 2
87
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
88
- groups=channels, dilation=dilation, padding=padding
89
- ))
90
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
91
- self.norms_1.append(LayerNorm(channels))
92
- self.norms_2.append(LayerNorm(channels))
93
-
94
- def forward(self, x, x_mask, g=None):
95
- if g is not None:
96
- x = x + g
97
- for i in range(self.n_layers):
98
- y = self.convs_sep[i](x * x_mask)
99
- y = self.norms_1[i](y)
100
- y = F.gelu(y)
101
- y = self.convs_1x1[i](y)
102
- y = self.norms_2[i](y)
103
- y = F.gelu(y)
104
- y = self.drop(y)
105
- x = x + y
106
- return x * x_mask
107
-
108
-
109
- class WN(torch.nn.Module):
110
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
111
- super(WN, self).__init__()
112
- assert(kernel_size % 2 == 1)
113
- self.hidden_channels =hidden_channels
114
- self.kernel_size = kernel_size,
115
- self.dilation_rate = dilation_rate
116
- self.n_layers = n_layers
117
- self.gin_channels = gin_channels
118
- self.p_dropout = p_dropout
119
-
120
- self.in_layers = torch.nn.ModuleList()
121
- self.res_skip_layers = torch.nn.ModuleList()
122
- self.drop = nn.Dropout(p_dropout)
123
-
124
- if gin_channels != 0:
125
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
126
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
127
-
128
- for i in range(n_layers):
129
- dilation = dilation_rate ** i
130
- padding = int((kernel_size * dilation - dilation) / 2)
131
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
132
- dilation=dilation, padding=padding)
133
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
134
- self.in_layers.append(in_layer)
135
-
136
- # last one is not necessary
137
- if i < n_layers - 1:
138
- res_skip_channels = 2 * hidden_channels
139
- else:
140
- res_skip_channels = hidden_channels
141
-
142
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
143
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
144
- self.res_skip_layers.append(res_skip_layer)
145
-
146
- def forward(self, x, x_mask, g=None, **kwargs):
147
- output = torch.zeros_like(x)
148
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
149
-
150
- if g is not None:
151
- g = self.cond_layer(g)
152
-
153
- for i in range(self.n_layers):
154
- x_in = self.in_layers[i](x)
155
- if g is not None:
156
- cond_offset = i * 2 * self.hidden_channels
157
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
158
- else:
159
- g_l = torch.zeros_like(x_in)
160
-
161
- acts = commons.fused_add_tanh_sigmoid_multiply(
162
- x_in,
163
- g_l,
164
- n_channels_tensor)
165
- acts = self.drop(acts)
166
-
167
- res_skip_acts = self.res_skip_layers[i](acts)
168
- if i < self.n_layers - 1:
169
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
170
- x = (x + res_acts) * x_mask
171
- output = output + res_skip_acts[:,self.hidden_channels:,:]
172
- else:
173
- output = output + res_skip_acts
174
- return output * x_mask
175
-
176
- def remove_weight_norm(self):
177
- if self.gin_channels != 0:
178
- torch.nn.utils.remove_weight_norm(self.cond_layer)
179
- for l in self.in_layers:
180
- torch.nn.utils.remove_weight_norm(l)
181
- for l in self.res_skip_layers:
182
- torch.nn.utils.remove_weight_norm(l)
183
-
184
-
185
- class ResBlock1(torch.nn.Module):
186
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
187
- super(ResBlock1, self).__init__()
188
- self.convs1 = nn.ModuleList([
189
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
190
- padding=get_padding(kernel_size, dilation[0]))),
191
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
192
- padding=get_padding(kernel_size, dilation[1]))),
193
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
194
- padding=get_padding(kernel_size, dilation[2])))
195
- ])
196
- self.convs1.apply(init_weights)
197
-
198
- self.convs2 = nn.ModuleList([
199
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
200
- padding=get_padding(kernel_size, 1))),
201
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
202
- padding=get_padding(kernel_size, 1))),
203
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
204
- padding=get_padding(kernel_size, 1)))
205
- ])
206
- self.convs2.apply(init_weights)
207
-
208
- def forward(self, x, x_mask=None):
209
- for c1, c2 in zip(self.convs1, self.convs2):
210
- xt = F.leaky_relu(x, LRELU_SLOPE)
211
- if x_mask is not None:
212
- xt = xt * x_mask
213
- xt = c1(xt)
214
- xt = F.leaky_relu(xt, LRELU_SLOPE)
215
- if x_mask is not None:
216
- xt = xt * x_mask
217
- xt = c2(xt)
218
- x = xt + x
219
- if x_mask is not None:
220
- x = x * x_mask
221
- return x
222
-
223
- def remove_weight_norm(self):
224
- for l in self.convs1:
225
- remove_weight_norm(l)
226
- for l in self.convs2:
227
- remove_weight_norm(l)
228
-
229
-
230
- class ResBlock2(torch.nn.Module):
231
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
232
- super(ResBlock2, self).__init__()
233
- self.convs = nn.ModuleList([
234
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
235
- padding=get_padding(kernel_size, dilation[0]))),
236
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
237
- padding=get_padding(kernel_size, dilation[1])))
238
- ])
239
- self.convs.apply(init_weights)
240
-
241
- def forward(self, x, x_mask=None):
242
- for c in self.convs:
243
- xt = F.leaky_relu(x, LRELU_SLOPE)
244
- if x_mask is not None:
245
- xt = xt * x_mask
246
- xt = c(xt)
247
- x = xt + x
248
- if x_mask is not None:
249
- x = x * x_mask
250
- return x
251
-
252
- def remove_weight_norm(self):
253
- for l in self.convs:
254
- remove_weight_norm(l)
255
-
256
-
257
- class Log(nn.Module):
258
- def forward(self, x, x_mask, reverse=False, **kwargs):
259
- if not reverse:
260
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
261
- logdet = torch.sum(-y, [1, 2])
262
- return y, logdet
263
- else:
264
- x = torch.exp(x) * x_mask
265
- return x
266
-
267
-
268
- class Flip(nn.Module):
269
- def forward(self, x, *args, reverse=False, **kwargs):
270
- x = torch.flip(x, [1])
271
- if not reverse:
272
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
273
- return x, logdet
274
- else:
275
- return x
276
-
277
-
278
- class ElementwiseAffine(nn.Module):
279
- def __init__(self, channels):
280
- super().__init__()
281
- self.channels = channels
282
- self.m = nn.Parameter(torch.zeros(channels,1))
283
- self.logs = nn.Parameter(torch.zeros(channels,1))
284
-
285
- def forward(self, x, x_mask, reverse=False, **kwargs):
286
- if not reverse:
287
- y = self.m + torch.exp(self.logs) * x
288
- y = y * x_mask
289
- logdet = torch.sum(self.logs * x_mask, [1,2])
290
- return y, logdet
291
- else:
292
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
293
- return x
294
-
295
-
296
- class ResidualCouplingLayer(nn.Module):
297
- def __init__(self,
298
- channels,
299
- hidden_channels,
300
- kernel_size,
301
- dilation_rate,
302
- n_layers,
303
- p_dropout=0,
304
- gin_channels=0,
305
- mean_only=False):
306
- assert channels % 2 == 0, "channels should be divisible by 2"
307
- super().__init__()
308
- self.channels = channels
309
- self.hidden_channels = hidden_channels
310
- self.kernel_size = kernel_size
311
- self.dilation_rate = dilation_rate
312
- self.n_layers = n_layers
313
- self.half_channels = channels // 2
314
- self.mean_only = mean_only
315
-
316
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
317
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
318
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
319
- self.post.weight.data.zero_()
320
- self.post.bias.data.zero_()
321
-
322
- def forward(self, x, x_mask, g=None, reverse=False):
323
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
324
- h = self.pre(x0) * x_mask
325
- h = self.enc(h, x_mask, g=g)
326
- stats = self.post(h) * x_mask
327
- if not self.mean_only:
328
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
329
- else:
330
- m = stats
331
- logs = torch.zeros_like(m)
332
-
333
- if not reverse:
334
- x1 = m + x1 * torch.exp(logs) * x_mask
335
- x = torch.cat([x0, x1], 1)
336
- logdet = torch.sum(logs, [1,2])
337
- return x, logdet
338
- else:
339
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
340
- x = torch.cat([x0, x1], 1)
341
- return x
342
-
343
-
344
- class ConvFlow(nn.Module):
345
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
346
- super().__init__()
347
- self.in_channels = in_channels
348
- self.filter_channels = filter_channels
349
- self.kernel_size = kernel_size
350
- self.n_layers = n_layers
351
- self.num_bins = num_bins
352
- self.tail_bound = tail_bound
353
- self.half_channels = in_channels // 2
354
-
355
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
356
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
357
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
358
- self.proj.weight.data.zero_()
359
- self.proj.bias.data.zero_()
360
-
361
- def forward(self, x, x_mask, g=None, reverse=False):
362
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
363
- h = self.pre(x0)
364
- h = self.convs(h, x_mask, g=g)
365
- h = self.proj(h) * x_mask
366
-
367
- b, c, t = x0.shape
368
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
369
-
370
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
371
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
372
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
373
-
374
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
375
- unnormalized_widths,
376
- unnormalized_heights,
377
- unnormalized_derivatives,
378
- inverse=reverse,
379
- tails='linear',
380
- tail_bound=self.tail_bound
381
- )
382
-
383
- x = torch.cat([x0, x1], 1) * x_mask
384
- logdet = torch.sum(logabsdet * x_mask, [1,2])
385
- if not reverse:
386
- return x, logdet
387
- else:
388
- return x
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp DELETED
@@ -1,43 +0,0 @@
1
- /*!
2
- **************************************************************************************************
3
- * Deformable DETR
4
- * Copyright (c) 2020 SenseTime. All Rights Reserved.
5
- * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
- **************************************************************************************************
7
- * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8
- **************************************************************************************************
9
- */
10
-
11
- #include <vector>
12
-
13
- #include <ATen/ATen.h>
14
- #include <ATen/cuda/CUDAContext.h>
15
-
16
- namespace groundingdino {
17
-
18
- at::Tensor
19
- ms_deform_attn_cpu_forward(
20
- const at::Tensor &value,
21
- const at::Tensor &spatial_shapes,
22
- const at::Tensor &level_start_index,
23
- const at::Tensor &sampling_loc,
24
- const at::Tensor &attn_weight,
25
- const int im2col_step)
26
- {
27
- AT_ERROR("Not implement on cpu");
28
- }
29
-
30
- std::vector<at::Tensor>
31
- ms_deform_attn_cpu_backward(
32
- const at::Tensor &value,
33
- const at::Tensor &spatial_shapes,
34
- const at::Tensor &level_start_index,
35
- const at::Tensor &sampling_loc,
36
- const at::Tensor &attn_weight,
37
- const at::Tensor &grad_output,
38
- const int im2col_step)
39
- {
40
- AT_ERROR("Not implement on cpu");
41
- }
42
-
43
- } // namespace groundingdino
 
spaces/Ataturk-Chatbot/HuggingFaceChat/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: HuggingFaceChat
3
- emoji: 🚀
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.50.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/configuration.py DELETED
@@ -1,282 +0,0 @@
1
- import logging
2
- import os
3
- import subprocess
4
- from optparse import Values
5
- from typing import Any, List, Optional
6
-
7
- from pip._internal.cli.base_command import Command
8
- from pip._internal.cli.status_codes import ERROR, SUCCESS
9
- from pip._internal.configuration import (
10
- Configuration,
11
- Kind,
12
- get_configuration_files,
13
- kinds,
14
- )
15
- from pip._internal.exceptions import PipError
16
- from pip._internal.utils.logging import indent_log
17
- from pip._internal.utils.misc import get_prog, write_output
18
-
19
- logger = logging.getLogger(__name__)
20
-
21
-
22
- class ConfigurationCommand(Command):
23
- """
24
- Manage local and global configuration.
25
-
26
- Subcommands:
27
-
28
- - list: List the active configuration (or from the file specified)
29
- - edit: Edit the configuration file in an editor
30
- - get: Get the value associated with command.option
31
- - set: Set the command.option=value
32
- - unset: Unset the value associated with command.option
33
- - debug: List the configuration files and values defined under them
34
-
35
- Configuration keys should be dot separated command and option name,
36
- with the special prefix "global" affecting any command. For example,
37
- "pip config set global.index-url https://example.org/" would configure
38
- the index url for all commands, but "pip config set download.timeout 10"
39
- would configure a 10 second timeout only for "pip download" commands.
40
-
41
- If none of --user, --global and --site are passed, a virtual
42
- environment configuration file is used if one is active and the file
43
- exists. Otherwise, all modifications happen to the user file by
44
- default.
45
- """
46
-
47
- ignore_require_venv = True
48
- usage = """
49
- %prog [<file-option>] list
50
- %prog [<file-option>] [--editor <editor-path>] edit
51
-
52
- %prog [<file-option>] get command.option
53
- %prog [<file-option>] set command.option value
54
- %prog [<file-option>] unset command.option
55
- %prog [<file-option>] debug
56
- """
57
-
58
- def add_options(self) -> None:
59
- self.cmd_opts.add_option(
60
- "--editor",
61
- dest="editor",
62
- action="store",
63
- default=None,
64
- help=(
65
- "Editor to use to edit the file. Uses VISUAL or EDITOR "
66
- "environment variables if not provided."
67
- ),
68
- )
69
-
70
- self.cmd_opts.add_option(
71
- "--global",
72
- dest="global_file",
73
- action="store_true",
74
- default=False,
75
- help="Use the system-wide configuration file only",
76
- )
77
-
78
- self.cmd_opts.add_option(
79
- "--user",
80
- dest="user_file",
81
- action="store_true",
82
- default=False,
83
- help="Use the user configuration file only",
84
- )
85
-
86
- self.cmd_opts.add_option(
87
- "--site",
88
- dest="site_file",
89
- action="store_true",
90
- default=False,
91
- help="Use the current environment configuration file only",
92
- )
93
-
94
- self.parser.insert_option_group(0, self.cmd_opts)
95
-
96
- def run(self, options: Values, args: List[str]) -> int:
97
- handlers = {
98
- "list": self.list_values,
99
- "edit": self.open_in_editor,
100
- "get": self.get_name,
101
- "set": self.set_name_value,
102
- "unset": self.unset_name,
103
- "debug": self.list_config_values,
104
- }
105
-
106
- # Determine action
107
- if not args or args[0] not in handlers:
108
- logger.error(
109
- "Need an action (%s) to perform.",
110
- ", ".join(sorted(handlers)),
111
- )
112
- return ERROR
113
-
114
- action = args[0]
115
-
116
- # Determine which configuration files are to be loaded
117
- # Depends on whether the command is modifying.
118
- try:
119
- load_only = self._determine_file(
120
- options, need_value=(action in ["get", "set", "unset", "edit"])
121
- )
122
- except PipError as e:
123
- logger.error(e.args[0])
124
- return ERROR
125
-
126
- # Load a new configuration
127
- self.configuration = Configuration(
128
- isolated=options.isolated_mode, load_only=load_only
129
- )
130
- self.configuration.load()
131
-
132
- # Error handling happens here, not in the action-handlers.
133
- try:
134
- handlers[action](options, args[1:])
135
- except PipError as e:
136
- logger.error(e.args[0])
137
- return ERROR
138
-
139
- return SUCCESS
140
-
141
- def _determine_file(self, options: Values, need_value: bool) -> Optional[Kind]:
142
- file_options = [
143
- key
144
- for key, value in (
145
- (kinds.USER, options.user_file),
146
- (kinds.GLOBAL, options.global_file),
147
- (kinds.SITE, options.site_file),
148
- )
149
- if value
150
- ]
151
-
152
- if not file_options:
153
- if not need_value:
154
- return None
155
- # Default to user, unless there's a site file.
156
- elif any(
157
- os.path.exists(site_config_file)
158
- for site_config_file in get_configuration_files()[kinds.SITE]
159
- ):
160
- return kinds.SITE
161
- else:
162
- return kinds.USER
163
- elif len(file_options) == 1:
164
- return file_options[0]
165
-
166
- raise PipError(
167
- "Need exactly one file to operate upon "
168
- "(--user, --site, --global) to perform."
169
- )
170
-
171
- def list_values(self, options: Values, args: List[str]) -> None:
172
- self._get_n_args(args, "list", n=0)
173
-
174
- for key, value in sorted(self.configuration.items()):
175
- write_output("%s=%r", key, value)
176
-
177
- def get_name(self, options: Values, args: List[str]) -> None:
178
- key = self._get_n_args(args, "get [name]", n=1)
179
- value = self.configuration.get_value(key)
180
-
181
- write_output("%s", value)
182
-
183
- def set_name_value(self, options: Values, args: List[str]) -> None:
184
- key, value = self._get_n_args(args, "set [name] [value]", n=2)
185
- self.configuration.set_value(key, value)
186
-
187
- self._save_configuration()
188
-
189
- def unset_name(self, options: Values, args: List[str]) -> None:
190
- key = self._get_n_args(args, "unset [name]", n=1)
191
- self.configuration.unset_value(key)
192
-
193
- self._save_configuration()
194
-
195
- def list_config_values(self, options: Values, args: List[str]) -> None:
196
- """List config key-value pairs across different config files"""
197
- self._get_n_args(args, "debug", n=0)
198
-
199
- self.print_env_var_values()
200
- # Iterate over config files and print if they exist, and the
201
- # key-value pairs present in them if they do
202
- for variant, files in sorted(self.configuration.iter_config_files()):
203
- write_output("%s:", variant)
204
- for fname in files:
205
- with indent_log():
206
- file_exists = os.path.exists(fname)
207
- write_output("%s, exists: %r", fname, file_exists)
208
- if file_exists:
209
- self.print_config_file_values(variant)
210
-
211
- def print_config_file_values(self, variant: Kind) -> None:
212
- """Get key-value pairs from the file of a variant"""
213
- for name, value in self.configuration.get_values_in_config(variant).items():
214
- with indent_log():
215
- write_output("%s: %s", name, value)
216
-
217
- def print_env_var_values(self) -> None:
218
- """Get key-values pairs present as environment variables"""
219
- write_output("%s:", "env_var")
220
- with indent_log():
221
- for key, value in sorted(self.configuration.get_environ_vars()):
222
- env_var = f"PIP_{key.upper()}"
223
- write_output("%s=%r", env_var, value)
224
-
225
- def open_in_editor(self, options: Values, args: List[str]) -> None:
226
- editor = self._determine_editor(options)
227
-
228
- fname = self.configuration.get_file_to_edit()
229
- if fname is None:
230
- raise PipError("Could not determine appropriate file.")
231
- elif '"' in fname:
232
- # This shouldn't happen, unless we see a username like that.
233
- # If that happens, we'd appreciate a pull request fixing this.
234
- raise PipError(
235
- f'Can not open an editor for a file name containing "\n{fname}'
236
- )
237
-
238
- try:
239
- subprocess.check_call(f'{editor} "{fname}"', shell=True)
240
- except FileNotFoundError as e:
241
- if not e.filename:
242
- e.filename = editor
243
- raise
244
- except subprocess.CalledProcessError as e:
245
- raise PipError(
246
- "Editor Subprocess exited with exit code {}".format(e.returncode)
247
- )
248
-
249
- def _get_n_args(self, args: List[str], example: str, n: int) -> Any:
250
- """Helper to make sure the command got the right number of arguments"""
251
- if len(args) != n:
252
- msg = (
253
- "Got unexpected number of arguments, expected {}. "
254
- '(example: "{} config {}")'
255
- ).format(n, get_prog(), example)
256
- raise PipError(msg)
257
-
258
- if n == 1:
259
- return args[0]
260
- else:
261
- return args
262
-
263
- def _save_configuration(self) -> None:
264
- # We successfully ran a modifying command. Need to save the
265
- # configuration.
266
- try:
267
- self.configuration.save()
268
- except Exception:
269
- logger.exception(
270
- "Unable to save configuration. Please report this as a bug."
271
- )
272
- raise PipError("Internal Error.")
273
-
274
- def _determine_editor(self, options: Values) -> str:
275
- if options.editor is not None:
276
- return options.editor
277
- elif "VISUAL" in os.environ:
278
- return os.environ["VISUAL"]
279
- elif "EDITOR" in os.environ:
280
- return os.environ["EDITOR"]
281
- else:
282
- raise PipError("Could not determine editor to use.")
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/certs.py DELETED
@@ -1,24 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- """
4
- requests.certs
5
- ~~~~~~~~~~~~~~
6
-
7
- This module returns the preferred default CA certificate bundle. There is
8
- only one — the one from the certifi package.
9
-
10
- If you are packaging Requests, e.g., for a Linux distribution or a managed
11
- environment, you can change the definition of where() to return a separately
12
- packaged CA bundle.
13
- """
14
-
15
- import os
16
-
17
- if "_PIP_STANDALONE_CERT" not in os.environ:
18
- from pip._vendor.certifi import where
19
- else:
20
- def where():
21
- return os.environ["_PIP_STANDALONE_CERT"]
22
-
23
- if __name__ == "__main__":
24
- print(where())
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/jaraco/context.py DELETED
@@ -1,213 +0,0 @@
1
- import os
2
- import subprocess
3
- import contextlib
4
- import functools
5
- import tempfile
6
- import shutil
7
- import operator
8
-
9
-
10
- @contextlib.contextmanager
11
- def pushd(dir):
12
- orig = os.getcwd()
13
- os.chdir(dir)
14
- try:
15
- yield dir
16
- finally:
17
- os.chdir(orig)
18
-
19
-
20
- @contextlib.contextmanager
21
- def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
22
- """
23
- Get a tarball, extract it, change to that directory, yield, then
24
- clean up.
25
- `runner` is the function to invoke commands.
26
- `pushd` is a context manager for changing the directory.
27
- """
28
- if target_dir is None:
29
- target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
30
- if runner is None:
31
- runner = functools.partial(subprocess.check_call, shell=True)
32
- # In the tar command, use --strip-components=1 to strip the first path and
33
- # then
34
- # use -C to cause the files to be extracted to {target_dir}. This ensures
35
- # that we always know where the files were extracted.
36
- runner('mkdir {target_dir}'.format(**vars()))
37
- try:
38
- getter = 'wget {url} -O -'
39
- extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
40
- cmd = ' | '.join((getter, extract))
41
- runner(cmd.format(compression=infer_compression(url), **vars()))
42
- with pushd(target_dir):
43
- yield target_dir
44
- finally:
45
- runner('rm -Rf {target_dir}'.format(**vars()))
46
-
47
-
48
- def infer_compression(url):
49
- """
50
- Given a URL or filename, infer the compression code for tar.
51
- """
52
- # cheat and just assume it's the last two characters
53
- compression_indicator = url[-2:]
54
- mapping = dict(gz='z', bz='j', xz='J')
55
- # Assume 'z' (gzip) if no match
56
- return mapping.get(compression_indicator, 'z')
57
-
58
-
59
- @contextlib.contextmanager
60
- def temp_dir(remover=shutil.rmtree):
61
- """
62
- Create a temporary directory context. Pass a custom remover
63
- to override the removal behavior.
64
- """
65
- temp_dir = tempfile.mkdtemp()
66
- try:
67
- yield temp_dir
68
- finally:
69
- remover(temp_dir)
70
-
71
-
72
- @contextlib.contextmanager
73
- def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
74
- """
75
- Check out the repo indicated by url.
76
-
77
- If dest_ctx is supplied, it should be a context manager
78
- to yield the target directory for the check out.
79
- """
80
- exe = 'git' if 'git' in url else 'hg'
81
- with dest_ctx() as repo_dir:
82
- cmd = [exe, 'clone', url, repo_dir]
83
- if branch:
84
- cmd.extend(['--branch', branch])
85
- devnull = open(os.path.devnull, 'w')
86
- stdout = devnull if quiet else None
87
- subprocess.check_call(cmd, stdout=stdout)
88
- yield repo_dir
89
-
90
-
91
- @contextlib.contextmanager
92
- def null():
93
- yield
94
-
95
-
96
- class ExceptionTrap:
97
- """
98
- A context manager that will catch certain exceptions and provide an
99
- indication they occurred.
100
-
101
- >>> with ExceptionTrap() as trap:
102
- ... raise Exception()
103
- >>> bool(trap)
104
- True
105
-
106
- >>> with ExceptionTrap() as trap:
107
- ... pass
108
- >>> bool(trap)
109
- False
110
-
111
- >>> with ExceptionTrap(ValueError) as trap:
112
- ... raise ValueError("1 + 1 is not 3")
113
- >>> bool(trap)
114
- True
115
-
116
- >>> with ExceptionTrap(ValueError) as trap:
117
- ... raise Exception()
118
- Traceback (most recent call last):
119
- ...
120
- Exception
121
-
122
- >>> bool(trap)
123
- False
124
- """
125
-
126
- exc_info = None, None, None
127
-
128
- def __init__(self, exceptions=(Exception,)):
129
- self.exceptions = exceptions
130
-
131
- def __enter__(self):
132
- return self
133
-
134
- @property
135
- def type(self):
136
- return self.exc_info[0]
137
-
138
- @property
139
- def value(self):
140
- return self.exc_info[1]
141
-
142
- @property
143
- def tb(self):
144
- return self.exc_info[2]
145
-
146
- def __exit__(self, *exc_info):
147
- type = exc_info[0]
148
- matches = type and issubclass(type, self.exceptions)
149
- if matches:
150
- self.exc_info = exc_info
151
- return matches
152
-
153
- def __bool__(self):
154
- return bool(self.type)
155
-
156
- def raises(self, func, *, _test=bool):
157
- """
158
- Wrap func and replace the result with the truth
159
- value of the trap (True if an exception occurred).
160
-
161
- First, give the decorator an alias to support Python 3.8
162
- Syntax.
163
-
164
- >>> raises = ExceptionTrap(ValueError).raises
165
-
166
- Now decorate a function that always fails.
167
-
168
- >>> @raises
169
- ... def fail():
170
- ... raise ValueError('failed')
171
- >>> fail()
172
- True
173
- """
174
-
175
- @functools.wraps(func)
176
- def wrapper(*args, **kwargs):
177
- with ExceptionTrap(self.exceptions) as trap:
178
- func(*args, **kwargs)
179
- return _test(trap)
180
-
181
- return wrapper
182
-
183
- def passes(self, func):
184
- """
185
- Wrap func and replace the result with the truth
186
- value of the trap (True if no exception).
187
-
188
- First, give the decorator an alias to support Python 3.8
189
- Syntax.
190
-
191
- >>> passes = ExceptionTrap(ValueError).passes
192
-
193
- Now decorate a function that always fails.
194
-
195
- >>> @passes
196
- ... def fail():
197
- ... raise ValueError('failed')
198
-
199
- >>> fail()
200
- False
201
- """
202
- return self.raises(func, _test=operator.not_)
203
-
204
-
205
- class suppress(contextlib.suppress, contextlib.ContextDecorator):
206
- """
207
- A version of contextlib.suppress with decorator support.
208
-
209
- >>> @suppress(KeyError)
210
- ... def key_error():
211
- ... {}['']
212
- >>> key_error()
213
- """
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/video_visualizer.py DELETED
@@ -1,252 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import numpy as np
3
- import pycocotools.mask as mask_util
4
-
5
- from detectron2.utils.visualizer import (
6
- ColorMode,
7
- Visualizer,
8
- _create_text_labels,
9
- _PanopticPrediction,
10
- )
11
-
12
- from .colormap import random_color
13
-
14
-
15
- class _DetectedInstance:
16
- """
17
- Used to store data about detected objects in video frame,
18
- in order to transfer color to objects in the future frames.
19
-
20
- Attributes:
21
- label (int):
22
- bbox (tuple[float]):
23
- mask_rle (dict):
24
- color (tuple[float]): RGB colors in range (0, 1)
25
- ttl (int): time-to-live for the instance. For example, if ttl=2,
26
- the instance color can be transferred to objects in the next two frames.
27
- """
28
-
29
- __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"]
30
-
31
- def __init__(self, label, bbox, mask_rle, color, ttl):
32
- self.label = label
33
- self.bbox = bbox
34
- self.mask_rle = mask_rle
35
- self.color = color
36
- self.ttl = ttl
37
-
38
-
39
- class VideoVisualizer:
40
- def __init__(self, metadata, instance_mode=ColorMode.IMAGE):
41
- """
42
- Args:
43
- metadata (MetadataCatalog): image metadata.
44
- """
45
- self.metadata = metadata
46
- self._old_instances = []
47
- assert instance_mode in [
48
- ColorMode.IMAGE,
49
- ColorMode.IMAGE_BW,
50
- ], "Other mode not supported yet."
51
- self._instance_mode = instance_mode
52
-
53
- def draw_instance_predictions(self, frame, predictions):
54
- """
55
- Draw instance-level prediction results on an image.
56
-
57
- Args:
58
- frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255].
59
- predictions (Instances): the output of an instance detection/segmentation
60
- model. Following fields will be used to draw:
61
- "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
62
-
63
- Returns:
64
- output (VisImage): image object with visualizations.
65
- """
66
- frame_visualizer = Visualizer(frame, self.metadata)
67
- num_instances = len(predictions)
68
- if num_instances == 0:
69
- return frame_visualizer.output
70
-
71
- boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None
72
- scores = predictions.scores if predictions.has("scores") else None
73
- classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None
74
- keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
75
- colors = predictions.COLOR if predictions.has("COLOR") else [None] * len(predictions)
76
- durations = predictions.ID_duration if predictions.has("ID_duration") else None
77
- duration_threshold = self.metadata.get("duration_threshold", 0)
78
- visibilities = None if durations is None else [x > duration_threshold for x in durations]
79
-
80
- if predictions.has("pred_masks"):
81
- masks = predictions.pred_masks
82
- # mask IOU is not yet enabled
83
- # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F"))
84
- # assert len(masks_rles) == num_instances
85
- else:
86
- masks = None
87
-
88
- detected = [
89
- _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=colors[i], ttl=8)
90
- for i in range(num_instances)
91
- ]
92
- if not predictions.has("COLOR"):
93
- colors = self._assign_colors(detected)
94
-
95
- labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
96
-
97
- if self._instance_mode == ColorMode.IMAGE_BW:
98
- # any() returns uint8 tensor
99
- frame_visualizer.output.reset_image(
100
- frame_visualizer._create_grayscale_image(
101
- (masks.any(dim=0) > 0).numpy() if masks is not None else None
102
- )
103
- )
104
- alpha = 0.3
105
- else:
106
- alpha = 0.5
107
-
108
- labels = (
109
- None
110
- if labels is None
111
- else [y[0] for y in filter(lambda x: x[1], zip(labels, visibilities))]
112
- ) # noqa
113
- assigned_colors = (
114
- None
115
- if colors is None
116
- else [y[0] for y in filter(lambda x: x[1], zip(colors, visibilities))]
117
- ) # noqa
118
- frame_visualizer.overlay_instances(
119
- boxes=None if masks is not None else boxes[visibilities], # boxes are a bit distracting
120
- masks=None if masks is None else masks[visibilities],
121
- labels=labels,
122
- keypoints=None if keypoints is None else keypoints[visibilities],
123
- assigned_colors=assigned_colors,
124
- alpha=alpha,
125
- )
126
-
127
- return frame_visualizer.output
128
-
129
- def draw_sem_seg(self, frame, sem_seg, area_threshold=None):
130
- """
131
- Args:
132
- sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W),
133
- each value is the integer label.
134
- area_threshold (Optional[int]): only draw segmentations larger than the threshold
135
- """
136
- # don't need to do anything special
137
- frame_visualizer = Visualizer(frame, self.metadata)
138
- frame_visualizer.draw_sem_seg(sem_seg, area_threshold=None)
139
- return frame_visualizer.output
140
-
141
- def draw_panoptic_seg_predictions(
142
- self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5
143
- ):
144
- frame_visualizer = Visualizer(frame, self.metadata)
145
- pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
146
-
147
- if self._instance_mode == ColorMode.IMAGE_BW:
148
- frame_visualizer.output.reset_image(
149
- frame_visualizer._create_grayscale_image(pred.non_empty_mask())
150
- )
151
-
152
- # draw mask for all semantic segments first i.e. "stuff"
153
- for mask, sinfo in pred.semantic_masks():
154
- category_idx = sinfo["category_id"]
155
- try:
156
- mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
157
- except AttributeError:
158
- mask_color = None
159
-
160
- frame_visualizer.draw_binary_mask(
161
- mask,
162
- color=mask_color,
163
- text=self.metadata.stuff_classes[category_idx],
164
- alpha=alpha,
165
- area_threshold=area_threshold,
166
- )
167
-
168
- all_instances = list(pred.instance_masks())
169
- if len(all_instances) == 0:
170
- return frame_visualizer.output
171
- # draw mask for all instances second
172
- masks, sinfo = list(zip(*all_instances))
173
- num_instances = len(masks)
174
- masks_rles = mask_util.encode(
175
- np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F")
176
- )
177
- assert len(masks_rles) == num_instances
178
-
179
- category_ids = [x["category_id"] for x in sinfo]
180
- detected = [
181
- _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8)
182
- for i in range(num_instances)
183
- ]
184
- colors = self._assign_colors(detected)
185
- labels = [self.metadata.thing_classes[k] for k in category_ids]
186
-
187
- frame_visualizer.overlay_instances(
188
- boxes=None,
189
- masks=masks,
190
- labels=labels,
191
- keypoints=None,
192
- assigned_colors=colors,
193
- alpha=alpha,
194
- )
195
- return frame_visualizer.output
196
-
197
- def _assign_colors(self, instances):
198
- """
199
- Naive tracking heuristics to assign same color to the same instance,
200
- will update the internal state of tracked instances.
201
-
202
- Returns:
203
- list[tuple[float]]: list of colors.
204
- """
205
-
206
- # Compute iou with either boxes or masks:
207
- is_crowd = np.zeros((len(instances),), dtype=np.bool)
208
- if instances[0].bbox is None:
209
- assert instances[0].mask_rle is not None
210
- # use mask iou only when box iou is None
211
- # because box seems good enough
212
- rles_old = [x.mask_rle for x in self._old_instances]
213
- rles_new = [x.mask_rle for x in instances]
214
- ious = mask_util.iou(rles_old, rles_new, is_crowd)
215
- threshold = 0.5
216
- else:
217
- boxes_old = [x.bbox for x in self._old_instances]
218
- boxes_new = [x.bbox for x in instances]
219
- ious = mask_util.iou(boxes_old, boxes_new, is_crowd)
220
- threshold = 0.6
221
- if len(ious) == 0:
222
- ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32")
223
-
224
- # Only allow matching instances of the same label:
225
- for old_idx, old in enumerate(self._old_instances):
226
- for new_idx, new in enumerate(instances):
227
- if old.label != new.label:
228
- ious[old_idx, new_idx] = 0
229
-
230
- matched_new_per_old = np.asarray(ious).argmax(axis=1)
231
- max_iou_per_old = np.asarray(ious).max(axis=1)
232
-
233
- # Try to find match for each old instance:
234
- extra_instances = []
235
- for idx, inst in enumerate(self._old_instances):
236
- if max_iou_per_old[idx] > threshold:
237
- newidx = matched_new_per_old[idx]
238
- if instances[newidx].color is None:
239
- instances[newidx].color = inst.color
240
- continue
241
- # If an old instance does not match any new instances,
242
- # keep it for the next frame in case it is just missed by the detector
243
- inst.ttl -= 1
244
- if inst.ttl > 0:
245
- extra_instances.append(inst)
246
-
247
- # Assign random color to newly-detected instances:
248
- for inst in instances:
249
- if inst.color is None:
250
- inst.color = random_color(rgb=True, maximum=1)
251
- self._old_instances = instances[:] + extra_instances
252
- return [d.color for d in instances]
 
spaces/Banbri/zcvzcv/LICENCE.md DELETED
@@ -1,170 +0,0 @@
1
- Apache License
2
- ==============
3
-
4
- _Version 2.0, January 2004_
5
- _&lt;<http://www.apache.org/licenses/>&gt;_
6
-
7
- ### Terms and Conditions for use, reproduction, and distribution
8
-
9
- #### 1. Definitions
10
-
11
- “License” shall mean the terms and conditions for use, reproduction, and
12
- distribution as defined by Sections 1 through 9 of this document.
13
-
14
- “Licensor” shall mean the copyright owner or entity authorized by the copyright
15
- owner that is granting the License.
16
-
17
- “Legal Entity” shall mean the union of the acting entity and all other entities
18
- that control, are controlled by, or are under common control with that entity.
19
- For the purposes of this definition, “control” means **(i)** the power, direct or
20
- indirect, to cause the direction or management of such entity, whether by
21
- contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
22
- outstanding shares, or **(iii)** beneficial ownership of such entity.
23
-
24
- “You” (or “Your”) shall mean an individual or Legal Entity exercising
25
- permissions granted by this License.
26
-
27
- “Source” form shall mean the preferred form for making modifications, including
28
- but not limited to software source code, documentation source, and configuration
29
- files.
30
-
31
- “Object” form shall mean any form resulting from mechanical transformation or
32
- translation of a Source form, including but not limited to compiled object code,
33
- generated documentation, and conversions to other media types.
34
-
35
- “Work” shall mean the work of authorship, whether in Source or Object form, made
36
- available under the License, as indicated by a copyright notice that is included
37
- in or attached to the work (an example is provided in the Appendix below).
38
-
39
- “Derivative Works” shall mean any work, whether in Source or Object form, that
40
- is based on (or derived from) the Work and for which the editorial revisions,
41
- annotations, elaborations, or other modifications represent, as a whole, an
42
- original work of authorship. For the purposes of this License, Derivative Works
43
- shall not include works that remain separable from, or merely link (or bind by
44
- name) to the interfaces of, the Work and Derivative Works thereof.
45
-
46
- “Contribution” shall mean any work of authorship, including the original version
47
- of the Work and any modifications or additions to that Work or Derivative Works
48
- thereof, that is intentionally submitted to Licensor for inclusion in the Work
49
- by the copyright owner or by an individual or Legal Entity authorized to submit
50
- on behalf of the copyright owner. For the purposes of this definition,
51
- “submitted” means any form of electronic, verbal, or written communication sent
52
- to the Licensor or its representatives, including but not limited to
53
- communication on electronic mailing lists, source code control systems, and
54
- issue tracking systems that are managed by, or on behalf of, the Licensor for
55
- the purpose of discussing and improving the Work, but excluding communication
56
- that is conspicuously marked or otherwise designated in writing by the copyright
57
- owner as “Not a Contribution.”
58
-
59
- “Contributor” shall mean Licensor and any individual or Legal Entity on behalf
60
- of whom a Contribution has been received by Licensor and subsequently
61
- incorporated within the Work.
62
-
63
- #### 2. Grant of Copyright License
64
-
65
- Subject to the terms and conditions of this License, each Contributor hereby
66
- grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
67
- irrevocable copyright license to reproduce, prepare Derivative Works of,
68
- publicly display, publicly perform, sublicense, and distribute the Work and such
69
- Derivative Works in Source or Object form.
70
-
71
- #### 3. Grant of Patent License
72
-
73
- Subject to the terms and conditions of this License, each Contributor hereby
74
- grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
75
- irrevocable (except as stated in this section) patent license to make, have
76
- made, use, offer to sell, sell, import, and otherwise transfer the Work, where
77
- such license applies only to those patent claims licensable by such Contributor
78
- that are necessarily infringed by their Contribution(s) alone or by combination
79
- of their Contribution(s) with the Work to which such Contribution(s) was
80
- submitted. If You institute patent litigation against any entity (including a
81
- cross-claim or counterclaim in a lawsuit) alleging that the Work or a
82
- Contribution incorporated within the Work constitutes direct or contributory
83
- patent infringement, then any patent licenses granted to You under this License
84
- for that Work shall terminate as of the date such litigation is filed.
85
-
86
- #### 4. Redistribution
87
-
88
- You may reproduce and distribute copies of the Work or Derivative Works thereof
89
- in any medium, with or without modifications, and in Source or Object form,
90
- provided that You meet the following conditions:
91
-
92
- * **(a)** You must give any other recipients of the Work or Derivative Works a copy of
93
- this License; and
94
- * **(b)** You must cause any modified files to carry prominent notices stating that You
95
- changed the files; and
96
- * **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
97
- all copyright, patent, trademark, and attribution notices from the Source form
98
- of the Work, excluding those notices that do not pertain to any part of the
99
- Derivative Works; and
100
- * **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
101
- Derivative Works that You distribute must include a readable copy of the
102
- attribution notices contained within such NOTICE file, excluding those notices
103
- that do not pertain to any part of the Derivative Works, in at least one of the
104
- following places: within a NOTICE text file distributed as part of the
105
- Derivative Works; within the Source form or documentation, if provided along
106
- with the Derivative Works; or, within a display generated by the Derivative
107
- Works, if and wherever such third-party notices normally appear. The contents of
108
- the NOTICE file are for informational purposes only and do not modify the
109
- License. You may add Your own attribution notices within Derivative Works that
110
- You distribute, alongside or as an addendum to the NOTICE text from the Work,
111
- provided that such additional attribution notices cannot be construed as
112
- modifying the License.
113
-
114
- You may add Your own copyright statement to Your modifications and may provide
115
- additional or different license terms and conditions for use, reproduction, or
116
- distribution of Your modifications, or for any such Derivative Works as a whole,
117
- provided Your use, reproduction, and distribution of the Work otherwise complies
118
- with the conditions stated in this License.
119
-
120
- #### 5. Submission of Contributions
121
-
122
- Unless You explicitly state otherwise, any Contribution intentionally submitted
123
- for inclusion in the Work by You to the Licensor shall be under the terms and
124
- conditions of this License, without any additional terms or conditions.
125
- Notwithstanding the above, nothing herein shall supersede or modify the terms of
126
- any separate license agreement you may have executed with Licensor regarding
127
- such Contributions.
128
-
129
- #### 6. Trademarks
130
-
131
- This License does not grant permission to use the trade names, trademarks,
132
- service marks, or product names of the Licensor, except as required for
133
- reasonable and customary use in describing the origin of the Work and
134
- reproducing the content of the NOTICE file.
135
-
136
- #### 7. Disclaimer of Warranty
137
-
138
- Unless required by applicable law or agreed to in writing, Licensor provides the
139
- Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
140
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
141
- including, without limitation, any warranties or conditions of TITLE,
142
- NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
143
- solely responsible for determining the appropriateness of using or
144
- redistributing the Work and assume any risks associated with Your exercise of
145
- permissions under this License.
146
-
147
- #### 8. Limitation of Liability
148
-
149
- In no event and under no legal theory, whether in tort (including negligence),
150
- contract, or otherwise, unless required by applicable law (such as deliberate
151
- and grossly negligent acts) or agreed to in writing, shall any Contributor be
152
- liable to You for damages, including any direct, indirect, special, incidental,
153
- or consequential damages of any character arising as a result of this License or
154
- out of the use or inability to use the Work (including but not limited to
155
- damages for loss of goodwill, work stoppage, computer failure or malfunction, or
156
- any and all other commercial damages or losses), even if such Contributor has
157
- been advised of the possibility of such damages.
158
-
159
- #### 9. Accepting Warranty or Additional Liability
160
-
161
- While redistributing the Work or Derivative Works thereof, You may choose to
162
- offer, and charge a fee for, acceptance of support, warranty, indemnity, or
163
- other liability obligations and/or rights consistent with this License. However,
164
- in accepting such obligations, You may act only on Your own behalf and on Your
165
- sole responsibility, not on behalf of any other Contributor, and only if You
166
- agree to indemnify, defend, and hold each Contributor harmless for any liability
167
- incurred by, or claims asserted against, such Contributor by reason of your
168
- accepting any such warranty or additional liability.
169
-
170
- _END OF TERMS AND CONDITIONS_
 
spaces/Benson/text-generation/Examples/Choque De Clanes Indir Apkcombo.md DELETED
@@ -1,153 +0,0 @@
1
-
2
- <h1>Choque de clanes Indir Apkcombo: Cómo descargar y jugar el popular juego de estrategia</h1>
3
- <p>Si estás buscando un juego de estrategia divertido y adictivo que desafíe tus habilidades y creatividad, deberías probar <strong>Clash of Clans</strong>. Este juego ha sido uno de los juegos más populares del mundo durante años, con millones de jugadores uniéndose a clanes y compitiendo en guerras épicas. En este artículo, te mostraremos cómo descargar y jugar Clash of Clans desde <strong>Apkcombo</strong>, un sitio web que ofrece archivos APK gratuitos para juegos y aplicaciones Android. También te daremos algunos consejos y trucos para ayudarte a ganar en este juego. </p>
4
- <h2>¿Qué es el Choque de Clanes? </h2>
5
- <p>Clash of Clans es un juego de estrategia desarrollado por Supercell, una compañía finlandesa que también creó otros juegos de éxito como Clash Royale, Brawl Stars, Boom Beach y Hay Day. En Clash of Clans, puedes construir tu propia aldea, entrenar a tus tropas y unirte o crear un clan con otros jugadores. A continuación, puedes participar en guerras de clanes, donde puedes atacar y defenderte contra otros clanes, o en batallas multijugador, donde puedes asaltar las aldeas de otros jugadores en busca de recursos. También puedes desbloquear y actualizar diferentes tipos de tropas, hechizos y héroes, cada uno con sus propias habilidades y estrategias. </p>
6
- <h2>choque de clanes indir apkcombo</h2><br /><p><b><b>Download</b> &#127379; <a href="https://bltlly.com/2v6IRM">https://bltlly.com/2v6IRM</a></b></p><br /><br />
7
- <h3>Una breve introducción a las características y la jugabilidad del juego</h3>
8
- <p>Clash of Clans tiene muchas características que lo convierten en un juego emocionante y diverso. Aquí están algunas de ellas:</p>
9
- <ul>
10
- <li><strong>Village:</strong> Aquí es donde construyes tu base, que consta de varios edificios, como minas de oro, colectores de elixires, cuarteles, campamentos del ejército, defensas, muros, ayuntamiento, castillo del clan, laboratorio, etc. También puedes personalizar tu pueblo con decoraciones, obstáculos, pieles de héroe y escenarios. </li>
11
-
12
- <li><strong>Hechizos:</strong> Estos son los efectos mágicos que puedes usar para apoyar a tus tropas u obstaculizar a tus enemigos en las batallas. Hay diferentes tipos de hechizos, como hechizo de relámpago, hechizo de sanación, hechizo de ira, hechizo de salto, hechizo de congelación, etc. Cada hechizo tiene sus propios efectos y cuesta elixir u elixir oscuro para usar. </li>
13
- <li><strong>Héroes:</strong> Estas son las unidades especiales que tienen habilidades poderosas y se pueden usar varias veces en batallas. Hay cuatro héroes en el juego: rey bárbaro, reina arquera, gran alcaide y campeón real. Cada héroe tiene su propio nivel que puede actualizar con elixir oscuro o gemas <h3>Los beneficios de descargar Clash of Clans de Apkcombo</h3>
14
- <p>Apkcombo es un sitio web que ofrece archivos APK gratuitos para juegos y aplicaciones Android. APK significa Android Package Kit, que es el formato de archivo utilizado por Android para distribuir e instalar aplicaciones. Al descargar archivos APK desde Apkcombo, puede disfrutar de algunos beneficios, como:</p>
15
- <ul>
16
- <li><strong>Acceso a la última versión:</strong> Apkcombo siempre actualiza los archivos APK a la última versión disponible, para que pueda obtener las nuevas características y correcciones de errores para Clash of Clans.</li>
17
- <li><strong>Acceso a la versión modded:</strong> Apkcombo también proporciona archivos APK modded para algunos juegos y aplicaciones, lo que significa que se han modificado para tener características o ventajas adicionales, como recursos ilimitados, elementos desbloqueados o anuncios eliminados. Sin embargo, tenga cuidado al usar archivos APK modificados, ya que pueden no ser compatibles con el juego original o la aplicación, o pueden violar los términos del servicio. </li>
18
- <li><strong>Acceso a la versión bloqueada por región:</strong> Apkcombo le permite descargar archivos APK de diferentes regiones, que pueden tener diferentes contenidos o idiomas. Por ejemplo, puedes descargar la versión china de Clash of Clans, que tiene algunas características y eventos exclusivos que no están disponibles en otras regiones. </li>
19
-
20
- </ul>
21
- <p>Sin embargo, también hay algunos riesgos y desventajas de descargar archivos APK de Apkcombo, como:</p>
22
- <ul>
23
- <li><strong>Malware o virus potenciales:</strong> Apkcombo afirma que todos los archivos APK son escaneados y verificados por el software antivirus, pero todavía hay una posibilidad de que algún código malicioso o software puede estar oculto en los archivos APK. Por lo tanto, siempre debe comprobar el origen y la reputación del archivo APK antes de descargarlo, y utilizar una aplicación antivirus confiable para escanearlo antes de instalarlo. </li>
24
- <li><strong>Problemas potenciales de compatibilidad:</strong> Apkcombo no garantiza que todos los archivos APK funcionarán en su dispositivo, ya que pueden tener diferentes requisitos o especificaciones. Por lo tanto, siempre debe comprobar la compatibilidad y los requisitos del sistema del archivo APK antes de descargarlo, y hacer una copia de seguridad de sus datos antes de instalarlo. </li>
25
- <li><strong>Problemas legales potenciales:</strong> Apkcombo no posee ni aloja ninguno de los archivos APK en su sitio web, pero solo proporciona enlaces a otras fuentes. Por lo tanto, siempre debe respetar los derechos de propiedad intelectual y los términos de servicio de los desarrolladores originales y editores de los juegos y aplicaciones. Descargar e instalar archivos APK desde Apkcombo puede violar sus derechos y políticas, y puede resultar en acciones legales o sanciones. </li>
26
- </ul>
27
- <p>Por lo tanto, siempre debe ser cuidadoso y responsable al descargar e instalar archivos APK desde Apkcombo. Solo debe descargar archivos APK de fuentes de confianza, y solo para uso personal. También debes evitar usar archivos APK modificados que puedan darte ventajas injustas o dañar a otros jugadores en Clash of Clans.</p>
28
- <h2>Cómo descargar e instalar Clash of Clans desde Apkcombo</h2>
29
- <p>Si quieres descargar e instalar Clash of Clans desde Apkcombo, puedes seguir estos pasos:</p>
30
- <p></p>
31
- <h3>Los pasos para descargar el archivo APK desde el sitio web de Apkcombo</h3>
32
- <ol>
33
- <li>Ir a <a href="( 5 )">Sitio web de Apkcombo</a> en su navegador. </li>
34
-
35
- <li>Seleccione la versión de Clash of Clans que desea descargar. Puede elegir entre la versión original o la versión modificada. </li>
36
- <li>Seleccione la región de Clash of Clans que desea descargar. Puede elegir entre diferentes regiones, como global, China, Japón, etc.</li>
37
- <li>Seleccione la arquitectura de su dispositivo. Puede elegir entre armeabi-v7a, arm64-v8a, x86 o x86_x64. </li>
38
- <li>Haga clic en el botón "Descargar" y espere a que termine la descarga. </li>
39
- </ol>
40
- <h3>Los pasos para instalar el archivo APK en su dispositivo Android</h3>
41
- <ol>
42
- <li>Antes de instalar el archivo APK, asegúrese de que ha habilitado la opción "Fuentes desconocidas" en la configuración del dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store.</li>
43
- <li>Localizar el archivo APK descargado en el almacenamiento del dispositivo utilizando una aplicación de administrador de archivos. </li>
44
- <li>Toque en el archivo APK y siga las instrucciones en la pantalla para instalarlo. </li>
45
- <li>Esperar a que la instalación para completar y lanzar Clash of Clans desde el cajón de la aplicación <h3>Los pasos para actualizar el juego y solucionar cualquier problema</h3>
46
- <ol>
47
- <li>Para actualizar el juego, puede descargar el último archivo APK de Apkcombo e instalarlo sobre el existente, o puede usar la opción de actualización dentro del juego si está disponible. Siempre debes actualizar el juego para disfrutar de las nuevas características y mejoras. </li>
48
- <li>Para solucionar cualquier problema, como fallos, errores o fallas, puede probar algunas de estas soluciones: <ul>
49
- <li>Borra la caché y los datos del juego desde la configuración de tu dispositivo. </li>
50
- <li>Desinstalar y volver a instalar el juego desde Apkcombo.</li>
51
- <li>Compruebe su conexión a Internet y asegúrese de que es estable y rápido. </li>
52
- <li> Compruebe el almacenamiento del dispositivo y asegúrese de que tiene suficiente espacio para el juego. </li>
53
- <li>Póngase en contacto con el equipo de soporte de Supercell desde la configuración del juego o su sitio web para obtener más ayuda. </li>
54
- </ul>
55
- </li>
56
- </ol>
57
- <h2>Cómo jugar al choque de clanes y ganar</h2>
58
-
59
- <h3>Los fundamentos de la construcción de su pueblo y la elevación de su clan</h3>
60
- <p>Lo primero que tienes que hacer en Clash of Clans es construir tu pueblo y levantar tu clan. Estos son algunos pasos básicos a seguir:</p>
61
- <ol>
62
- <li>Comienza construyendo y mejorando tu ayuntamiento, que es el corazón de tu pueblo. Tu nivel de ayuntamiento determina qué edificios y tropas puedes desbloquear y usar. </li>
63
- <li>Construya y actualice sus edificios de recursos, como minas de oro, colectores de elixir, almacenes de oro, almacenes de elixir, taladros de elixir oscuro y almacenes de elixir oscuro. Estos edificios le proporcionarán los recursos que necesita para construir y mejorar otros edificios y tropas. </li>
64
- <li>Construye y mejora tus edificios de defensa, como cañones, torres de arqueros, morteros, defensas aéreas, torres de magos, teslas ocultas, torres de bombas, torres de infierno, artillería de águilas, etc. Estos edificios protegerán tu pueblo de los ataques enemigos. </li>
65
- <li>Construir y mejorar sus paredes, que actuará como una barrera contra las tropas enemigas. También puedes colocar trampas, como bombas, trampas de resorte, bombas de aire, bombas gigantes, minas de aire en busca, trampas de esqueleto, etc. para sorprender y dañar a las tropas enemigas. </li>
66
- <li>Construir y mejorar sus edificios del ejército, tales como cuarteles, cuarteles oscuros, campamentos del ejército, fábrica de hechizos, fábrica de hechizos oscuros, taller de asedio, etc. Estos edificios le permitirá entrenar y almacenar sus tropas y hechizos para batallas. </li>
67
- <li>Construye y mejora el castillo de tu clan, lo que te permitirá unirte o crear un clan con otros jugadores. También puedes solicitar y donar tropas y hechizos a los miembros de tu clan, que te ayudarán en las batallas. </li>
68
- <li>Construir y mejorar sus edificios héroe, tales como el rey bárbaro altar, arquero reina altar, gran guardián altar y campeón real altar. Estos edificios te permitirán desbloquear y usar a los héroes en las batallas. </li>
69
-
70
- </ol>
71
- <p>Siempre debes tratar de equilibrar el desarrollo de tu pueblo, y no descuidar ningún aspecto de él. También debe seguir el pedido de actualización recomendado, que puede encontrar en varias guías y sitios web en línea. </p>
72
- <h3>Los consejos y trucos para atacar y defender en las guerras de clanes y batallas multijugador</h3>
73
- <p>Una de las principales atracciones de Clash of Clans son las guerras de clanes y las batallas multijugador, donde puedes poner a prueba tus habilidades y estrategias contra otros jugadores. Aquí hay algunos consejos y trucos para ayudarte a atacar y defender en estos modos:</p>
74
- <ul>
75
- <li><strong>Explora a tu enemigo:</strong> Antes de atacar, siempre debes explorar la aldea de tu enemigo y analizar su diseño, defensas, trampas, tropas del castillo del clan, héroes, etc. También debes revisar su perfil y ver su historia de ataque y defensa, trofeos, liga, clan, etc. Esto te ayudará a planificar tu ataque y elegir las mejores tropas y hechizos para él. </li>
76
- <li><strong>Usa la composición correcta del ejército:</strong> Dependiendo de la aldea de tu enemigo y tu estrategia, debes usar la composición correcta del ejército para tu ataque. Debes considerar el costo, tiempo de entrenamiento, espacio de alojamiento, daños, salud, velocidad, rango, preferencia de objetivo, habilidad especial, etc. de cada tropa y hechizo. También debes tener una variedad de tropas y hechizos para lidiar con diferentes situaciones y obstáculos. </li>
77
- <li><strong>Usa la técnica de despliegue correcta:</strong> Dependiendo de la composición de tu ejército y tu estrategia, debes usar la técnica de despliegue correcta para tu ataque. Debes considerar el tiempo, ubicación, dirección, espaciado, agrupación, canalización, etc. de cada tropa y hechizo. También debes usar las habilidades del héroe y las tropas del castillo del clan sabiamente. </li>
78
-
79
- <li><strong>Practica y aprende:</strong> La mejor manera de mejorar tus habilidades de ataque es practicar y aprender de tus propios ataques y los de los demás. Puedes usar la función de desafío amistoso para practicar con tus compañeros de clan o el modo de práctica para aprender algunas estrategias básicas. También puedes ver las repeticiones de tus propios ataques y los de otros para ver qué funcionó y qué no. </li>
80
- <li><strong>Diseña tu base:</strong> Para defender tu aldea de los ataques enemigos, debes diseñar tu base con cuidado y estratégicamente. Usted debe considerar la disposición, colocación , y la sinergia de cada edificio, pared, trampa, clan de la tropa del castillo, héroe, etc. También debe seguir los principios de diseño de base recomendados, que se pueden encontrar en varias guías y sitios web en línea. </li>
81
- <li><strong>Mejora tus defensas:</strong> Para defender tu pueblo de los ataques enemigos, debes mejorar tus defensas de forma regular y estratégica. Debe considerar el costo, tiempo, efecto, prioridad, etc. de cada actualización. También debe seguir el pedido de actualización recomendado, que puede encontrar en varias guías y sitios web en línea. </li>
82
- <li><strong>Prueba tu base:</strong> Para defender tu aldea de los ataques enemigos, debes probar tu base con frecuencia y de manera realista. Puedes usar la función de desafío amigable para probar tu base con tus compañeros de clan o el editor de diseño de base para probar tu base con diferentes escenarios. También puedes ver las repeticiones de ataques enemigos para ver cómo funciona tu base y qué puedes mejorar. </li>
83
- </ul>
84
- <h3>Los recursos y estrategias para mejorar tus tropas, hechizos y héroes</h3>
85
- <p>Para tener éxito en Clash of Clans, necesitas mejorar tus tropas, hechizos y héroes constantemente y estratégicamente. Aquí hay algunos recursos y estrategias para ayudarle a hacer eso:</p>
86
- <ul>
87
-
88
- <li><strong>Elixir oscuro:</strong> Este es un recurso especial que necesitas para actualizar tus tropas oscuras, hechizos oscuros, héroes y algunos edificios. Puedes obtener elixir oscuro de taladros de elixir oscuro, asaltar aldeas de otros jugadores, completar logros y eventos, abrir carros de botín y cofres de bonificación de estrellas, etc.</li>
89
- <li><strong>Gemas:</strong> Este es un recurso premium que puede usar para acelerar las actualizaciones, comprar recursos, impulsar edificios, entrenar tropas y hechizos al instante, etc. Puede obtener gemas de eliminar obstáculos, completar logros y eventos, abrir cajas de gemas y carritos de minas de gemas, comprar con dinero real, etc.</li>
90
- <li><strong>Base de constructor de oro y elixir:</strong> Estos son los recursos que necesita para actualizar sus tropas de base de constructor, edificios y paredes. Puedes obtener oro base constructor y elixir de minas de oro y coleccionistas de elixires, ganar batallas, completar logros y eventos, abrir carritos de botín y cofres de bonificación de estrellas, etc.</li>
91
- <li><strong>Gemas de base de constructor:</strong> Este es un recurso que puedes usar para acelerar las actualizaciones, comprar recursos, impulsar edificios, etc. en tu base de constructor. Puede obtener gemas base constructor de despejar obstáculos, completar logros y eventos, abrir cajas de gemas y carretas de minas de gemas, comprarlos con dinero real, etc.</li>
92
- <li><strong>Artículos mágicos:</strong> Estos son artículos especiales que puedes usar para aumentar tu progreso de varias maneras, como aumentar tu producción de recursos, reducir tu tiempo o costo de actualización, mejorar tus tropas o hechizos, etc. Puedes obtener objetos mágicos al completar juegos de clan, alcanzando ciertos niveles de liga, comprándolos con gemas o dinero real, etc.</li>
93
-
94
- </ul>
95
- <h2>Conclusión</h2>
96
- <p>Clash of Clans es un juego que te mantendrá entretenido y comprometido durante horas. Puedes descargarlo y jugarlo desde Apkcombo, un sitio web que ofrece archivos APK gratuitos para juegos y aplicaciones Android. Sin embargo, debe ser cuidadoso y responsable al descargar e instalar archivos APK desde Apkcombo. También debes seguir algunos consejos y trucos para ayudarte a construir tu pueblo, levantar tu clan y ganar en guerras de clanes y batallas multijugador. También debes mejorar tus tropas, hechizos y héroes de forma regular y estratégica. Esperamos que este artículo te haya ayudado a aprender más sobre Clash of Clans indir Apkcombo. ¡Ahora sigue adelante y disfruta del juego! </p>
97
- <h2>Preguntas frecuentes</h2>
98
- <h3>Q1: ¿Es Clash of Clans libre para jugar? </h3>
99
- <p>A1: Sí, Clash of Clans es gratis para descargar y jugar. Sin embargo, también ofrece algunas compras opcionales en el juego con dinero real, como gemas, objetos mágicos u ofertas especiales. Puede desactivar estas compras desde la configuración de su dispositivo si lo desea. </p>
100
- <h3>Q2: ¿Es seguro descargar Clash of Clans desde Apkcombo? </h3>
101
- <p>A2: Apkcombo afirma que todos los archivos APK en su sitio web son escaneados y verificados por el software antivirus, pero todavía hay un riesgo de malware o virus. Por lo tanto, siempre debe comprobar el origen y la reputación del archivo APK antes de descargarlo, y utilizar una aplicación antivirus confiable para escanearlo antes de instalarlo. También debe descargar solo archivos APK de fuentes de confianza, y solo para uso personal. </p>
102
- <h3>Q3: ¿Cómo puedo unirme o crear un clan en Clash of Clans? </h3>
103
- <p>A3: Para unirte o crear un clan en Clash of Clans, necesitas tener un castillo de clan, que puedes construir después de llegar al nivel 3 del ayuntamiento. Puedes tocar el castillo del clan y elegir la opción de unirte o crear un clan. Puedes buscar clanes por nombre, etiqueta, ubicación, nivel, miembros, etc. o navegar por los clanes recomendados. También puedes invitar o aceptar a otros jugadores para que se unan a tu clan. Puedes chatear, donar, solicitar y luchar con los miembros de tu clan. </p>
104
- <h3>Q4: ¿Cuáles son las mejores tropas y hechizos en Clash of Clans? </h3>
105
- <p>A4: No hay una respuesta definitiva a esta pregunta, ya que diferentes tropas y hechizos pueden funcionar mejor para diferentes situaciones y estrategias. Sin embargo, algunas de las tropas y hechizos más populares y eficaces son:</p>
106
- <table>
107
- <tr>
108
- <th>Tropas</th>
109
- <th>Hechizos</th>
110
- </tr>
111
- <tr>
112
- <td>Mineros</td>
113
- <td>Hechizo de sanación</td>
114
- </tr>
115
- <tr>
116
- <td>Jugadores de bolos</td>
117
- <td>Hechizo de ira</td>
118
- </tr>
119
- <tr>
120
- <td>Jinetes de cerdo</td>
121
- <td>Hechizo de congelación</td>
122
- </tr>
123
- <tr>
124
- <td>Electro dragones</td>
125
- <td>Hechizo de murciélago</td>
126
- </tr>
127
- <tr>
128
- <td>Perros de lava</td>
129
- <td>Hechizo de prisa</td>
130
- </tr>
131
- <tr>
132
- <td>Globos</td>
133
- <td>Hechizo de clonación</td>
134
- </tr>
135
- <tr>
136
- <td>Golems</td>
137
- <td>Hechizo de veneno</td>
138
- </tr>
139
- <tr>
140
- <td>Brujas</td> <td>Hechizo de terremoto</td>
141
- </tr>
142
- </table>
143
- <p>Puedes experimentar con diferentes combinaciones de tropas y hechizos para encontrar los que se adapten a tu estilo y objetivos. </p>
144
- <h3>Q5: ¿Cómo puedo contactar a Supercell para soporte o retroalimentación? </h3>
145
- <p>A5: Si tienes algún problema, pregunta o sugerencia con respecto a Clash of Clans, puedes ponerte en contacto con Supercell para obtener apoyo o comentarios. Puedes hacer esto por:</p>
146
- <ul>
147
- <li>Tocando en el icono de configuración en el juego y la elección de la "Ayuda y soporte" opción. A continuación, puede examinar las preguntas frecuentes, informar de un problema o enviar un mensaje al equipo de soporte. </li>
148
- <li>Visitando el sitio web oficial de Clash of Clans y eligiendo la opción "Contáctenos". Luego puede llenar un formulario con sus datos y consulta. </li>
149
- <li>Visitar los foros oficiales de Clash of Clans y publicar su consulta o retroalimentación en la sección correspondiente. También puede interactuar con otros jugadores y moderadores allí. </li>
150
- <li>Visitar las páginas oficiales de redes sociales de Clash of Clans, como Facebook, Twitter, Instagram, YouTube, etc. y dejar un comentario o mensaje allí. También puede seguir las últimas noticias y actualizaciones allí. </li>
151
- </ul>
 
spaces/Benson/text-generation/Examples/Descarga De La Aplicacin Comercial Zugacoin.md DELETED
@@ -1,138 +0,0 @@
1
-
2
- <h1>Descarga de la aplicación comercial Zugacoin: Una guía para principiantes</h1>
3
- <p>Si usted está buscando una manera de invertir en criptomonedas, activos digitales de comercio, o préstamos de acceso en África, es posible que desee considerar el uso de Zugacoin. Zugacoin es una criptomoneda revolucionaria que tiene como objetivo reconstruir la economía moribunda de África convirtiéndose en la primera moneda en capital y financiación de inversiones. En este artículo, le mostraremos cómo descargar, instalar y usar la aplicación comercial Zugacoin, que es una plataforma segura y conveniente para comprar y vender Zugacoin. También revisaremos las características y beneficios de Zugacoin, así como sus revisiones, calificaciones, pros, contras y comparación con otras criptomonedas. Al final de este artículo, usted tendrá una idea clara de si Zugacoin es una opción de inversión digna para usted o no. </p>
4
- <h2>descarga de la aplicación comercial zugacoin</h2><br /><p><b><b>Download Zip</b> &#10022;&#10022;&#10022; <a href="https://bltlly.com/2v6IAp">https://bltlly.com/2v6IAp</a></b></p><br /><br />
5
- <h2>Características y beneficios de Zugacoin</h2>
6
- <p>Zugacoin es una criptomoneda que se construye bajo la cadena de bloques Ethereum. Es un token ERC20 con el ticker (SZC); también es negociable en intercambios criptográficos. Este token lanzado a finales de 2020 y es #2672 en el rango de cryptocurrencies en existencia. Al momento de escribir este artículo, Zugacoin cotiza a $47.06 (Coinmarketcap). </p>
7
- <p>Zugacoin tiene un suministro máximo que es mucho más limitado que el suministro total de bitcoin. Bitcoin Max. suministro = 21 millones BTC, mientras que Zugacoin Max. suministro = 1 millón SZC. Además, este token tiene la funcionalidad de prueba de apuesta; esto simplemente significa que puedes ganar recompensas apostando o mezclando el token SZC. </p>
8
- <p>Zugacoin fue fundada por el Arzobispo Dr. Sam Zuga, un clérigo de la Iglesia Casa de la Alegría, ubicada en Gboko, estado de Benue, Nigeria. Sam Zuga quería una moneda que fomentará el desarrollo económico en África a través de las finanzas descentralizadas. Para lograr este concepto, Zugacoin fue concebido. </p>
9
-
10
- <p>Zugacoin pretende ser una criptomoneda revolucionaria que restaure la economía africana. Quiere cambiar África para siempre haciendo uso de la tecnología blockchain en las economías emergentes de África y más allá. Su objetivo es liberar el potencial creando, ganando, ahorrando y gastando oportunidades en toda África.</p>
11
- <p></p>
12
- <p>Los usuarios objetivo de Zugacoin son personas subempleadas y desempleadas de África, además de ayudar al gobierno africano en el desarrollo de la economía. Se aconseja a los africanos a tomar ventaja de esta moneda para la libertad financiera, especialmente ya que está en la red Binance SZCB.</p>
13
- <p>Algunas de las características y beneficios de usar Zugacoin son:</p>
14
- <ul>
15
- <li>Ofrece transacciones rápidas, seguras y de bajo costo a través de las fronteras. </li>
16
- <li>Proporciona acceso a préstamos para empresas emergentes y necesidades personales. </li> <li>Admite múltiples métodos de pago, como transferencia bancaria, pago con tarjeta y dinero móvil. </li>
17
- <li>Permite a los usuarios obtener ingresos pasivos mediante la apuesta o la celebración de Zugacoin en sus carteras. </li>
18
- <li> Tiene una oferta limitada de 1 millón de SZC, lo que significa que tiene un alto potencial de escasez y demanda. </li>
19
- <li>Está respaldado por un fundador de buena reputación y un equipo de expertos en blockchain, finanzas y marketing. </li>
20
- <li>Es compatible con la red Ethereum y se puede integrar con otras aplicaciones descentralizadas. </li>
21
- </ul>
22
- <h3>Cómo descargar e instalar Zugacoin Merchant App</h3>
23
- <p>Si desea comenzar a usar Zugacoin, tendrá que descargar e instalar la aplicación comercial Zugacoin en su teléfono inteligente. La aplicación está disponible para dispositivos Android e iOS y se puede descargar desde el sitio web oficial o las tiendas de aplicaciones. Estos son los pasos a seguir:</p>
24
- <h4>Para usuarios de Android</h4>
25
- <ol>
26
- <li>Ir a la Google Play Store y buscar "Zugacoin Merchant App". </li>
27
- <li>Seleccione la aplicación de la lista y toque en "Instalar". </li>
28
- <li>Espere a que la aplicación se descargue e instale en su dispositivo. </li>
29
- <li>Abra la aplicación y acepte los términos y condiciones. </li>
30
- </ol>
31
- <h4>Para usuarios de iOS</h4>
32
- <ol>
33
- <li>Ir a la App Store y buscar "Zugacoin Merchant App". </li>
34
- <li>Seleccione la aplicación de la lista y toque en "Obtener". </li>
35
- <li>Ingresa tu contraseña de Apple ID o usa Touch ID o Face ID para confirmar. </li>
36
- <li>Espere a que la aplicación se descargue e instale en su dispositivo. </li>
37
- <li>Abra la aplicación y acepte los términos y condiciones. </li>
38
- </ol>
39
- <h4>Cómo registrarse y verificar su cuenta</h4>
40
- <p>Después de haber descargado e instalado la aplicación comercial Zugacoin, tendrá que registrarse y verificar su cuenta antes de comenzar a usarla. Estos son los pasos a seguir:</p>
41
- <ol>
42
- <li>Abra la aplicación y toque en "Crear cuenta". </li>
43
- <li>Ingrese su nombre completo, dirección de correo electrónico, número de teléfono, contraseña y código de referencia (si existe). </li>
44
- <li>Toque en "Registrarse" y compruebe su correo electrónico para un enlace de verificación. </li>
45
- <li>Haga clic en el enlace para verificar su dirección de correo electrónico y activar su cuenta. </li>
46
- <li>Inicie sesión en su cuenta y toque en "Perfil". </li>
47
- <li>Seleccione "Verificación" y cargue su documento de identidad (como pasaporte, licencia de conducir o tarjeta de identificación nacional). </li> <li>Introduzca sus datos personales, como su fecha de nacimiento, sexo, dirección y país. </li>
48
- <li>Toque en "Enviar" y espere a que se complete la verificación. </li>
49
- <li>Recibirás una notificación cuando tu cuenta esté verificada y lista para usar. </li>
50
- </ol>
51
- <h4> Cómo comprar y vender Zugacoin en la aplicación</h4>
52
- <p>Una vez que haya verificado su cuenta, puede comenzar a comprar y vender Zugacoin en la aplicación. Hay tres formas principales de hacer esto: usar la función de escaneo a pago, usar el intercambio P2P y usar la función de intercambio. Estos son los pasos a seguir para cada método:</p>
53
- <h5>Uso de la función de escaneo a pago</h5>
54
- <p>Esta función le permite pagar por bienes y servicios con Zugacoin escaneando un código QR. También puedes recibir pagos de otros usuarios generando tu propio código QR. Estos son los pasos a seguir:</p>
55
- <ol>
56
-
57
- <li>Si quieres pagar a alguien, escanea su código QR con tu cámara. Si desea recibir el pago, toque en "Recibir" y mostrar su código QR al pagador. </li>
58
- <li>Introduzca la cantidad de Zugacoin que desea enviar o recibir y confirme la transacción. </li>
59
- <li> Verá un mensaje de confirmación y un recibo de la transacción. </li>
60
- </ol>
61
- <h5>Usando el intercambio P2P</h5>
62
- <p>Esta característica le permite comprar y vender Zugacoin con otros usuarios directamente. Puede elegir entre una lista de ofertas o crear su propia oferta. También puede chatear con el vendedor o el comprador y calificarlos después de la transacción. Estos son los pasos a seguir:</p>
63
- <ol>
64
- <li>Abra la aplicación y toque en "P2P Exchange". </li>
65
- <li>Si desea comprar Zugacoin, toque en "Comprar". Si desea vender Zugacoin, toque en "Vender". </li>
66
- <li>Navegar por la lista de ofertas y seleccionar el que se adapte a sus necesidades. Puede filtrar las ofertas por método de pago, ubicación, precio y calificación. </li>
67
- <li>Toque en "Comercio" y chatear con el vendedor o comprador para acordar los términos de la transacción. </li>
68
- <li>Siga las instrucciones en la pantalla y complete el pago o transferencia de Zugacoin.</li>
69
- <li>Toque en "Confirmar" y espere la confirmación de la otra parte. </li>
70
- <li> Verá un mensaje de confirmación y un recibo de la transacción. </li>
71
- <li> También puede calificar y revisar al vendedor o comprador después de la transacción. </li>
72
- </ol>
73
- <h5>Usando la función de intercambio</h5>
74
- <p>Esta función le permite intercambiar Zugacoin con otras criptomonedas, como Bitcoin, Ethereum, Binance Coin, Tether, etc. Puede elegir entre una lista de monedas admitidas o ingresar una cantidad personalizada. Estos son los pasos a seguir:</p>
75
- <ol>
76
- <li>Abra la aplicación y toque en "Intercambiar". </li>
77
- <li>Seleccione la moneda que desea intercambiar y la moneda que desea recibir a cambio. </li> <li>Introduzca la cantidad de moneda que desea intercambiar o use el control deslizante para ajustar la cantidad. </li>
78
- <li>Toque en "Intercambiar ahora" y confirme la transacción. </li>
79
- <li>Verá un mensaje de confirmación y un recibo de la transacción. </li>
80
- </ol>
81
- <h2>Comentarios y valoraciones de Zugacoin</h2>
82
- <p>Zugacoin es una criptomoneda relativamente nueva que aún no ha ganado mucha popularidad o reconocimiento en el espacio criptográfico. Sin embargo, ha recibido algunas críticas y valoraciones de usuarios y expertos que lo han probado o analizado. Estos son algunos de ellos:</p>
83
- <h3>Pros y contras de Zugacoin</h3>
84
- <p>Como cualquier otra criptomoneda, Zugacoin tiene sus propios pros y contras que usted debe ser consciente de antes de invertir en ella. Aquí está un resumen de las principales ventajas y desventajas de usar Zugacoin:</p>
85
- <table>
86
- <tr>
87
- <th>Pros</th>
88
- <th>Contras</th>
89
- </tr>
90
- <tr>
91
- <td>- Ofrece transacciones rápidas, seguras y de bajo costo a través de las fronteras. </td>
92
- <td>- Tiene un suministro limitado de 1 millón de SZC, lo que puede limitar su escalabilidad y adopción. </td>
93
- </tr>
94
- <tr>
95
- <td>- Proporciona acceso a préstamos para startups de negocios y necesidades personales. </td>
96
- <td>- No es ampliamente aceptado o apoyado por comerciantes, bolsas o carteras. </td>
97
- </tr>
98
- <tr>
99
- <td>- Permite a los usuarios obtener ingresos pasivos mediante la apuesta o la celebración de Zugacoin en sus carteras. </td>
100
- <td>- Es vulnerable a la volatilidad del mercado, la incertidumbre regulatoria y los ciberataques. </td>
101
- </tr>
102
- <tr>
103
- <td>- Está respaldado por un fundador de buena reputación y un equipo de expertos en blockchain, finanzas y marketing. </td>
104
- <td>- Tiene una baja capitalización de mercado, liquidez y volumen de operaciones. </td>
105
- </tr>
106
- <tr>
107
- <td>- Es compatible con la red Ethereum y se puede integrar con otras aplicaciones descentralizadas. </td>
108
- <td>- Tiene una baja conciencia, confianza y reputación entre la comunidad criptográfica. </td>
109
- </tr>
110
- </table>
111
- <h3>Zugacoin vs otras criptomonedas</h3>
112
- <p>Zugacoin no es la única criptomoneda que tiene como objetivo empoderar a África y promover la inclusión financiera. Hay otras criptomonedas que tienen objetivos o características similares, como Akoin, KubitX, BitSika, etc. ¿Cómo se compara Zugacoin con ellos? Aquí hay algunos puntos de comparación:</p>
113
- <ul>
114
-
115
- <li>Zugacoin es más compatible con la red Ethereum y sus aplicaciones descentralizadas que otras criptomonedas que utilizan diferentes blockchains o protocolos. </li>
116
- <li>Zugacoin tiene una aceptación, soporte y adopción más limitada que otras criptomonedas que tienen más asociaciones, integraciones e intercambios. </li>
117
- <li>Zugacoin tiene un gobierno y una visión más centralizados que otras criptomonedas que tienen más participación y retroalimentación de la comunidad. </li>
118
- </ul>
119
- <h2>Conclusión</h2>
120
- <p>Zugacoin es una criptomoneda que tiene como objetivo reconstruir la economía de África mediante la concesión de préstamos, pagos e inversiones para las nuevas empresas y las necesidades personales. Es una forma rápida, segura y de bajo costo de realizar transacciones a través de las fronteras y obtener ingresos pasivos al apostar o mantener Zugacoin en su billetera. Está respaldado por un fundador de buena reputación y un equipo de expertos en blockchain, finanzas y marketing. Es compatible con la red Ethereum y puede integrarse con otras aplicaciones descentralizadas. </p>
121
- <p>Sin embargo, Zugacoin también tiene algunos inconvenientes que debe considerar antes de invertir en él. Tiene un suministro limitado de 1 millón de SZC, lo que puede limitar su escalabilidad y adopción. No es ampliamente aceptado o apoyado por los comerciantes, intercambios o carteras. Es vulnerable a la volatilidad del mercado, la incertidumbre regulatoria y los ciberataques. Tiene una baja capitalización de mercado, liquidez y volumen de operaciones. Tiene una baja conciencia, confianza y reputación entre la comunidad criptográfica. </p>
122
- <p>Si desea probar Zugacoin, tendrá que descargar e instalar la aplicación comercial Zugacoin en su teléfono inteligente. La aplicación es una plataforma segura y conveniente para comprar y vender Zugacoin. Puede utilizar la función de escaneo a pago, el intercambio P2P o la función de intercambio para operar Zugacoin con otros usuarios o criptomonedas. También tendrá que registrarse y verificar su cuenta antes de que pueda comenzar a usar la aplicación. </p>
123
-
124
- <h3>Preguntas frecuentes</h3>
125
- <p>Aquí están algunas de las preguntas y respuestas más frecuentes sobre Zugacoin:</p>
126
- <ol>
127
- <li><b>¿Cuál es el sitio web oficial de Zugacoin? </b></li>
128
- <p>El sitio web oficial de Zugacoin es https://zugacoin.com/ Puede encontrar más información sobre la visión de Zugacoin, misión, hoja de ruta, equipo, socios, noticias, eventos, etc. en el sitio web. </p>
129
- <li><b>¿Dónde puedo comprar Zugacoin? </b></li>
130
- <p>Puedes comprar Zugacoin en la aplicación comercial Zugacoin o en algunos intercambios de criptografía que lo soportan. Algunos de los intercambios que enumeran Zugacoin son BitMart, VinDAX, FinexBox, SatoExchange, etc.</p>
131
- <li><b>¿Cómo puedo ponerme en contacto con Zugacoin? </b></li> <p>Puede ponerse en contacto con Zugacoin enviando un correo electrónico a [email protected] o llamando al +234 811 377 7709. También puedes seguir a Zugacoin en redes sociales, como Facebook, Twitter, Instagram, YouTube, etc.</p>
132
- <li><b>¿Es Zugacoin una estafa? </b></li>
133
- <p>No, Zugacoin no es una estafa. Es una criptomoneda legítima que está registrada y regulada por el gobierno nigeriano. Tiene una visión clara, misión, hoja de ruta, equipo, socios y comunidad. También tiene un libro mayor de blockchain transparente y auditable que registra todas las transacciones y actividades. </p>
134
- <li><b>¿Cómo puedo almacenar Zugacoin? </b></li>
135
- <p>Puede almacenar Zugacoin en la aplicación comercial Zugacoin o en cualquier cartera compatible que soporte tokens ERC20. Algunas de las carteras que puedes usar son Trust Wallet, MetaMask, MyEtherWallet, etc. Siempre debes mantener tus claves y contraseñas privadas seguras. </p>
136
- </ol>
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/req/req_install.py DELETED
@@ -1,867 +0,0 @@
1
- # The following comment should be removed at some point in the future.
2
- # mypy: strict-optional=False
3
-
4
- import functools
5
- import logging
6
- import os
7
- import shutil
8
- import sys
9
- import uuid
10
- import zipfile
11
- from optparse import Values
12
- from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union
13
-
14
- from pip._vendor.packaging.markers import Marker
15
- from pip._vendor.packaging.requirements import Requirement
16
- from pip._vendor.packaging.specifiers import SpecifierSet
17
- from pip._vendor.packaging.utils import canonicalize_name
18
- from pip._vendor.packaging.version import Version
19
- from pip._vendor.packaging.version import parse as parse_version
20
- from pip._vendor.pyproject_hooks import BuildBackendHookCaller
21
-
22
- from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment
23
- from pip._internal.exceptions import InstallationError
24
- from pip._internal.locations import get_scheme
25
- from pip._internal.metadata import (
26
- BaseDistribution,
27
- get_default_environment,
28
- get_directory_distribution,
29
- get_wheel_distribution,
30
- )
31
- from pip._internal.metadata.base import FilesystemWheel
32
- from pip._internal.models.direct_url import DirectUrl
33
- from pip._internal.models.link import Link
34
- from pip._internal.operations.build.metadata import generate_metadata
35
- from pip._internal.operations.build.metadata_editable import generate_editable_metadata
36
- from pip._internal.operations.build.metadata_legacy import (
37
- generate_metadata as generate_metadata_legacy,
38
- )
39
- from pip._internal.operations.install.editable_legacy import (
40
- install_editable as install_editable_legacy,
41
- )
42
- from pip._internal.operations.install.wheel import install_wheel
43
- from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
44
- from pip._internal.req.req_uninstall import UninstallPathSet
45
- from pip._internal.utils.deprecation import deprecated
46
- from pip._internal.utils.hashes import Hashes
47
- from pip._internal.utils.misc import (
48
- ConfiguredBuildBackendHookCaller,
49
- ask_path_exists,
50
- backup_dir,
51
- display_path,
52
- hide_url,
53
- redact_auth_from_url,
54
- )
55
- from pip._internal.utils.packaging import safe_extra
56
- from pip._internal.utils.subprocess import runner_with_spinner_message
57
- from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
58
- from pip._internal.utils.virtualenv import running_under_virtualenv
59
- from pip._internal.vcs import vcs
60
-
61
- logger = logging.getLogger(__name__)
62
-
63
-
64
- class InstallRequirement:
65
- """
66
- Represents something that may be installed later on, may have information
67
- about where to fetch the relevant requirement and also contains logic for
68
- installing the said requirement.
69
- """
70
-
71
- def __init__(
72
- self,
73
- req: Optional[Requirement],
74
- comes_from: Optional[Union[str, "InstallRequirement"]],
75
- editable: bool = False,
76
- link: Optional[Link] = None,
77
- markers: Optional[Marker] = None,
78
- use_pep517: Optional[bool] = None,
79
- isolated: bool = False,
80
- *,
81
- global_options: Optional[List[str]] = None,
82
- hash_options: Optional[Dict[str, List[str]]] = None,
83
- config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
84
- constraint: bool = False,
85
- extras: Collection[str] = (),
86
- user_supplied: bool = False,
87
- permit_editable_wheels: bool = False,
88
- ) -> None:
89
- assert req is None or isinstance(req, Requirement), req
90
- self.req = req
91
- self.comes_from = comes_from
92
- self.constraint = constraint
93
- self.editable = editable
94
- self.permit_editable_wheels = permit_editable_wheels
95
-
96
- # source_dir is the local directory where the linked requirement is
97
- # located, or unpacked. In case unpacking is needed, creating and
98
- # populating source_dir is done by the RequirementPreparer. Note this
99
- # is not necessarily the directory where pyproject.toml or setup.py is
100
- # located - that one is obtained via unpacked_source_directory.
101
- self.source_dir: Optional[str] = None
102
- if self.editable:
103
- assert link
104
- if link.is_file:
105
- self.source_dir = os.path.normpath(os.path.abspath(link.file_path))
106
-
107
- if link is None and req and req.url:
108
- # PEP 508 URL requirement
109
- link = Link(req.url)
110
- self.link = self.original_link = link
111
-
112
- # When this InstallRequirement is a wheel obtained from the cache of locally
113
- # built wheels, this is the source link corresponding to the cache entry, which
114
- # was used to download and build the cached wheel.
115
- self.cached_wheel_source_link: Optional[Link] = None
116
-
117
- # Information about the location of the artifact that was downloaded. This
118
- # property is guaranteed to be set in resolver results.
119
- self.download_info: Optional[DirectUrl] = None
120
-
121
- # Path to any downloaded or already-existing package.
122
- self.local_file_path: Optional[str] = None
123
- if self.link and self.link.is_file:
124
- self.local_file_path = self.link.file_path
125
-
126
- if extras:
127
- self.extras = extras
128
- elif req:
129
- self.extras = {safe_extra(extra) for extra in req.extras}
130
- else:
131
- self.extras = set()
132
- if markers is None and req:
133
- markers = req.marker
134
- self.markers = markers
135
-
136
- # This holds the Distribution object if this requirement is already installed.
137
- self.satisfied_by: Optional[BaseDistribution] = None
138
- # Whether the installation process should try to uninstall an existing
139
- # distribution before installing this requirement.
140
- self.should_reinstall = False
141
- # Temporary build location
142
- self._temp_build_dir: Optional[TempDirectory] = None
143
- # Set to True after successful installation
144
- self.install_succeeded: Optional[bool] = None
145
- # Supplied options
146
- self.global_options = global_options if global_options else []
147
- self.hash_options = hash_options if hash_options else {}
148
- self.config_settings = config_settings
149
- # Set to True after successful preparation of this requirement
150
- self.prepared = False
151
- # User supplied requirement are explicitly requested for installation
152
- # by the user via CLI arguments or requirements files, as opposed to,
153
- # e.g. dependencies, extras or constraints.
154
- self.user_supplied = user_supplied
155
-
156
- self.isolated = isolated
157
- self.build_env: BuildEnvironment = NoOpBuildEnvironment()
158
-
159
- # For PEP 517, the directory where we request the project metadata
160
- # gets stored. We need this to pass to build_wheel, so the backend
161
- # can ensure that the wheel matches the metadata (see the PEP for
162
- # details).
163
- self.metadata_directory: Optional[str] = None
164
-
165
- # The static build requirements (from pyproject.toml)
166
- self.pyproject_requires: Optional[List[str]] = None
167
-
168
- # Build requirements that we will check are available
169
- self.requirements_to_check: List[str] = []
170
-
171
- # The PEP 517 backend we should use to build the project
172
- self.pep517_backend: Optional[BuildBackendHookCaller] = None
173
-
174
- # Are we using PEP 517 for this requirement?
175
- # After pyproject.toml has been loaded, the only valid values are True
176
- # and False. Before loading, None is valid (meaning "use the default").
177
- # Setting an explicit value before loading pyproject.toml is supported,
178
- # but after loading this flag should be treated as read only.
179
- self.use_pep517 = use_pep517
180
-
181
- # This requirement needs more preparation before it can be built
182
- self.needs_more_preparation = False
183
-
184
- def __str__(self) -> str:
185
- if self.req:
186
- s = str(self.req)
187
- if self.link:
188
- s += " from {}".format(redact_auth_from_url(self.link.url))
189
- elif self.link:
190
- s = redact_auth_from_url(self.link.url)
191
- else:
192
- s = "<InstallRequirement>"
193
- if self.satisfied_by is not None:
194
- if self.satisfied_by.location is not None:
195
- location = display_path(self.satisfied_by.location)
196
- else:
197
- location = "<memory>"
198
- s += f" in {location}"
199
- if self.comes_from:
200
- if isinstance(self.comes_from, str):
201
- comes_from: Optional[str] = self.comes_from
202
- else:
203
- comes_from = self.comes_from.from_path()
204
- if comes_from:
205
- s += f" (from {comes_from})"
206
- return s
207
-
208
- def __repr__(self) -> str:
209
- return "<{} object: {} editable={!r}>".format(
210
- self.__class__.__name__, str(self), self.editable
211
- )
212
-
213
- def format_debug(self) -> str:
214
- """An un-tested helper for getting state, for debugging."""
215
- attributes = vars(self)
216
- names = sorted(attributes)
217
-
218
- state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names))
219
- return "<{name} object: {{{state}}}>".format(
220
- name=self.__class__.__name__,
221
- state=", ".join(state),
222
- )
223
-
224
- # Things that are valid for all kinds of requirements?
225
- @property
226
- def name(self) -> Optional[str]:
227
- if self.req is None:
228
- return None
229
- return self.req.name
230
-
231
- @functools.lru_cache() # use cached_property in python 3.8+
232
- def supports_pyproject_editable(self) -> bool:
233
- if not self.use_pep517:
234
- return False
235
- assert self.pep517_backend
236
- with self.build_env:
237
- runner = runner_with_spinner_message(
238
- "Checking if build backend supports build_editable"
239
- )
240
- with self.pep517_backend.subprocess_runner(runner):
241
- return "build_editable" in self.pep517_backend._supported_features()
242
-
243
- @property
244
- def specifier(self) -> SpecifierSet:
245
- return self.req.specifier
246
-
247
- @property
248
- def is_pinned(self) -> bool:
249
- """Return whether I am pinned to an exact version.
250
-
251
- For example, some-package==1.2 is pinned; some-package>1.2 is not.
252
- """
253
- specifiers = self.specifier
254
- return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
255
-
256
- def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool:
257
- if not extras_requested:
258
- # Provide an extra to safely evaluate the markers
259
- # without matching any extra
260
- extras_requested = ("",)
261
- if self.markers is not None:
262
- return any(
263
- self.markers.evaluate({"extra": extra}) for extra in extras_requested
264
- )
265
- else:
266
- return True
267
-
268
- @property
269
- def has_hash_options(self) -> bool:
270
- """Return whether any known-good hashes are specified as options.
271
-
272
- These activate --require-hashes mode; hashes specified as part of a
273
- URL do not.
274
-
275
- """
276
- return bool(self.hash_options)
277
-
278
- def hashes(self, trust_internet: bool = True) -> Hashes:
279
- """Return a hash-comparer that considers my option- and URL-based
280
- hashes to be known-good.
281
-
282
- Hashes in URLs--ones embedded in the requirements file, not ones
283
- downloaded from an index server--are almost peers with ones from
284
- flags. They satisfy --require-hashes (whether it was implicitly or
285
- explicitly activated) but do not activate it. md5 and sha224 are not
286
- allowed in flags, which should nudge people toward good algos. We
287
- always OR all hashes together, even ones from URLs.
288
-
289
- :param trust_internet: Whether to trust URL-based (#md5=...) hashes
290
- downloaded from the internet, as by populate_link()
291
-
292
- """
293
- good_hashes = self.hash_options.copy()
294
- if trust_internet:
295
- link = self.link
296
- elif self.original_link and self.user_supplied:
297
- link = self.original_link
298
- else:
299
- link = None
300
- if link and link.hash:
301
- good_hashes.setdefault(link.hash_name, []).append(link.hash)
302
- return Hashes(good_hashes)
303
-
304
- def from_path(self) -> Optional[str]:
305
- """Format a nice indicator to show where this "comes from" """
306
- if self.req is None:
307
- return None
308
- s = str(self.req)
309
- if self.comes_from:
310
- if isinstance(self.comes_from, str):
311
- comes_from = self.comes_from
312
- else:
313
- comes_from = self.comes_from.from_path()
314
- if comes_from:
315
- s += "->" + comes_from
316
- return s
317
-
318
- def ensure_build_location(
319
- self, build_dir: str, autodelete: bool, parallel_builds: bool
320
- ) -> str:
321
- assert build_dir is not None
322
- if self._temp_build_dir is not None:
323
- assert self._temp_build_dir.path
324
- return self._temp_build_dir.path
325
- if self.req is None:
326
- # Some systems have /tmp as a symlink which confuses custom
327
- # builds (such as numpy). Thus, we ensure that the real path
328
- # is returned.
329
- self._temp_build_dir = TempDirectory(
330
- kind=tempdir_kinds.REQ_BUILD, globally_managed=True
331
- )
332
-
333
- return self._temp_build_dir.path
334
-
335
- # This is the only remaining place where we manually determine the path
336
- # for the temporary directory. It is only needed for editables where
337
- # it is the value of the --src option.
338
-
339
- # When parallel builds are enabled, add a UUID to the build directory
340
- # name so multiple builds do not interfere with each other.
341
- dir_name: str = canonicalize_name(self.name)
342
- if parallel_builds:
343
- dir_name = f"{dir_name}_{uuid.uuid4().hex}"
344
-
345
- # FIXME: Is there a better place to create the build_dir? (hg and bzr
346
- # need this)
347
- if not os.path.exists(build_dir):
348
- logger.debug("Creating directory %s", build_dir)
349
- os.makedirs(build_dir)
350
- actual_build_dir = os.path.join(build_dir, dir_name)
351
- # `None` indicates that we respect the globally-configured deletion
352
- # settings, which is what we actually want when auto-deleting.
353
- delete_arg = None if autodelete else False
354
- return TempDirectory(
355
- path=actual_build_dir,
356
- delete=delete_arg,
357
- kind=tempdir_kinds.REQ_BUILD,
358
- globally_managed=True,
359
- ).path
360
-
361
- def _set_requirement(self) -> None:
362
- """Set requirement after generating metadata."""
363
- assert self.req is None
364
- assert self.metadata is not None
365
- assert self.source_dir is not None
366
-
367
- # Construct a Requirement object from the generated metadata
368
- if isinstance(parse_version(self.metadata["Version"]), Version):
369
- op = "=="
370
- else:
371
- op = "==="
372
-
373
- self.req = Requirement(
374
- "".join(
375
- [
376
- self.metadata["Name"],
377
- op,
378
- self.metadata["Version"],
379
- ]
380
- )
381
- )
382
-
383
- def warn_on_mismatching_name(self) -> None:
384
- metadata_name = canonicalize_name(self.metadata["Name"])
385
- if canonicalize_name(self.req.name) == metadata_name:
386
- # Everything is fine.
387
- return
388
-
389
- # If we're here, there's a mismatch. Log a warning about it.
390
- logger.warning(
391
- "Generating metadata for package %s "
392
- "produced metadata for project name %s. Fix your "
393
- "#egg=%s fragments.",
394
- self.name,
395
- metadata_name,
396
- self.name,
397
- )
398
- self.req = Requirement(metadata_name)
399
-
400
- def check_if_exists(self, use_user_site: bool) -> None:
401
- """Find an installed distribution that satisfies or conflicts
402
- with this requirement, and set self.satisfied_by or
403
- self.should_reinstall appropriately.
404
- """
405
- if self.req is None:
406
- return
407
- existing_dist = get_default_environment().get_distribution(self.req.name)
408
- if not existing_dist:
409
- return
410
-
411
- version_compatible = self.req.specifier.contains(
412
- existing_dist.version,
413
- prereleases=True,
414
- )
415
- if not version_compatible:
416
- self.satisfied_by = None
417
- if use_user_site:
418
- if existing_dist.in_usersite:
419
- self.should_reinstall = True
420
- elif running_under_virtualenv() and existing_dist.in_site_packages:
421
- raise InstallationError(
422
- f"Will not install to the user site because it will "
423
- f"lack sys.path precedence to {existing_dist.raw_name} "
424
- f"in {existing_dist.location}"
425
- )
426
- else:
427
- self.should_reinstall = True
428
- else:
429
- if self.editable:
430
- self.should_reinstall = True
431
- # when installing editables, nothing pre-existing should ever
432
- # satisfy
433
- self.satisfied_by = None
434
- else:
435
- self.satisfied_by = existing_dist
436
-
437
- # Things valid for wheels
438
- @property
439
- def is_wheel(self) -> bool:
440
- if not self.link:
441
- return False
442
- return self.link.is_wheel
443
-
444
- @property
445
- def is_wheel_from_cache(self) -> bool:
446
- # When True, it means that this InstallRequirement is a local wheel file in the
447
- # cache of locally built wheels.
448
- return self.cached_wheel_source_link is not None
449
-
450
- # Things valid for sdists
451
- @property
452
- def unpacked_source_directory(self) -> str:
453
- return os.path.join(
454
- self.source_dir, self.link and self.link.subdirectory_fragment or ""
455
- )
456
-
457
- @property
458
- def setup_py_path(self) -> str:
459
- assert self.source_dir, f"No source dir for {self}"
460
- setup_py = os.path.join(self.unpacked_source_directory, "setup.py")
461
-
462
- return setup_py
463
-
464
- @property
465
- def setup_cfg_path(self) -> str:
466
- assert self.source_dir, f"No source dir for {self}"
467
- setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg")
468
-
469
- return setup_cfg
470
-
471
- @property
472
- def pyproject_toml_path(self) -> str:
473
- assert self.source_dir, f"No source dir for {self}"
474
- return make_pyproject_path(self.unpacked_source_directory)
475
-
476
- def load_pyproject_toml(self) -> None:
477
- """Load the pyproject.toml file.
478
-
479
- After calling this routine, all of the attributes related to PEP 517
480
- processing for this requirement have been set. In particular, the
481
- use_pep517 attribute can be used to determine whether we should
482
- follow the PEP 517 or legacy (setup.py) code path.
483
- """
484
- pyproject_toml_data = load_pyproject_toml(
485
- self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self)
486
- )
487
-
488
- if pyproject_toml_data is None:
489
- if self.config_settings:
490
- deprecated(
491
- reason=f"Config settings are ignored for project {self}.",
492
- replacement=(
493
- "to use --use-pep517 or add a "
494
- "pyproject.toml file to the project"
495
- ),
496
- gone_in="23.3",
497
- )
498
- self.use_pep517 = False
499
- return
500
-
501
- self.use_pep517 = True
502
- requires, backend, check, backend_path = pyproject_toml_data
503
- self.requirements_to_check = check
504
- self.pyproject_requires = requires
505
- self.pep517_backend = ConfiguredBuildBackendHookCaller(
506
- self,
507
- self.unpacked_source_directory,
508
- backend,
509
- backend_path=backend_path,
510
- )
511
-
512
- def isolated_editable_sanity_check(self) -> None:
513
- """Check that an editable requirement is valid for use with PEP 517/518.
514
-
515
- This verifies that an editable that has a pyproject.toml either supports PEP 660
516
- or has a setup.py or a setup.cfg
517
- """
518
- if (
519
- self.editable
520
- and self.use_pep517
521
- and not self.supports_pyproject_editable()
522
- and not os.path.isfile(self.setup_py_path)
523
- and not os.path.isfile(self.setup_cfg_path)
524
- ):
525
- raise InstallationError(
526
- f"Project {self} has a 'pyproject.toml' and its build "
527
- f"backend is missing the 'build_editable' hook. Since it does not "
528
- f"have a 'setup.py' nor a 'setup.cfg', "
529
- f"it cannot be installed in editable mode. "
530
- f"Consider using a build backend that supports PEP 660."
531
- )
532
-
533
- def prepare_metadata(self) -> None:
534
- """Ensure that project metadata is available.
535
-
536
- Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.
537
- Under legacy processing, call setup.py egg-info.
538
- """
539
- assert self.source_dir
540
- details = self.name or f"from {self.link}"
541
-
542
- if self.use_pep517:
543
- assert self.pep517_backend is not None
544
- if (
545
- self.editable
546
- and self.permit_editable_wheels
547
- and self.supports_pyproject_editable()
548
- ):
549
- self.metadata_directory = generate_editable_metadata(
550
- build_env=self.build_env,
551
- backend=self.pep517_backend,
552
- details=details,
553
- )
554
- else:
555
- self.metadata_directory = generate_metadata(
556
- build_env=self.build_env,
557
- backend=self.pep517_backend,
558
- details=details,
559
- )
560
- else:
561
- self.metadata_directory = generate_metadata_legacy(
562
- build_env=self.build_env,
563
- setup_py_path=self.setup_py_path,
564
- source_dir=self.unpacked_source_directory,
565
- isolated=self.isolated,
566
- details=details,
567
- )
568
-
569
- # Act on the newly generated metadata, based on the name and version.
570
- if not self.name:
571
- self._set_requirement()
572
- else:
573
- self.warn_on_mismatching_name()
574
-
575
- self.assert_source_matches_version()
576
-
577
- @property
578
- def metadata(self) -> Any:
579
- if not hasattr(self, "_metadata"):
580
- self._metadata = self.get_dist().metadata
581
-
582
- return self._metadata
583
-
584
- def get_dist(self) -> BaseDistribution:
585
- if self.metadata_directory:
586
- return get_directory_distribution(self.metadata_directory)
587
- elif self.local_file_path and self.is_wheel:
588
- return get_wheel_distribution(
589
- FilesystemWheel(self.local_file_path), canonicalize_name(self.name)
590
- )
591
- raise AssertionError(
592
- f"InstallRequirement {self} has no metadata directory and no wheel: "
593
- f"can't make a distribution."
594
- )
595
-
596
- def assert_source_matches_version(self) -> None:
597
- assert self.source_dir
598
- version = self.metadata["version"]
599
- if self.req.specifier and version not in self.req.specifier:
600
- logger.warning(
601
- "Requested %s, but installing version %s",
602
- self,
603
- version,
604
- )
605
- else:
606
- logger.debug(
607
- "Source in %s has version %s, which satisfies requirement %s",
608
- display_path(self.source_dir),
609
- version,
610
- self,
611
- )
612
-
613
- # For both source distributions and editables
614
- def ensure_has_source_dir(
615
- self,
616
- parent_dir: str,
617
- autodelete: bool = False,
618
- parallel_builds: bool = False,
619
- ) -> None:
620
- """Ensure that a source_dir is set.
621
-
622
- This will create a temporary build dir if the name of the requirement
623
- isn't known yet.
624
-
625
- :param parent_dir: The ideal pip parent_dir for the source_dir.
626
- Generally src_dir for editables and build_dir for sdists.
627
- :return: self.source_dir
628
- """
629
- if self.source_dir is None:
630
- self.source_dir = self.ensure_build_location(
631
- parent_dir,
632
- autodelete=autodelete,
633
- parallel_builds=parallel_builds,
634
- )
635
-
636
- # For editable installations
637
- def update_editable(self) -> None:
638
- if not self.link:
639
- logger.debug(
640
- "Cannot update repository at %s; repository location is unknown",
641
- self.source_dir,
642
- )
643
- return
644
- assert self.editable
645
- assert self.source_dir
646
- if self.link.scheme == "file":
647
- # Static paths don't get updated
648
- return
649
- vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)
650
- # Editable requirements are validated in Requirement constructors.
651
- # So here, if it's neither a path nor a valid VCS URL, it's a bug.
652
- assert vcs_backend, f"Unsupported VCS URL {self.link.url}"
653
- hidden_url = hide_url(self.link.url)
654
- vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)
655
-
656
- # Top-level Actions
657
- def uninstall(
658
- self, auto_confirm: bool = False, verbose: bool = False
659
- ) -> Optional[UninstallPathSet]:
660
- """
661
- Uninstall the distribution currently satisfying this requirement.
662
-
663
- Prompts before removing or modifying files unless
664
- ``auto_confirm`` is True.
665
-
666
- Refuses to delete or modify files outside of ``sys.prefix`` -
667
- thus uninstallation within a virtual environment can only
668
- modify that virtual environment, even if the virtualenv is
669
- linked to global site-packages.
670
-
671
- """
672
- assert self.req
673
- dist = get_default_environment().get_distribution(self.req.name)
674
- if not dist:
675
- logger.warning("Skipping %s as it is not installed.", self.name)
676
- return None
677
- logger.info("Found existing installation: %s", dist)
678
-
679
- uninstalled_pathset = UninstallPathSet.from_dist(dist)
680
- uninstalled_pathset.remove(auto_confirm, verbose)
681
- return uninstalled_pathset
682
-
683
- def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:
684
- def _clean_zip_name(name: str, prefix: str) -> str:
685
- assert name.startswith(
686
- prefix + os.path.sep
687
- ), f"name {name!r} doesn't start with prefix {prefix!r}"
688
- name = name[len(prefix) + 1 :]
689
- name = name.replace(os.path.sep, "/")
690
- return name
691
-
692
- path = os.path.join(parentdir, path)
693
- name = _clean_zip_name(path, rootdir)
694
- return self.name + "/" + name
695
-
696
- def archive(self, build_dir: Optional[str]) -> None:
697
- """Saves archive to provided build_dir.
698
-
699
- Used for saving downloaded VCS requirements as part of `pip download`.
700
- """
701
- assert self.source_dir
702
- if build_dir is None:
703
- return
704
-
705
- create_archive = True
706
- archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])
707
- archive_path = os.path.join(build_dir, archive_name)
708
-
709
- if os.path.exists(archive_path):
710
- response = ask_path_exists(
711
- "The file {} exists. (i)gnore, (w)ipe, "
712
- "(b)ackup, (a)bort ".format(display_path(archive_path)),
713
- ("i", "w", "b", "a"),
714
- )
715
- if response == "i":
716
- create_archive = False
717
- elif response == "w":
718
- logger.warning("Deleting %s", display_path(archive_path))
719
- os.remove(archive_path)
720
- elif response == "b":
721
- dest_file = backup_dir(archive_path)
722
- logger.warning(
723
- "Backing up %s to %s",
724
- display_path(archive_path),
725
- display_path(dest_file),
726
- )
727
- shutil.move(archive_path, dest_file)
728
- elif response == "a":
729
- sys.exit(-1)
730
-
731
- if not create_archive:
732
- return
733
-
734
- zip_output = zipfile.ZipFile(
735
- archive_path,
736
- "w",
737
- zipfile.ZIP_DEFLATED,
738
- allowZip64=True,
739
- )
740
- with zip_output:
741
- dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory))
742
- for dirpath, dirnames, filenames in os.walk(dir):
743
- for dirname in dirnames:
744
- dir_arcname = self._get_archive_name(
745
- dirname,
746
- parentdir=dirpath,
747
- rootdir=dir,
748
- )
749
- zipdir = zipfile.ZipInfo(dir_arcname + "/")
750
- zipdir.external_attr = 0x1ED << 16 # 0o755
751
- zip_output.writestr(zipdir, "")
752
- for filename in filenames:
753
- file_arcname = self._get_archive_name(
754
- filename,
755
- parentdir=dirpath,
756
- rootdir=dir,
757
- )
758
- filename = os.path.join(dirpath, filename)
759
- zip_output.write(filename, file_arcname)
760
-
761
- logger.info("Saved %s", display_path(archive_path))
762
-
763
- def install(
764
- self,
765
- global_options: Optional[Sequence[str]] = None,
766
- root: Optional[str] = None,
767
- home: Optional[str] = None,
768
- prefix: Optional[str] = None,
769
- warn_script_location: bool = True,
770
- use_user_site: bool = False,
771
- pycompile: bool = True,
772
- ) -> None:
773
- scheme = get_scheme(
774
- self.name,
775
- user=use_user_site,
776
- home=home,
777
- root=root,
778
- isolated=self.isolated,
779
- prefix=prefix,
780
- )
781
-
782
- if self.editable and not self.is_wheel:
783
- install_editable_legacy(
784
- global_options=global_options if global_options is not None else [],
785
- prefix=prefix,
786
- home=home,
787
- use_user_site=use_user_site,
788
- name=self.name,
789
- setup_py_path=self.setup_py_path,
790
- isolated=self.isolated,
791
- build_env=self.build_env,
792
- unpacked_source_directory=self.unpacked_source_directory,
793
- )
794
- self.install_succeeded = True
795
- return
796
-
797
- assert self.is_wheel
798
- assert self.local_file_path
799
-
800
- install_wheel(
801
- self.name,
802
- self.local_file_path,
803
- scheme=scheme,
804
- req_description=str(self.req),
805
- pycompile=pycompile,
806
- warn_script_location=warn_script_location,
807
- direct_url=self.download_info if self.original_link else None,
808
- requested=self.user_supplied,
809
- )
810
- self.install_succeeded = True
811
-
812
-
813
- def check_invalid_constraint_type(req: InstallRequirement) -> str:
814
- # Check for unsupported forms
815
- problem = ""
816
- if not req.name:
817
- problem = "Unnamed requirements are not allowed as constraints"
818
- elif req.editable:
819
- problem = "Editable requirements are not allowed as constraints"
820
- elif req.extras:
821
- problem = "Constraints cannot have extras"
822
-
823
- if problem:
824
- deprecated(
825
- reason=(
826
- "Constraints are only allowed to take the form of a package "
827
- "name and a version specifier. Other forms were originally "
828
- "permitted as an accident of the implementation, but were "
829
- "undocumented. The new implementation of the resolver no "
830
- "longer supports these forms."
831
- ),
832
- replacement="replacing the constraint with a requirement",
833
- # No plan yet for when the new resolver becomes default
834
- gone_in=None,
835
- issue=8210,
836
- )
837
-
838
- return problem
839
-
840
-
841
- def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool:
842
- if getattr(options, option, None):
843
- return True
844
- for req in reqs:
845
- if getattr(req, option, None):
846
- return True
847
- return False
848
-
849
-
850
- def check_legacy_setup_py_options(
851
- options: Values,
852
- reqs: List[InstallRequirement],
853
- ) -> None:
854
- has_build_options = _has_option(options, reqs, "build_options")
855
- has_global_options = _has_option(options, reqs, "global_options")
856
- if has_build_options or has_global_options:
857
- deprecated(
858
- reason="--build-option and --global-option are deprecated.",
859
- issue=11859,
860
- replacement="to use --config-settings",
861
- gone_in="23.3",
862
- )
863
- logger.warning(
864
- "Implying --no-binary=:all: due to the presence of "
865
- "--build-option / --global-option. "
866
- )
867
- options.format_control.disallow_binaries()
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/util.py DELETED
@@ -1,308 +0,0 @@
1
- """
2
- pygments.util
3
- ~~~~~~~~~~~~~
4
-
5
- Utility functions.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import re
12
- from io import TextIOWrapper
13
-
14
-
15
- split_path_re = re.compile(r'[/\\ ]')
16
- doctype_lookup_re = re.compile(r'''
17
- <!DOCTYPE\s+(
18
- [a-zA-Z_][a-zA-Z0-9]*
19
- (?: \s+ # optional in HTML5
20
- [a-zA-Z_][a-zA-Z0-9]*\s+
21
- "[^"]*")?
22
- )
23
- [^>]*>
24
- ''', re.DOTALL | re.MULTILINE | re.VERBOSE)
25
- tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
26
- re.IGNORECASE | re.DOTALL | re.MULTILINE)
27
- xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
28
-
29
-
30
- class ClassNotFound(ValueError):
31
- """Raised if one of the lookup functions didn't find a matching class."""
32
-
33
-
34
- class OptionError(Exception):
35
- pass
36
-
37
-
38
- def get_choice_opt(options, optname, allowed, default=None, normcase=False):
39
- string = options.get(optname, default)
40
- if normcase:
41
- string = string.lower()
42
- if string not in allowed:
43
- raise OptionError('Value for option %s must be one of %s' %
44
- (optname, ', '.join(map(str, allowed))))
45
- return string
46
-
47
-
48
- def get_bool_opt(options, optname, default=None):
49
- string = options.get(optname, default)
50
- if isinstance(string, bool):
51
- return string
52
- elif isinstance(string, int):
53
- return bool(string)
54
- elif not isinstance(string, str):
55
- raise OptionError('Invalid type %r for option %s; use '
56
- '1/0, yes/no, true/false, on/off' % (
57
- string, optname))
58
- elif string.lower() in ('1', 'yes', 'true', 'on'):
59
- return True
60
- elif string.lower() in ('0', 'no', 'false', 'off'):
61
- return False
62
- else:
63
- raise OptionError('Invalid value %r for option %s; use '
64
- '1/0, yes/no, true/false, on/off' % (
65
- string, optname))
66
-
67
-
68
- def get_int_opt(options, optname, default=None):
69
- string = options.get(optname, default)
70
- try:
71
- return int(string)
72
- except TypeError:
73
- raise OptionError('Invalid type %r for option %s; you '
74
- 'must give an integer value' % (
75
- string, optname))
76
- except ValueError:
77
- raise OptionError('Invalid value %r for option %s; you '
78
- 'must give an integer value' % (
79
- string, optname))
80
-
81
-
82
- def get_list_opt(options, optname, default=None):
83
- val = options.get(optname, default)
84
- if isinstance(val, str):
85
- return val.split()
86
- elif isinstance(val, (list, tuple)):
87
- return list(val)
88
- else:
89
- raise OptionError('Invalid type %r for option %s; you '
90
- 'must give a list value' % (
91
- val, optname))
92
-
93
-
94
- def docstring_headline(obj):
95
- if not obj.__doc__:
96
- return ''
97
- res = []
98
- for line in obj.__doc__.strip().splitlines():
99
- if line.strip():
100
- res.append(" " + line.strip())
101
- else:
102
- break
103
- return ''.join(res).lstrip()
104
-
105
-
106
- def make_analysator(f):
107
- """Return a static text analyser function that returns float values."""
108
- def text_analyse(text):
109
- try:
110
- rv = f(text)
111
- except Exception:
112
- return 0.0
113
- if not rv:
114
- return 0.0
115
- try:
116
- return min(1.0, max(0.0, float(rv)))
117
- except (ValueError, TypeError):
118
- return 0.0
119
- text_analyse.__doc__ = f.__doc__
120
- return staticmethod(text_analyse)
121
-
122
-
123
- def shebang_matches(text, regex):
124
- r"""Check if the given regular expression matches the last part of the
125
- shebang if one exists.
126
-
127
- >>> from pygments.util import shebang_matches
128
- >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
129
- True
130
- >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
131
- True
132
- >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
133
- False
134
- >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
135
- False
136
- >>> shebang_matches('#!/usr/bin/startsomethingwith python',
137
- ... r'python(2\.\d)?')
138
- True
139
-
140
- It also checks for common windows executable file extensions::
141
-
142
- >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
143
- True
144
-
145
- Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
146
- the same as ``'perl -e'``)
147
-
148
- Note that this method automatically searches the whole string (eg:
149
- the regular expression is wrapped in ``'^$'``)
150
- """
151
- index = text.find('\n')
152
- if index >= 0:
153
- first_line = text[:index].lower()
154
- else:
155
- first_line = text.lower()
156
- if first_line.startswith('#!'):
157
- try:
158
- found = [x for x in split_path_re.split(first_line[2:].strip())
159
- if x and not x.startswith('-')][-1]
160
- except IndexError:
161
- return False
162
- regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
163
- if regex.search(found) is not None:
164
- return True
165
- return False
166
-
167
-
168
- def doctype_matches(text, regex):
169
- """Check if the doctype matches a regular expression (if present).
170
-
171
- Note that this method only checks the first part of a DOCTYPE.
172
- eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
173
- """
174
- m = doctype_lookup_re.search(text)
175
- if m is None:
176
- return False
177
- doctype = m.group(1)
178
- return re.compile(regex, re.I).match(doctype.strip()) is not None
179
-
180
-
181
- def html_doctype_matches(text):
182
- """Check if the file looks like it has a html doctype."""
183
- return doctype_matches(text, r'html')
184
-
185
-
186
- _looks_like_xml_cache = {}
187
-
188
-
189
- def looks_like_xml(text):
190
- """Check if a doctype exists or if we have some tags."""
191
- if xml_decl_re.match(text):
192
- return True
193
- key = hash(text)
194
- try:
195
- return _looks_like_xml_cache[key]
196
- except KeyError:
197
- m = doctype_lookup_re.search(text)
198
- if m is not None:
199
- return True
200
- rv = tag_re.search(text[:1000]) is not None
201
- _looks_like_xml_cache[key] = rv
202
- return rv
203
-
204
-
205
- def surrogatepair(c):
206
- """Given a unicode character code with length greater than 16 bits,
207
- return the two 16 bit surrogate pair.
208
- """
209
- # From example D28 of:
210
- # http://www.unicode.org/book/ch03.pdf
211
- return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
212
-
213
-
214
- def format_lines(var_name, seq, raw=False, indent_level=0):
215
- """Formats a sequence of strings for output."""
216
- lines = []
217
- base_indent = ' ' * indent_level * 4
218
- inner_indent = ' ' * (indent_level + 1) * 4
219
- lines.append(base_indent + var_name + ' = (')
220
- if raw:
221
- # These should be preformatted reprs of, say, tuples.
222
- for i in seq:
223
- lines.append(inner_indent + i + ',')
224
- else:
225
- for i in seq:
226
- # Force use of single quotes
227
- r = repr(i + '"')
228
- lines.append(inner_indent + r[:-2] + r[-1] + ',')
229
- lines.append(base_indent + ')')
230
- return '\n'.join(lines)
231
-
232
-
233
- def duplicates_removed(it, already_seen=()):
234
- """
235
- Returns a list with duplicates removed from the iterable `it`.
236
-
237
- Order is preserved.
238
- """
239
- lst = []
240
- seen = set()
241
- for i in it:
242
- if i in seen or i in already_seen:
243
- continue
244
- lst.append(i)
245
- seen.add(i)
246
- return lst
247
-
248
-
249
- class Future:
250
- """Generic class to defer some work.
251
-
252
- Handled specially in RegexLexerMeta, to support regex string construction at
253
- first use.
254
- """
255
- def get(self):
256
- raise NotImplementedError
257
-
258
-
259
- def guess_decode(text):
260
- """Decode *text* with guessed encoding.
261
-
262
- First try UTF-8; this should fail for non-UTF-8 encodings.
263
- Then try the preferred locale encoding.
264
- Fall back to latin-1, which always works.
265
- """
266
- try:
267
- text = text.decode('utf-8')
268
- return text, 'utf-8'
269
- except UnicodeDecodeError:
270
- try:
271
- import locale
272
- prefencoding = locale.getpreferredencoding()
273
- text = text.decode()
274
- return text, prefencoding
275
- except (UnicodeDecodeError, LookupError):
276
- text = text.decode('latin1')
277
- return text, 'latin1'
278
-
279
-
280
- def guess_decode_from_terminal(text, term):
281
- """Decode *text* coming from terminal *term*.
282
-
283
- First try the terminal encoding, if given.
284
- Then try UTF-8. Then try the preferred locale encoding.
285
- Fall back to latin-1, which always works.
286
- """
287
- if getattr(term, 'encoding', None):
288
- try:
289
- text = text.decode(term.encoding)
290
- except UnicodeDecodeError:
291
- pass
292
- else:
293
- return text, term.encoding
294
- return guess_decode(text)
295
-
296
-
297
- def terminal_encoding(term):
298
- """Return our best guess of encoding for the given *term*."""
299
- if getattr(term, 'encoding', None):
300
- return term.encoding
301
- import locale
302
- return locale.getpreferredencoding()
303
-
304
-
305
- class UnclosingTextIOWrapper(TextIOWrapper):
306
- # Don't close underlying buffer on destruction.
307
- def close(self):
308
- self.flush()
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/dep_util.py DELETED
@@ -1,96 +0,0 @@
1
- """distutils.dep_util
2
-
3
- Utility functions for simple, timestamp-based dependency of files
4
- and groups of files; also, function based entirely on such
5
- timestamp dependency analysis."""
6
-
7
- import os
8
- from distutils.errors import DistutilsFileError
9
-
10
-
11
- def newer(source, target):
12
- """Return true if 'source' exists and is more recently modified than
13
- 'target', or if 'source' exists and 'target' doesn't. Return false if
14
- both exist and 'target' is the same age or younger than 'source'.
15
- Raise DistutilsFileError if 'source' does not exist.
16
- """
17
- if not os.path.exists(source):
18
- raise DistutilsFileError("file '%s' does not exist" % os.path.abspath(source))
19
- if not os.path.exists(target):
20
- return 1
21
-
22
- from stat import ST_MTIME
23
-
24
- mtime1 = os.stat(source)[ST_MTIME]
25
- mtime2 = os.stat(target)[ST_MTIME]
26
-
27
- return mtime1 > mtime2
28
-
29
-
30
- # newer ()
31
-
32
-
33
- def newer_pairwise(sources, targets):
34
- """Walk two filename lists in parallel, testing if each source is newer
35
- than its corresponding target. Return a pair of lists (sources,
36
- targets) where source is newer than target, according to the semantics
37
- of 'newer()'.
38
- """
39
- if len(sources) != len(targets):
40
- raise ValueError("'sources' and 'targets' must be same length")
41
-
42
- # build a pair of lists (sources, targets) where source is newer
43
- n_sources = []
44
- n_targets = []
45
- for i in range(len(sources)):
46
- if newer(sources[i], targets[i]):
47
- n_sources.append(sources[i])
48
- n_targets.append(targets[i])
49
-
50
- return (n_sources, n_targets)
51
-
52
-
53
- # newer_pairwise ()
54
-
55
-
56
- def newer_group(sources, target, missing='error'):
57
- """Return true if 'target' is out-of-date with respect to any file
58
- listed in 'sources'. In other words, if 'target' exists and is newer
59
- than every file in 'sources', return false; otherwise return true.
60
- 'missing' controls what we do when a source file is missing; the
61
- default ("error") is to blow up with an OSError from inside 'stat()';
62
- if it is "ignore", we silently drop any missing source files; if it is
63
- "newer", any missing source files make us assume that 'target' is
64
- out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
65
- carry out commands that wouldn't work because inputs are missing, but
66
- that doesn't matter because you're not actually going to run the
67
- commands).
68
- """
69
- # If the target doesn't even exist, then it's definitely out-of-date.
70
- if not os.path.exists(target):
71
- return 1
72
-
73
- # Otherwise we have to find out the hard way: if *any* source file
74
- # is more recent than 'target', then 'target' is out-of-date and
75
- # we can immediately return true. If we fall through to the end
76
- # of the loop, then 'target' is up-to-date and we return false.
77
- from stat import ST_MTIME
78
-
79
- target_mtime = os.stat(target)[ST_MTIME]
80
- for source in sources:
81
- if not os.path.exists(source):
82
- if missing == 'error': # blow up when we stat() the file
83
- pass
84
- elif missing == 'ignore': # missing source dropped from
85
- continue # target's dependency list
86
- elif missing == 'newer': # missing source means target is
87
- return 1 # out-of-date
88
-
89
- source_mtime = os.stat(source)[ST_MTIME]
90
- if source_mtime > target_mtime:
91
- return 1
92
- else:
93
- return 0
94
-
95
-
96
- # newer_group ()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h DELETED
@@ -1,38 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- #pragma once
3
- #include <torch/types.h>
4
-
5
- namespace detectron2 {
6
-
7
- at::Tensor nms_rotated_cpu(
8
- const at::Tensor& dets,
9
- const at::Tensor& scores,
10
- const float iou_threshold);
11
-
12
- #ifdef WITH_CUDA
13
- at::Tensor nms_rotated_cuda(
14
- const at::Tensor& dets,
15
- const at::Tensor& scores,
16
- const float iou_threshold);
17
- #endif
18
-
19
- // Interface for Python
20
- // inline is needed to prevent multiple function definitions when this header is
21
- // included by different cpps
22
- inline at::Tensor nms_rotated(
23
- const at::Tensor& dets,
24
- const at::Tensor& scores,
25
- const float iou_threshold) {
26
- assert(dets.device().is_cuda() == scores.device().is_cuda());
27
- if (dets.device().is_cuda()) {
28
- #ifdef WITH_CUDA
29
- return nms_rotated_cuda(dets, scores, iou_threshold);
30
- #else
31
- AT_ERROR("Not compiled with GPU support");
32
- #endif
33
- }
34
-
35
- return nms_rotated_cpu(dets, scores, iou_threshold);
36
- }
37
-
38
- } // namespace detectron2
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py DELETED
@@ -1,61 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- from torch import nn
3
- from torch.autograd import Function
4
- from torch.autograd.function import once_differentiable
5
-
6
- from tensormask import _C
7
-
8
-
9
- class _SwapAlign2Nat(Function):
10
- @staticmethod
11
- def forward(ctx, X, lambda_val, pad_val):
12
- ctx.lambda_val = lambda_val
13
- ctx.input_shape = X.size()
14
-
15
- Y = _C.swap_align2nat_forward(X, lambda_val, pad_val)
16
- return Y
17
-
18
- @staticmethod
19
- @once_differentiable
20
- def backward(ctx, gY):
21
- lambda_val = ctx.lambda_val
22
- bs, ch, h, w = ctx.input_shape
23
-
24
- gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w)
25
-
26
- return gX, None, None
27
-
28
-
29
- swap_align2nat = _SwapAlign2Nat.apply
30
-
31
-
32
- class SwapAlign2Nat(nn.Module):
33
- """
34
- The op `SwapAlign2Nat` described in https://arxiv.org/abs/1903.12174.
35
- Given an input tensor that predicts masks of shape (N, C=VxU, H, W),
36
- apply the op, it will return masks of shape (N, V'xU', H', W') where
37
- the unit lengths of (V, U) and (H, W) are swapped, and the mask representation
38
- is transformed from aligned to natural.
39
- Args:
40
- lambda_val (int): the relative unit length ratio between (V, U) and (H, W),
41
- as we always have larger unit lengths for (V, U) than (H, W),
42
- lambda_val is always >= 1.
43
- pad_val (float): padding value for the values falling outside of the input
44
- tensor, default set to -6 as sigmoid(-6) is ~0, indicating
45
- that is no masks outside of the tensor.
46
- """
47
-
48
- def __init__(self, lambda_val, pad_val=-6.0):
49
- super(SwapAlign2Nat, self).__init__()
50
- self.lambda_val = lambda_val
51
- self.pad_val = pad_val
52
-
53
- def forward(self, X):
54
- return swap_align2nat(X, self.lambda_val, self.pad_val)
55
-
56
- def __repr__(self):
57
- tmpstr = self.__class__.__name__ + "("
58
- tmpstr += "lambda_val=" + str(self.lambda_val)
59
- tmpstr += ", pad_val=" + str(self.pad_val)
60
- tmpstr += ")"
61
- return tmpstr
spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/modules-checkpoint.py DELETED
@@ -1,194 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from monoscene.DDR import Bottleneck3D
4
-
5
-
6
- class ASPP(nn.Module):
7
- """
8
- ASPP 3D
9
- Adapt from https://github.com/cv-rits/LMSCNet/blob/main/LMSCNet/models/LMSCNet.py#L7
10
- """
11
-
12
- def __init__(self, planes, dilations_conv_list):
13
- super().__init__()
14
-
15
- # ASPP Block
16
- self.conv_list = dilations_conv_list
17
- self.conv1 = nn.ModuleList(
18
- [
19
- nn.Conv3d(
20
- planes, planes, kernel_size=3, padding=dil, dilation=dil, bias=False
21
- )
22
- for dil in dilations_conv_list
23
- ]
24
- )
25
- self.bn1 = nn.ModuleList(
26
- [nn.BatchNorm3d(planes) for dil in dilations_conv_list]
27
- )
28
- self.conv2 = nn.ModuleList(
29
- [
30
- nn.Conv3d(
31
- planes, planes, kernel_size=3, padding=dil, dilation=dil, bias=False
32
- )
33
- for dil in dilations_conv_list
34
- ]
35
- )
36
- self.bn2 = nn.ModuleList(
37
- [nn.BatchNorm3d(planes) for dil in dilations_conv_list]
38
- )
39
- self.relu = nn.ReLU()
40
-
41
- def forward(self, x_in):
42
-
43
- y = self.bn2[0](self.conv2[0](self.relu(self.bn1[0](self.conv1[0](x_in)))))
44
- for i in range(1, len(self.conv_list)):
45
- y += self.bn2[i](self.conv2[i](self.relu(self.bn1[i](self.conv1[i](x_in)))))
46
- x_in = self.relu(y + x_in) # modified
47
-
48
- return x_in
49
-
50
-
51
- class SegmentationHead(nn.Module):
52
- """
53
- 3D Segmentation heads to retrieve semantic segmentation at each scale.
54
- Formed by Dim expansion, Conv3D, ASPP block, Conv3D.
55
- Taken from https://github.com/cv-rits/LMSCNet/blob/main/LMSCNet/models/LMSCNet.py#L7
56
- """
57
-
58
- def __init__(self, inplanes, planes, nbr_classes, dilations_conv_list):
59
- super().__init__()
60
-
61
- # First convolution
62
- self.conv0 = nn.Conv3d(inplanes, planes, kernel_size=3, padding=1, stride=1)
63
-
64
- # ASPP Block
65
- self.conv_list = dilations_conv_list
66
- self.conv1 = nn.ModuleList(
67
- [
68
- nn.Conv3d(
69
- planes, planes, kernel_size=3, padding=dil, dilation=dil, bias=False
70
- )
71
- for dil in dilations_conv_list
72
- ]
73
- )
74
- self.bn1 = nn.ModuleList(
75
- [nn.BatchNorm3d(planes) for dil in dilations_conv_list]
76
- )
77
- self.conv2 = nn.ModuleList(
78
- [
79
- nn.Conv3d(
80
- planes, planes, kernel_size=3, padding=dil, dilation=dil, bias=False
81
- )
82
- for dil in dilations_conv_list
83
- ]
84
- )
85
- self.bn2 = nn.ModuleList(
86
- [nn.BatchNorm3d(planes) for dil in dilations_conv_list]
87
- )
88
- self.relu = nn.ReLU()
89
-
90
- self.conv_classes = nn.Conv3d(
91
- planes, nbr_classes, kernel_size=3, padding=1, stride=1
92
- )
93
-
94
- def forward(self, x_in):
95
-
96
- # Convolution to go from inplanes to planes features...
97
- x_in = self.relu(self.conv0(x_in))
98
-
99
- y = self.bn2[0](self.conv2[0](self.relu(self.bn1[0](self.conv1[0](x_in)))))
100
- for i in range(1, len(self.conv_list)):
101
- y += self.bn2[i](self.conv2[i](self.relu(self.bn1[i](self.conv1[i](x_in)))))
102
- x_in = self.relu(y + x_in) # modified
103
-
104
- x_in = self.conv_classes(x_in)
105
-
106
- return x_in
107
-
108
-
109
- class ProcessKitti(nn.Module):
110
- def __init__(self, feature, norm_layer, bn_momentum, dilations=[1, 2, 3]):
111
- super(Process, self).__init__()
112
- self.main = nn.Sequential(
113
- *[
114
- Bottleneck3D(
115
- feature,
116
- feature // 4,
117
- bn_momentum=bn_momentum,
118
- norm_layer=norm_layer,
119
- dilation=[i, i, i],
120
- )
121
- for i in dilations
122
- ]
123
- )
124
-
125
- def forward(self, x):
126
- return self.main(x)
127
-
128
-
129
- class Process(nn.Module):
130
- def __init__(self, feature, norm_layer, bn_momentum, dilations=[1, 2, 3]):
131
- super(Process, self).__init__()
132
- self.main = nn.Sequential(
133
- *[
134
- Bottleneck3D(
135
- feature,
136
- feature // 4,
137
- bn_momentum=bn_momentum,
138
- norm_layer=norm_layer,
139
- dilation=[i, i, i],
140
- )
141
- for i in dilations
142
- ]
143
- )
144
-
145
- def forward(self, x):
146
- return self.main(x)
147
-
148
-
149
- class Upsample(nn.Module):
150
- def __init__(self, in_channels, out_channels, norm_layer, bn_momentum):
151
- super(Upsample, self).__init__()
152
- self.main = nn.Sequential(
153
- nn.ConvTranspose3d(
154
- in_channels,
155
- out_channels,
156
- kernel_size=3,
157
- stride=2,
158
- padding=1,
159
- dilation=1,
160
- output_padding=1,
161
- ),
162
- norm_layer(out_channels, momentum=bn_momentum),
163
- nn.ReLU(),
164
- )
165
-
166
- def forward(self, x):
167
- return self.main(x)
168
-
169
-
170
- class Downsample(nn.Module):
171
- def __init__(self, feature, norm_layer, bn_momentum, expansion=8):
172
- super(Downsample, self).__init__()
173
- self.main = Bottleneck3D(
174
- feature,
175
- feature // 4,
176
- bn_momentum=bn_momentum,
177
- expansion=expansion,
178
- stride=2,
179
- downsample=nn.Sequential(
180
- nn.AvgPool3d(kernel_size=2, stride=2),
181
- nn.Conv3d(
182
- feature,
183
- int(feature * expansion / 4),
184
- kernel_size=1,
185
- stride=1,
186
- bias=False,
187
- ),
188
- norm_layer(int(feature * expansion / 4), momentum=bn_momentum),
189
- ),
190
- norm_layer=norm_layer,
191
- )
192
-
193
- def forward(self, x):
194
- return self.main(x)
spaces/CVPR/WALT/mmdet/datasets/xml_style.py DELETED
@@ -1,170 +0,0 @@
1
- import os.path as osp
2
- import xml.etree.ElementTree as ET
3
-
4
- import mmcv
5
- import numpy as np
6
- from PIL import Image
7
-
8
- from .builder import DATASETS
9
- from .custom import CustomDataset
10
-
11
-
12
- @DATASETS.register_module()
13
- class XMLDataset(CustomDataset):
14
- """XML dataset for detection.
15
-
16
- Args:
17
- min_size (int | float, optional): The minimum size of bounding
18
- boxes in the images. If the size of a bounding box is less than
19
- ``min_size``, it would be add to ignored field.
20
- """
21
-
22
- def __init__(self, min_size=None, **kwargs):
23
- assert self.CLASSES or kwargs.get(
24
- 'classes', None), 'CLASSES in `XMLDataset` can not be None.'
25
- super(XMLDataset, self).__init__(**kwargs)
26
- self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}
27
- self.min_size = min_size
28
-
29
- def load_annotations(self, ann_file):
30
- """Load annotation from XML style ann_file.
31
-
32
- Args:
33
- ann_file (str): Path of XML file.
34
-
35
- Returns:
36
- list[dict]: Annotation info from XML file.
37
- """
38
-
39
- data_infos = []
40
- img_ids = mmcv.list_from_file(ann_file)
41
- for img_id in img_ids:
42
- filename = f'JPEGImages/{img_id}.jpg'
43
- xml_path = osp.join(self.img_prefix, 'Annotations',
44
- f'{img_id}.xml')
45
- tree = ET.parse(xml_path)
46
- root = tree.getroot()
47
- size = root.find('size')
48
- if size is not None:
49
- width = int(size.find('width').text)
50
- height = int(size.find('height').text)
51
- else:
52
- img_path = osp.join(self.img_prefix, 'JPEGImages',
53
- '{}.jpg'.format(img_id))
54
- img = Image.open(img_path)
55
- width, height = img.size
56
- data_infos.append(
57
- dict(id=img_id, filename=filename, width=width, height=height))
58
-
59
- return data_infos
60
-
61
- def _filter_imgs(self, min_size=32):
62
- """Filter images too small or without annotation."""
63
- valid_inds = []
64
- for i, img_info in enumerate(self.data_infos):
65
- if min(img_info['width'], img_info['height']) < min_size:
66
- continue
67
- if self.filter_empty_gt:
68
- img_id = img_info['id']
69
- xml_path = osp.join(self.img_prefix, 'Annotations',
70
- f'{img_id}.xml')
71
- tree = ET.parse(xml_path)
72
- root = tree.getroot()
73
- for obj in root.findall('object'):
74
- name = obj.find('name').text
75
- if name in self.CLASSES:
76
- valid_inds.append(i)
77
- break
78
- else:
79
- valid_inds.append(i)
80
- return valid_inds
81
-
82
- def get_ann_info(self, idx):
83
- """Get annotation from XML file by index.
84
-
85
- Args:
86
- idx (int): Index of data.
87
-
88
- Returns:
89
- dict: Annotation info of specified index.
90
- """
91
-
92
- img_id = self.data_infos[idx]['id']
93
- xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
94
- tree = ET.parse(xml_path)
95
- root = tree.getroot()
96
- bboxes = []
97
- labels = []
98
- bboxes_ignore = []
99
- labels_ignore = []
100
- for obj in root.findall('object'):
101
- name = obj.find('name').text
102
- if name not in self.CLASSES:
103
- continue
104
- label = self.cat2label[name]
105
- difficult = obj.find('difficult')
106
- difficult = 0 if difficult is None else int(difficult.text)
107
- bnd_box = obj.find('bndbox')
108
- # TODO: check whether it is necessary to use int
109
- # Coordinates may be float type
110
- bbox = [
111
- int(float(bnd_box.find('xmin').text)),
112
- int(float(bnd_box.find('ymin').text)),
113
- int(float(bnd_box.find('xmax').text)),
114
- int(float(bnd_box.find('ymax').text))
115
- ]
116
- ignore = False
117
- if self.min_size:
118
- assert not self.test_mode
119
- w = bbox[2] - bbox[0]
120
- h = bbox[3] - bbox[1]
121
- if w < self.min_size or h < self.min_size:
122
- ignore = True
123
- if difficult or ignore:
124
- bboxes_ignore.append(bbox)
125
- labels_ignore.append(label)
126
- else:
127
- bboxes.append(bbox)
128
- labels.append(label)
129
- if not bboxes:
130
- bboxes = np.zeros((0, 4))
131
- labels = np.zeros((0, ))
132
- else:
133
- bboxes = np.array(bboxes, ndmin=2) - 1
134
- labels = np.array(labels)
135
- if not bboxes_ignore:
136
- bboxes_ignore = np.zeros((0, 4))
137
- labels_ignore = np.zeros((0, ))
138
- else:
139
- bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
140
- labels_ignore = np.array(labels_ignore)
141
- ann = dict(
142
- bboxes=bboxes.astype(np.float32),
143
- labels=labels.astype(np.int64),
144
- bboxes_ignore=bboxes_ignore.astype(np.float32),
145
- labels_ignore=labels_ignore.astype(np.int64))
146
- return ann
147
-
148
- def get_cat_ids(self, idx):
149
- """Get category ids in XML file by index.
150
-
151
- Args:
152
- idx (int): Index of data.
153
-
154
- Returns:
155
- list[int]: All categories in the image of specified index.
156
- """
157
-
158
- cat_ids = []
159
- img_id = self.data_infos[idx]['id']
160
- xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
161
- tree = ET.parse(xml_path)
162
- root = tree.getroot()
163
- for obj in root.findall('object'):
164
- name = obj.find('name').text
165
- if name not in self.CLASSES:
166
- continue
167
- label = self.cat2label[name]
168
- cat_ids.append(label)
169
-
170
- return cat_ids
spaces/Caoyunkang/Segment-Any-Anomaly/utils/__init__.py DELETED
File without changes
spaces/ChandraMohanNayal/AutoGPT/ui/app.py DELETED
@@ -1,145 +0,0 @@
1
- import gradio as gr
2
- import utils
3
- from api import AutoAPI, get_openai_api_key
4
- import os, shutil
5
- import json
6
-
7
- FILE_DIR = os.path.dirname(os.path.abspath(__file__))
8
- OUTPUT_DIR = os.path.join(os.path.dirname(FILE_DIR), "auto_gpt_workspace")
9
- if not os.path.exists(OUTPUT_DIR):
10
- os.mkdir(OUTPUT_DIR)
11
-
12
- CSS = """
13
- #chatbot {font-family: monospace;}
14
- #files .generating {display: none;}
15
- #files .min {min-height: 0px;}
16
- """
17
-
18
- with gr.Blocks(css=CSS) as app:
19
- with gr.Column() as setup_pane:
20
- gr.Markdown(f"""# Auto-GPT
21
- 1. Duplicate this Space: <a href="https://huggingface.co/spaces/{os.getenv('SPACE_ID')}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a> This will **NOT** work without duplication!
22
- 2. Enter your <a href="https://platform.openai.com/account/api-keys">OpenAI API Key</a> below.
23
- """)
24
- with gr.Row():
25
- open_ai_key = gr.Textbox(
26
- value=get_openai_api_key(),
27
- label="OpenAI API Key",
28
- type="password",
29
- )
30
- gr.Markdown(
31
- "3. Fill the values below, then click 'Start'. There are example values you can load at the bottom of this page."
32
- )
33
- with gr.Row():
34
- ai_name = gr.Textbox(label="AI Name", placeholder="e.g. Entrepreneur-GPT")
35
- ai_role = gr.Textbox(
36
- label="AI Role",
37
- placeholder="e.g. an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.",
38
- )
39
- top_5_goals = gr.Dataframe(
40
- row_count=(5, "fixed"),
41
- col_count=(1, "fixed"),
42
- headers=["AI Goals - Enter up to 5"],
43
- type="array"
44
- )
45
- start_btn = gr.Button("Start", variant="primary")
46
- with open(os.path.join(FILE_DIR, "examples.json"), "r") as f:
47
- example_values = json.load(f)
48
- gr.Examples(
49
- example_values,
50
- [ai_name, ai_role, top_5_goals],
51
- )
52
- with gr.Column(visible=False) as main_pane:
53
- with gr.Row():
54
- with gr.Column(scale=2):
55
- chatbot = gr.Chatbot(elem_id="chatbot")
56
- with gr.Row():
57
- yes_btn = gr.Button("Yes", variant="primary", interactive=False)
58
- consecutive_yes = gr.Slider(
59
- 1, 10, 1, step=1, label="Consecutive Yes", interactive=False
60
- )
61
- custom_response = gr.Textbox(
62
- label="Custom Response",
63
- placeholder="Press 'Enter' to Submit.",
64
- interactive=False,
65
- )
66
- with gr.Column(scale=1):
67
- gr.HTML(
68
- lambda: f"""
69
- Generated Files
70
- <pre><code style='overflow-x: auto'>{utils.format_directory(OUTPUT_DIR)}</pre></code>
71
- """, every=3, elem_id="files"
72
- )
73
- download_btn = gr.Button("Download All Files")
74
-
75
- chat_history = gr.State([[None, None]])
76
- api = gr.State(None)
77
-
78
- def start(open_ai_key, ai_name, ai_role, top_5_goals):
79
- auto_api = AutoAPI(open_ai_key, ai_name, ai_role, top_5_goals)
80
- return gr.Column.update(visible=False), gr.Column.update(visible=True), auto_api
81
-
82
- def bot_response(chat, api):
83
- messages = []
84
- for message in api.get_chatbot_response():
85
- messages.append(message)
86
- chat[-1][1] = "\n".join(messages) + "..."
87
- yield chat
88
- chat[-1][1] = "\n".join(messages)
89
- yield chat
90
-
91
- def send_message(count, chat, api, message="Y"):
92
- if message != "Y":
93
- count = 1
94
- for i in range(count):
95
- chat.append([message, None])
96
- yield chat, count - i
97
- api.send_message(message)
98
- for updated_chat in bot_response(chat, api):
99
- yield updated_chat, count - i
100
-
101
- def activate_inputs():
102
- return {
103
- yes_btn: gr.Button.update(interactive=True),
104
- consecutive_yes: gr.Slider.update(interactive=True),
105
- custom_response: gr.Textbox.update(interactive=True),
106
- }
107
-
108
- def deactivate_inputs():
109
- return {
110
- yes_btn: gr.Button.update(interactive=False),
111
- consecutive_yes: gr.Slider.update(interactive=False),
112
- custom_response: gr.Textbox.update(interactive=False),
113
- }
114
-
115
- start_btn.click(
116
- start,
117
- [open_ai_key, ai_name, ai_role, top_5_goals],
118
- [setup_pane, main_pane, api],
119
- ).then(bot_response, [chat_history, api], chatbot).then(
120
- activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
121
- )
122
-
123
- yes_btn.click(
124
- deactivate_inputs, None, [yes_btn, consecutive_yes, custom_response]
125
- ).then(
126
- send_message, [consecutive_yes, chat_history, api], [chatbot, consecutive_yes]
127
- ).then(
128
- activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
129
- )
130
- custom_response.submit(
131
- deactivate_inputs, None, [yes_btn, consecutive_yes, custom_response]
132
- ).then(
133
- send_message,
134
- [consecutive_yes, chat_history, api, custom_response],
135
- [chatbot, consecutive_yes],
136
- ).then(
137
- activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
138
- )
139
-
140
- def download_all_files():
141
- shutil.make_archive("outputs", "zip", OUTPUT_DIR)
142
-
143
- download_btn.click(download_all_files).then(None, _js=utils.DOWNLOAD_OUTPUTS_JS)
144
-
145
- app.queue(concurrency_count=20).launch(file_directories=[OUTPUT_DIR])
spaces/ChrisPreston/diff-svc_minato_aqua/utils/__init__.py DELETED
@@ -1,250 +0,0 @@
1
- import glob
2
- import logging
3
- import re
4
- import time
5
- from collections import defaultdict
6
- import os
7
- import sys
8
- import shutil
9
- import types
10
- import numpy as np
11
- import torch
12
- import torch.nn.functional as F
13
- import torch.distributed as dist
14
- from torch import nn
15
-
16
-
17
- def tensors_to_scalars(metrics):
18
- new_metrics = {}
19
- for k, v in metrics.items():
20
- if isinstance(v, torch.Tensor):
21
- v = v.item()
22
- if type(v) is dict:
23
- v = tensors_to_scalars(v)
24
- new_metrics[k] = v
25
- return new_metrics
26
-
27
-
28
- class AvgrageMeter(object):
29
-
30
- def __init__(self):
31
- self.reset()
32
-
33
- def reset(self):
34
- self.avg = 0
35
- self.sum = 0
36
- self.cnt = 0
37
-
38
- def update(self, val, n=1):
39
- self.sum += val * n
40
- self.cnt += n
41
- self.avg = self.sum / self.cnt
42
-
43
-
44
- def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1):
45
- """Convert a list of 1d tensors into a padded 2d tensor."""
46
- size = max(v.size(0) for v in values) if max_len is None else max_len
47
- res = values[0].new(len(values), size).fill_(pad_idx)
48
-
49
- def copy_tensor(src, dst):
50
- assert dst.numel() == src.numel()
51
- if shift_right:
52
- dst[1:] = src[:-1]
53
- dst[0] = shift_id
54
- else:
55
- dst.copy_(src)
56
-
57
- for i, v in enumerate(values):
58
- copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
59
- return res
60
-
61
-
62
- def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None):
63
- """Convert a list of 2d tensors into a padded 3d tensor."""
64
- size = max(v.size(0) for v in values) if max_len is None else max_len
65
- res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx)
66
-
67
- def copy_tensor(src, dst):
68
- assert dst.numel() == src.numel()
69
- if shift_right:
70
- dst[1:] = src[:-1]
71
- else:
72
- dst.copy_(src)
73
-
74
- for i, v in enumerate(values):
75
- copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
76
- return res
77
-
78
-
79
- def _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
80
- if len(batch) == 0:
81
- return 0
82
- if len(batch) == max_sentences:
83
- return 1
84
- if num_tokens > max_tokens:
85
- return 1
86
- return 0
87
-
88
-
89
- def batch_by_size(
90
- indices, num_tokens_fn, max_tokens=None, max_sentences=None,
91
- required_batch_size_multiple=1, distributed=False
92
- ):
93
- """
94
- Yield mini-batches of indices bucketed by size. Batches may contain
95
- sequences of different lengths.
96
-
97
- Args:
98
- indices (List[int]): ordered list of dataset indices
99
- num_tokens_fn (callable): function that returns the number of tokens at
100
- a given index
101
- max_tokens (int, optional): max number of tokens in each batch
102
- (default: None).
103
- max_sentences (int, optional): max number of sentences in each
104
- batch (default: None).
105
- required_batch_size_multiple (int, optional): require batch size to
106
- be a multiple of N (default: 1).
107
- """
108
- max_tokens = max_tokens if max_tokens is not None else sys.maxsize
109
- max_sentences = max_sentences if max_sentences is not None else sys.maxsize
110
- bsz_mult = required_batch_size_multiple
111
-
112
- if isinstance(indices, types.GeneratorType):
113
- indices = np.fromiter(indices, dtype=np.int64, count=-1)
114
-
115
- sample_len = 0
116
- sample_lens = []
117
- batch = []
118
- batches = []
119
- for i in range(len(indices)):
120
- idx = indices[i]
121
- num_tokens = num_tokens_fn(idx)
122
- sample_lens.append(num_tokens)
123
- sample_len = max(sample_len, num_tokens)
124
- assert sample_len <= max_tokens, (
125
- "sentence at index {} of size {} exceeds max_tokens "
126
- "limit of {}!".format(idx, sample_len, max_tokens)
127
- )
128
- num_tokens = (len(batch) + 1) * sample_len
129
-
130
- if _is_batch_full(batch, num_tokens, max_tokens, max_sentences):
131
- mod_len = max(
132
- bsz_mult * (len(batch) // bsz_mult),
133
- len(batch) % bsz_mult,
134
- )
135
- batches.append(batch[:mod_len])
136
- batch = batch[mod_len:]
137
- sample_lens = sample_lens[mod_len:]
138
- sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
139
- batch.append(idx)
140
- if len(batch) > 0:
141
- batches.append(batch)
142
- return batches
143
-
144
-
145
- def make_positions(tensor, padding_idx):
146
- """Replace non-padding symbols with their position numbers.
147
-
148
- Position numbers begin at padding_idx+1. Padding symbols are ignored.
149
- """
150
- # The series of casts and type-conversions here are carefully
151
- # balanced to both work with ONNX export and XLA. In particular XLA
152
- # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
153
- # how to handle the dtype kwarg in cumsum.
154
- mask = tensor.ne(padding_idx).int()
155
- return (
156
- torch.cumsum(mask, dim=1).type_as(mask) * mask
157
- ).long() + padding_idx
158
-
159
-
160
- def softmax(x, dim):
161
- return F.softmax(x, dim=dim, dtype=torch.float32)
162
-
163
-
164
- def unpack_dict_to_list(samples):
165
- samples_ = []
166
- bsz = samples.get('outputs').size(0)
167
- for i in range(bsz):
168
- res = {}
169
- for k, v in samples.items():
170
- try:
171
- res[k] = v[i]
172
- except:
173
- pass
174
- samples_.append(res)
175
- return samples_
176
-
177
-
178
- def load_ckpt(cur_model, ckpt_base_dir, prefix_in_ckpt='model', force=True, strict=True):
179
- if os.path.isfile(ckpt_base_dir):
180
- base_dir = os.path.dirname(ckpt_base_dir)
181
- checkpoint_path = [ckpt_base_dir]
182
- else:
183
- base_dir = ckpt_base_dir
184
- checkpoint_path = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key=
185
- lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x.replace('\\','/'))[0]))
186
- if len(checkpoint_path) > 0:
187
- checkpoint_path = checkpoint_path[-1]
188
- state_dict = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
189
- state_dict = {k[len(prefix_in_ckpt) + 1:]: v for k, v in state_dict.items()
190
- if k.startswith(f'{prefix_in_ckpt}.')}
191
- if not strict:
192
- cur_model_state_dict = cur_model.state_dict()
193
- unmatched_keys = []
194
- for key, param in state_dict.items():
195
- if key in cur_model_state_dict:
196
- new_param = cur_model_state_dict[key]
197
- if new_param.shape != param.shape:
198
- unmatched_keys.append(key)
199
- print("| Unmatched keys: ", key, new_param.shape, param.shape)
200
- for key in unmatched_keys:
201
- del state_dict[key]
202
- cur_model.load_state_dict(state_dict, strict=strict)
203
- print(f"| load '{prefix_in_ckpt}' from '{checkpoint_path}'.")
204
- else:
205
- e_msg = f"| ckpt not found in {base_dir}."
206
- if force:
207
- assert False, e_msg
208
- else:
209
- print(e_msg)
210
-
211
-
212
- def remove_padding(x, padding_idx=0):
213
- if x is None:
214
- return None
215
- assert len(x.shape) in [1, 2]
216
- if len(x.shape) == 2: # [T, H]
217
- return x[np.abs(x).sum(-1) != padding_idx]
218
- elif len(x.shape) == 1: # [T]
219
- return x[x != padding_idx]
220
-
221
-
222
- class Timer:
223
- timer_map = {}
224
-
225
- def __init__(self, name, print_time=False):
226
- if name not in Timer.timer_map:
227
- Timer.timer_map[name] = 0
228
- self.name = name
229
- self.print_time = print_time
230
-
231
- def __enter__(self):
232
- self.t = time.time()
233
-
234
- def __exit__(self, exc_type, exc_val, exc_tb):
235
- Timer.timer_map[self.name] += time.time() - self.t
236
- if self.print_time:
237
- print(self.name, Timer.timer_map[self.name])
238
-
239
-
240
- def print_arch(model, model_name='model'):
241
- #print(f"| {model_name} Arch: ", model)
242
- num_params(model, model_name=model_name)
243
-
244
-
245
- def num_params(model, print_out=True, model_name="model"):
246
- parameters = filter(lambda p: p.requires_grad, model.parameters())
247
- parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
248
- if print_out:
249
- print(f'| {model_name} Trainable Parameters: %.3fM' % parameters)
250
- return parameters
spaces/CikeyQI/Yunzai/Yunzai/lib/listener/listener.js DELETED
@@ -1,16 +0,0 @@
1
- import PluginsLoader from '../plugins/loader.js'
2
-
3
- export default class EventListener {
4
- /**
5
- * 事件监听
6
- * @param data.prefix 事件名称前缀
7
- * @param data.event 监听的事件
8
- * @param data.once 是否只监听一次
9
- */
10
- constructor (data) {
11
- this.prefix = data.prefix || ''
12
- this.event = data.event
13
- this.once = data.once || false
14
- this.plugins = PluginsLoader
15
- }
16
- }
spaces/CofAI/chat/g4f/Provider/Providers/Forefront.py DELETED
@@ -1,30 +0,0 @@
1
- import os
2
- import json
3
- import requests
4
- from ...typing import sha256, Dict, get_type_hints
5
-
6
- url = 'https://forefront.com'
7
- model = ['gpt-3.5-turbo']
8
- supports_stream = True
9
- needs_auth = False
10
-
11
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
12
- json_data = {
13
- 'text': messages[-1]['content'],
14
- 'action': 'noauth',
15
- 'id': '',
16
- 'parentId': '',
17
- 'workspaceId': '',
18
- 'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
19
- 'model': 'gpt-4',
20
- 'messages': messages[:-1] if len(messages) > 1 else [],
21
- 'internetMode': 'auto'
22
- }
23
- response = requests.post( 'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
24
- json=json_data, stream=True)
25
- for token in response.iter_lines():
26
- if b'delta' in token:
27
- token = json.loads(token.decode().split('data: ')[1])['delta']
28
- yield (token)
29
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
30
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()
spaces/Cvandi/remake/tests/test_model.py DELETED
@@ -1,126 +0,0 @@
1
- import torch
2
- import yaml
3
- from basicsr.archs.rrdbnet_arch import RRDBNet
4
- from basicsr.data.paired_image_dataset import PairedImageDataset
5
- from basicsr.losses.losses import GANLoss, L1Loss, PerceptualLoss
6
-
7
- from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN
8
- from realesrgan.models.realesrgan_model import RealESRGANModel
9
- from realesrgan.models.realesrnet_model import RealESRNetModel
10
-
11
-
12
- def test_realesrnet_model():
13
- with open('tests/data/test_realesrnet_model.yml', mode='r') as f:
14
- opt = yaml.load(f, Loader=yaml.FullLoader)
15
-
16
- # build model
17
- model = RealESRNetModel(opt)
18
- # test attributes
19
- assert model.__class__.__name__ == 'RealESRNetModel'
20
- assert isinstance(model.net_g, RRDBNet)
21
- assert isinstance(model.cri_pix, L1Loss)
22
- assert isinstance(model.optimizers[0], torch.optim.Adam)
23
-
24
- # prepare data
25
- gt = torch.rand((1, 3, 32, 32), dtype=torch.float32)
26
- kernel1 = torch.rand((1, 5, 5), dtype=torch.float32)
27
- kernel2 = torch.rand((1, 5, 5), dtype=torch.float32)
28
- sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32)
29
- data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel)
30
- model.feed_data(data)
31
- # check dequeue
32
- model.feed_data(data)
33
- # check data shape
34
- assert model.lq.shape == (1, 3, 8, 8)
35
- assert model.gt.shape == (1, 3, 32, 32)
36
-
37
- # change probability to test if-else
38
- model.opt['gaussian_noise_prob'] = 0
39
- model.opt['gray_noise_prob'] = 0
40
- model.opt['second_blur_prob'] = 0
41
- model.opt['gaussian_noise_prob2'] = 0
42
- model.opt['gray_noise_prob2'] = 0
43
- model.feed_data(data)
44
- # check data shape
45
- assert model.lq.shape == (1, 3, 8, 8)
46
- assert model.gt.shape == (1, 3, 32, 32)
47
-
48
- # ----------------- test nondist_validation -------------------- #
49
- # construct dataloader
50
- dataset_opt = dict(
51
- name='Demo',
52
- dataroot_gt='tests/data/gt',
53
- dataroot_lq='tests/data/lq',
54
- io_backend=dict(type='disk'),
55
- scale=4,
56
- phase='val')
57
- dataset = PairedImageDataset(dataset_opt)
58
- dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
59
- assert model.is_train is True
60
- model.nondist_validation(dataloader, 1, None, False)
61
- assert model.is_train is True
62
-
63
-
64
- def test_realesrgan_model():
65
- with open('tests/data/test_realesrgan_model.yml', mode='r') as f:
66
- opt = yaml.load(f, Loader=yaml.FullLoader)
67
-
68
- # build model
69
- model = RealESRGANModel(opt)
70
- # test attributes
71
- assert model.__class__.__name__ == 'RealESRGANModel'
72
- assert isinstance(model.net_g, RRDBNet) # generator
73
- assert isinstance(model.net_d, UNetDiscriminatorSN) # discriminator
74
- assert isinstance(model.cri_pix, L1Loss)
75
- assert isinstance(model.cri_perceptual, PerceptualLoss)
76
- assert isinstance(model.cri_gan, GANLoss)
77
- assert isinstance(model.optimizers[0], torch.optim.Adam)
78
- assert isinstance(model.optimizers[1], torch.optim.Adam)
79
-
80
- # prepare data
81
- gt = torch.rand((1, 3, 32, 32), dtype=torch.float32)
82
- kernel1 = torch.rand((1, 5, 5), dtype=torch.float32)
83
- kernel2 = torch.rand((1, 5, 5), dtype=torch.float32)
84
- sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32)
85
- data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel)
86
- model.feed_data(data)
87
- # check dequeue
88
- model.feed_data(data)
89
- # check data shape
90
- assert model.lq.shape == (1, 3, 8, 8)
91
- assert model.gt.shape == (1, 3, 32, 32)
92
-
93
- # change probability to test if-else
94
- model.opt['gaussian_noise_prob'] = 0
95
- model.opt['gray_noise_prob'] = 0
96
- model.opt['second_blur_prob'] = 0
97
- model.opt['gaussian_noise_prob2'] = 0
98
- model.opt['gray_noise_prob2'] = 0
99
- model.feed_data(data)
100
- # check data shape
101
- assert model.lq.shape == (1, 3, 8, 8)
102
- assert model.gt.shape == (1, 3, 32, 32)
103
-
104
- # ----------------- test nondist_validation -------------------- #
105
- # construct dataloader
106
- dataset_opt = dict(
107
- name='Demo',
108
- dataroot_gt='tests/data/gt',
109
- dataroot_lq='tests/data/lq',
110
- io_backend=dict(type='disk'),
111
- scale=4,
112
- phase='val')
113
- dataset = PairedImageDataset(dataset_opt)
114
- dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
115
- assert model.is_train is True
116
- model.nondist_validation(dataloader, 1, None, False)
117
- assert model.is_train is True
118
-
119
- # ----------------- test optimize_parameters -------------------- #
120
- model.feed_data(data)
121
- model.optimize_parameters(1)
122
- assert model.output.shape == (1, 3, 32, 32)
123
- assert isinstance(model.log_dict, dict)
124
- # check returned keys
125
- expected_keys = ['l_g_pix', 'l_g_percep', 'l_g_gan', 'l_d_real', 'out_d_real', 'l_d_fake', 'out_d_fake']
126
- assert set(expected_keys).issubset(set(model.log_dict.keys()))
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/solver/build.py DELETED
@@ -1,31 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
- import torch
3
-
4
- from .lr_scheduler import WarmupMultiStepLR
5
-
6
-
7
- def make_optimizer(cfg, model):
8
- params = []
9
- for key, value in model.named_parameters():
10
- if not value.requires_grad:
11
- continue
12
- lr = cfg.SOLVER.BASE_LR
13
- weight_decay = cfg.SOLVER.WEIGHT_DECAY
14
- if "bias" in key:
15
- lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
16
- weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
17
- params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
18
-
19
- optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
20
- return optimizer
21
-
22
-
23
- def make_lr_scheduler(cfg, optimizer):
24
- return WarmupMultiStepLR(
25
- optimizer,
26
- cfg.SOLVER.STEPS,
27
- cfg.SOLVER.GAMMA,
28
- warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
29
- warmup_iters=cfg.SOLVER.WARMUP_ITERS,
30
- warmup_method=cfg.SOLVER.WARMUP_METHOD,
31
- )