Commit 906f139
Parent(s): 619df3b

Update parquet files (step 62 of 249)

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Assimil Il Tedesco Senza Sforzo MP3 77.00M La soluzione ideale per imparare il tedesco da casa o in viaggio.md +0 -150
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/DAEMON Tools Pro Advanced V5.1.0.0333 Admin Crack Download Pc The Complete Review and Comparison.md +0 -113
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Eurosoft Diagnostics.md +0 -14
- spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2020.2 [Crack Patch ] Torrent! _TOP_.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Diablo Tactic Cm 03 04 25.md +0 -15
- spaces/1gistliPinn/ChatGPT4/Examples/Download [UPDATED] Solidcam 2013 Full Crack.md +0 -40
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator How to Play GameCube and Wii Games on Your PC.md +0 -88
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 2048 Mod APK for Android and IOS The Ultimate Puzzle Game.md +0 -116
- spaces/1phancelerku/anime-remove-background/1v1 Battle Challenge Your Friends and Enemies in Epic Duels.md +0 -111
- spaces/A00001/bingothoo/src/app/page.tsx +0 -15
- spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +0 -86
- spaces/AIConsultant/MusicGen/tests/adversarial/test_discriminators.py +0 -67
- spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/model_eval_diff.py +0 -110
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/lr_scheduler.py +0 -98
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/model.py +0 -77
- spaces/AIZero2HeroBootcamp/MultiPDF-QA-ChatGPT-Langchain/README.md +0 -13
- spaces/ASJMO/freegpt/g4f/Provider/__init__.py +0 -36
- spaces/Accel/media-converter/styles.css +0 -9
- spaces/Adapter/T2I-Adapter/ldm/util.py +0 -200
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/childbehaviors/Fade.js +0 -36
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/instruct_pix2pix/README_sdxl.md +0 -148
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/testing_utils.py +0 -684
- spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/README.md +0 -28
- spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/builder.py +0 -7
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/reppoints_head.py +0 -763
- spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/drive.py +0 -59
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py +0 -2
- spaces/AnnonSubmission/xai-cl/ssl_models/dino.py +0 -181
- spaces/Annotation-AI/fast-segment-everything-with-text-prompt/README.md +0 -12
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/dpt_depth.py +0 -109
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/backbone/backbone.py +0 -221
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/__init__.py +0 -49
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/cache.py +0 -69
- spaces/AutoGeneralAI/chatgpt-clone/README.md +0 -12
- spaces/Bart92/RVC_HF/infer/lib/slicer2.py +0 -260
- spaces/Benson/text-generation/Examples/Androide Oyun Apk Bola Roja 4.md +0 -60
- spaces/Benson/text-generation/Examples/Apk Adresi Gta 5.md +0 -79
- spaces/BhaskarKapri/Animal/app.py +0 -36
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/upload.py +0 -17
- spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/README.md +0 -192
- spaces/BreetheRun/mitchtech-vulcan-diffusion/app.py +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +0 -46
- spaces/CVPR/LIVE/pybind11/pybind11/__init__.py +0 -13
- spaces/CVPR/LIVE/pybind11/tools/clang/enumerations.py +0 -34
- spaces/CVPR/LIVE/thrust/thrust/iterator/transform_input_output_iterator.h +0 -163
- spaces/CVPR/LIVE/thrust/thrust/shuffle.h +0 -179
- spaces/CVPR/MonoScene/monoscene/flosp.py +0 -41
- spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/maskiou_head.py +0 -186
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Assimil Il Tedesco Senza Sforzo MP3 77.00M La soluzione ideale per imparare il tedesco da casa o in viaggio.md
DELETED
@@ -1,150 +0,0 @@
-
-<h1>Assimil Il Tedesco Senza Sforzo MP3 77.00M: Learn German Without Effort</h1>
-<p>Do you want to learn German in a fun, easy, and natural way? Do you want to improve your listening, speaking, reading, and writing skills in German? Do you want to access a comprehensive and effective course that covers all the aspects of the German language and culture? If you answered yes to any of these questions, then you should consider Assimil Il Tedesco Senza Sforzo MP3 77.00M as your ideal solution.</p>
-<p>Assimil Il Tedesco Senza Sforzo MP3 77.00M is a digital version of the popular Assimil course that teaches you German without effort. It consists of an e-book with 100 lessons and an audio file with more than 77 minutes of dialogues and exercises in MP3 format. In this article, we will explain what Assimil Il Tedesco Senza Sforzo is, what MP3 77.00M means, and why you should learn German with this course.</p>
-<h2>Assimil Il Tedesco Senza Sforzo MP3 77.00M</h2><br /><p><b><b>Download Zip</b> ✓✓✓ <a href="https://byltly.com/2uKwJH">https://byltly.com/2uKwJH</a></b></p><br /><br />
-<h2>What is Assimil Il Tedesco Senza Sforzo?</h2>
-<p>Assimil Il Tedesco Senza Sforzo is the Italian edition of Assimil German With Ease, one of the most successful and renowned courses in the Assimil series. Assimil is a French company that has been producing language courses since 1929. It has a unique and proven method that allows you to learn a new language in the same way you learned your mother tongue: by listening, repeating, understanding, and speaking.</p>
-<h3>The history and philosophy of Assimil</h3>
-<p>The founder of Assimil, Alphonse Chérel, was a polyglot who spoke more than 20 languages. He was inspired by his own experience of learning languages through exposure and immersion. He developed a method that he called "assimilation", which is based on three principles:</p>
-<ul>
-<li>Naturalness: You learn a language by following its natural progression, from simple to complex, from familiar to unfamiliar.</li>
-<li>Intuitiveness: You learn a language by relying on your intuition, without memorizing rules or lists.</li>
-<li>Humor: You learn a language by enjoying it, with humorous dialogues and situations that make you laugh and relax.</li>
-</ul>
-<p>Chérel published his first course, L'Anglais Sans Peine (English Without Effort), in 1929. It was an instant success and soon he created courses for other languages, such as German, Spanish, Italian, Russian, and more. Today, Assimil offers more than 100 courses for over 50 languages, covering all levels from beginner to advanced.</p>
-<h3>The features and benefits of Assimil Il Tedesco Senza Sforzo</h3>
-<p>Assimil Il Tedesco Senza Sforzo is one of the best-selling courses in the Assimil series. It has many features and benefits that make it an ideal choice for anyone who wants to learn German without effort:</p>
-<ul>
-<li>It covers all the aspects of the German language: grammar, vocabulary, pronunciation, idioms, culture, etc.</li>
-<li>It follows a logical and gradual progression that adapts to your level and pace.</li>
-<li>It uses authentic and realistic dialogues that reflect everyday situations and conversations.</li>
-<li>It provides clear and concise explanations and notes that help you understand the language better.</li>
-<li>It includes exercises and reviews that reinforce your learning and test your progress.</li>
-<li>It offers tips and advice that help you improve your skills and avoid common mistakes.</li>
-</ul>
-<p>By using Assimil Il Tedesco Senza Sforzo regularly, you will be able to achieve a level of fluency equivalent to B2 in the Common European Framework of Reference for Languages (CEFR). This means that you will be able to communicate effectively in most situations that require interaction with native speakers.</p>
-<p>Assimil German without effort audio files download<br />
-How to learn German with Assimil Il Tedesco Senza Sforzo<br />
-Assimil Il Tedesco Senza Sforzo MP3 review and ratings<br />
-Best price for Assimil Il Tedesco Senza Sforzo MP3 course<br />
-Assimil Il Tedesco Senza Sforzo MP3 vs other German learning methods<br />
-Where to buy Assimil Il Tedesco Senza Sforzo MP3 online<br />
-Benefits of using Assimil Il Tedesco Senza Sforzo MP3 for German learners<br />
-Assimil Il Tedesco Senza Sforzo MP3 free sample lessons<br />
-How long does it take to finish Assimil Il Tedesco Senza Sforzo MP3<br />
-Tips and tricks for using Assimil Il Tedesco Senza Sforzo MP3 effectively<br />
-Assimil Il Tedesco Senza Sforzo MP3 testimonials and success stories<br />
-How to access Assimil Il Tedesco Senza Sforzo MP3 on different devices<br />
-What is the difference between Assimil Il Tedesco Senza Sforzo MP3 and PDF<br />
-How to get the most out of Assimil Il Tedesco Senza Sforzo MP3<br />
-Is Assimil Il Tedesco Senza Sforzo MP3 worth the money<br />
-How to improve your pronunciation with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to use Assimil Il Tedesco Senza Sforzo MP3 with a tutor or a partner<br />
-How to track your progress with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to supplement Assimil Il Tedesco Senza Sforzo MP3 with other resources<br />
-How to troubleshoot common problems with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to customize Assimil Il Tedesco Senza Sforzo MP3 to your learning style and goals<br />
-How to avoid boredom and frustration with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to make Assimil Il Tedesco Senza Sforzo MP3 fun and enjoyable<br />
-How to integrate Assimil Il Tedesco Senza Sforzo MP3 into your daily routine<br />
-How to overcome challenges and difficulties with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to review and revise with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to master German grammar and vocabulary with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to prepare for German exams and tests with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to communicate confidently in German with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to expand your German knowledge and skills with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to use Assimil Il Tedesco Senza Sforzo MP3 for travel and work purposes<br />
-How to learn German culture and history with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to appreciate German literature and music with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to compare and contrast German and Italian languages with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to learn from your mistakes and errors with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to maintain and improve your German level with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to teach others German with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to create your own German content with Assimil Il Tedesco Senza Sforzo MP3<br />
-How to join online communities of German learners using Assimil Il Tedesco Senza Sforzo MP3<br />
-How to find more information and support for using Assimil Il Tedesco Senza Sforzo MP3<br />
-Alternatives and competitors of Assimil Il Tedesco Senza Sforzo MP3 in the market<br />
-Pros and cons of using Assimil Il Tedesco Senza Sforzo MP3 for learning German<br />
-Frequently asked questions about Assimil Il Tedesco Senza Sforzo MP3 answered by experts<br />
-Discounts and offers for buying or subscribing to Assimil Il Tedesco Senza Sforzo MP3 <br />
-Customer service and technical support for using Assimil Il Tedesco Senza Sforzo MP3</p>
-<h3>How to use Assimil Il Tedesco Senza Sforzo effectively</h3>
-<p>The key to using Assimil Il Tedesco Senza Sforzo effectively is to follow its simple but powerful method. The method consists of two phases: the passive phase and the active phase.</p>
-<p>In the passive phase, which lasts for about 50 lessons, you will listen to the dialogues, read them aloud or silently, repeat them after the speaker, understand their meaning with the help of the notes and translations, and do some exercises. You will spend about 20 to 30 minutes per day on each lesson.</p>
-<p>In the active phase, which starts from lesson 51 onwards, you will continue with the passive phase for the new lessons while reviewing the previous ones actively. This means that you will try to translate them from Italian into German without looking at the text or listening to the audio. You will also do some written exercises that will help you consolidate your knowledge. You will spend about 40 to 50 minutes per day on each lesson.</p>
-<p>By following this method consistently for about six months, you will be able to master the basics of German and speak it with confidence.</p>
-<h2>What is MP3 77.00M?</h2>
-<p>MP3 77.00M is the digital format of the audio file that accompanies Assimil Il Tedesco Senza Sforzo. It contains more than 77 minutes of high-quality recordings by native speakers who speak clearly and naturally. It also includes some background music and sound effects that create a pleasant atmosphere for learning.</p>
-<h3>The advantages of MP3 format for language learning</h3>
-<p>The MP3 format has many advantages for language learning:</p>
-<ul>
-<li>It is compatible with most devices: computers, smartphones, tablets, mp3 players, etc.</li>
-<li>It is easy to download and store: you can get it online or via email in minutes.</li>
-<li>It is portable and flexible: you can listen to it anywhere and anytime: at home, in your car, on public transport, while walking or jogging, etc.</li>
-<li>It is interactive and engaging: you can pause it, rewind it, fast-forward it, repeat it as many times as you want.</li>
-<li>It is effective and efficient: it helps you improve your listening comprehension, pronunciation, intonation, rhythm, accentuation,</li>
-</ul><p style="text-align:right;">...continued</p><hr/>
-...continued <ul><li style="list-style-type:none;">and fluency in German.</li></ul>
-<h3>The contents and quality of Assimil Il Tedesco Senza Sforzo MP3 77.00M</h3>
-<p>The contents of Assimil Il Tedesco Senza Sforzo MP3 77.00M are divided into four parts:</p>
-<ol>
-<li>The introduction: It explains how to use the course effectively and gives some general information about German.</li>
-<li>The lessons: It contains all the dialogues from lesson 1 to lesson 100 with their corresponding translations into Italian.</li>
-<li>The exercises: It contains all the oral exercises from lesson 1 to lesson 100 with their corresponding answers in German.</li>
-<li>The appendix: It contains some additional material such as numbers, ...continued <p>education, research, etc.</li>
-</ul>
-<p>Germany is a world leader in many fields and sectors, such as engineering, manufacturing, trade, tourism, education, research, etc. It has some of the most innovative and successful companies in the world, such as Volkswagen, BMW, Mercedes-Benz, Siemens, Bosch, SAP, Adidas, etc. It also has some of the most prestigious and renowned universities and research institutes in the world, such as Heidelberg University, Technical University of Munich, Max Planck Society, Fraunhofer Society, etc.</p>
-<p>Learning German can open many doors for you and give you a competitive edge in the global market. You can also enjoy the rich and diverse culture and history of Germany and its neighboring countries.</p>
-<h3>The challenges and opportunities of learning German as a foreign language</h3>
-<p>Learning German as a foreign language can be challenging but also rewarding. German is often considered a difficult language because of its complex grammar, long words, and different cases. However, it also has many advantages and similarities to English and other languages:</p>
-<ul>
-<li>It has a clear and logical structure that makes sense once you learn the rules.</li>
-<li>It has many cognates and loanwords that are easy to recognize and remember.</li>
-<li>It has a phonetic spelling that makes pronunciation easier.</li>
-<li>It has a rich and expressive vocabulary that allows you to convey nuances and emotions.</li>
-</ul>
-<p>Learning German can also offer you many opportunities to practice and improve your skills. You can access a wide range of resources and materials online or offline. You can watch movies and TV shows, listen to music and podcasts, read books and magazines, play games and apps, etc. You can also interact with native speakers and learners online or offline. You can join language exchange platforms, social media groups, online forums, etc. You can also travel to Germany or other German-speaking countries and immerse yourself in the language and culture.</p>
-<h3>The testimonials and reviews of Assimil Il Tedesco Senza Sforzo MP3 77.00M users</h3>
-<p>Many users of Assimil Il Tedesco Senza Sforzo MP3 77.00M have shared their positive experiences and feedback on various platforms. Here are some examples of what they have said:</p>
-<blockquote>
-<p>"I have been using Assimil Il Tedesco Senza Sforzo MP3 77.00M for about three months now and I am very satisfied with it. It is easy to follow, fun to listen to, and very effective. I have learned a lot of vocabulary, grammar, and expressions in German. I can understand most of what I hear and read in German. I can also speak with confidence and fluency in German. I highly recommend this course to anyone who wants to learn German without effort."</p>
-<cite>- Marco from Rome</cite>
-</blockquote>
-<blockquote>
-<p>"Assimil Il Tedesco Senza Sforzo MP3 77.00M is the best course I have ever used to learn German. It is comprehensive, engaging, and practical. It covers all the aspects of the language: listening, ...continued <p>speaking, reading, and writing. It has realistic and humorous dialogues that keep me interested and motivated. It has clear and concise explanations and notes that make me understand the language better. It has exercises and reviews that reinforce my learning and test my progress. It has tips and advice that help me improve my skills and avoid common mistakes. I have learned more German with this course than with any other method I have tried before."</p>
-<cite>- Anna from Milan</cite>
-</blockquote>
-<blockquote>
-<p>"I love Assimil Il Tedesco Senza Sforzo MP3 77.00M. It is the perfect course for me. It is comprehensive, engaging, and practical. It covers all the aspects of the language: listening, speaking, reading, and writing. It has realistic and humorous dialogues that keep me interested and motivated. It has clear and concise explanations and notes that make me understand the language better. It has exercises and reviews that reinforce my learning and test my progress. It has tips and advice that help me improve my skills and avoid common mistakes. I have learned more German with this course than with any other method I have tried before."</p>
-<cite>- Thomas from Berlin</cite>
-</blockquote>
-<h2>Conclusion</h2>
-<h4>Summary of the main points</h4>
-<p>In conclusion, Assimil Il Tedesco Senza Sforzo MP3 77.00M is a digital version of the popular Assimil course that teaches you German without effort. It consists of an e-book with 100 lessons and an audio file with more than 77 minutes of dialogues and exercises in MP3 format.</p>
-<p>Assimil Il Tedesco Senza Sforzo MP3 77.00M is based on a unique and proven method that allows you to learn a new language in the same way you learned your mother tongue: by listening, repeating, understanding, and speaking.</p>
-<p>Assimil Il Tedesco Senza Sforzo MP3 77.00M is easy to follow, fun to listen to, and very effective. It covers all the aspects of the German language: grammar, vocabulary, pronunciation, idioms, culture, etc. It follows a logical and gradual progression that adapts to your level and pace.</p>
-<p>Assimil Il Tedesco Senza Sforzo MP3 77.00M is compatible with most devices: computers, smartphones, tablets, mp3 players, etc. It is easy to download and store: you can get it online or via email in minutes.</p>
-<p>Learning German with Assimil Il Tedesco Senza Sforzo MP3 77.00M can enrich your personal and professional life in many ways: you can communicate with millions of people across different cultures and countries; you can access a vast amount of information and knowledge in various fields; you can travel to beautiful and fascinating places; you can enhance your career opportunities and prospects in many industries and sectors.</p>
-<h4>Call to action</h4>
-<p>If you are interested in learning German without effort, don't hesitate to get Assimil Il Tedesco Senza Sforzo MP3 77.00M today. You will not regret it.</p>
-<p>You can order it online from the official Assimil website or from other authorized sellers.</p>
-<p>You can also try a free sample lesson before you buy it.</p>
-<p>Don't miss this opportunity to learn one of the most widely spoken languages in the world with one of the most successful courses in the world.</p>
-<p>Get Assimil Il Tedesco Senza Sforzo MP3 77.00M now and start your journey to German fluency.</p>
-<h2>Frequently Asked Questions</h2>
-<ol>
-<li><strong>What is the difference between Assimil Il Tedesco Senza Sforzo MP3 77.00M and Assimil Il Tedesco Senza Sforzo CD?</strong></li>
-<li>The main difference is the format of the audio file. The MP3 version has a single file with more than 77 minutes of recordings in MP3 format, while the CD version has four CDs with about 20 minutes of recordings each in WAV format.</li>
-<li><strong>How long does it take to complete Assimil Il Tedesco Senza Sforzo MP3 77.00M?</strong></li>
-<li>The duration of the course depends on your level, pace, and goals. However, a typical learner can complete it in about six months by spending about 30 minutes per day on each lesson.</li>
-<li><strong>Do I need any prior knowledge of German to use Assimil Il Tedesco Senza Sforzo MP3 77.00M?</strong></li>
-<li>No, you don't need any prior knowledge of German to use Assimil Il Tedesco Senza Sforzo MP3 77.00M. The course is designed for absolute beginners who want to learn German from scratch.</li>
-<li><strong>Can I use Assimil Il Tedesco Senza Sforzo MP3 77.00M without the e-book?</strong></li>
-<li>No, you can't use Assimil Il Tedesco Senza Sforzo MP3 77.00M without the e-book. The e-book is an essential part of the course that complements the audio file. It contains the dialogues, ...continued <p>the translations, the notes, the exercises, and the appendix. You need to read and study the e-book along with the audio file to get the most out of the course.</li>
-<li><strong>What level of German can I achieve with Assimil Il Tedesco Senza Sforzo MP3 77.00M?</strong></li>
-<li>By using Assimil Il Tedesco Senza Sforzo MP3 77.00M regularly, you can achieve a level of fluency equivalent to B2 in the Common European Framework of Reference for Languages (CEFR). This means that you can communicate effectively in most situations that require interaction with native speakers.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/DAEMON Tools Pro Advanced V5.1.0.0333 Admin Crack Download Pc The Complete Review and Comparison.md
DELETED
@@ -1,113 +0,0 @@
-<br />
-<h1>AnyDVD-HD.7.2.3.0-Final-July 17,2k13.rar Serial Key Keygen</h1>
-<p>If you are a movie lover who wants to enjoy your DVD and Blu-ray collection on any device and software, you might be interested in AnyDVD HD. This is a powerful software that can remove any copy protection and region code from your discs, allowing you to watch them without any hassle. In this article, we will explain what AnyDVD HD is, how to install and activate it with serial key and keygen, why you need it, and where to download it.</p>
-<h2>What is AnyDVD HD?</h2>
-<p>AnyDVD HD is a software that works in the background to automatically and transparently enable read access of the contents of a movie DVD, Blu-ray, and HD DVD as soon as it's inserted into the drive. This means that you can use any DVD or Blu-ray backup software, such as CloneDVD, Pinnacle InstantCopy, Intervideo DVDCopy, and others, to copy or rip your discs without any problem. You can also play your discs on any DVD or Blu-ray player software, such as PowerDVD Ultra, VLC Media Player, Windows Media Player, and others, without worrying about region codes or HDCP-compliant graphics cards and displays.</p>
-<h2>AnyDVD-HD.7.2.3.0-Final-July 17,2k13.rar Serial Key Keygen</h2><br /><p><b><b>DOWNLOAD</b> ✪✪✪ <a href="https://byltly.com/2uKxJq">https://byltly.com/2uKxJq</a></b></p><br /><br />
-<h3>Features and benefits of AnyDVD HD</h3>
-<p>AnyDVD HD has many features and benefits that make it a must-have utility for the serious home theater enthusiast using a media center or home theater PC. Some of them are:</p>
-<ul>
-<li>It can decrypt Blu-ray (BD+ and AACS) and HD DVD (AACS) movies, allowing you to watch them even if they are not supported by your hardware or software.</li>
-<li>It can optionally disable the RPC region codes on DVDs and Blu-rays, making them region free and viewable on any DVD or Blu-ray player.</li>
-<li>It can control the drive speed of your DVD or Blu-ray drive, reducing the noise level when watching movies on your PC.</li>
-<li>It can adjust the display frequency of your monitor for both NTSC and PAL displays, improving the quality of the video output.</li>
-<li>It can decrypt protected audio CDs, allowing you to copy them or play them on any CD player.</li>
-<li>It can remove unwanted movie features, such as subtitles, logos, trailers, warnings, etc, giving you more control over what you watch.</li>
-<li>It can enable an external application to launch whenever you insert or remove a disc, such as a media player or a backup software.</li>
-<li>It can use magic file replacement to remaster any commercial movie disc using simple XML scripts, allowing you to customize discs as you like without making a copy to hard disk.</li>
-</ul>
-<h3>How to install and activate AnyDVD HD with serial key and keygen</h3>
-<p>To install and activate AnyDVD HD with serial key and keygen, you need to follow these steps:</p>
-<ol>
-<li>Download the file AnyDVD-HD.7.2.3.0-Final-July 17,2k13.rar from a reliable source.</li>
-<li>Extract the file using a program like WinRAR or 7-Zip.</li>
-<li>Run the setup file SetupAnyDVD7230.exe and follow the instructions to install AnyDVD HD on your PC.</li>
-<li>Run the keygen file Key.AnyDVDHD.exe and generate a serial key for AnyDVD HD.</li>
-<li>Copy the serial key and paste it into the registration window of AnyDVD HD.</li>
-<li>Click OK to activate AnyDVD HD with serial key.</li>
-</ol>
-<p>Congratulations! You have successfully installed and activated AnyDVD HD with serial key and keygen. You can now enjoy all the features and benefits of this amazing software.</p>
-<h2>Why do you need AnyDVD HD?</h2>
-<p>You might be wondering why you need AnyDVD HD when there are other DVD and Blu-ray ripping software available. The answer is simple: AnyDVD HD offers more than just ripping. It offers a complete solution for watching movies on any device and software without any restrictions or limitations. Here are some reasons why you need AnyDVD HD:</p>
-<h3>Bypass copy protection and region codes on DVDs and Blu-rays</h3>
-<p>One of the main reasons why you need AnyDVD HD is that it can bypass any copy protection and region code on DVDs and Blu-rays. This means that you can make backup copies of your discs for personal use or watch them on any device or software regardless of where they were purchased or where you live. You don't have to worry about damaging your discs or losing them due to theft or natural disasters. You also don't have to buy multiple copies of the same movie for different regions or devices. With AnyDVD HD, you can enjoy your movie collection anywhere and anytime.</p>
-<p>AnyDVD HD 7.2.3.0 Crack Download<br />
-AnyDVD HD 7.2.3.0 Final Patch<br />
-AnyDVD HD 7.2.3.0 License Key Free<br />
-AnyDVD HD 7.2.3.0 Activation Code<br />
-AnyDVD HD 7.2.3.0 Full Version<br />
-AnyDVD HD 7.2.3.0 Keygen Torrent<br />
-AnyDVD HD 7.2.3.0 Serial Number<br />
-AnyDVD HD 7.2.3.0 Registration Code<br />
-AnyDVD HD 7.2.3.0 Product Key<br />
-AnyDVD HD 7.2.3.0 RAR File<br />
-AnyDVD HD 7.2.3.0 Chocolatey Package<br />
-AnyDVD HD 7.2.3.0 Blu-Ray Decrypter<br />
-AnyDVD HD 7.2.3.0 DVD Ripper<br />
-AnyDVD HD 7.2.3.0 Magic File Replacement<br />
-AnyDVD HD 7.2.3.0 UDF 2.5 File Ripper<br />
-AnyDVD HD 7.2.3.0 HDCP Bypass<br />
-AnyDVD HD 7.2.3.0 Region Free<br />
-AnyDVD HD 7.2.3.0 Download Link<br />
-AnyDVD HD 7.2.3.0 TechSpot Review<br />
-AnyDVD HD 7.2.3.0 Softpedia Rating<br />
-How to Install AnyDVD HD 7.2.3 Final<br />
-How to Use AnyDVD HD 7 Keygen<br />
-How to Update AnyDVD HD to Latest Version<br />
-How to Uninstall AnyDVD HD Completely<br />
-How to Backup DVD with AnyDVD HD<br />
-How to Remove Unwanted Features with AnyDVD HD<br />
-How to Remaster Discs with AnyDVD HD Scripts<br />
-How to Watch Blu-Ray Movies with AnyDVD HD and PowerDVD Ultra<br />
-How to Burn Disc Images with AnyDVD HD and PowerISO<br />
-How to Create Custom DVDs with AnyDVD HD and CDBurnerXP<br />
-Best Alternatives to AnyDVD HD for Windows/Mac/Linux<br />
-Compare Features of AnyDVD and AnyDVD HD<br />
-Pros and Cons of Using AnyDVD HD Software<br />
-Tips and Tricks for Getting the Most Out of AnyDVD HD<br />
-Troubleshooting Common Problems with AnyDVD HD<br />
-Customer Testimonials for AnyDVD HD Product<br />
-Discount Coupons for Buying AnyDVD HD License<br />
-Free Trial Download for AnyDVD HD Software<br />
-Frequently Asked Questions about AnyDVD HD Program<br />
-User Guide for AnyDVD HD Application</p>
-<h3>Watch movies on any device and software without restrictions</h3>
-<p>Another reason why you need AnyDVD HD is that it can enable you to watch movies on any device and software without restrictions. This means that you can play your discs on any DVD or Blu-ray player software, such as PowerDVD Ultra, VLC Media Player, Windows Media Player, etc., without having to install additional codecs or drivers. You can also watch your discs on any device that supports video playback, such as smartphones, tablets, laptops, TVs, etc., without having to convert them to different formats or resolutions. You don't have to worry about compatibility issues or quality loss. With AnyDVD HD, you can enjoy your movies on any device and software with ease.</p>
-<h3>Customize and enhance your movie experience with magic file replacement</h3>
-<p>A third reason why you need AnyDVD HD is that it can customize and enhance your movie experience with magic file replacement. This is a unique feature that allows you to remaster any commercial movie disc using simple XML scripts. You can change anything on the disc, such as menus, subtitles, audio tracks, logos, trailers, etc., according to your preferences. You can also add new features or enhancements, such as commentary tracks, deleted scenes, alternative endings, etc., that are not available on the original disc. You don't have to make a copy to hard disk or burn a new disc. You just need to insert the disc into your drive and let AnyDVD HD do its magic. With AnyDVD HD, you can customize and enhance your movie experience as you like.</p>
-<h2>Where to download AnyDVD HD?</h2>
-<p>If you are convinced that you need AnyDVD HD for your movie enjoyment, you might be wondering where to download it. There are several sources where you can get this software legally and safely. Here are some of them:</p>
-<h3>Official website of Redfox</h3>
-<p>The official website of Redfox is https://www.redfox.bz/en/anydvdhd.html . This is where you can find the latest version of AnyDVD HD along with other products from Redfox such as CloneBD, CloneCD, CloneDVD mobile etc. You can also find useful information such as FAQs , forums , news , updates , etc. You can download a free trial version of AnyDVD HD for 21 days from this website. If you want to buy a license for lifetime updates , you can do so for 109 EUR (about 123 USD) from this website. This is the most reliable source for downloading AnyDVD HD.</p>
-<h3>Chocolatey Software package manager</h3>
-<li>Run the keygen file Key.AnyDVDHD.exe and generate a serial key for AnyDVD HD.</li>
-<li>Copy the serial key and paste it into the registration window of AnyDVD HD.</li>
-<li>Click OK to activate AnyDVD HD with serial key.</li>
-</ol>
-<p>Congratulations! You have successfully installed and activated AnyDVD HD (8.6.4.0) with serial key and keygen using TechSpot.</p>
-<h2>Conclusion</h2>
-<p>In this article, we have explained what AnyDVD HD is, how to install and activate it with serial key and keygen, why you need it, and where to download it. We have shown you three sources where you can get this software legally and safely: the official website of Redfox, the Chocolatey Software package manager, and the TechSpot download portal. We have also highlighted some of the features and benefits of AnyDVD HD that make it a great software for movie lovers who want to enjoy their DVD and Blu-ray collection on any device and software without any restrictions or limitations.</p>
-<h3>Summary of the main points</h3>
-<ul>
-<li>AnyDVD HD is a software that can remove any copy protection and region code from DVDs and Blu-rays, allowing you to watch them without any hassle.</li>
-<li>AnyDVD HD can also control the drive speed, adjust the display frequency, decrypt audio CDs, remove unwanted features, launch external applications, and use magic file replacement to customize and enhance your movie experience.</li>
-<li>AnyDVD HD can be installed and activated with serial key and keygen from various sources, such as the official website of Redfox, the Chocolatey Software package manager, and the TechSpot download portal.</li>
-</ul>
-<h3>Call to action and disclaimer</h3>
-<p>If you are interested in trying out AnyDVD HD for yourself, you can download a free trial version for 21 days from the official website of Redfox or buy a license for lifetime updates. You can also use Chocolatey Software or TechSpot to install older or newer versions of AnyDVD HD on your PC. However, please note that we are not affiliated with any of these sources and we are not responsible for any issues or damages that may arise from using them. Please use AnyDVD HD at your own risk and only for personal use. Do not distribute or share your serial key or keygen with anyone else. Respect the intellectual property rights of the movie studios and producers.</p>
-<h2>FAQs</h2>
-<ol>
-<li>What is the difference between AnyDVD and AnyDVD HD?</li>
-<p>AnyDVD is a software that can remove copy protection and region codes from DVDs only. AnyDVD HD is a software that can do the same for DVDs, Blu-rays, and HD DVDs. AnyDVD HD also has additional features for full Blu-ray Disc and HD DVD support.</p>
-<li>Is AnyDVD HD legal?</li>
-<p>AnyDVD HD is legal to use for personal use in most countries. However, some countries may have laws that prohibit circumventing copy protection or region codes on DVDs and Blu-rays. Please check your local laws before using AnyDVD HD.</p>
-<li>Does AnyDVD HD work with Windows 10?</li>
-<p>Yes, AnyDVD HD works with Windows 10 as well as Windows 7, 8, 8.1, Vista, XP, and Server 2003. However, you may need to install .NET Framework 4 or higher if you don't have it already.</p>
-<li>Does AnyDVD HD work with Netflix?</li>
-<p>No, AnyDVD HD does not work with Netflix or other streaming services. AnyDVD HD only works with physical discs that you insert into your drive.</p>
-<li>How can I update AnyDVD HD?</li>
-<p>You can update AnyDVD HD by downloading the latest version from the official website of Redfox or by using Chocolatey Software or TechSpot. You can also enable automatic updates in the settings of AnyDVD HD. However, you may need to generate a new serial key and keygen for each update.</p>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Eurosoft Diagnostics.md
DELETED
@@ -1,14 +0,0 @@
-<br />
-<h1>Eurosoft Diagnostics: The Best PC Diagnostic Software and Tools for Your Business</h1>
-<p>If you are looking for a reliable and comprehensive PC diagnostic software and tools for your business, you should consider Eurosoft Diagnostics. Eurosoft Diagnostics is a leading provider of PC diagnostic software and tools for various sectors such as computer manufacturing, repair, refurbishment, support, and education. Eurosoft Diagnostics helps you to quickly and accurately test and troubleshoot PC hardware issues, reduce costs, improve efficiency, and enhance customer satisfaction.</p>
-<p>Eurosoft Diagnostics offers a range of PC diagnostic software and tools that suit different needs and scenarios. Some of the products include:</p>
-<h2>eurosoft diagnostics</h2><br /><p><b><b>Download</b> ✏ <a href="https://byltly.com/2uKxda">https://byltly.com/2uKxda</a></b></p><br /><br />
-<ul>
-<li><strong>Pc-Check® Diagnostic Suite</strong>: This is a platinum award-winning product that includes Pc-Check UEFI and Pc-Check Windows. Pc-Check UEFI is a self-booting diagnostic software that tests PC hardware components without any operating system interference. It supports the latest UEFI systems and Secure Boot. Pc-Check Windows is a Windows-based diagnostic software that tests PC hardware components using software drivers. It also provides system information, benchmarking, and reporting features.</li>
-<li><strong>PC Builder Test Management Suite</strong>: This is a scalable and automated test solution for PC manufacturing and refurbishment. It allows you to create custom test scripts, manage test stations, monitor test results, generate reports, and integrate with third-party tools such as Windows imaging and data erasure.</li>
-<li><strong>ZeroData® Data Erasure</strong>: This is a secure and certified data erasure solution that wipes out all data from hard drives and solid state drives. It supports various erasure standards such as DoD 5220.22-M, NIST 800-88, and HMG Infosec No.5. It also provides audit-ready reports and certificates of erasure.</li>
-</ul>
-<p>Eurosoft Diagnostics products are trusted by thousands of customers worldwide, including OEMs, ODMs, system builders, system integrators, R&D designers, Microsoft authorized refurbishers, IT asset recovery companies, computer recyclers, break-fix operations, repair depots, field technicians, computer shops, network administrators, IT professionals, managed service providers, help desk staff, training and education institutions, and more.</p>
-<p>If you want to learn more about Eurosoft Diagnostics products and how they can benefit your business, visit <a href="https://www.eurosoft-uk.com/">https://www.eurosoft-uk.com/</a> today.</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Revit 2020.2 [Crack Patch ] Torrent! _TOP_.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Autodesk Revit 2020.2 [Crack Patch ] Torrent!</h2><br /><p><b><b>Download File</b> • <a href="https://imgfil.com/2uxXSB">https://imgfil.com/2uxXSB</a></b></p><br /><br />
-<br />
-Autodesk Revit 2020.2.2.0 Crack is a powerful software for making the ... If such a collaboration tool is required, companies adopt Revit free download full version with crack for checking the ... Auto-update and manipulation. 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Diablo Tactic Cm 03 04 25.md
DELETED
@@ -1,15 +0,0 @@
-
-<h1>How to Master the Diablo Tactic in Championship Manager 03/04</h1>
-<p>Championship Manager 03/04 is a classic football management game that still has a loyal fan base. One of the most popular and effective tactics in the game is the Diablo tactic, a wide 4-1-3-2 formation that produces a lot of goals and wins. Here are some tips on how to use this tactic and dominate your opponents.</p>
-<h2>diablo tactic cm 03 04 25</h2><br /><p><b><b>DOWNLOAD</b> ➡ <a href="https://imgfil.com/2uy0Dv">https://imgfil.com/2uy0Dv</a></b></p><br /><br />
-<ul>
-<li>The Diablo tactic was first shared online by a gamer named 'El Rosso Diablo', who claimed it was unbeatable[^3^]. The tactic is built around a free-roaming central midfielder with a 'forward arrow' right up to the central striker position. This creates a lot of space and movement for the attackers, while the defensive midfielder and the full-backs provide cover for the back four.</li>
-<li>The key attributes for the players in this tactic are pace, stamina, creativity, passing, finishing and off-the-ball. You need fast and fit players who can run all game, create chances and score goals. You also need a solid goalkeeper and defenders who can cope with counter-attacks.</li>
-<li>The best teams to use this tactic with are those with strong midfielders and strikers, such as Manchester United, Arsenal, Real Madrid or Barcelona. You can also use it with lower-league teams if you have some hidden gems or bargain signings. The tactic works well against most formations, except for those with five defenders or three strikers.</li>
-<li>The main weakness of this tactic is that it can leave you vulnerable to long balls over the top or crosses from the wings. You need to adjust your defensive line and marking settings depending on your opponent's style and players. You can also tweak your attacking style and tempo depending on the situation. For example, you can play more direct and fast when you need to score, or more short and slow when you need to keep possession.</li>
-</ul>
-<p>The Diablo tactic is not a cheat, but a clever exploitation of the game's mechanics. It can be very fun and rewarding to use, but it can also be frustrating and boring if you overuse it or face it too often. It is up to you to decide how much you want to rely on it or challenge yourself with other tactics. Either way, Championship Manager 03/04 is a game that never gets old.</p><p>If you want to learn more about the Diablo tactic and other tactics in Championship Manager 03/04, you can check out some online forums and guides. There are many passionate and knowledgeable fans who share their tips and experiences with the game. You can also watch some videos on YouTube or Twitch of players who use this tactic or challenge themselves with different ones.</p>
-<p>Championship Manager 03/04 is a game that has stood the test of time and still has a loyal fan base. It is a game that can make you feel like a real football manager, with all the joys and sorrows that come with it. It is a game that can make you addicted and obsessed, but also entertained and satisfied. It is a game that you should try if you love football and management games.</p>
-<p></p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Download [UPDATED] Solidcam 2013 Full Crack.md
DELETED
@@ -1,40 +0,0 @@
-<h2>download solidcam 2013 full crack</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://imgfil.com/2uy0Gj">https://imgfil.com/2uy0Gj</a></b></p><br /><br />
-
-eXtension (X-STEP) tools for milling and drilling applications.
-
-It also has in the SolidCAM software a dedicated filter for the Open surface format, needed for rapid prototyping.
-
-Uses
-
-The High Speed Roughing module allows you to quickly roughen a mesh for fabrication.
-
-The High Speed Machining module allows you to precisely machine the mesh.
-
-Compatibility
-
-The cutter tools and main geometry of the High Speed Roughing and High Speed Machining modules are designed to work with each other. The modules can be imported and exported in the Open surface format, which is supported by most CAD systems.
-
-References
-
-External links
-
-Official website
-
-3D Drafting & Milling on YouTube
-
-Official community
-
-Official Forum
-
-Category:CAM softwareThe present invention relates to a liquid crystal display device comprising a display screen on which an image is formed by a liquid crystal material.
-
-Recently, as the number of pixels in a display device increases, there is an increasing demand for realizing a large-screen, high-resolution, high-quality, and high-quality color display device. However, in order to realize such a display device, the display screen must have a sufficiently large area, and in this case, a display device with a large area tends to have a large number of pixels and a correspondingly high cost.
-
-As a method of realizing a display device with a large area and a correspondingly low cost, there is known an approach which comprises forming a desired display screen with only a plurality of pixels and connecting the pixels with thin film transistors (TFTs) so as to form a matrix, and a display device by the above-mentioned approach is generally called a flat panel display device. In particular, in a liquid crystal display device, since a display screen is formed by a liquid crystal material and a light transmittance of the liquid crystal material varies depending on an electric field applied to the liquid crystal material, it is possible to display a desired image by changing the electric field.
-
-In the case where the display device is a liquid crystal display device, a desired electric field is applied to a liquid crystal material by using a pair of electrodes sandwiching the liquid crystal material in the display screen, and a pair of electrodes sandwiching a liquid crystal material in the display screen in this way are generally referred to as a pixel electrode and a common electrode, respectively.
-
-As the above-mentioned liquid crystal display device 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator How to Play GameCube and Wii Games on Your PC.md
DELETED
@@ -1,88 +0,0 @@
-<br />
-<h1>How to Download Dolphin Emulator for PC</h1>
-<p>Dolphin emulator is a software that allows you to play games from Nintendo GameCube and Wii consoles on your computer. It is one of the most popular and advanced emulators available, with many features and options to enhance your gaming experience. In this article, I will provide you with some information on how to download, install, and configure dolphin emulator for pc, as well as some of the pros and cons of using it.</p>
-<h2>download dolphin emulator for pc</h2><br /><p><b><b>DOWNLOAD</b> 🗹 <a href="https://urlin.us/2uT2oh">https://urlin.us/2uT2oh</a></b></p><br /><br />
-<h2>Downloading dolphin emulator</h2>
-<p>The first step to use dolphin emulator is to download it from the official website. You can choose between two types of versions: beta versions and development versions. Beta versions are released every month and are more stable and tested than development versions. Development versions are released every time a developer makes a change to the emulator, and may have new features or bug fixes, but also more potential issues. You can download either version from <a href="(^2^)">this page</a>. The Windows versions require the 64-bit Visual C++ redistributable for Visual Studio 2022 to be installed, which you can get from <a href="(^1^)">here</a>.</p>
-<h2>Installing dolphin emulator</h2>
-<p>Once you have downloaded the dolphin emulator file, you need to extract it into a new folder (preferably named after the version) or to replace an existing dolphin setup. You can use any program that can handle ZIP files, such as 7-Zip or WinRAR. After extracting the file, you can run the dolphin.exe file to launch the emulator. You don't need to install anything else.</p>
-<p>If you are using Mac or Linux, you may need to make the file executable before running it. You can do this by right-clicking on the file, choosing Properties, and checking the Execute permission box. Alternatively, you can use the terminal command chmod +x filename.</p>
-<h2>Configuring dolphin emulator</h2>
-<p>Dolphin emulator has two main configuration windows: Dolphin configuration and Graphics settings. You can access them by clicking on the Config and Graphics buttons on the main toolbar. You can also apply settings per game via their GameINI files, which are located in the Dolphin Emulator folder under User/GameSettings.</p>
-<h3>Dolphin configuration</h3>
-<p>The Dolphin configuration window lets you adjust general settings such as emulation speed, dual core mode, audio output, controller input, memory cards, cheats, and more. Here are some recommended settings for optimal performance:</p>
-<p>How to download dolphin emulator for pc windows 10<br />
-Download dolphin emulator for pc 32 bit<br />
-Download dolphin emulator for pc latest version<br />
-Download dolphin emulator for pc with games<br />
-Download dolphin emulator for pc full speed<br />
-Download dolphin emulator for pc reddit<br />
-Download dolphin emulator for pc free<br />
-Download dolphin emulator for pc mac<br />
-Download dolphin emulator for pc linux<br />
-Download dolphin emulator for pc android<br />
-Download dolphin emulator for pc apk<br />
-Download dolphin emulator for pc iso<br />
-Download dolphin emulator for pc roms<br />
-Download dolphin emulator for pc wii<br />
-Download dolphin emulator for pc gamecube<br />
-Download dolphin emulator for pc 4k<br />
-Download dolphin emulator for pc 60fps<br />
-Download dolphin emulator for pc cheats<br />
-Download dolphin emulator for pc bios<br />
-Download dolphin emulator for pc setup<br />
-Download dolphin emulator for pc offline installer<br />
-Download dolphin emulator for pc highly compressed<br />
-Download dolphin emulator for pc no lag<br />
-Download dolphin emulator for pc best settings<br />
-Download dolphin emulator for pc controller support<br />
-Download dolphin emulator for pc keyboard and mouse<br />
-Download dolphin emulator for pc netplay<br />
-Download dolphin emulator for pc multiplayer<br />
-Download dolphin emulator for pc steam<br />
-Download dolphin emulator for pc portable<br />
-Download dolphin emulator for pc zip file<br />
-Download dolphin emulator for pc rar file<br />
-Download dolphin emulator for pc softonic<br />
-Download dolphin emulator for pc uptodown<br />
-Download dolphin emulator for pc filehippo<br />
-Download dolphin emulator for pc ocean of games<br />
-Download dolphin emulator for pc igg games<br />
-Download dolphin emulator for pc skidrow reloaded<br />
-Download dolphin emulator for pc fitgirl repack<br />
-Download dolphin emulator for pc crack only<br />
-Download dolphin emulator for pc patch notes<br />
-Download dolphin emulator for pc system requirements<br />
-Download dolphin emulator for pc tutorial guide<br />
-Download dolphin emulator for pc review rating<br />
-Download dolphin emulator for pc comparison test<br />
-Download dolphin emulator for pc tips tricks hacks<br />
-Download dolphin emulator for pc mods addons plugins <br />
-Download dolphin emulator for pc save data transfer <br />
-Download dolphin emulator for pc error fix solution <br />
-Download dolphin emulator for pc update download</p>
-<ul>
-<li>Enable Dual Core: This option allows you to use two CPU cores for emulation, which can significantly improve speed and compatibility. However, it may also cause some random crashes or glitches in some games.</li>
-<li>Enable Cheats: This option allows you to use cheat codes for various games. You can find cheat codes online or create your own using a hex editor. To enable cheats for a specific game, right-click on it in the game list, choose Properties, and check the Enable Cheats box.</li>
-<li>Audio Backend: This option determines how audio is processed by the emulator. The best option depends on your system and preferences. For Windows users, XAudio2 or Cubeb are recommended. For Mac users, Cubeb or OpenAL are recommended. For Linux users, PulseAudio or ALSA are recommended.</li>
-<li>Controller Settings: This option lets you configure your controller input for different types of emulated controllers: GameCube controller, Wii remote (with or without nunchuk), classic controller, guitar controller, etc. You can use any XInput-compatible gamepad (such as an Xbox One controller) or a USB adapter for original controllers.</li>
-</ul>
-<h3 <h3>How do I update dolphin emulator to the latest version?</h3>
-<p>To update dolphin emulator to the latest version, you can either download the new version from the official website and replace your existing setup, or use the built-in updater feature. To use the updater, click on the Help button on the main toolbar, and choose Check for Updates. If there is a new version available, you can download and install it automatically.</p>
-<h3>How do I add games to dolphin emulator?</h3>
-<p>To add games to dolphin emulator, you need to have the game files in ISO or WBFS format. You can either dump your own games from original discs using a Wii console and a USB loader, or download them from legal sources such as Nintendo eShop. Once you have the game files, you can place them in any folder on your computer, and then add that folder to dolphin emulator's game list. To do that, click on the Config button on the main toolbar, and choose Paths. Then, click on Add and browse to the folder where your games are located. You can also remove or edit any existing paths.</p>
-<h3>How do I play online with dolphin emulator?</h3>
-<p>To play online with dolphin emulator, you have two options: Netplay or Wiimmfi. Netplay is a feature that allows you to play local multiplayer games over the internet with other dolphin users. To use Netplay, you need to have the same game and dolphin version as your partner, and a stable internet connection. You can either host or join a Netplay session by clicking on the Tools button on the main toolbar, and choosing Start Netplay. You can also chat with your partner using the built-in chat window.</p>
-<p>Wiimmfi is a service that allows you to play online multiplayer games that originally used Nintendo Wi-Fi Connection, which was discontinued in 2014. To use Wiimmfi, you need to have a valid Wii console ID and a patched game ISO that supports Wiimmfi. You can find more information on how to get these from <a href="">this page</a>. Once you have them, you can launch the game from dolphin emulator and connect to Wiimmfi as you would normally do on a Wii console.</p>
-<h3>How do I fix common issues with dolphin emulator?</h3>
-<p>Dolphin emulator is a complex software that may encounter some issues depending on your system and game settings. Some of the common issues and their possible solutions are:</p>
-<ul>
-<li>Black screen or crash on startup: This may be caused by a missing or corrupted file in your dolphin setup. Try downloading and extracting a new version of dolphin emulator from the official website.</li>
-<li>Slow or choppy gameplay: This may be caused by insufficient system resources or improper graphics settings. Try lowering the internal resolution, anti-aliasing, anisotropic filtering, or enhancements in the Graphics settings window. You can also enable some hacks in the Hacks section that may improve performance.</li>
-<li>Audio distortion or stuttering: This may be caused by incompatible audio backend or improper audio settings. Try changing the audio backend in the Dolphin configuration window to match your system and preferences. You can also adjust the volume, latency, or DSP emulation mode in the Audio settings window.</li>
|
83 |
-
<li>Controller not working or detected: This may be caused by incorrect controller settings or driver issues. Try configuring your controller input in the Controller settings window and make sure it matches the type of emulated controller you want to use. You can also check if your controller is recognized by your system and update its drivers if necessary.</li>
|
84 |
-
</ul>
|
85 |
-
<h3>Where can I find more information and support for dolphin emulator?</h3>
|
86 |
-
<p>If you want to learn more about dolphin emulator and its features, you can visit <a href="">the official website</a>, where you can find documentation, guides, forums, blogs, videos, and more. You can also join <a href="">the official Discord server</a>, where you can chat with other users and developers, ask questions, share feedback, and get help.</p> 197e85843d<br />
|
87 |
-
<br />
|
88 |
-
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 2048 Mod APK for Android and IOS The Ultimate Puzzle Game.md
DELETED
@@ -1,116 +0,0 @@
<br />
<h1>2048 Mod APK: A Fun and Addictive Puzzle Game</h1>
<p>If you are looking for a simple yet challenging puzzle game that can keep you entertained for hours, you might want to try 2048 mod apk. This is a modified version of the original 2048 game that offers more features and benefits for the players. In this article, we will tell you everything you need to know about 2048 mod apk, including what it is, how to play it, why it is so popular, what its features are, how to download and install it, and what its pros and cons are.</p>
<h2>2048 mod apk</h2><br /><p><b><b>Download</b> ->>> <a href="https://urlin.us/2uT1qw">https://urlin.us/2uT1qw</a></b></p><br /><br />
<h2>What is 2048?</h2>
<p>2048 is a puzzle game that was created by Gabriele Cirulli in 2014. The game is inspired by other similar games such as Threes and 1024. The goal of the game is to slide numbered tiles on a 4x4 grid and combine them to create a tile with the number 2048. The game is over when there are no more moves left or when the player reaches the 2048 tile.</p>
<h3>How to play 2048?</h3>
<p>The game is very easy to play. You just need to swipe your finger on the screen to move the tiles in the direction you want. When two tiles with the same number touch, they merge into one tile with the sum of their numbers. For example, if you swipe left and there are two tiles with the number 2 on the leftmost column, they will merge into one tile with the number 4. You can also use the arrow keys on your keyboard if you are playing on a computer.</p>
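To make the merge rule concrete, here is a minimal Python sketch of how one row collapses on a left swipe. The function name and the flat-list board representation are illustrative assumptions, not taken from any particular 2048 implementation:

```python
def merge_row_left(row):
    """Collapse one row of a 2048 board toward the left.

    `row` is a list of ints where 0 marks an empty cell,
    e.g. [2, 2, 4, 0] -> [4, 4, 0, 0].
    """
    tiles = [v for v in row if v != 0]   # slide: drop the gaps first
    merged = []
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            merged.append(tiles[i] * 2)  # equal neighbours fuse into their sum
            i += 2                       # each tile merges at most once per move
        else:
            merged.append(tiles[i])
            i += 1
    return merged + [0] * (len(row) - len(merged))  # pad back to board width

assert merge_row_left([2, 2, 4, 0]) == [4, 4, 0, 0]
assert merge_row_left([2, 2, 2, 2]) == [4, 4, 0, 0]
```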
<h3>Why is 2048 so popular?</h3>
<p>There are many reasons why 2048 is so popular among puzzle game lovers. Some of them are:</p>
<ul>
<li>The game is simple but challenging. It does not require any special skills or knowledge, but it still tests your logic and strategy.</li>
<li>The game is addictive. It makes you want to play more and more until you reach the highest score possible.</li>
<li>The game is relaxing. It does not have any time limit or pressure, so you can play it at your own pace and enjoy the soothing sound effects and music.</li>
<li>The game is fun. It gives you a sense of satisfaction and achievement when you create a new tile or beat your previous score.</li>
</ul>
<h2>What is 2048 mod apk?</h2>
<p>2048 mod apk is a modified version of the original 2048 game that offers more features and benefits for the players. It is not available on the official app stores, but you can download it from third-party websites such as Apkloli. By downloading and installing 2048 mod apk, you can enjoy the following features:</p>
<p>2048 mod apk unlimited money<br />
2048 mod apk download for android<br />
2048 mod apk latest version<br />
2048 mod apk no ads<br />
2048 mod apk ios<br />
2048 mod apk free download<br />
2048 mod apk hack<br />
2048 mod apk revdl<br />
2048 mod apk apkpure<br />
2048 mod apk rexdl<br />
2048 mod apk offline<br />
2048 mod apk online<br />
2048 mod apk with cheat menu<br />
2048 mod apk unlimited undo<br />
2048 mod apk unlimited coins<br />
2048 mod apk unlimited gems<br />
2048 mod apk unlimited moves<br />
2048 mod apk unlimited time<br />
2048 mod apk unlimited hints<br />
2048 mod apk unlimited stars<br />
2048 mod apk premium<br />
2048 mod apk pro<br />
2048 mod apk plus<br />
2048 mod apk mega<br />
2048 mod apk vip<br />
2048 mod apk original<br />
2048 mod apk classic<br />
2048 mod apk puzzle<br />
2048 mod apk adventure<br />
2048 mod apk challenge<br />
2048 mod apk fun<br />
2048 mod apk cute<br />
2048 mod apk cool<br />
2048 mod apk awesome<br />
2048 mod apk best<br />
2048 mod apk new<br />
2048 mod apk old<br />
2048 mod apk updated<br />
2048 mod apk full version<br />
2048 mod apk cracked version</p>
<h3>Features of 2048 mod apk</h3>
<h4>Unlimited money</h4>
<p>With 2048 mod apk, you can get unlimited money that you can use to buy various items in the game. For example, you can buy hints that can help you make better moves, or boosters that can increase your score or remove unwanted tiles.</p>
<h4>No ads</h4>
<p>Another benefit of 2048 mod apk is that it removes all the annoying ads that interrupt your gameplay. You can play the game without any distractions or interruptions.</p>
<h4>Custom themes</h4>
<p>If you are bored with the default theme of the game, you can change it with 2048 mod apk. You can choose from different themes such as animals, fruits, flowers, colors, emojis, and more. You can also create your own theme by using your own images and sounds.</p>
<h4>Undo and redo moves</h4>
<p>Sometimes, you might regret making a certain move or want to try a different strategy. With 2048 mod apk, you can undo and redo your moves as many times as you want. This can help you avoid mistakes and improve your chances of winning.</p>
<h3>How to download and install 2048 mod apk?</h3>
<p>If you want to download and install 2048 mod apk, you need to follow these simple steps:</p>
<ol>
<li>Go to the website where you can download 2048 mod apk, such as Apkloli. Make sure you choose a reliable and safe source.</li>
<li>Click on the download button and wait for the file to be downloaded on your device.</li>
<li>Go to your device settings and enable the installation of apps from unknown sources. This is necessary because 2048 mod apk is not from the official app stores.</li>
<li>Locate the downloaded file and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to be completed.</li>
<li>Launch the game and enjoy playing 2048 mod apk with all its features.</li>
</ol>
<h2>Pros and cons of 2048 mod apk</h2>
<p>Like any other app, 2048 mod apk has its pros and cons. Here are some of them:</p>
<h3>Pros</h3>
<ul>
<li>It enhances the gameplay experience by adding more features and options.</li>
<li>It allows you to customize the game according to your preferences and tastes.</li>
<li>It eliminates the ads that can disrupt your concentration and enjoyment.</li>
<li>It gives you unlimited money that you can use to buy useful items and boosters.</li>
<li>It lets you undo and redo your moves as much as you want.</li>
</ul>
<h3>Cons</h3>
<ul>
<li>It is not available on the official app stores, so you need to download it from third-party websites that may not be secure or trustworthy.</li>
<li>It may not be compatible with some devices or operating systems.</li>
<li>It may cause some glitches or errors in the game performance or functionality.</li>
<li>It may violate the terms and conditions of the original game developer or publisher.</li>
<li>It may reduce the challenge and difficulty of the game by making it too easy or unfair.</li>
</ul>
<h2>Conclusion</h2>
<p>In conclusion, 2048 mod apk is a fun and addictive puzzle game that offers more features and benefits than the original 2048 game. It allows you to play the game with unlimited money, no ads, custom themes, undo and redo moves, and more. However, it also has some drawbacks, such as being unavailable on the official app stores, causing some technical issues, and violating some rules. Therefore, you should weigh the pros and cons before downloading and installing 2048 mod apk on your device. If you decide to try it, make sure you download it from a reliable and safe source, such as Apkloli. We hope this article has been helpful and informative for you. Thank you for reading!</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some of the most common questions that people ask about 2048 mod apk:</p>
<h4>Q: Is 2048 mod apk free?</h4>
<p>A: Yes, 2048 mod apk is free to download and play. You do not need to pay any money to enjoy its features and benefits.</p>
<h4>Q: Is 2048 mod apk safe?</h4>
<p>A: It depends on where you download it from. Some websites may offer fake or malicious files that can harm your device or steal your data. Therefore, you should always download 2048 mod apk from a reputable and trusted source, such as Apkloli. You should also scan the file with an antivirus program before installing it.</p>
<h4>Q: Is 2048 mod apk legal?</h4>
<p>A: It is not clear whether 2048 mod apk is legal or not. It may depend on the laws and regulations of your country or region. Some countries may allow modifying or hacking apps for personal use, while others may prohibit or penalize such activities. You should also consider the rights and interests of the original game developer or publisher, who may not approve of modifying or distributing their app without their permission or consent. Therefore, you should use 2048 mod apk at your own risk and responsibility.</p>
<h4>Q: How can I update 2048 mod apk?</h4>
<p>A: Since 2048 mod apk is not from the official app stores, you cannot update it automatically or manually through them. You need to download the latest version of 2048 mod apk from the same website where you downloaded the previous version. You should also check the website regularly for any updates or news about 2048 mod apk.</p>
<h4>Q: How can I uninstall 2048 mod apk?</h4>
<p>A: If you want to uninstall 2048 mod apk from your device, you can follow these steps:</p>
<ol>
<li>Go to your device settings and find the apps or applications section.</li>
<li>Find and tap on 2048 mod apk from the list of installed apps.</li>
<li>Tap on the uninstall button and confirm your action.</li>
<li>Wait for the app to be uninstalled from your device.</li>
</ol>
<br />
<br />
spaces/1phancelerku/anime-remove-background/1v1 Battle Challenge Your Friends and Enemies in Epic Duels.md
DELETED
@@ -1,111 +0,0 @@
<h1>What is a 1v1 Battle?</h1>
<p>A 1v1 battle is a type of multiplayer video game that pits two players against each other in a virtual arena. The goal is to eliminate the opponent or score more points than them before the time runs out. 1v1 battles can be played in different genres, such as shooting, fighting, racing, or strategy games.</p>
<p>Some of the benefits of playing 1v1 battle games are:</p>
<h2>1v1 battle</h2><br /><p><b><b>Download Zip</b> ……… <a href="https://jinyurl.com/2uNQry">https://jinyurl.com/2uNQry</a></b></p><br /><br />
<ul>
<li>They test your skills and reflexes in a fast-paced and intense environment.</li>
<li>They allow you to showcase your creativity and style in building, editing, or trickshotting.</li>
<li>They provide you with feedback and motivation to improve your performance.</li>
</ul>
<p>Some of the challenges of playing 1v1 battle games are:</p>
<ul>
<li>They can be frustrating and stressful if you lose or encounter toxic players.</li>
<li>They can be repetitive and boring if you play the same mode or map over and over.</li>
<li>They can be addictive and harmful if you neglect your health or other responsibilities.</li>
</ul>
<p>Some of the popular 1v1 battle games are:</p>
<table>
<tr><th>Game</th><th>Description</th><th>Platform</th></tr>
<tr><td>Fortnite</td><td>A battle royale game that features building, editing, and shooting mechanics.</td><td>PC, console, mobile</td></tr>
<tr><td>Call of Duty</td><td>A first-person shooter game that features various weapons, maps, and modes.</td><td>PC, console, mobile</td></tr>
<tr><td>Mortal Kombat</td><td>A fighting game that features brutal combat, fatalities, and characters.</td><td>PC, console, mobile</td></tr>
<tr><td>Mario Kart</td><td>A racing game that features items, tracks, and characters from the Mario franchise.</td><td>Console, mobile</td></tr>
<tr><td>Chess</td><td>A strategy game that features pieces, moves, and rules based on medieval warfare.</td><td>PC, mobile, board</td></tr>
</table>
<h2>How to Play 1v1 Battle Games?</h2>
<p>The basic controls and mechanics of 1v1 battle games vary depending on the genre and the game. However, some common elements are:</p>
<ul>
<li>You need to use your keyboard or controller to move, aim, shoot, build, or perform other actions.</li>
<li>You need to use your mouse or screen to look around, select items, or interact with objects.</li>
<li>You need to use your headphones or speakers to hear sounds, music, or voice chat.</li>
<li>You need to use your monitor or device to see graphics, text, or menus.</li>
</ul>
<p>Some tips and tricks for winning 1v1 battles are:</p>
<ul>
<li>You need to practice your skills regularly and learn from your mistakes.</li>
<li>You need to study your opponent's behavior and habits and adapt your strategy accordingly.</li>
<li>You need to use your environment and resources wisely and creatively.</li>
<li>You need to communicate and cooperate with your teammate if you are playing in a team mode.</li>
<li>You need to have fun and enjoy the game without getting too angry or arrogant.</li>
</ul>
<p>Some resources for learning and improving your 1v1 battle skills are:</p>
<ul>
<li>You can watch videos or streams of professional or popular players and learn from their techniques and tips.</li>
<li>You can read articles or guides that explain the rules, strategies, and tips of different games and modes.</li>
<li>You can join online communities or forums that discuss, share, or review 1v1 battle games and content.</li>
<li>You can participate in tournaments or events that challenge your skills and reward your achievements.</li>
<li>You can ask for feedback or advice from other players or coaches who have more experience or knowledge.</li>
</ul>
<h2>How to Enjoy 1v1 Battle Games?</h2>
<p>Playing 1v1 battle games can be fun and exciting, but it can also be stressful and boring if you don't know how to enjoy them. Here are some ways to make your 1v1 battle gaming experience more enjoyable:</p>
<ul>
<li>You can try different game modes and features that suit your preferences and goals. For example, you can play casual or ranked matches, solo or duo modes, or custom or random maps.</li>
<li>You can customize and personalize your game settings and appearance to make them more comfortable and appealing. For example, you can adjust your sensitivity, resolution, or sound levels, or change your skin, outfit, or weapon.</li>
<li>You can socialize and compete with other players who share your interest and passion for 1v1 battle games. For example, you can chat, voice call, or message with your friends, opponents, or teammates, or join a clan, guild, or team.</li>
</ul>
<h2>Conclusion</h2>
<p>1v1 battle games are a type of multiplayer video game that pits two players against each other in a virtual arena. They can be played in different genres, such as shooting, fighting, racing, or strategy games. They can test your skills and reflexes, allow you to showcase your creativity and style, and provide you with feedback and motivation. However, they can also be frustrating and stressful, repetitive and boring, and addictive and harmful. Therefore, you need to know how to play and enjoy them properly. You need to practice your skills regularly, study your opponent's behavior, use your environment and resources wisely, communicate and cooperate with your teammate, have fun and enjoy the game, try different game modes and features, customize and personalize your game settings and appearance, and socialize and compete with other players.</p>
<p>If you are interested in playing 1v1 battle games, you can check out some of the popular ones mentioned in this article. You can also watch videos or streams of professional or popular players, read articles or guides that explain the rules, strategies, and tips of different games and modes, join online communities or forums that discuss, share, or review 1v1 battle games and content, participate in tournaments or events that challenge your skills and reward your achievements, or ask for feedback or advice from other players or coaches who have more experience or knowledge.</p>
<p>1v1 battle royale<br />
1v1 build fight<br />
1v1.lol<br />
1v1 battle games<br />
1v1 battle simulator<br />
1v1 battle online<br />
1v1 battle crazy games<br />
1v1 battle codes<br />
1v1 battle fortnite<br />
1v1 battle arena<br />
1v1 battle minecraft<br />
1v1 battle roblox<br />
1v1 battle shooting<br />
1v1 battle unblocked<br />
1v1 battle apk<br />
1v1 battle app<br />
1v1 battle download<br />
1v1 battle hack<br />
1v1 battle mod<br />
1v1 battle pc<br />
1v1 build fight map<br />
1v1 build fight codes<br />
1v1 build fight simulator<br />
1v1 build fight practice<br />
1v1 build fight creative code<br />
1v1 build fight server<br />
1v1 build fight tips<br />
1v1 build fight training<br />
1v1 build fight tutorial<br />
1v1 build fight website<br />
1v.lol game<br />
1v.lol unblocked<br />
1v.lol aim trainer<br />
1v.lol justbuild.lol<br />
1v.lol box fight code<br />
1v.lol party mode code<br />
1v.lol controls pc<br />
1v.lol discord server<br />
1v.lol hacks download<br />
2 player games online free play now fighting games unblocked at school.</p>
<p>Are you ready to enter the 1v1 battle arena? Let us know what you think about 1v1 battle games in the comments below!</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions about 1v1 battle games:</p>
<ol>
<li><b>What are the best 1v1 battle games?</b><br>The answer to this question depends on your personal preference and taste. However, some of the factors that you can consider when choosing a 1v1 battle game are: the genre, the graphics, the gameplay, the difficulty level, the replay value, the popularity, the reviews, the price, and the availability.</li>
<li><b>How do I get better at 1v1 battle games?</b><br>The best way to get better at 1v1 battle games is to practice regularly and learn from your mistakes. You can also watch videos or streams of professional or popular players and learn from their techniques and tips. You can also read articles or guides that explain the rules, strategies, and tips of different games and modes. You can also join online communities or forums that discuss, share, or review 1v1 battle games and content. You can also participate in tournaments or events that challenge your skills and reward your achievements. You can also ask for feedback or advice from other players or coaches who have more experience or knowledge.</li>
<li><b>How do I find opponents for 1v1 battle games?</b><br>There are different ways to find opponents for 1v1 battle games. You can play online matches with random players who are matched with you based on your skill level or region. You can invite your friends or contacts to play with you privately or publicly. You can join a clan, guild, or team that has other members who play the same game as you. You can use a third-party platform or service that connects you with other players who are looking for 1v1 battles.</li>
<li><b>How do I deal with toxic players in 1v1 battle games?</b><br>Toxic players are those who behave in a rude, abusive, or unsportsmanlike manner in 1v1 battle games. They may insult, harass, troll, cheat, or rage quit during or after the game. They may also ruin the game experience for other players by spamming, griefing, hacking, or teaming. To deal with toxic players, you can do the following: ignore them, mute them, block them, report them, or avoid them.</li>
<li><b>How do I balance my time and health when playing 1v1 battle games?</b><br>Playing 1v1 battle games can be fun and rewarding, but it can also be addictive and harmful if you neglect your time and health. To balance your time and health when playing 1v1 battle games, you can do the following: set a limit on how long and how often you play, take breaks and stretch regularly, drink water and eat healthy snacks, sleep well and rest enough, exercise and stay active, socialize and interact with other people, and pursue other hobbies and interests.</li>
</ol>
<br />
<br />
spaces/A00001/bingothoo/src/app/page.tsx
DELETED
@@ -1,15 +0,0 @@
import dynamic from 'next/dynamic'

const DynamicComponentWithNoSSR = dynamic(
  () => import('../components/chat'),
  { ssr: false }
)

export default function IndexPage() {
  return (
    <>
      <div className="loading-spinner" />
      <DynamicComponentWithNoSSR />
    </>
  )
}
spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
DELETED
@@ -1,86 +0,0 @@
from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
import pyworld
import numpy as np


class HarvestF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate F0 across unvoiced (zero-valued) frames.
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # possibly an unnecessary copy here
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def resize_f0(self, x, target_len):
        source = np.array(x)
        source[source < 0.001] = np.nan
        target = np.interp(
            np.arange(0, len(source) * target_len, len(source)) / target_len,
            np.arange(0, len(source)),
            source,
        )
        res = np.nan_to_num(target)
        return res

    def compute_f0(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.harvest(
            wav.astype(np.double),
            fs=self.sampling_rate,  # was fs=self.hop_length, which passed the hop size as the sample rate
            f0_ceil=self.f0_max,
            f0_floor=self.f0_min,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)  # was self.fs, an undefined attribute
        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]

    def compute_f0_uv(self, wav, p_len=None):
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.harvest(
            wav.astype(np.double),
            fs=self.sampling_rate,
            f0_floor=self.f0_min,
            f0_ceil=self.f0_max,
            frame_period=1000 * self.hop_length / self.sampling_rate,
        )
        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
        return self.interpolate_f0(self.resize_f0(f0, p_len))
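For context, a minimal usage sketch for the predictor above, assuming a mono float waveform loaded at the predictor's sample rate. The soundfile loader and the file name are illustrative assumptions, not part of the original module:

```python
import soundfile as sf  # assumed available; any loader returning a float array works

from infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor

wav, sr = sf.read("speech.wav")          # hypothetical input file, mono
predictor = HarvestF0Predictor(hop_length=512, sampling_rate=sr)

f0, vuv = predictor.compute_f0_uv(wav)   # interpolated F0 plus voiced/unvoiced mask
print(f0.shape, vuv.shape)               # both have len(wav) // hop_length frames
```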
spaces/AIConsultant/MusicGen/tests/adversarial/test_discriminators.py
DELETED
@@ -1,67 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import random

import torch

from audiocraft.adversarial.discriminators import (
    MultiPeriodDiscriminator,
    MultiScaleDiscriminator,
    MultiScaleSTFTDiscriminator
)


class TestMultiPeriodDiscriminator:

    def test_mpd_discriminator(self):
        N, C, T = 2, 2, random.randrange(1, 100_000)
        t0 = torch.randn(N, C, T)
        periods = [1, 2, 3]
        mpd = MultiPeriodDiscriminator(periods=periods, in_channels=C)
        logits, fmaps = mpd(t0)

        assert len(logits) == len(periods)
        assert len(fmaps) == len(periods)
        assert all([logit.shape[0] == N and len(logit.shape) == 4 for logit in logits])
        assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])


class TestMultiScaleDiscriminator:

    def test_msd_discriminator(self):
        N, C, T = 2, 2, random.randrange(1, 100_000)
        t0 = torch.randn(N, C, T)

        scale_norms = ['weight_norm', 'weight_norm']
        msd = MultiScaleDiscriminator(scale_norms=scale_norms, in_channels=C)
        logits, fmaps = msd(t0)

        assert len(logits) == len(scale_norms)
        assert len(fmaps) == len(scale_norms)
        assert all([logit.shape[0] == N and len(logit.shape) == 3 for logit in logits])
        assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])


class TestMultiScaleStftDiscriminator:

    def test_msstftd_discriminator(self):
        N, C, T = 2, 2, random.randrange(1, 100_000)
        t0 = torch.randn(N, C, T)

        n_filters = 4
        n_ffts = [128, 256, 64]
        hop_lengths = [32, 64, 16]
        win_lengths = [128, 256, 64]

        msstftd = MultiScaleSTFTDiscriminator(filters=n_filters, n_ffts=n_ffts, hop_lengths=hop_lengths,
                                              win_lengths=win_lengths, in_channels=C)
        logits, fmaps = msstftd(t0)

        assert len(logits) == len(n_ffts)
        assert len(fmaps) == len(n_ffts)
        assert all([logit.shape[0] == N and len(logit.shape) == 4 for logit in logits])
        assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/model_eval_diff.py
DELETED
@@ -1,110 +0,0 @@
import os
import sys
import copy
import pickle

import numpy as np
import pandas as pd
import fire

sys.path.append(os.getcwd())


def coco_score(refs, pred, scorer):
    if scorer.method() == "Bleu":
        scores = np.array([0.0 for n in range(4)])
    else:
        scores = 0
    num_cap_per_audio = len(refs[list(refs.keys())[0]])

    for i in range(num_cap_per_audio):
        if i > 0:
            for key in refs:
                refs[key].insert(0, res[key][0])
        res = {key: [refs[key].pop(), ] for key in refs}
        score, _ = scorer.compute_score(refs, pred)

        if scorer.method() == "Bleu":
            scores += np.array(score)
        else:
            scores += score

    score = scores / num_cap_per_audio

    for key in refs:
        refs[key].insert(0, res[key][0])
    score_allref, _ = scorer.compute_score(refs, pred)
    diff = score_allref - score
    return diff


def embedding_score(refs, pred, scorer):

    num_cap_per_audio = len(refs[list(refs.keys())[0]])
    scores = 0

    for i in range(num_cap_per_audio):
        res = {key: [refs[key][i], ] for key in refs.keys() if len(refs[key]) == num_cap_per_audio}
        refs_i = {key: np.concatenate([refs[key][:i], refs[key][i + 1:]]) for key in refs.keys() if len(refs[key]) == num_cap_per_audio}
        score, _ = scorer.compute_score(refs_i, pred)

        scores += score

    score = scores / num_cap_per_audio

    score_allref, _ = scorer.compute_score(refs, pred)
    diff = score_allref - score
    return diff


def main(output_file, eval_caption_file, eval_embedding_file, output, zh=False):
    output_df = pd.read_json(output_file)
    output_df["key"] = output_df["filename"].apply(lambda x: os.path.splitext(os.path.basename(x))[0])
    pred = output_df.groupby("key")["tokens"].apply(list).to_dict()

    label_df = pd.read_json(eval_caption_file)
    if zh:
        refs = label_df.groupby("key")["tokens"].apply(list).to_dict()
    else:
        refs = label_df.groupby("key")["caption"].apply(list).to_dict()

    from pycocoevalcap.bleu.bleu import Bleu
    from pycocoevalcap.cider.cider import Cider
    from pycocoevalcap.rouge.rouge import Rouge

    scorer = Bleu(zh=zh)
    bleu_scores = coco_score(copy.deepcopy(refs), pred, scorer)
    scorer = Cider(zh=zh)
    cider_score = coco_score(copy.deepcopy(refs), pred, scorer)
    scorer = Rouge(zh=zh)
    rouge_score = coco_score(copy.deepcopy(refs), pred, scorer)

    if not zh:
        from pycocoevalcap.meteor.meteor import Meteor
        scorer = Meteor()
        meteor_score = coco_score(copy.deepcopy(refs), pred, scorer)

        from pycocoevalcap.spice.spice import Spice
        scorer = Spice()
        spice_score = coco_score(copy.deepcopy(refs), pred, scorer)

    # from audiocaptioneval.sentbert.sentencebert import SentenceBert
    # scorer = SentenceBert(zh=zh)
    # with open(eval_embedding_file, "rb") as f:
    #     ref_embeddings = pickle.load(f)

    # sent_bert = embedding_score(ref_embeddings, pred, scorer)

    with open(output, "w") as f:
        f.write("Diff:\n")
        for n in range(4):
            f.write("BLEU-{}: {:6.3f}\n".format(n + 1, bleu_scores[n]))
        f.write("CIDEr: {:6.3f}\n".format(cider_score))
        f.write("ROUGE: {:6.3f}\n".format(rouge_score))
        if not zh:
            f.write("Meteor: {:6.3f}\n".format(meteor_score))
            f.write("SPICE: {:6.3f}\n".format(spice_score))
        # f.write("SentenceBert: {:6.3f}\n".format(sent_bert))


if __name__ == "__main__":
    fire.Fire(main)
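Since the script exposes `main` through `fire.Fire`, it can be called directly from Python as well as from the command line. A hedged invocation sketch follows; the import path is inferred from the file's location in the repository and the file names are placeholders, not paths that appear anywhere in the source:

```python
# Roughly equivalent to:
#   python model_eval_diff.py --output_file ... --eval_caption_file ... --output ...
from audio_to_text.captioning.utils.model_eval_diff import main  # assumed import path

main(
    output_file="experiments/output.json",           # model predictions, one row per clip
    eval_caption_file="data/eval_captions.json",     # reference captions grouped by key
    eval_embedding_file="data/eval_embeddings.pkl",  # only used by the commented-out SentenceBert block
    output="experiments/diff_scores.txt",            # where the leave-one-out score diffs are written
    zh=False,
)
```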
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/lr_scheduler.py
DELETED
@@ -1,98 +0,0 @@
import numpy as np


class LambdaWarmUpCosineScheduler:
    """
    note: use with a base_lr of 1.0
    """
    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
        if n < self.lr_warm_up_steps:
            lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
            self.last_lr = lr
            return lr
        else:
            t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            t = min(t, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (1 + np.cos(t * np.pi))
            self.last_lr = lr
            return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaWarmUpCosineScheduler2:
    """
    supports repeated iterations, configurable via lists
    note: use with a base_lr of 1.0.
    """
    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        interval = 0
        for cl in self.cum_cycles[1:]:
            if n <= cl:
                return interval
            interval += 1

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                      f"current cycle {cycle}")
        if n < self.lr_warm_up_steps[cycle]:
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
            self.last_f = f
            return f
        else:
            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (1 + np.cos(t * np.pi))
            self.last_f = f
            return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                      f"current cycle {cycle}")

        if n < self.lr_warm_up_steps[cycle]:
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
            self.last_f = f
            return f
        else:
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
            self.last_f = f
            return f
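These schedulers return a multiplier for the base learning rate (hence "use with a base_lr of 1.0"), so they plug directly into PyTorch's `LambdaLR`. A minimal sketch follows; the stand-in model, optimizer, and the warm-up and decay lengths are illustrative values, not taken from any config in this repository:

```python
import torch

model = torch.nn.Linear(10, 10)                       # stand-in model
opt = torch.optim.AdamW(model.parameters(), lr=1e-4)  # base lr that the multiplier scales

sched_fn = LambdaWarmUpCosineScheduler(
    warm_up_steps=1000, lr_min=0.01, lr_max=1.0, lr_start=0.0, max_decay_steps=100_000
)
scheduler = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=sched_fn)

for step in range(5):
    opt.step()        # training step elided
    scheduler.step()  # advances n; lr = base_lr * sched_fn(n)
```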
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/model.py
DELETED
@@ -1,77 +0,0 @@
import torch
import torch.nn as nn


class VGGishish(nn.Module):

    def __init__(self, conv_layers, use_bn, num_classes):
        '''
        Mostly from
        https://pytorch.org/vision/0.8/_modules/torchvision/models/vgg.html
        '''
        super().__init__()
        layers = []
        in_channels = 1

        # a list of channels with 'MP' (maxpool) from config
        for v in conv_layers:
            if v == 'MP':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, stride=1)
                if use_bn:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        self.features = nn.Sequential(*layers)

        self.avgpool = nn.AdaptiveAvgPool2d((5, 10))

        self.flatten = nn.Flatten()
        self.classifier = nn.Sequential(
            nn.Linear(512 * 5 * 10, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, num_classes)
        )

        # weight init
        self.reset_parameters()

    def forward(self, x):
        # adding channel dim for conv2d: (B, 1, F, T) <- (B, F, T)
        x = x.unsqueeze(1)
        # backbone: (B, 512, 5, 53) <- (B, 1, 80, 848) with the four 'MP' stages
        x = self.features(x)
        # adaptive avg pooling: (B, 512, 5, 10) <- (B, 512, 5, 53) – if no MP is used at the end of VGG
        x = self.avgpool(x)
        # flatten
        x = self.flatten(x)
        # classify
        x = self.classifier(x)
        return x

    def reset_parameters(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


if __name__ == '__main__':
    num_classes = 309
    inputs = torch.rand(3, 80, 848)
    conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
    # conv_layers = [64, 'MP', 128, 'MP', 256, 256, 'MP', 512, 512, 'MP']
    model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes)
    outputs = model(inputs)
    print(outputs.shape)
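As a quick sanity check on the shapes in the comments above, the flattened feature that feeds the classifier head works out as follows. This is pure arithmetic over the demo's 80x848 mel input and the default conv config, with no new assumptions:

```python
# Four 'MP' stages each halve both spatial dims: 80 / 2**4 = 5, 848 / 2**4 = 53.
assert 80 // 2**4 == 5 and 848 // 2**4 == 53
# AdaptiveAvgPool2d((5, 10)) then pins the time axis at 10 regardless of clip length,
# so the flattened vector has 512 * 5 * 10 = 25_600 features,
# matching the classifier's first layer, nn.Linear(512 * 5 * 10, 4096).
assert 512 * 5 * 10 == 25_600
```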
spaces/AIZero2HeroBootcamp/MultiPDF-QA-ChatGPT-Langchain/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: MultiPDF QA ChatGPT Langchain
emoji: 🏃
colorFrom: pink
colorTo: indigo
sdk: streamlit
sdk_version: 1.21.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ASJMO/freegpt/g4f/Provider/__init__.py
DELETED
@@ -1,36 +0,0 @@
from . import Provider
from .Providers import (
    Aichat,
    Ails,
    AiService,
    Bard,
    Better,
    Bing,
    ChatFree,
    ChatgptAi,
    ChatgptLogin,  # was imported twice in the original; the duplicate is dropped
    DeepAi,
    Easychat,
    Ezcht,
    Fakeopen,
    Forefront,
    GetGpt,
    Gravityengine,
    H2o,
    hteyun,
    Liaobots,
    Lockchat,
    Mishalsgpt,
    Phind,
    Theb,
    Vercel,
    Weuseing,
    Xiaor,
    Yqcloud,
    You,
    Zeabur,
    Wewordle,
)

Palm = Bard
spaces/Accel/media-converter/styles.css
DELETED
@@ -1,9 +0,0 @@
#outputtext {
  color: green;
}
#acontrast {
  width: 50%;
}
#button {
  width: 30%;
}
spaces/Adapter/T2I-Adapter/ldm/util.py
DELETED
@@ -1,200 +0,0 @@
import importlib
import math

import cv2
import torch
import numpy as np

import os
from safetensors.torch import load_file

from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont


def log_txt_as_img(wh, xc, size=10):
    # wh a tuple of (width, height)
    # xc a list of captions to plot
    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        font = ImageFont.truetype('assets/DejaVuSans.ttf', size=size)
        nc = int(40 * (wh[0] / 256))
        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))

        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Can't encode string for logging. Skipping.")

        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts


def ismap(x):
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] > 3)


def isimage(x):
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))


def count_params(model, verbose=False):
    total_params = sum(p.numel() for p in model.parameters())
    if verbose:
        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
    return total_params


def instantiate_from_config(config):
    if "target" not in config:
        if config == '__is_first_stage__':
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


checkpoint_dict_replacements = {
    'cond_stage_model.transformer.text_model.embeddings.': 'cond_stage_model.transformer.embeddings.',
    'cond_stage_model.transformer.text_model.encoder.': 'cond_stage_model.transformer.encoder.',
    'cond_stage_model.transformer.text_model.final_layer_norm.': 'cond_stage_model.transformer.final_layer_norm.',
}


def transform_checkpoint_dict_key(k):
    for text, replacement in checkpoint_dict_replacements.items():
        if k.startswith(text):
            k = replacement + k[len(text):]

    return k


def get_state_dict_from_checkpoint(pl_sd):
    pl_sd = pl_sd.pop("state_dict", pl_sd)
    pl_sd.pop("state_dict", None)

    sd = {}
    for k, v in pl_sd.items():
        new_key = transform_checkpoint_dict_key(k)

        if new_key is not None:
            sd[new_key] = v

    pl_sd.clear()
    pl_sd.update(sd)

    return pl_sd


def read_state_dict(checkpoint_file, print_global_state=False):
    _, extension = os.path.splitext(checkpoint_file)
    if extension.lower() == ".safetensors":
        pl_sd = load_file(checkpoint_file, device='cpu')
    else:
        pl_sd = torch.load(checkpoint_file, map_location='cpu')

    if print_global_state and "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")

    sd = get_state_dict_from_checkpoint(pl_sd)
    return sd


def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False):
    print(f"Loading model from {ckpt}")
    sd = read_state_dict(ckpt)
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    if 'anything' in ckpt.lower() and vae_ckpt is None:
        vae_ckpt = 'models/anything-v4.0.vae.pt'

    if vae_ckpt is not None and vae_ckpt != 'None':
        print(f"Loading vae model from {vae_ckpt}")
        vae_sd = torch.load(vae_ckpt, map_location="cpu")
        if "global_step" in vae_sd:
            print(f"Global Step: {vae_sd['global_step']}")
        sd = vae_sd["state_dict"]
        m, u = model.first_stage_model.load_state_dict(sd, strict=False)
        if len(m) > 0 and verbose:
            print("missing keys:")
            print(m)
        if len(u) > 0 and verbose:
            print("unexpected keys:")
            print(u)

    model.cuda()
    model.eval()
    return model


def resize_numpy_image(image, max_resolution=512 * 512, resize_short_edge=None):
    h, w = image.shape[:2]
    if resize_short_edge is not None:
        k = resize_short_edge / min(h, w)
    else:
        k = max_resolution / (h * w)
    k = k**0.5
    h = int(np.round(h * k / 64)) * 64
    w = int(np.round(w * k / 64)) * 64
    image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
    return image
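To make the rounding in `resize_numpy_image` concrete, here is a worked example for a 720x1280 input under the default 512*512 pixel budget. The input size is just an illustration:

```python
import numpy as np

h, w = 720, 1280
k = (512 * 512 / (h * w)) ** 0.5        # 0.5333...: scale factor that meets the pixel budget
new_h = int(np.round(h * k / 64)) * 64  # 720 * k = 384.0  -> already a multiple of 64
new_w = int(np.round(w * k / 64)) * 64  # 1280 * k = 682.7 -> rounds up to 11 * 64 = 704
assert (new_h, new_w) == (384, 704)     # both dims snapped to a 64-pixel grid
```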
# make uc and prompt shapes match via padding for long prompts
null_cond = None

def fix_cond_shapes(model, prompt_condition, uc):
    if uc is None:
        return prompt_condition, uc
    global null_cond
    if null_cond is None:
        null_cond = model.get_learned_conditioning([""])
    while prompt_condition.shape[1] > uc.shape[1]:
        uc = torch.cat((uc, null_cond.repeat((uc.shape[0], 1, 1))), axis=1)
    while prompt_condition.shape[1] < uc.shape[1]:
        prompt_condition = torch.cat((prompt_condition, null_cond.repeat((prompt_condition.shape[0], 1, 1))), axis=1)
    return prompt_condition, uc
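A minimal sketch of the `instantiate_from_config` convention used throughout this file: the config is a mapping (usually loaded from an OmegaConf YAML) whose `target` names a dotted import path and whose `params` become constructor kwargs. The target class below is a stand-in for illustration, not one that appears in the actual configs:

```python
from ldm.util import instantiate_from_config

config = {
    "target": "torch.nn.Linear",  # dotted path resolved by get_obj_from_str
    "params": {"in_features": 4, "out_features": 2},
}
layer = instantiate_from_config(config)  # same as torch.nn.Linear(in_features=4, out_features=2)
print(type(layer).__name__)              # -> Linear
```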
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Factory.js
DELETED
@@ -1,13 +0,0 @@
import Ball from './Ball.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('ball', function (config) {
    var gameObject = new Ball(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.Spinner.Ball', Ball);

export default Ball;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/childbehaviors/Fade.js
DELETED
@@ -1,36 +0,0 @@
import IndexOf from '../../../../plugins/utils/object/IndexOf.js';
import { WaitComplete } from '../../utils/WaitEvent.js';

export default {
    fadeChild(child, duration, alpha) {
        var key;
        if (typeof (child) === 'string') {
            key = child;
            child = this.sizerChildren[key];
        } else {
            key = IndexOf(this.sizerChildren, child);
        }
        if (duration === undefined) {
            duration = 500;
        }
        if (alpha === undefined) {
            alpha = (this.currentChildKey === key) ? 1 : 0;
        }

        child.fadeIn(duration, { start: child.alpha, end: alpha });
        return this;
    },

    fadeChildPromise(child, duration, alpha) {
        if (typeof (child) === 'string') {
            // resolve the string key to the actual child game object
            child = this.sizerChildren[child];
        }
        this.fadeChild(child, duration, alpha);

        if (child._fade) {
            return WaitComplete(child._fade);
        } else {
            return Promise.resolve();
        }
    }
}
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/instruct_pix2pix/README_sdxl.md
DELETED
@@ -1,148 +0,0 @@
# InstructPix2Pix SDXL training example

***This is based on the original InstructPix2Pix training example.***

[Stable Diffusion XL](https://huggingface.co/papers/2307.01952) (or SDXL) is the latest image generation model that is tailored towards more photorealistic outputs with more detailed imagery and composition compared to previous SD models. It leverages a three times larger UNet backbone. The increase in model parameters is mainly due to more attention blocks and a larger cross-attention context, as SDXL uses a second text encoder.

The `train_instruct_pix2pix_xl.py` script shows how to implement the training procedure and adapt it for Stable Diffusion XL.

***Disclaimer: Even though `train_instruct_pix2pix_xl.py` implements the InstructPix2Pix training procedure while being faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.***

## Running locally with PyTorch

### Installing the dependencies

Refer to the original InstructPix2Pix training example for installing the dependencies.

You will also need to get access to SDXL by filling out the [form](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0).

### Toy example

As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. The dataset is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper.

Configure environment variables such as the dataset identifier and the Stable Diffusion checkpoint:

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```

Now, we can launch training:

```bash
python train_instruct_pix2pix_xl.py \
    --pretrained_model_name_or_path=$MODEL_NAME \
    --dataset_name=$DATASET_ID \
    --enable_xformers_memory_efficient_attention \
    --resolution=256 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --seed=42
```

Additionally, we support performing validation inference to monitor training progress with Weights and Biases. You can enable this feature with `report_to="wandb"`:

```bash
python train_instruct_pix2pix_xl.py \
    --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \
    --dataset_name=$DATASET_ID \
    --use_ema \
    --enable_xformers_memory_efficient_attention \
    --resolution=512 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --seed=42 \
    --val_image_url_or_path="https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" \
    --validation_prompt="make it in japan" \
    --report_to=wandb
```

We recommend this type of validation as it can be useful for model debugging. Note that you need `wandb` installed to use this. You can install `wandb` by running `pip install wandb`.

[Here](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq), you can find an example training run that includes some validation samples and the training hyperparameters.

***Note: In the original paper, the authors observed that even when the model is trained with an image resolution of 256x256, it generalizes well to bigger resolutions such as 512x512. This is likely because of the larger dataset they used during training.***

## Training with multiple GPUs

`accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:

```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix_xl.py \
    --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \
    --dataset_name=$DATASET_ID \
    --use_ema \
    --enable_xformers_memory_efficient_attention \
    --resolution=512 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --seed=42 \
    --val_image_url_or_path="https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" \
    --validation_prompt="make it in japan" \
    --report_to=wandb
```

## Inference

Once training is complete, we can perform inference:

```python
import PIL.Image
import PIL.ImageOps
import requests
import torch
from diffusers import StableDiffusionXLInstructPix2PixPipeline

model_id = "your_model_id"  # <- replace this
pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)

url = "https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg"


def download_image(url):
    image = PIL.Image.open(requests.get(url, stream=True).raw)
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


image = download_image(url)
prompt = "make it Japan"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10

edited_image = pipe(
    prompt,
    image=image,
    num_inference_steps=num_inference_steps,
    image_guidance_scale=image_guidance_scale,
    guidance_scale=guidance_scale,
    generator=generator,
).images[0]
edited_image.save("edited_image.png")
```

We encourage you to play with the following three parameters to control speed and quality during inference:

* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`

Particularly, `image_guidance_scale` and `guidance_scale` can have a profound impact on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example).
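To see this interplay concretely, a small sweep is an easy experiment. The sketch below is hypothetical: it reuses `pipe` and `image` from the inference snippet above and simply varies the two scales over an illustrative grid (not tuned recommendations):

```python
for image_guidance_scale in (1.0, 1.5, 2.0):
    for guidance_scale in (5, 10, 15):
        edited = pipe(
            "make it Japan",
            image=image,
            num_inference_steps=20,
            image_guidance_scale=image_guidance_scale,
            guidance_scale=guidance_scale,
            # fresh generator per call so every grid cell starts from the same seed
            generator=torch.Generator("cuda").manual_seed(0),
        ).images[0]
        edited.save(f"edited_ig{image_guidance_scale}_g{guidance_scale}.png")
```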
If you're looking for some interesting ways to use the InstructPix2Pix training methodology, we welcome you to check out this blog post: [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/testing_utils.py
DELETED
@@ -1,684 +0,0 @@
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import tempfile
import unittest
import urllib.parse
from contextlib import contextmanager
from distutils.util import strtobool
from io import BytesIO, StringIO
from pathlib import Path
from typing import List, Optional, Union

import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
from packaging import version

from .import_utils import (
    BACKENDS_MAPPING,
    is_compel_available,
    is_flax_available,
    is_note_seq_available,
    is_onnx_available,
    is_opencv_available,
    is_torch_available,
    is_torch_version,
    is_torchsde_available,
)
from .logging import get_logger


global_rng = random.Random()

logger = get_logger(__name__)

if is_torch_available():
    import torch

    if "DIFFUSERS_TEST_DEVICE" in os.environ:
        torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]

        available_backends = ["cuda", "cpu", "mps"]
        if torch_device not in available_backends:
            raise ValueError(
                f"unknown torch backend for diffusers tests: {torch_device}. Available backends are:"
                f" {available_backends}"
            )
        logger.info(f"torch_device overridden to {torch_device}")
    else:
        torch_device = "cuda" if torch.cuda.is_available() else "cpu"
        is_torch_higher_equal_than_1_12 = version.parse(
            version.parse(torch.__version__).base_version
        ) >= version.parse("1.12")

        if is_torch_higher_equal_than_1_12:
            # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
            mps_backend_registered = hasattr(torch.backends, "mps")
            torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device


def torch_all_close(a, b, *args, **kwargs):
    if not is_torch_available():
        raise ValueError("PyTorch needs to be installed to use this function.")
    if not torch.allclose(a, b, *args, **kwargs):
        assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
    return True


def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"):
    test_name = os.environ.get("PYTEST_CURRENT_TEST")
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)

    tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
    # format is usually:
    # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
    output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
    test_file, test_class, test_fn = test_name.split("::")
    test_fn = test_fn.split()[0]
    with open(filename, "a") as f:
        print(";".join([test_file, test_class, test_fn, output_str]), file=f)


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path
    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally, `append_path` is
        joined after the `tests` dir if the former is provided.
    """
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return os.path.join(tests_dir, append_path)
    else:
        return tests_dir


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def nightly(test_case):
    """
    Decorator marking a test that runs nightly in the diffusers CI.

    Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)


def require_torch(test_case):
    """
    Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
    """
    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_torch_2(test_case):
    """
    Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
    """
    return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
        test_case
    )


def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
        test_case
    )


def skip_mps(test_case):
    """Decorator marking a test to skip if torch_device is 'mps'"""
    return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)


def require_flax(test_case):
    """
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed.
    """
    return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)


def require_compel(test_case):
    """
    Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when
    the library is not installed.
    """
    return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case)


def require_onnxruntime(test_case):
    """
    Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed.
    """
    return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case)


def require_note_seq(test_case):
    """
    Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed.
    """
    return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)


def require_torchsde(test_case):
    """
    Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
    """
    return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case)


def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
    if isinstance(arry, str):
        # local_path = "/home/patrick_huggingface_co/"
        if local_path is not None:
            # local_path can be passed to correct images of tests
            return os.path.join(local_path, "/".join([arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]]))
        elif arry.startswith("http://") or arry.startswith("https://"):
            response = requests.get(arry)
            response.raise_for_status()
            arry = np.load(BytesIO(response.content))
        elif os.path.isfile(arry):
            arry = np.load(arry)
        else:
            raise ValueError(
                f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path"
            )
    elif isinstance(arry, np.ndarray):
        pass
    else:
        raise ValueError(
            "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a"
            " ndarray."
        )

    return arry


def load_pt(url: str):
    response = requests.get(url)
    response.raise_for_status()
    arry = torch.load(BytesIO(response.content))
    return arry


def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
    Returns:
        `PIL.Image.Image`:
            A PIL Image.
    """
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = PIL.Image.open(requests.get(image, stream=True).raw)
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            raise ValueError(
                f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
            )
    elif isinstance(image, PIL.Image.Image):
        image = image
    else:
        raise ValueError(
            "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
        )
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


def preprocess_image(image: PIL.Image.Image, batch_size: int):
    w, h = image.size
    w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str:
    if output_gif_path is None:
        output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name

    image[0].save(
        output_gif_path,
        save_all=True,
        append_images=image[1:],
        optimize=False,
        duration=100,
        loop=0,
    )
    return output_gif_path


@contextmanager
def buffered_writer(raw_f):
    f = io.BufferedWriter(raw_f)
    yield f
    f.flush()


def export_to_ply(mesh, output_ply_path: str = None):
    """
    Write a PLY file for a mesh.
    """
    if output_ply_path is None:
        output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name

    coords = mesh.verts.detach().cpu().numpy()
    faces = mesh.faces.cpu().numpy()
    rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)

    with buffered_writer(open(output_ply_path, "wb")) as f:
        f.write(b"ply\n")
        f.write(b"format binary_little_endian 1.0\n")
        f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
        f.write(b"property float x\n")
        f.write(b"property float y\n")
        f.write(b"property float z\n")
        if rgb is not None:
            f.write(b"property uchar red\n")
            f.write(b"property uchar green\n")
            f.write(b"property uchar blue\n")
        if faces is not None:
            f.write(bytes(f"element face {len(faces)}\n", "ascii"))
            f.write(b"property list uchar int vertex_index\n")
        f.write(b"end_header\n")

        if rgb is not None:
            rgb = (rgb * 255.499).round().astype(int)
            vertices = [
                (*coord, *rgb)
                for coord, rgb in zip(
                    coords.tolist(),
                    rgb.tolist(),
                )
            ]
            format = struct.Struct("<3f3B")
            for item in vertices:
                f.write(format.pack(*item))
        else:
            format = struct.Struct("<3f")
            for vertex in coords.tolist():
                f.write(format.pack(*vertex))

        if faces is not None:
            format = struct.Struct("<B3I")
            for tri in faces.tolist():
                f.write(format.pack(len(tri), *tri))

    return output_ply_path


def export_to_obj(mesh, output_obj_path: str = None):
    if output_obj_path is None:
        output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name

    verts = mesh.verts.detach().cpu().numpy()
    faces = mesh.faces.cpu().numpy()

    vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
    vertices = [
        "{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
    ]

    faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()]

    combined_data = ["v " + vertex for vertex in vertices] + faces

    with open(output_obj_path, "w") as f:
        f.writelines("\n".join(combined_data))

    # return the path so the default temp-file name isn't lost to the caller
    return output_obj_path


def export_to_video(video_frames: List[np.ndarray], output_video_path: str = None) -> str:
    if is_opencv_available():
        import cv2
    else:
        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    h, w, c = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h))
    for i in range(len(video_frames)):
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    return output_video_path


def load_hf_numpy(path) -> np.ndarray:
    if not (path.startswith("http://") or path.startswith("https://")):
        path = os.path.join(
            "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main", urllib.parse.quote(path)
        )

    return load_numpy(path)


# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    """
    This function is to be called from `conftest.py` via a `pytest_addoption` wrapper that has to be defined there.

    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
    option.

    """
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1


def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of the test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.

    This function emulates the --durations and -rA pytest arguments.

    This function is to be called from `conftest.py` via a `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
    changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
    plugins and interfere.

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = "reports"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{id}_{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())
    with open(report_files["passes"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle


# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """
    Runs a test in a subprocess. In particular, this can avoid (GPU) memory issues.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
            variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
    """
    if timeout is None:
        timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))

    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)

    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)

    # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
    input_queue.put(inputs, timeout=timeout)

    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)

    if results["error"] is not None:
        test_case.fail(f'{results["error"]}')


class CaptureLogger:
    """
    Context manager to capture `logging` streams.

    Args:
        logger: `logging` logger object
    Returns:
        The captured output is available via `self.out`
    Example:
    ```python
    >>> from diffusers import logging
    >>> from diffusers.testing_utils import CaptureLogger

    >>> msg = "Testing 1, 2, 3"
    >>> logging.set_verbosity_info()
    >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py")
    >>> with CaptureLogger(logger) as cl:
    ...     logger.info(msg)
    >>> assert cl.out == msg + "\n"
    ```
    """

    def __init__(self, logger):
        self.logger = logger
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)
        self.out = ""

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f"captured: {self.out}\n"


def enable_full_determinism():
    """
    Helper function for reproducible behavior during distributed training. See
    - https://pytorch.org/docs/stable/notes/randomness.html for pytorch
    """
    # Enable PyTorch deterministic mode. This potentially requires either the environment
    # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
    # depending on the CUDA version, so we set them both here
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
    torch.use_deterministic_algorithms(True)

    # Enable CUDNN deterministic mode
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cuda.matmul.allow_tf32 = False


def disable_full_determinism():
    os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ""
    torch.use_deterministic_algorithms(False)
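As a usage note for `run_test_in_subprocess` above: a compatible `target_func` reads its inputs from the first queue, reports an `{"error": ...}` dict on the second, and then joins it so the parent can acknowledge. The sketch below is hypothetical (the worker and test names are made up), but the queue protocol follows the function body:

```python
import traceback
import unittest


def _check_addition(in_queue, out_queue, timeout):
    # Runs in the spawned child process.
    error = None
    try:
        inputs = in_queue.get(timeout=timeout)
        assert inputs["a"] + inputs["b"] == 3  # the real test logic goes here
    except Exception:
        error = traceback.format_exc()
    # The parent get()s this dict and calls task_done(); join() waits for that ack.
    out_queue.put({"error": error}, timeout=timeout)
    out_queue.join()


class AdditionTest(unittest.TestCase):
    def test_in_subprocess(self):
        run_test_in_subprocess(self, _check_addition, inputs={"a": 1, "b": 2})
```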
spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/README.md
DELETED
@@ -1,28 +0,0 @@
# Sparse R-CNN: End-to-End Object Detection with Learnable Proposals

## Introduction

[ALGORITHM]

```
@article{peize2020sparse,
  title   = {{SparseR-CNN}: End-to-End Object Detection with Learnable Proposals},
  author  = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo},
  journal = {arXiv preprint arXiv:2011.12450},
  year    = {2020}
}
```

## Results and Models

| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | Config | Download |
|:------------:|:---------:|:-------:|:-------:|:-------------------:|:-----------:|:----------:|:------:|:------:|:--------:|
| Sparse R-CNN | R-50-FPN | pytorch | 1x | 100 | False | False | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.log.json) |
| Sparse R-CNN | R-50-FPN | pytorch | 3x | 100 | True | False | 42.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.log.json) |
| Sparse R-CNN | R-50-FPN | pytorch | 3x | 300 | True | True | 45.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.log.json) |
| Sparse R-CNN | R-101-FPN | pytorch | 3x | 100 | True | False | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.log.json) |
| Sparse R-CNN | R-101-FPN | pytorch | 3x | 300 | True | True | 46.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.log.json) |

### Notes

We observe about 0.3 AP of noise, especially when using ResNet-101 as the backbone.
spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/builder.py
DELETED
@@ -1,7 +0,0 @@
from mmcv.utils import Registry, build_from_cfg

ANCHOR_GENERATORS = Registry('Anchor generator')


def build_anchor_generator(cfg, default_args=None):
    return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args)
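For context on how this registry is consumed: configs refer to generators by `type`, and `build_anchor_generator` instantiates the registered class. A sketch, assuming a class such as mmdet's `AnchorGenerator` has already been registered via `@ANCHOR_GENERATORS.register_module()` elsewhere:

```python
# Hypothetical config; the kwargs must match the registered class's __init__.
cfg = dict(
    type='AnchorGenerator',
    scales=[8],
    ratios=[0.5, 1.0, 2.0],
    strides=[4, 8, 16, 32, 64],
)
anchor_generator = build_anchor_generator(cfg)
```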
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/reppoints_head.py
DELETED
@@ -1,763 +0,0 @@
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.ops import DeformConv2d

from mmdet.core import (PointGenerator, build_assigner, build_sampler,
                        images_to_levels, multi_apply, multiclass_nms, unmap)
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead


@HEADS.register_module()
class RepPointsHead(AnchorFreeHead):
    """RepPoint head.

    Args:
        point_feat_channels (int): Number of channels of points features.
        gradient_mul (float): The multiplier to gradients from
            points refinement and recognition.
        point_strides (Iterable): points strides.
        point_base_scale (int): bbox scale for assigning labels.
        loss_cls (dict): Config of classification loss.
        loss_bbox_init (dict): Config of initial points loss.
        loss_bbox_refine (dict): Config of points loss in refinement.
        use_grid_points (bool): If we use bounding box representation, the
            reppoints is represented as grid points on the bounding box.
        center_init (bool): Whether to use center point assignment.
        transform_method (str): The methods to transform RepPoints to bbox.
    """  # noqa: W605

    def __init__(self,
                 num_classes,
                 in_channels,
                 point_feat_channels=256,
                 num_points=9,
                 gradient_mul=0.1,
                 point_strides=[8, 16, 32, 64, 128],
                 point_base_scale=4,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox_init=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_refine=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                 use_grid_points=False,
                 center_init=True,
                 transform_method='moment',
                 moment_mul=0.01,
                 **kwargs):
        self.num_points = num_points
        self.point_feat_channels = point_feat_channels
        self.use_grid_points = use_grid_points
        self.center_init = center_init

        # we use deform conv to extract points features
        self.dcn_kernel = int(np.sqrt(num_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        assert self.dcn_kernel * self.dcn_kernel == num_points, \
            'The points number should be a square number.'
        assert self.dcn_kernel % 2 == 1, \
            'The points number should be an odd square number.'
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)

        super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)

        self.gradient_mul = gradient_mul
        self.point_base_scale = point_base_scale
        self.point_strides = point_strides
        self.point_generators = [PointGenerator() for _ in self.point_strides]

        self.sampling = loss_cls['type'] not in ['FocalLoss']
        if self.train_cfg:
            self.init_assigner = build_assigner(self.train_cfg.init.assigner)
            self.refine_assigner = build_assigner(
                self.train_cfg.refine.assigner)
            # use PseudoSampler when sampling is False
            if self.sampling and hasattr(self.train_cfg, 'sampler'):
                sampler_cfg = self.train_cfg.sampler
            else:
                sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.transform_method = transform_method
        if self.transform_method == 'moment':
            self.moment_transfer = nn.Parameter(
                data=torch.zeros(2), requires_grad=True)
            self.moment_mul = moment_mul

        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        if self.use_sigmoid_cls:
            self.cls_out_channels = self.num_classes
        else:
            self.cls_out_channels = self.num_classes + 1
        self.loss_bbox_init = build_loss(loss_bbox_init)
        self.loss_bbox_refine = build_loss(loss_bbox_refine)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
        self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
                                               self.point_feat_channels,
                                               self.dcn_kernel, 1,
                                               self.dcn_pad)
        self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
                                           self.cls_out_channels, 1, 1, 0)
        self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
                                                 self.point_feat_channels, 3,
                                                 1, 1)
        self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
                                                pts_out_dim, 1, 1, 0)
        self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
                                                      self.point_feat_channels,
                                                      self.dcn_kernel, 1,
                                                      self.dcn_pad)
        self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
                                                  pts_out_dim, 1, 1, 0)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        normal_init(self.reppoints_pts_init_conv, std=0.01)
        normal_init(self.reppoints_pts_init_out, std=0.01)
        normal_init(self.reppoints_pts_refine_conv, std=0.01)
        normal_init(self.reppoints_pts_refine_out, std=0.01)

    def points2bbox(self, pts, y_first=True):
        """Converting the points set into bounding box.

        :param pts: the input points sets (fields), each points
            set (fields) is represented as 2n scalar.
        :param y_first: if y_first=True, the point set is represented as
            [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
            represented as [x1, y1, x2, y2 ... xn, yn].
        :return: each points set is converted to a bbox [x1, y1, x2, y2].
        """
        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, ...]
        pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, ...]
        if self.transform_method == 'minmax':
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'partial_minmax':
            pts_y = pts_y[:, :4, ...]
            pts_x = pts_x[:, :4, ...]
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'moment':
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            moment_transfer = (self.moment_transfer * self.moment_mul) + (
                self.moment_transfer.detach() * (1 - self.moment_mul))
            moment_width_transfer = moment_transfer[0]
            moment_height_transfer = moment_transfer[1]
            half_width = pts_x_std * torch.exp(moment_width_transfer)
            half_height = pts_y_std * torch.exp(moment_height_transfer)
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        else:
            raise NotImplementedError
        return bbox

    def gen_grid_from_reg(self, reg, previous_boxes):
        """Base on the previous bboxes and regression values, we compute the
        regressed bboxes and generate the grids on the bboxes.

        :param reg: the regression value to previous bboxes.
        :param previous_boxes: previous bboxes.
        :return: generate grids on the regressed bboxes.
        """
        b, _, h, w = reg.shape
        bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
        bwh = (previous_boxes[:, 2:, ...] -
               previous_boxes[:, :2, ...]).clamp(min=1e-6)
        grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
            reg[:, 2:, ...])
        grid_wh = bwh * torch.exp(reg[:, 2:, ...])
        grid_left = grid_topleft[:, [0], ...]
        grid_top = grid_topleft[:, [1], ...]
        grid_width = grid_wh[:, [0], ...]
        grid_height = grid_wh[:, [1], ...]
        intervel = torch.linspace(0., 1., self.dcn_kernel).view(
            1, self.dcn_kernel, 1, 1).type_as(reg)
        grid_x = grid_left + grid_width * intervel
        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
        grid_x = grid_x.view(b, -1, h, w)
        grid_y = grid_top + grid_height * intervel
        grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
        grid_y = grid_y.view(b, -1, h, w)
        grid_yx = torch.stack([grid_y, grid_x], dim=2)
        grid_yx = grid_yx.view(b, -1, h, w)
        regressed_bbox = torch.cat([
            grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
        ], 1)
        return grid_yx, regressed_bbox

    def forward(self, feats):
        return multi_apply(self.forward_single, feats)

    def forward_single(self, x):
        """Forward feature map of a single FPN level."""
        dcn_base_offset = self.dcn_base_offset.type_as(x)
        # If we use center_init, the initial reppoints is from center points.
        # If we use bounding bbox representation, the initial reppoints is
        # from regular grid placed on a pre-defined bbox.
        if self.use_grid_points or not self.center_init:
            scale = self.point_base_scale / 2
            points_init = dcn_base_offset / dcn_base_offset.max() * scale
            bbox_init = x.new_tensor([-scale, -scale, scale,
                                      scale]).view(1, 4, 1, 1)
        else:
            points_init = 0
        cls_feat = x
        pts_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            pts_feat = reg_conv(pts_feat)
        # initialize reppoints
        pts_out_init = self.reppoints_pts_init_out(
            self.relu(self.reppoints_pts_init_conv(pts_feat)))
        if self.use_grid_points:
            pts_out_init, bbox_out_init = self.gen_grid_from_reg(
                pts_out_init, bbox_init.detach())
        else:
            pts_out_init = pts_out_init + points_init
        # refine and classify reppoints
        pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
        ) + self.gradient_mul * pts_out_init
        dcn_offset = pts_out_init_grad_mul - dcn_base_offset
        cls_out = self.reppoints_cls_out(
            self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
        pts_out_refine = self.reppoints_pts_refine_out(
            self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
        if self.use_grid_points:
            pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
                pts_out_refine, bbox_out_init.detach())
        else:
            pts_out_refine = pts_out_refine + pts_out_init.detach()
        return cls_out, pts_out_init, pts_out_refine

    def get_points(self, featmap_sizes, img_metas, device):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            img_metas (list[dict]): Image meta info.

        Returns:
            tuple: points of each image, valid flags of each image
        """
        num_imgs = len(img_metas)
        num_levels = len(featmap_sizes)

        # since feature map sizes of all images are the same, we only compute
        # points center for one time
        multi_level_points = []
        for i in range(num_levels):
            points = self.point_generators[i].grid_points(
                featmap_sizes[i], self.point_strides[i], device)
            multi_level_points.append(points)
        points_list = [[point.clone() for point in multi_level_points]
                       for _ in range(num_imgs)]

        # for each image, we compute valid flags of multi level grids
        valid_flag_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_flags = []
            for i in range(num_levels):
                point_stride = self.point_strides[i]
                feat_h, feat_w = featmap_sizes[i]
                h, w = img_meta['pad_shape'][:2]
                valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
                valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
                flags = self.point_generators[i].valid_flags(
                    (feat_h, feat_w), (valid_feat_h, valid_feat_w), device)
                multi_level_flags.append(flags)
            valid_flag_list.append(multi_level_flags)

        return points_list, valid_flag_list

    def centers_to_bboxes(self, point_list):
        """Get bboxes according to center points.

        Only used in :class:`MaxIoUAssigner`.
        """
-
bbox_list = []
|
341 |
-
for i_img, point in enumerate(point_list):
|
342 |
-
bbox = []
|
343 |
-
for i_lvl in range(len(self.point_strides)):
|
344 |
-
scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
|
345 |
-
bbox_shift = torch.Tensor([-scale, -scale, scale,
|
346 |
-
scale]).view(1, 4).type_as(point[0])
|
347 |
-
bbox_center = torch.cat(
|
348 |
-
[point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
|
349 |
-
bbox.append(bbox_center + bbox_shift)
|
350 |
-
bbox_list.append(bbox)
|
351 |
-
return bbox_list
|
352 |
-
|
353 |
-
def offset_to_pts(self, center_list, pred_list):
|
354 |
-
"""Change from point offset to point coordinate."""
|
355 |
-
pts_list = []
|
356 |
-
for i_lvl in range(len(self.point_strides)):
|
357 |
-
pts_lvl = []
|
358 |
-
for i_img in range(len(center_list)):
|
359 |
-
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
|
360 |
-
1, self.num_points)
|
361 |
-
pts_shift = pred_list[i_lvl][i_img]
|
362 |
-
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
|
363 |
-
-1, 2 * self.num_points)
|
364 |
-
y_pts_shift = yx_pts_shift[..., 0::2]
|
365 |
-
x_pts_shift = yx_pts_shift[..., 1::2]
|
366 |
-
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
|
367 |
-
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
|
368 |
-
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
|
369 |
-
pts_lvl.append(pts)
|
370 |
-
pts_lvl = torch.stack(pts_lvl, 0)
|
371 |
-
pts_list.append(pts_lvl)
|
372 |
-
return pts_list
|
373 |
-
|
374 |
-
def _point_target_single(self,
|
375 |
-
flat_proposals,
|
376 |
-
valid_flags,
|
377 |
-
gt_bboxes,
|
378 |
-
gt_bboxes_ignore,
|
379 |
-
gt_labels,
|
380 |
-
label_channels=1,
|
381 |
-
stage='init',
|
382 |
-
unmap_outputs=True):
|
383 |
-
inside_flags = valid_flags
|
384 |
-
if not inside_flags.any():
|
385 |
-
return (None, ) * 7
|
386 |
-
# assign gt and sample proposals
|
387 |
-
proposals = flat_proposals[inside_flags, :]
|
388 |
-
|
389 |
-
if stage == 'init':
|
390 |
-
assigner = self.init_assigner
|
391 |
-
pos_weight = self.train_cfg.init.pos_weight
|
392 |
-
else:
|
393 |
-
assigner = self.refine_assigner
|
394 |
-
pos_weight = self.train_cfg.refine.pos_weight
|
395 |
-
assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
|
396 |
-
None if self.sampling else gt_labels)
|
397 |
-
sampling_result = self.sampler.sample(assign_result, proposals,
|
398 |
-
gt_bboxes)
|
399 |
-
|
400 |
-
num_valid_proposals = proposals.shape[0]
|
401 |
-
bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
|
402 |
-
pos_proposals = torch.zeros_like(proposals)
|
403 |
-
proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
|
404 |
-
labels = proposals.new_full((num_valid_proposals, ),
|
405 |
-
self.num_classes,
|
406 |
-
dtype=torch.long)
|
407 |
-
label_weights = proposals.new_zeros(
|
408 |
-
num_valid_proposals, dtype=torch.float)
|
409 |
-
|
410 |
-
pos_inds = sampling_result.pos_inds
|
411 |
-
neg_inds = sampling_result.neg_inds
|
412 |
-
if len(pos_inds) > 0:
|
413 |
-
pos_gt_bboxes = sampling_result.pos_gt_bboxes
|
414 |
-
bbox_gt[pos_inds, :] = pos_gt_bboxes
|
415 |
-
pos_proposals[pos_inds, :] = proposals[pos_inds, :]
|
416 |
-
proposals_weights[pos_inds, :] = 1.0
|
417 |
-
if gt_labels is None:
|
418 |
-
# Only rpn gives gt_labels as None
|
419 |
-
# Foreground is the first class
|
420 |
-
labels[pos_inds] = 0
|
421 |
-
else:
|
422 |
-
labels[pos_inds] = gt_labels[
|
423 |
-
sampling_result.pos_assigned_gt_inds]
|
424 |
-
if pos_weight <= 0:
|
425 |
-
label_weights[pos_inds] = 1.0
|
426 |
-
else:
|
427 |
-
label_weights[pos_inds] = pos_weight
|
428 |
-
if len(neg_inds) > 0:
|
429 |
-
label_weights[neg_inds] = 1.0
|
430 |
-
|
431 |
-
# map up to original set of proposals
|
432 |
-
if unmap_outputs:
|
433 |
-
num_total_proposals = flat_proposals.size(0)
|
434 |
-
labels = unmap(labels, num_total_proposals, inside_flags)
|
435 |
-
label_weights = unmap(label_weights, num_total_proposals,
|
436 |
-
inside_flags)
|
437 |
-
bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
|
438 |
-
pos_proposals = unmap(pos_proposals, num_total_proposals,
|
439 |
-
inside_flags)
|
440 |
-
proposals_weights = unmap(proposals_weights, num_total_proposals,
|
441 |
-
inside_flags)
|
442 |
-
|
443 |
-
return (labels, label_weights, bbox_gt, pos_proposals,
|
444 |
-
proposals_weights, pos_inds, neg_inds)
|
445 |
-
|
446 |
-
def get_targets(self,
|
447 |
-
proposals_list,
|
448 |
-
valid_flag_list,
|
449 |
-
gt_bboxes_list,
|
450 |
-
img_metas,
|
451 |
-
gt_bboxes_ignore_list=None,
|
452 |
-
gt_labels_list=None,
|
453 |
-
stage='init',
|
454 |
-
label_channels=1,
|
455 |
-
unmap_outputs=True):
|
456 |
-
"""Compute corresponding GT box and classification targets for
|
457 |
-
proposals.
|
458 |
-
|
459 |
-
Args:
|
460 |
-
proposals_list (list[list]): Multi level points/bboxes of each
|
461 |
-
image.
|
462 |
-
valid_flag_list (list[list]): Multi level valid flags of each
|
463 |
-
image.
|
464 |
-
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
|
465 |
-
img_metas (list[dict]): Meta info of each image.
|
466 |
-
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
|
467 |
-
ignored.
|
468 |
-
gt_bboxes_list (list[Tensor]): Ground truth labels of each box.
|
469 |
-
stage (str): `init` or `refine`. Generate target for init stage or
|
470 |
-
refine stage
|
471 |
-
label_channels (int): Channel of label.
|
472 |
-
unmap_outputs (bool): Whether to map outputs back to the original
|
473 |
-
set of anchors.
|
474 |
-
|
475 |
-
Returns:
|
476 |
-
tuple:
|
477 |
-
- labels_list (list[Tensor]): Labels of each level.
|
478 |
-
- label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501
|
479 |
-
- bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
|
480 |
-
- proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501
|
481 |
-
- proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501
|
482 |
-
- num_total_pos (int): Number of positive samples in all images. # noqa: E501
|
483 |
-
- num_total_neg (int): Number of negative samples in all images. # noqa: E501
|
484 |
-
"""
|
485 |
-
assert stage in ['init', 'refine']
|
486 |
-
num_imgs = len(img_metas)
|
487 |
-
assert len(proposals_list) == len(valid_flag_list) == num_imgs
|
488 |
-
|
489 |
-
# points number of multi levels
|
490 |
-
num_level_proposals = [points.size(0) for points in proposals_list[0]]
|
491 |
-
|
492 |
-
# concat all level points and flags to a single tensor
|
493 |
-
for i in range(num_imgs):
|
494 |
-
assert len(proposals_list[i]) == len(valid_flag_list[i])
|
495 |
-
proposals_list[i] = torch.cat(proposals_list[i])
|
496 |
-
valid_flag_list[i] = torch.cat(valid_flag_list[i])
|
497 |
-
|
498 |
-
# compute targets for each image
|
499 |
-
if gt_bboxes_ignore_list is None:
|
500 |
-
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
|
501 |
-
if gt_labels_list is None:
|
502 |
-
gt_labels_list = [None for _ in range(num_imgs)]
|
503 |
-
(all_labels, all_label_weights, all_bbox_gt, all_proposals,
|
504 |
-
all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
|
505 |
-
self._point_target_single,
|
506 |
-
proposals_list,
|
507 |
-
valid_flag_list,
|
508 |
-
gt_bboxes_list,
|
509 |
-
gt_bboxes_ignore_list,
|
510 |
-
gt_labels_list,
|
511 |
-
stage=stage,
|
512 |
-
label_channels=label_channels,
|
513 |
-
unmap_outputs=unmap_outputs)
|
514 |
-
# no valid points
|
515 |
-
if any([labels is None for labels in all_labels]):
|
516 |
-
return None
|
517 |
-
# sampled points of all images
|
518 |
-
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
|
519 |
-
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
|
520 |
-
labels_list = images_to_levels(all_labels, num_level_proposals)
|
521 |
-
label_weights_list = images_to_levels(all_label_weights,
|
522 |
-
num_level_proposals)
|
523 |
-
bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
|
524 |
-
proposals_list = images_to_levels(all_proposals, num_level_proposals)
|
525 |
-
proposal_weights_list = images_to_levels(all_proposal_weights,
|
526 |
-
num_level_proposals)
|
527 |
-
return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
|
528 |
-
proposal_weights_list, num_total_pos, num_total_neg)
|
529 |
-
|
530 |
-
def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
|
531 |
-
label_weights, bbox_gt_init, bbox_weights_init,
|
532 |
-
bbox_gt_refine, bbox_weights_refine, stride,
|
533 |
-
num_total_samples_init, num_total_samples_refine):
|
534 |
-
# classification loss
|
535 |
-
labels = labels.reshape(-1)
|
536 |
-
label_weights = label_weights.reshape(-1)
|
537 |
-
cls_score = cls_score.permute(0, 2, 3,
|
538 |
-
1).reshape(-1, self.cls_out_channels)
|
539 |
-
cls_score = cls_score.contiguous()
|
540 |
-
loss_cls = self.loss_cls(
|
541 |
-
cls_score,
|
542 |
-
labels,
|
543 |
-
label_weights,
|
544 |
-
avg_factor=num_total_samples_refine)
|
545 |
-
|
546 |
-
# points loss
|
547 |
-
bbox_gt_init = bbox_gt_init.reshape(-1, 4)
|
548 |
-
bbox_weights_init = bbox_weights_init.reshape(-1, 4)
|
549 |
-
bbox_pred_init = self.points2bbox(
|
550 |
-
pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
|
551 |
-
bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
|
552 |
-
bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
|
553 |
-
bbox_pred_refine = self.points2bbox(
|
554 |
-
pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
|
555 |
-
normalize_term = self.point_base_scale * stride
|
556 |
-
loss_pts_init = self.loss_bbox_init(
|
557 |
-
bbox_pred_init / normalize_term,
|
558 |
-
bbox_gt_init / normalize_term,
|
559 |
-
bbox_weights_init,
|
560 |
-
avg_factor=num_total_samples_init)
|
561 |
-
loss_pts_refine = self.loss_bbox_refine(
|
562 |
-
bbox_pred_refine / normalize_term,
|
563 |
-
bbox_gt_refine / normalize_term,
|
564 |
-
bbox_weights_refine,
|
565 |
-
avg_factor=num_total_samples_refine)
|
566 |
-
return loss_cls, loss_pts_init, loss_pts_refine
|
567 |
-
|
568 |
-
def loss(self,
|
569 |
-
cls_scores,
|
570 |
-
pts_preds_init,
|
571 |
-
pts_preds_refine,
|
572 |
-
gt_bboxes,
|
573 |
-
gt_labels,
|
574 |
-
img_metas,
|
575 |
-
gt_bboxes_ignore=None):
|
576 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
577 |
-
assert len(featmap_sizes) == len(self.point_generators)
|
578 |
-
device = cls_scores[0].device
|
579 |
-
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
|
580 |
-
|
581 |
-
# target for initial stage
|
582 |
-
center_list, valid_flag_list = self.get_points(featmap_sizes,
|
583 |
-
img_metas, device)
|
584 |
-
pts_coordinate_preds_init = self.offset_to_pts(center_list,
|
585 |
-
pts_preds_init)
|
586 |
-
if self.train_cfg.init.assigner['type'] == 'PointAssigner':
|
587 |
-
# Assign target for center list
|
588 |
-
candidate_list = center_list
|
589 |
-
else:
|
590 |
-
# transform center list to bbox list and
|
591 |
-
# assign target for bbox list
|
592 |
-
bbox_list = self.centers_to_bboxes(center_list)
|
593 |
-
candidate_list = bbox_list
|
594 |
-
cls_reg_targets_init = self.get_targets(
|
595 |
-
candidate_list,
|
596 |
-
valid_flag_list,
|
597 |
-
gt_bboxes,
|
598 |
-
img_metas,
|
599 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
600 |
-
gt_labels_list=gt_labels,
|
601 |
-
stage='init',
|
602 |
-
label_channels=label_channels)
|
603 |
-
(*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
|
604 |
-
num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
|
605 |
-
num_total_samples_init = (
|
606 |
-
num_total_pos_init +
|
607 |
-
num_total_neg_init if self.sampling else num_total_pos_init)
|
608 |
-
|
609 |
-
# target for refinement stage
|
610 |
-
center_list, valid_flag_list = self.get_points(featmap_sizes,
|
611 |
-
img_metas, device)
|
612 |
-
pts_coordinate_preds_refine = self.offset_to_pts(
|
613 |
-
center_list, pts_preds_refine)
|
614 |
-
bbox_list = []
|
615 |
-
for i_img, center in enumerate(center_list):
|
616 |
-
bbox = []
|
617 |
-
for i_lvl in range(len(pts_preds_refine)):
|
618 |
-
bbox_preds_init = self.points2bbox(
|
619 |
-
pts_preds_init[i_lvl].detach())
|
620 |
-
bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
|
621 |
-
bbox_center = torch.cat(
|
622 |
-
[center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
|
623 |
-
bbox.append(bbox_center +
|
624 |
-
bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
|
625 |
-
bbox_list.append(bbox)
|
626 |
-
cls_reg_targets_refine = self.get_targets(
|
627 |
-
bbox_list,
|
628 |
-
valid_flag_list,
|
629 |
-
gt_bboxes,
|
630 |
-
img_metas,
|
631 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
632 |
-
gt_labels_list=gt_labels,
|
633 |
-
stage='refine',
|
634 |
-
label_channels=label_channels)
|
635 |
-
(labels_list, label_weights_list, bbox_gt_list_refine,
|
636 |
-
candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
|
637 |
-
num_total_neg_refine) = cls_reg_targets_refine
|
638 |
-
num_total_samples_refine = (
|
639 |
-
num_total_pos_refine +
|
640 |
-
num_total_neg_refine if self.sampling else num_total_pos_refine)
|
641 |
-
|
642 |
-
# compute loss
|
643 |
-
losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
|
644 |
-
self.loss_single,
|
645 |
-
cls_scores,
|
646 |
-
pts_coordinate_preds_init,
|
647 |
-
pts_coordinate_preds_refine,
|
648 |
-
labels_list,
|
649 |
-
label_weights_list,
|
650 |
-
bbox_gt_list_init,
|
651 |
-
bbox_weights_list_init,
|
652 |
-
bbox_gt_list_refine,
|
653 |
-
bbox_weights_list_refine,
|
654 |
-
self.point_strides,
|
655 |
-
num_total_samples_init=num_total_samples_init,
|
656 |
-
num_total_samples_refine=num_total_samples_refine)
|
657 |
-
loss_dict_all = {
|
658 |
-
'loss_cls': losses_cls,
|
659 |
-
'loss_pts_init': losses_pts_init,
|
660 |
-
'loss_pts_refine': losses_pts_refine
|
661 |
-
}
|
662 |
-
return loss_dict_all
|
663 |
-
|
664 |
-
def get_bboxes(self,
|
665 |
-
cls_scores,
|
666 |
-
pts_preds_init,
|
667 |
-
pts_preds_refine,
|
668 |
-
img_metas,
|
669 |
-
cfg=None,
|
670 |
-
rescale=False,
|
671 |
-
with_nms=True):
|
672 |
-
assert len(cls_scores) == len(pts_preds_refine)
|
673 |
-
device = cls_scores[0].device
|
674 |
-
bbox_preds_refine = [
|
675 |
-
self.points2bbox(pts_pred_refine)
|
676 |
-
for pts_pred_refine in pts_preds_refine
|
677 |
-
]
|
678 |
-
num_levels = len(cls_scores)
|
679 |
-
mlvl_points = [
|
680 |
-
self.point_generators[i].grid_points(cls_scores[i].size()[-2:],
|
681 |
-
self.point_strides[i], device)
|
682 |
-
for i in range(num_levels)
|
683 |
-
]
|
684 |
-
result_list = []
|
685 |
-
for img_id in range(len(img_metas)):
|
686 |
-
cls_score_list = [
|
687 |
-
cls_scores[i][img_id].detach() for i in range(num_levels)
|
688 |
-
]
|
689 |
-
bbox_pred_list = [
|
690 |
-
bbox_preds_refine[i][img_id].detach()
|
691 |
-
for i in range(num_levels)
|
692 |
-
]
|
693 |
-
img_shape = img_metas[img_id]['img_shape']
|
694 |
-
scale_factor = img_metas[img_id]['scale_factor']
|
695 |
-
proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
|
696 |
-
mlvl_points, img_shape,
|
697 |
-
scale_factor, cfg, rescale,
|
698 |
-
with_nms)
|
699 |
-
result_list.append(proposals)
|
700 |
-
return result_list
|
701 |
-
|
702 |
-
def _get_bboxes_single(self,
|
703 |
-
cls_scores,
|
704 |
-
bbox_preds,
|
705 |
-
mlvl_points,
|
706 |
-
img_shape,
|
707 |
-
scale_factor,
|
708 |
-
cfg,
|
709 |
-
rescale=False,
|
710 |
-
with_nms=True):
|
711 |
-
cfg = self.test_cfg if cfg is None else cfg
|
712 |
-
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
|
713 |
-
mlvl_bboxes = []
|
714 |
-
mlvl_scores = []
|
715 |
-
for i_lvl, (cls_score, bbox_pred, points) in enumerate(
|
716 |
-
zip(cls_scores, bbox_preds, mlvl_points)):
|
717 |
-
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
|
718 |
-
cls_score = cls_score.permute(1, 2,
|
719 |
-
0).reshape(-1, self.cls_out_channels)
|
720 |
-
if self.use_sigmoid_cls:
|
721 |
-
scores = cls_score.sigmoid()
|
722 |
-
else:
|
723 |
-
scores = cls_score.softmax(-1)
|
724 |
-
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
|
725 |
-
nms_pre = cfg.get('nms_pre', -1)
|
726 |
-
if nms_pre > 0 and scores.shape[0] > nms_pre:
|
727 |
-
if self.use_sigmoid_cls:
|
728 |
-
max_scores, _ = scores.max(dim=1)
|
729 |
-
else:
|
730 |
-
# remind that we set FG labels to [0, num_class-1]
|
731 |
-
# since mmdet v2.0
|
732 |
-
# BG cat_id: num_class
|
733 |
-
max_scores, _ = scores[:, :-1].max(dim=1)
|
734 |
-
_, topk_inds = max_scores.topk(nms_pre)
|
735 |
-
points = points[topk_inds, :]
|
736 |
-
bbox_pred = bbox_pred[topk_inds, :]
|
737 |
-
scores = scores[topk_inds, :]
|
738 |
-
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
|
739 |
-
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
|
740 |
-
x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
|
741 |
-
y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
|
742 |
-
x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
|
743 |
-
y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
|
744 |
-
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
|
745 |
-
mlvl_bboxes.append(bboxes)
|
746 |
-
mlvl_scores.append(scores)
|
747 |
-
mlvl_bboxes = torch.cat(mlvl_bboxes)
|
748 |
-
if rescale:
|
749 |
-
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
|
750 |
-
mlvl_scores = torch.cat(mlvl_scores)
|
751 |
-
if self.use_sigmoid_cls:
|
752 |
-
# Add a dummy background class to the backend when using sigmoid
|
753 |
-
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
|
754 |
-
# BG cat_id: num_class
|
755 |
-
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
|
756 |
-
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
|
757 |
-
if with_nms:
|
758 |
-
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
|
759 |
-
cfg.score_thr, cfg.nms,
|
760 |
-
cfg.max_per_img)
|
761 |
-
return det_bboxes, det_labels
|
762 |
-
else:
|
763 |
-
return mlvl_bboxes, mlvl_scores
|
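The points2bbox transforms at the top of this hunk reduce a learned point set to a box. A minimal standalone sketch of the 'partial_minmax' and 'moment' variants (not the head itself; moment_transfer is a trained log-scale parameter in the real model, fixed to zero here so the box half-sizes equal the point standard deviations):

    import torch

    # Illustrative point sets: (N, num_points, 2) in (x, y) order.
    pts = torch.rand(8, 9, 2) * 100
    pts_x, pts_y = pts[..., 0], pts[..., 1]

    # 'partial_minmax': tightest box around the first four points only.
    px, py = pts_x[:, :4], pts_y[:, :4]
    bbox_partial = torch.stack(
        [px.min(dim=1)[0], py.min(dim=1)[0],
         px.max(dim=1)[0], py.max(dim=1)[0]], dim=1)

    # 'moment': box from the mean/std of all points; exp(moment_transfer)
    # rescales the half-width/half-height (identity here since it is zero).
    moment_transfer = torch.zeros(2)
    half_w = pts_x.std(dim=1) * torch.exp(moment_transfer[0])
    half_h = pts_y.std(dim=1) * torch.exp(moment_transfer[1])
    bbox_moment = torch.stack([
        pts_x.mean(dim=1) - half_w, pts_y.mean(dim=1) - half_h,
        pts_x.mean(dim=1) + half_w, pts_y.mean(dim=1) + half_h
    ], dim=1)

    print(bbox_partial.shape, bbox_moment.shape)  # torch.Size([8, 4]) twice
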
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/datasets/drive.py
DELETED
@@ -1,59 +0,0 @@
# dataset settings
dataset_type = 'DRIVEDataset'
data_root = 'data/DRIVE'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (584, 565)
crop_size = (64, 64)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
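Configs like this one are consumed through mmcv's Config machinery rather than imported directly. A quick hedged sketch, assuming mmcv is installed and a local copy of the file sits at the hypothetical path used below:

    from mmcv import Config

    cfg = Config.fromfile('configs/_base_/datasets/drive.py')  # adjust path
    print(cfg.data.train.type)          # 'RepeatDataset'
    print(cfg.data.train.dataset.type)  # 'DRIVEDataset'
    print(cfg.crop_size)                # (64, 64)
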
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_80k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './deeplabv3plus_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
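Both this file and the similar FCN config below lean on MMSegmentation's `_base_` inheritance: the child pulls in the full ResNet-50 config and overrides only the pretrained weights and backbone depth, while unrelated base keys survive. A simplified sketch of the recursive dict merge this relies on (the real merge in mmcv also handles `_delete_` markers, which are omitted here):

    def merge(base, override):
        # Simplified version of the recursive merge applied to _base_ configs.
        out = dict(base)
        for k, v in override.items():
            if isinstance(v, dict) and isinstance(out.get(k), dict):
                out[k] = merge(out[k], v)
            else:
                out[k] = v
        return out

    base = dict(model=dict(pretrained='open-mmlab://resnet50_v1c',
                           backbone=dict(type='ResNetV1c', depth=50)))
    child = dict(model=dict(pretrained='open-mmlab://resnet101_v1c',
                            backbone=dict(depth=101)))
    print(merge(base, child))
    # backbone keeps type='ResNetV1c' but ends up with depth=101
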
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './fcn_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AnnonSubmission/xai-cl/ssl_models/dino.py
DELETED
@@ -1,181 +0,0 @@
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

""" from https://github.com/facebookresearch/dino"""

class DINOHead(nn.Module):

    def __init__(self, in_dim, out_dim, use_bn, norm_last_layer, nlayers, hidden_dim, bottleneck_dim):
        super().__init__()

        nlayers = max(nlayers, 1)
        if nlayers == 1:
            self.mlp = nn.Linear(in_dim, bottleneck_dim)
        else:
            layers = [nn.Linear(in_dim, hidden_dim)]
            if use_bn:
                layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.GELU())
            for _ in range(nlayers - 2):
                layers.append(nn.Linear(hidden_dim, hidden_dim))
                if use_bn:
                    layers.append(nn.BatchNorm1d(hidden_dim))
                layers.append(nn.GELU())
            layers.append(nn.Linear(hidden_dim, bottleneck_dim))
            self.mlp = nn.Sequential(*layers)

        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False

    def forward(self, x):
        x = self.mlp(x)
        x = F.normalize(x, dim=-1, p=2)
        x = self.last_layer(x)
        return x

class MultiCropWrapper(nn.Module):
    def __init__(self, backbone, head):
        super(MultiCropWrapper, self).__init__()
        backbone.fc, backbone.head = nn.Identity(), nn.Identity()
        self.backbone = backbone
        self.head = head

    def forward(self, x):
        return self.head(self.backbone(x))

class DINOLoss(nn.Module):
    def __init__(self, out_dim, warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs, nepochs,
                 student_temp=0.1, center_momentum=0.9):
        super().__init__()

        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.register_buffer("center", torch.zeros(1, out_dim))
        self.nepochs = nepochs
        self.teacher_temp_schedule = np.concatenate((np.linspace(warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs),
                                                     np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp))

    def forward(self, student_output, teacher_output):
        student_out = student_output / self.student_temp
        temp = self.teacher_temp_schedule[self.nepochs - 1]  # last one
        teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
        teacher_out = teacher_out.detach()
        loss = torch.sum(-teacher_out * F.log_softmax(student_out, dim=-1), dim=-1).mean()
        return loss


class ResNet(nn.Module):
    def __init__(self, backbone):
        super().__init__()

        modules = list(backbone.children())[:-2]
        self.net = nn.Sequential(*modules)

    def forward(self, x):
        return self.net(x).mean(dim=[2, 3])

class RestructuredDINO(nn.Module):

    def __init__(self, student, teacher):
        super().__init__()

        self.encoder_student = ResNet(student.backbone)
        self.encoder = ResNet(teacher.backbone)

        self.contrastive_head_student = student.head
        self.contrastive_head = teacher.head

    def forward(self, x, run_teacher):

        if run_teacher:
            x = self.encoder(x)
            x = self.contrastive_head(x)
        else:
            x = self.encoder_student(x)
            x = self.contrastive_head_student(x)

        return x


def get_dino_model_without_loss(ckpt_path='dino_resnet50_pretrain_full_checkpoint.pth'):
    state_dict = torch.load('pretrained_models/dino_models/' + ckpt_path, map_location='cpu')
    state_dict_student = state_dict['student']
    state_dict_teacher = state_dict['teacher']

    state_dict_student = {k.replace("module.", ""): v for k, v in state_dict_student.items()}
    state_dict_teacher = {k.replace("module.", ""): v for k, v in state_dict_teacher.items()}

    student_backbone = torchvision.models.resnet50()
    teacher_backbone = torchvision.models.resnet50()
    embed_dim = student_backbone.fc.weight.shape[1]

    student_head = DINOHead(in_dim=embed_dim, out_dim=60000, use_bn=True, norm_last_layer=True, nlayers=2, hidden_dim=4096, bottleneck_dim=256)
    teacher_head = DINOHead(in_dim=embed_dim, out_dim=60000, use_bn=True, norm_last_layer=True, nlayers=2, hidden_dim=4096, bottleneck_dim=256)
    student_head.last_layer = nn.Linear(256, 60000, bias=False)
    teacher_head.last_layer = nn.Linear(256, 60000, bias=False)

    student = MultiCropWrapper(student_backbone, student_head)
    teacher = MultiCropWrapper(teacher_backbone, teacher_head)

    student.load_state_dict(state_dict_student)
    teacher.load_state_dict(state_dict_teacher)

    restructured_model = RestructuredDINO(student, teacher)

    return restructured_model.to(device)


def get_dino_model_with_loss(ckpt_path='dino_rn50_checkpoint.pth'):
    state_dict = torch.load('pretrained_models/dino_models/' + ckpt_path, map_location='cpu')

    state_dict_student = state_dict['student']
    state_dict_teacher = state_dict['teacher']
    state_dict_args = vars(state_dict['args'])
    state_dict_dino_loss = state_dict['dino_loss']

    state_dict_student = {k.replace("module.", ""): v for k, v in state_dict_student.items()}
    state_dict_teacher = {k.replace("module.", ""): v for k, v in state_dict_teacher.items()}

    student_backbone = torchvision.models.resnet50()
    teacher_backbone = torchvision.models.resnet50()
    embed_dim = student_backbone.fc.weight.shape[1]

    student_head = DINOHead(in_dim=embed_dim,
                            out_dim=state_dict_args['out_dim'],
                            use_bn=state_dict_args['use_bn_in_head'],
                            norm_last_layer=state_dict_args['norm_last_layer'],
                            nlayers=3,
                            hidden_dim=2048,
                            bottleneck_dim=256)

    teacher_head = DINOHead(in_dim=embed_dim,
                            out_dim=state_dict_args['out_dim'],
                            use_bn=state_dict_args['use_bn_in_head'],
                            norm_last_layer=state_dict_args['norm_last_layer'],
                            nlayers=3,
                            hidden_dim=2048,
                            bottleneck_dim=256)

    loss = DINOLoss(out_dim=state_dict_args['out_dim'],
                    warmup_teacher_temp=state_dict_args['warmup_teacher_temp'],
                    teacher_temp=state_dict_args['teacher_temp'],
                    warmup_teacher_temp_epochs=state_dict_args['warmup_teacher_temp_epochs'],
                    nepochs=state_dict_args['epochs'])

    student = MultiCropWrapper(student_backbone, student_head)
    teacher = MultiCropWrapper(teacher_backbone, teacher_head)

    student.load_state_dict(state_dict_student)
    teacher.load_state_dict(state_dict_teacher)
    loss.load_state_dict(state_dict_dino_loss)

    restructured_model = RestructuredDINO(student, teacher)

    return restructured_model.to(device), loss.to(device)
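To see the tensor contract of the wrapper above without downloading a checkpoint, here is a hedged sketch that assembles an untrained teacher branch with the same 60000-dim head that get_dino_model_without_loss builds (random weights, so the outputs themselves are meaningless):

    import torch
    import torchvision

    # Untrained copy of the teacher branch, just to show the shapes involved.
    backbone = torchvision.models.resnet50()
    embed_dim = backbone.fc.weight.shape[1]  # 2048
    head = DINOHead(in_dim=embed_dim, out_dim=60000, use_bn=True,
                    norm_last_layer=True, nlayers=2, hidden_dim=4096,
                    bottleneck_dim=256)
    teacher = MultiCropWrapper(backbone, head)

    x = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        out = teacher(x)
    print(out.shape)  # torch.Size([2, 60000])
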
spaces/Annotation-AI/fast-segment-everything-with-text-prompt/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Fast Segment Anything With Text Prompt
emoji: 🐨
colorFrom: pink
colorTo: green
sdk: gradio
sdk_version: 3.32.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/midas/dpt_depth.py
DELETED
@@ -1,109 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from .base_model import BaseModel
from .blocks import (
    FeatureFusionBlock,
    FeatureFusionBlock_custom,
    Interpolate,
    _make_encoder,
    forward_vit,
)


def _make_fusion_block(features, use_bn):
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
    )


class DPT(BaseModel):
    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
    ):

        super(DPT, self).__init__()

        self.channels_last = channels_last

        hooks = {
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }

        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # Set to True if you want to train from scratch; uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks[backbone],
            use_readout=readout,
        )

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        self.scratch.output_conv = head

    def forward(self, x):
        if self.channels_last:
            # contiguous() returns a new tensor, so the result must be
            # assigned back for the memory-format change to take effect
            x = x.contiguous(memory_format=torch.channels_last)

        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return out


class DPTDepthModel(DPT):
    def __init__(self, path=None, non_negative=True, **kwargs):
        features = kwargs["features"] if "features" in kwargs else 256

        head = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        super().__init__(head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        return super().forward(x).squeeze(dim=1)
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/backbone/backbone.py
DELETED
@@ -1,221 +0,0 @@
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copied from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------

"""
Backbone modules.
"""

from typing import Dict, List

import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter

from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process

from .position_encoding import build_position_encoding
from .swin_transformer import build_swin_transformer


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
    without which any models other than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        # move reshapes to the beginning
        # to make it fuser-friendly
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        scale = w * (rv + eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias


class BackboneBase(nn.Module):
    def __init__(
        self,
        backbone: nn.Module,
        train_backbone: bool,
        num_channels: int,
        return_interm_indices: list,
    ):
        super().__init__()
        for name, parameter in backbone.named_parameters():
            if (
                not train_backbone
                or "layer2" not in name
                and "layer3" not in name
                and "layer4" not in name
            ):
                parameter.requires_grad_(False)

        return_layers = {}
        for idx, layer_index in enumerate(return_interm_indices):
            return_layers.update(
                {"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}
            )

        # if len:
        #     if use_stage1_feature:
        #         return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        #     else:
        #         return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
        # else:
        #     return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        return out


class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(
        self,
        name: str,
        train_backbone: bool,
        dilation: bool,
        return_interm_indices: list,
        batch_norm=FrozenBatchNorm2d,
    ):
        if name in ["resnet18", "resnet34", "resnet50", "resnet101"]:
            backbone = getattr(torchvision.models, name)(
                replace_stride_with_dilation=[False, False, dilation],
                pretrained=is_main_process(),
                norm_layer=batch_norm,
            )
        else:
            raise NotImplementedError("Unsupported backbone name {}".format(name))
        # num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
        assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
        num_channels_all = [256, 512, 1024, 2048]
        num_channels = num_channels_all[4 - len(return_interm_indices):]
        super().__init__(backbone, train_backbone, num_channels, return_interm_indices)


class Joiner(nn.Sequential):
    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        xs = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for name, x in xs.items():
            out.append(x)
            # position encoding
            pos.append(self[1](x).to(x.tensors.dtype))

        return out, pos


def build_backbone(args):
    """
    Useful args:
        - backbone: backbone name
        - lr_backbone:
        - dilation
        - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
        - backbone_freeze_keywords:
        - use_checkpoint: for swin only for now

    """
    position_embedding = build_position_encoding(args)
    train_backbone = True
    if not train_backbone:
        raise ValueError("Please set lr_backbone > 0")
    return_interm_indices = args.return_interm_indices
    assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
    args.backbone_freeze_keywords
    use_checkpoint = getattr(args, "use_checkpoint", False)

    if args.backbone in ["resnet50", "resnet101"]:
        backbone = Backbone(
            args.backbone,
            train_backbone,
            args.dilation,
            return_interm_indices,
            batch_norm=FrozenBatchNorm2d,
        )
        bb_num_channels = backbone.num_channels
    elif args.backbone in [
        "swin_T_224_1k",
        "swin_B_224_22k",
        "swin_B_384_22k",
        "swin_L_224_22k",
        "swin_L_384_22k",
    ]:
        pretrain_img_size = int(args.backbone.split("_")[-2])
        backbone = build_swin_transformer(
            args.backbone,
            pretrain_img_size=pretrain_img_size,
            out_indices=tuple(return_interm_indices),
            dilation=False,
            use_checkpoint=use_checkpoint,
        )

        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
    else:
        raise NotImplementedError("Unknown backbone {}".format(args.backbone))

    assert len(bb_num_channels) == len(
        return_interm_indices
    ), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"

    model = Joiner(backbone, position_embedding)
    model.num_channels = bb_num_channels
    assert isinstance(
        bb_num_channels, List
    ), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels))
    return model
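A small self-contained sketch checking that FrozenBatchNorm2d above matches an eval-mode BatchNorm2d once the buffers are copied across (both use eps=1e-5, so the folded scale/bias form is numerically equivalent):

    import torch
    from torch import nn

    bn = nn.BatchNorm2d(8).eval()
    bn.running_mean.normal_()        # pretend these are trained statistics
    bn.running_var.uniform_(0.5, 2.0)

    frozen = FrozenBatchNorm2d(8)
    frozen.weight.copy_(bn.weight.detach())
    frozen.bias.copy_(bn.bias.detach())
    frozen.running_mean.copy_(bn.running_mean)
    frozen.running_var.copy_(bn.running_var)

    x = torch.randn(2, 8, 16, 16)
    with torch.no_grad():
        print(torch.allclose(bn(x), frozen(x), atol=1e-5))  # True
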
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/__init__.py
DELETED
@@ -1,49 +0,0 @@
from typing import Any, Optional

from .main import (dotenv_values, find_dotenv, get_key, load_dotenv, set_key,
                   unset_key)


def load_ipython_extension(ipython: Any) -> None:
    from .ipython import load_ipython_extension
    load_ipython_extension(ipython)


def get_cli_string(
    path: Optional[str] = None,
    action: Optional[str] = None,
    key: Optional[str] = None,
    value: Optional[str] = None,
    quote: Optional[str] = None,
):
    """Returns a string suitable for running as a shell script.

    Useful for converting arguments passed to a fabric task
    to be passed to a `local` or `run` command.
    """
    command = ['dotenv']
    if quote:
        command.append(f'-q {quote}')
    if path:
        command.append(f'-f {path}')
    if action:
        command.append(action)
    if key:
        command.append(key)
    if value:
        if ' ' in value:
            command.append(f'"{value}"')
        else:
            command.append(value)

    return ' '.join(command).strip()


__all__ = ['get_cli_string',
           'load_dotenv',
           'dotenv_values',
           'get_key',
           'set_key',
           'unset_key',
           'find_dotenv',
           'load_ipython_extension']
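For example, get_cli_string above joins the pieces positionally and quotes values that contain whitespace:

    print(get_cli_string(path='.env', action='set', key='DEBUG',
                         value='a b', quote='always'))
    # dotenv -q always -f .env set DEBUG "a b"
    print(get_cli_string(action='get', key='DEBUG'))
    # dotenv get DEBUG
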
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/network/cache.py
DELETED
@@ -1,69 +0,0 @@
"""HTTP cache implementation.
"""

import os
from contextlib import contextmanager
from typing import Generator, Optional

from pip._vendor.cachecontrol.cache import BaseCache
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.requests.models import Response

from pip._internal.utils.filesystem import adjacent_tmp_file, replace
from pip._internal.utils.misc import ensure_dir


def is_from_cache(response: Response) -> bool:
    return getattr(response, "from_cache", False)


@contextmanager
def suppressed_cache_errors() -> Generator[None, None, None]:
    """If we can't access the cache then we can just skip caching and process
    requests as if caching wasn't enabled.
    """
    try:
        yield
    except OSError:
        pass


class SafeFileCache(BaseCache):
    """
    A file based cache which is safe to use even when the target directory may
    not be accessible or writable.
    """

    def __init__(self, directory: str) -> None:
        assert directory is not None, "Cache directory must not be None."
        super().__init__()
        self.directory = directory

    def _get_cache_path(self, name: str) -> str:
        # From cachecontrol.caches.file_cache.FileCache._fn, brought into our
        # class for backwards-compatibility and to avoid using a non-public
        # method.
        hashed = FileCache.encode(name)
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)

    def get(self, key: str) -> Optional[bytes]:
        path = self._get_cache_path(key)
        with suppressed_cache_errors():
            with open(path, "rb") as f:
                return f.read()

    def set(self, key: str, value: bytes, expires: Optional[int] = None) -> None:
        path = self._get_cache_path(key)
        with suppressed_cache_errors():
            ensure_dir(os.path.dirname(path))

            with adjacent_tmp_file(path) as f:
                f.write(value)

            replace(f.name, path)

    def delete(self, key: str) -> None:
        path = self._get_cache_path(key)
        with suppressed_cache_errors():
            os.remove(path)
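A quick round-trip sketch of the cache's behaviour, assuming a pip installation recent enough to ship this private module; note that a missing entry raises OSError internally, which suppressed_cache_errors swallows, so get falls through to None:

    import tempfile
    from pip._internal.network.cache import SafeFileCache

    with tempfile.TemporaryDirectory() as tmp:
        cache = SafeFileCache(tmp)
        assert cache.get("missing") is None  # OSError is swallowed -> None
        cache.set("https://example.com/pkg", b"payload")
        assert cache.get("https://example.com/pkg") == b"payload"
        cache.delete("https://example.com/pkg")
        assert cache.get("https://example.com/pkg") is None
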
spaces/AutoGeneralAI/chatgpt-clone/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Chatgpt Clone
emoji: 🐠
colorFrom: gray
colorTo: red
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bart92/RVC_HF/infer/lib/slicer2.py
DELETED
@@ -1,260 +0,0 @@
-import numpy as np
-
-
-# This function is obtained from librosa.
-def get_rms(
-    y,
-    frame_length=2048,
-    hop_length=512,
-    pad_mode="constant",
-):
-    padding = (int(frame_length // 2), int(frame_length // 2))
-    y = np.pad(y, padding, mode=pad_mode)
-
-    axis = -1
-    # put our new within-frame axis at the end for now
-    out_strides = y.strides + tuple([y.strides[axis]])
-    # Reduce the shape on the framing axis
-    x_shape_trimmed = list(y.shape)
-    x_shape_trimmed[axis] -= frame_length - 1
-    out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
-    xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
-    if axis < 0:
-        target_axis = axis - 1
-    else:
-        target_axis = axis + 1
-    xw = np.moveaxis(xw, -1, target_axis)
-    # Downsample along the target axis
-    slices = [slice(None)] * xw.ndim
-    slices[axis] = slice(0, None, hop_length)
-    x = xw[tuple(slices)]
-
-    # Calculate power
-    power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
-
-    return np.sqrt(power)
-
-
-class Slicer:
-    def __init__(
-        self,
-        sr: int,
-        threshold: float = -40.0,
-        min_length: int = 5000,
-        min_interval: int = 300,
-        hop_size: int = 20,
-        max_sil_kept: int = 5000,
-    ):
-        if not min_length >= min_interval >= hop_size:
-            raise ValueError(
-                "The following condition must be satisfied: min_length >= min_interval >= hop_size"
-            )
-        if not max_sil_kept >= hop_size:
-            raise ValueError(
-                "The following condition must be satisfied: max_sil_kept >= hop_size"
-            )
-        min_interval = sr * min_interval / 1000
-        self.threshold = 10 ** (threshold / 20.0)
-        self.hop_size = round(sr * hop_size / 1000)
-        self.win_size = min(round(min_interval), 4 * self.hop_size)
-        self.min_length = round(sr * min_length / 1000 / self.hop_size)
-        self.min_interval = round(min_interval / self.hop_size)
-        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
-    def _apply_slice(self, waveform, begin, end):
-        if len(waveform.shape) > 1:
-            return waveform[
-                :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
-            ]
-        else:
-            return waveform[
-                begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
-            ]
-
-    # @timeit
-    def slice(self, waveform):
-        if len(waveform.shape) > 1:
-            samples = waveform.mean(axis=0)
-        else:
-            samples = waveform
-        if samples.shape[0] <= self.min_length:
-            return [waveform]
-        rms_list = get_rms(
-            y=samples, frame_length=self.win_size, hop_length=self.hop_size
-        ).squeeze(0)
-        sil_tags = []
-        silence_start = None
-        clip_start = 0
-        for i, rms in enumerate(rms_list):
-            # Keep looping while frame is silent.
-            if rms < self.threshold:
-                # Record start of silent frames.
-                if silence_start is None:
-                    silence_start = i
-                continue
-            # Keep looping while frame is not silent and silence start has not been recorded.
-            if silence_start is None:
-                continue
-            # Clear recorded silence start if interval is not enough or clip is too short
-            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
-            need_slice_middle = (
-                i - silence_start >= self.min_interval
-                and i - clip_start >= self.min_length
-            )
-            if not is_leading_silence and not need_slice_middle:
-                silence_start = None
-                continue
-            # Need slicing. Record the range of silent frames to be removed.
-            if i - silence_start <= self.max_sil_kept:
-                pos = rms_list[silence_start : i + 1].argmin() + silence_start
-                if silence_start == 0:
-                    sil_tags.append((0, pos))
-                else:
-                    sil_tags.append((pos, pos))
-                clip_start = pos
-            elif i - silence_start <= self.max_sil_kept * 2:
-                pos = rms_list[
-                    i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
-                ].argmin()
-                pos += i - self.max_sil_kept
-                pos_l = (
-                    rms_list[
-                        silence_start : silence_start + self.max_sil_kept + 1
-                    ].argmin()
-                    + silence_start
-                )
-                pos_r = (
-                    rms_list[i - self.max_sil_kept : i + 1].argmin()
-                    + i
-                    - self.max_sil_kept
-                )
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                    clip_start = pos_r
-                else:
-                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
-                    clip_start = max(pos_r, pos)
-            else:
-                pos_l = (
-                    rms_list[
-                        silence_start : silence_start + self.max_sil_kept + 1
-                    ].argmin()
-                    + silence_start
-                )
-                pos_r = (
-                    rms_list[i - self.max_sil_kept : i + 1].argmin()
-                    + i
-                    - self.max_sil_kept
-                )
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                else:
-                    sil_tags.append((pos_l, pos_r))
-                clip_start = pos_r
-            silence_start = None
-        # Deal with trailing silence.
-        total_frames = rms_list.shape[0]
-        if (
-            silence_start is not None
-            and total_frames - silence_start >= self.min_interval
-        ):
-            silence_end = min(total_frames, silence_start + self.max_sil_kept)
-            pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
-            sil_tags.append((pos, total_frames + 1))
-        # Apply and return slices.
-        if len(sil_tags) == 0:
-            return [waveform]
-        else:
-            chunks = []
-            if sil_tags[0][0] > 0:
-                chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))
-            for i in range(len(sil_tags) - 1):
-                chunks.append(
-                    self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])
-                )
-            if sil_tags[-1][1] < total_frames:
-                chunks.append(
-                    self._apply_slice(waveform, sil_tags[-1][1], total_frames)
-                )
-            return chunks
-
-
-def main():
-    import os.path
-    from argparse import ArgumentParser
-
-    import librosa
-    import soundfile
-
-    parser = ArgumentParser()
-    parser.add_argument("audio", type=str, help="The audio to be sliced")
-    parser.add_argument(
-        "--out", type=str, help="Output directory of the sliced audio clips"
-    )
-    parser.add_argument(
-        "--db_thresh",
-        type=float,
-        required=False,
-        default=-40,
-        help="The dB threshold for silence detection",
-    )
-    parser.add_argument(
-        "--min_length",
-        type=int,
-        required=False,
-        default=5000,
-        help="The minimum milliseconds required for each sliced audio clip",
-    )
-    parser.add_argument(
-        "--min_interval",
-        type=int,
-        required=False,
-        default=300,
-        help="The minimum milliseconds for a silence part to be sliced",
-    )
-    parser.add_argument(
-        "--hop_size",
-        type=int,
-        required=False,
-        default=10,
-        help="Frame length in milliseconds",
-    )
-    parser.add_argument(
-        "--max_sil_kept",
-        type=int,
-        required=False,
-        default=500,
-        help="The maximum silence length kept around the sliced clip, presented in milliseconds",
-    )
-    args = parser.parse_args()
-    out = args.out
-    if out is None:
-        out = os.path.dirname(os.path.abspath(args.audio))
-    audio, sr = librosa.load(args.audio, sr=None, mono=False)
-    slicer = Slicer(
-        sr=sr,
-        threshold=args.db_thresh,
-        min_length=args.min_length,
-        min_interval=args.min_interval,
-        hop_size=args.hop_size,
-        max_sil_kept=args.max_sil_kept,
-    )
-    chunks = slicer.slice(audio)
-    if not os.path.exists(out):
-        os.makedirs(out)
-    for i, chunk in enumerate(chunks):
-        if len(chunk.shape) > 1:
-            chunk = chunk.T
-        soundfile.write(
-            os.path.join(
-                out,
-                f"%s_%d.wav"
-                % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
-            ),
-            chunk,
-            sr,
-        )
-
-
-if __name__ == "__main__":
-    main()
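For context, a minimal usage sketch of the `Slicer` class above (the file name is a placeholder, and it assumes the module is importable as `slicer2` with `librosa` installed):

```python
import librosa

from slicer2 import Slicer  # the module above; import path is an assumption

# Load at the native sample rate and keep channels, matching main() above.
audio, sr = librosa.load("vocals.wav", sr=None, mono=False)

# A -40 dB threshold maps to 10 ** (-40 / 20) == 0.01 in linear amplitude,
# which is the conversion done in Slicer.__init__.
slicer = Slicer(sr=sr, threshold=-40.0, min_length=5000, min_interval=300)
chunks = slicer.slice(audio)
print(f"{len(chunks)} non-silent chunks")
```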
spaces/Benson/text-generation/Examples/Androide Oyun Apk Bola Roja 4.md
DELETED
@@ -1,60 +0,0 @@
-<br />
-<h1>Red Ball 4: A Fun and Challenging Game for Android</h1>
-<p>If you are looking for a fun and challenging game to play on your Android device, you should try Red Ball 4. This is a platformer that will test your skills and reflexes as you roll, jump, and bounce through 75 levels full of adventure. You will have to make your way through tricky traps and defeat all kinds of monsters that want to turn you into a square. Are you ready to save the world with your red ball?</p>
-<h2>androide oyun apk bola roja 4</h2><br /><p><b><b>Download</b> <a href="https://bltlly.com/2v6IQJ">https://bltlly.com/2v6IQJ</a></b></p><br /><br />
-<h2>Introduction</h2>
-<h3>What is Red Ball 4?</h3>
-<p>Red Ball 4 is an Android game developed by FDG Entertainment GmbH & Co.KG. It is the fourth installment in the popular Red Ball series, which has been downloaded more than 100 million times. The game follows the story of a red ball that has to stop the evil Black Square from turning the world into a cube. Along the way, it runs into many obstacles and enemies that it has to overcome with agility and courage.</p>
-<h3>Why should you play Red Ball 4?</h3>
-<p>Red Ball 4 is a game that will keep you entertained for hours. It has simple but addictive gameplay that anyone can enjoy. You only have to tilt the device or use the on-screen buttons to control the ball's movement. You can also tap to jump and double-tap for a long jump. The game has a variety of levels that will test your skills and your logic. You will have to avoid spikes, lasers, cannons, saws, and other hazards that can damage your ball, and face different kinds of monsters, such as spiders, bats, robots, and even giant bosses. The game has a colorful, cartoonish art style that appeals to both kids and adults, and a catchy, upbeat soundtrack that rounds out the experience.</p>
-<h2>Features of Red Ball 4</h2>
-<h3>75 exciting levels</h3>
-
-<h3>Tricky traps and monsters</h3>
-<p>Red Ball 4 has many traps and monsters that will try to stop you from reaching your goal. You will have to use your skill and timing to avoid or defeat them. Some traps are activated by switches or buttons, while others trigger on your movement or proximity. Some monsters can be killed by jumping on them or hitting them with objects, while others are invincible or require special strategies. You will have to be careful and observant to survive.</p>
-<h3>Epic boss battles</h3>
-<p>Red Ball 4 has four boss battles that will test your skill and patience. You will face the Black Square itself in each episode, as well as its minions. Each boss has a different attack pattern and a weakness you have to exploit. You will have to dodge their attacks and hit them with objects or with your ball. The boss battles are challenging but rewarding.</p>
-<h3>Cloud support</h3>
-<p>Red Ball 4 has cloud support that lets you save your progress online and sync it across multiple devices. You just have to sign in with your Google Play account and enable cloud storage in the settings menu. This way, you can continue your game on any device without losing your data.</p> <h3>HD graphics and sound</h3>
-<p>Red Ball 4 has high-definition graphics and sound that make the game more enjoyable. It has a bright, colorful art style that fits the theme and mood of each episode, and crisp, clear audio that pulls you into the game. You will hear the sound effects of your ball bouncing, rolling, and crashing, as well as the music and the characters' voices. The game has a funny, lighthearted tone that will make you smile.</p>
-<h2>How to download and install the Red Ball 4 APK</h2>
-<h3>Download the APK file from a trusted source</h3>
-
-<p><img src="https://www.qrcode-monkey.com/img/default-preview-qr.svg" alt="QR code for Red Ball 4 APK download" width="200" height="200"></p>
-<p></p>
-<h3>Enable unknown sources on your device</h3>
-<p>Before you can install the Red Ball 4 APK file on your device, you will need to enable unknown sources. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable unknown sources, follow these steps:</p>
-<ol>
-<li>Go to your device's settings menu and tap on security or privacy.</li>
-<li>Find the option that says unknown sources or install unknown apps and toggle it on.</li>
-<li>A warning message will appear, telling you that installing apps from unknown sources may harm your device. Tap on OK or allow to confirm.</li>
-</ol>
-<h3>Install the APK file and launch the game</h3>
-<p>Once you have enabled unknown sources on your device, you can install the Red Ball 4 APK file and launch the game. To do this, follow these steps:</p>
-<ol>
-<li>Locate the Red Ball 4 APK file in your device's storage or download folder. You can also use a file manager app to find it.</li>
-<li>Tap on the APK file and a prompt will appear, asking if you want to install the app. Tap on install and wait for the installation process to finish.</li>
-<li>Once the app is installed, you can tap on open to launch the game, or find it in your app drawer or on your home screen.</li>
-</ol>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-
-<h3>Call to action</h3>
-<p>If you are looking for a fun and challenging game to play on your Android device, you should download and install the Red Ball 4 APK today. You won't regret it. You will have a blast rolling, jumping, and bouncing through 75 levels full of adventure, and you will get the chance to save the world with your red ball. What are you waiting for? Download the Red Ball 4 APK now and enjoy this amazing game!</p>
-<h2>Frequently asked questions</h2>
-<h4>Q: Is Red Ball 4 free to play?</h4>
-<p>A: Yes, Red Ball 4 is free to play. However, it contains ads and in-app purchases that can enhance your gaming experience. You can disable the ads by buying the premium version of the game or by turning off your internet connection while playing.</p>
-<h4>Q: How do I get more stars and trophies in Red Ball 4?</h4>
-<p>A: You can get more stars and trophies in Red Ball 4 by completing each level with a high score and without dying. You can also collect hidden stars and trophies scattered throughout the levels, and use them to unlock bonus levels and achievements.</p>
-<h4>Q: How do I beat the bosses in Red Ball 4?</h4>
-<p>A: You can beat the bosses in Red Ball 4 by learning their attack patterns and finding their weak points. You will have to dodge their attacks and hit them with objects or with your ball, while avoiding falling off the platform or being crushed by the boss. You can use the checkpoints to resume your game if you die.</p>
-<h4>Q: What are the minimum requirements to play Red Ball 4 on Android?</h4>
-<p>A: The minimum requirements to play Red Ball 4 on Android are as follows:</p>
-<ul>
-<li>Android version 4.4 or higher</li>
-<li>At least 100 MB of free storage space</li>
-<li>A stable internet connection (optional)</li>
-</ul>
-<h4>Q: How can I contact the developer of Red Ball 4?</h4> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Apk Adresi Gta 5.md
DELETED
@@ -1,79 +0,0 @@
-
-<h1>Apk Adresi GTA 5: How to Play Grand Theft Auto 5 on Your Android Device</h1>
-<p>Grand Theft Auto 5, or GTA 5, is one of the most popular and acclaimed video games of all time. Released in 2013 by Rockstar Games, GTA 5 is an open-world action-adventure game that lets you explore the city of Los Santos and its surroundings as one of three playable characters. You can complete various missions, pull off heists, drive and steal vehicles, interact with NPCs, and cause chaos in the streets.</p>
-<h2>apk adresi gta 5</h2><br /><p><b><b>Download File</b> »»» <a href="https://bltlly.com/2v6ILM">https://bltlly.com/2v6ILM</a></b></p><br /><br />
-<p>However, GTA 5 is not officially available for Android devices, as Rockstar Games has not released a mobile version of the game. But that does not mean you cannot play GTA 5 on your Android device. Thanks to a group of dedicated fans, you can download and install a fan-made adaptation of GTA 5 for Android called Apk Adresi GTA 5.</p>
-<p>In this article, we will tell you everything you need to know about Apk Adresi GTA 5, including what it is, how to download and install it, how to play it, and some tips and tricks for enjoying it. Let's get started!</p>
-<h2>What is Apk Adresi GTA 5?</h2>
-<h3>A fan-made adaptation of GTA 5 for Android</h3>
-<p>Apk Adresi GTA 5 is a fan-made adaptation of GTA 5 for Android devices. It is not an official Rockstar Games product, but a project created by a group of enthusiasts who wanted to bring GTA 5 to mobile platforms. Apk Adresi GTA 5 is based on the PC version of GTA 5, but it has been modified and optimized to run on Android devices.</p>
-<h3>The features and limitations of Apk Adresi GTA 5</h3>
-<p>Apk Adresi GTA 5 aims to replicate the experience of playing GTA 5 on PC as closely as possible. It has the same graphics, sound, story, characters, missions, vehicles, weapons, and activities as the original game. You can switch between Michael, Trevor, and Franklin at any time, explore the vast open world of Los Santos and Blaine County, and enjoy the thrilling gameplay of GTA 5.</p>
-<p></p>
-
-<h3>The requirements and steps to download the APK and OBB files</h3>
-<p>If you want to play Apk Adresi GTA 5 on your Android device, you will need to download two files: the APK file and the OBB file. The APK file is the application file that contains the game's code and data, while the OBB file is the expansion file that contains the game's graphics and sound.</p>
-<p>Before downloading these files, you should make sure that your Android device meets the minimum requirements to run Apk Adresi GTA 5. These are:</p>
-<ul>
-<li>Android version 4.0 or higher</li>
-<li>At least 4 GB of free storage space</li>
-<li>At least 2 GB of RAM</li>
-<li>A stable internet connection</li>
-</ul>
-<p>Once you have checked these requirements, you can follow these steps to download the APK and OBB files:</p>
-<ol>
-<li>Go to the official Apk Adresi GTA 5 website at <a href="">https://apkadresi.com/gta-5-apk-indir/</a></li>
-<li>Scroll down to the bottom of the page and click the green button that says "GTA 5 APK İndir"</li>
-<li>You will be redirected to another page where you will see a download link for the APK file. Click it and wait for the download to start.</li>
-<li>After downloading the APK file, go back to the previous page and click the green button that says "GTA 5 OBB İndir"</li>
-<li>You will be redirected to another page where you will see a download link for the OBB file. Click it and wait for the download to start.</li>
-<li>After downloading both files, you can proceed to install them on your Android device.</li>
-</ol>
-<h3>The instructions for installing and launching the game</h3>
-<p>After you have downloaded the APK and OBB files, you will need to install them on your Android device. To do this, you will have to enable installing apps from unknown sources on your device. This is a security feature that prevents unauthorized apps from being installed. To enable it, you can follow these steps:</p>
-
-<li>Go to your device's settings and tap on "Security"</li>
-<li>Find the option that says "Unknown sources" and toggle it on</li>
-<li>You may see a warning message saying that installing apps from unknown sources can harm your device. Tap "OK" to confirm.</li>
-</ol>
-<p>Once you have enabled this feature, you can install the APK and OBB files by following these steps:</p>
-<ol>
-<li>Locate the APK file in your device's file manager and tap on it</li>
-<li>You may see a pop-up asking you to grant permissions to the app. Tap "Install" to continue.</li>
-<li>Wait for the installation process to finish. You will see a message that says "App installed". Tap "Open" to launch the game.</li>
-<li>The first time you launch the game, you will have to extract the OBB file to your device's internal storage. To do this, tap "Extract" when prompted and wait for the extraction process to finish.</li>
-<li>After the extraction is done, you will see a message that says "Extraction completed". Tap "OK" to start playing the game.</li>
-</ol> <h2>How to play Apk Adresi GTA 5 on your Android device</h2>
-<h3>The controls and interface of the game</h3>
-<p>Apk Adresi GTA 5 has a control scheme and interface similar to the PC version of GTA 5. You can use the virtual buttons on the screen to move, aim, shoot, jump, sprint, crouch, switch weapons, enter vehicles, and interact with the environment. You can also use the touchscreen to swipe, zoom, and rotate the camera, and you can customize the layout and size of the buttons in the settings menu.</p>
-<p>The game also has a mini-map in the bottom-left corner of the screen that shows your location, objectives, enemies, allies, and points of interest. You can tap the mini-map to enlarge it and see the full map of Los Santos and Blaine County. You can also access your phone, inventory, character wheel, pause menu, and quick-save options by tapping the icons in the top-right corner of the screen.</p>
-
-<p>Apk Adresi GTA 5 has the same gameplay and missions as the PC version of GTA 5. You can play as Michael, Trevor, or Franklin and switch between them at any time. Each character has their own personality, skills, abilities, and story arc. You can complete various missions involving shooting, driving, stealth, heists, chases, and more. You can also explore the open world of Los Santos and Blaine County and take part in activities such as racing, golf, tennis, hunting, yoga, skydiving, and more.</p>
-<p>The game also has a dynamic weather system, a day-night cycle, realistic physics, ragdoll effects, destructible environments, and realistic sound. It is designed to immerse you in the world of GTA 5 and make you feel like you are living in it.</p>
-<h3>Tips and tricks for enjoying the game</h3>
-<p>Apk Adresi GTA 5 is a fun and exciting game that offers a lot of content and possibilities. However, it can also be challenging and frustrating at times. Here are some tips and tricks that can help you enjoy the game more:</p>
-<ul>
-<li>Save your game frequently. The game does not have an auto-save feature, so you will have to save manually in case something goes wrong or you want to try something different.</li>
-<li>Use cover and aim assist. The game can be quite hard if you try to shoot enemies without taking cover or using aim assist. You can take cover by pressing the button in the bottom-right corner of the screen when near an object, and enable aim assist by pressing the button in the bottom-left corner of the screen while aiming.</li>
-
-<li>Watch out for cops and gangs. The game has a wanted-level system that indicates how much attention you have attracted from law enforcement or rival gangs. The higher your wanted level, the more cops or gangsters will chase you and try to kill you. You can lower your wanted level by hiding from them or by changing your appearance.</li>
-<li>Have fun and experiment. The game is meant to be enjoyed and explored. You can do whatever you want as long as you don't get killed or arrested. You can try different strategies, tactics, vehicles, weapons, outfits, and so on. You can also create your own scenarios and stories with the game's tools and features.</li>
-</ul>
-<h2>Conclusion</h2>
-<h3>A summary of the main points and benefits of Apk Adresi GTA 5</h3>
-<p>Apk Adresi GTA 5 is a fan-made adaptation of GTA 5 for Android devices that lets you play one of the best video games ever made on your mobile device. It has the same graphics, precautions and protections. We do not guarantee or take responsibility for the safety or quality of Apk Adresi GTA 5.</p>
-<h4>Is Apk Adresi GTA 5 compatible with all Android devices?</h4>
-<p>Apk Adresi GTA 5 is designed to run on Android devices that meet the game's minimum requirements. These are:</p>
-<ul>
-<li>Android version 4.0 or higher</li>
-<li>At least 4 GB of free storage space</li>
-<li>At least 2 GB of RAM</li>
-<li>A stable internet connection</li>
-</ul>
-<p>However, Apk Adresi GTA 5 may not be compatible with all Android devices or versions, as it is still in a beta phase and has not been tested on every device or version. Some devices or versions may have compatibility problems, such as lag, crashes, bugs, or missing features, so the game may not work properly, or at all, on them. We do not guarantee or take responsibility for the compatibility or performance of Apk Adresi GTA 5.</p>
-<h4>How much storage space does Apk Adresi GTA 5 need?</h4>
-
-<h4>Can I play Apk Adresi GTA 5 online?</h4>
-<p>No, Apk Adresi GTA 5 does not support online multiplayer or cross-play with other platforms. The game is an offline, single-player mode only that lets you play GTA 5 on your Android device. You will not be able to play with other players online or connect with other platforms such as PC, PS4, or Xbox One, and the game has no online features such as leaderboards, achievements, or social clubs.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/BhaskarKapri/Animal/app.py
DELETED
@@ -1,36 +0,0 @@
-from fastai.vision.all import *
-import gradio as gr
-import pathlib
-from contextlib import contextmanager
-import pathlib
-
-@contextmanager
-def set_posix_windows():
-    posix_backup = pathlib.WindowsPath
-    try:
-        pathlib.WindowsPath = pathlib.PosixPath
-        yield
-    finally:
-        pathlib.WindowsPath = posix_backup
-
-EXPORT_PATH = pathlib.Path("model.pkl")
-
-with set_posix_windows():
-    learn = load_learner(EXPORT_PATH)
-
-# learn = load_learner('model.pkl')
-
-
-categories = ['alligator', 'bee', 'camel', 'cat', 'deer', 'dog', 'dolphin', 'elephant', 'giraffe', 'hamster', 'horse', 'kangaroo', 'lion', 'lizard', 'human', 'owl', 'parrot', 'sheep', 'snake', 'tiger', 'turtle', 'wolf']
-def classify_image(img):
-    pred,idx,probs = learn.predict(img)
-    return dict(zip(categories,map(float,probs)))
-
-
-
-image = gr.inputs.Image(shape=(192,192))
-label = gr.outputs.Label()
-examples = ['cat.jpg','camel.jpg','deer.jpg','dog.jpg','giraffe.jpg','owl.jpg']
-
-intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, example=examples)
-intf.launch(inline=False)
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/upload.py
DELETED
@@ -1,17 +0,0 @@
-from distutils import log
-from distutils.command import upload as orig
-
-from setuptools.errors import RemovedCommandError
-
-
-class upload(orig.upload):
-    """Formerly used to upload packages to PyPI."""
-
-    def run(self):
-        msg = (
-            "The upload command has been removed, use twine to upload "
-            + "instead (https://pypi.org/p/twine)"
-        )
-
-        self.announce("ERROR: " + msg, log.ERROR)
-        raise RemovedCommandError(msg)
spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/README.md
DELETED
@@ -1,192 +0,0 @@
-# CLIP
-
-[[Blog]](https://openai.com/blog/clip/) [[Paper]](https://cdn.openai.com/papers/Learning_Transferable_Visual_Models_From_Natural_Language_Supervision.pdf) [[Model Card]](model-card.md) [[Colab]](https://colab.research.google.com/github/openai/clip/blob/master/Interacting_with_CLIP.ipynb)
-
-CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet, given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3. We found CLIP matches the performance of the original ResNet50 on ImageNet "zero-shot" without using any of the original 1.28M labeled examples, overcoming several major challenges in computer vision.
-
-
-
-## Approach
-
-![CLIP](CLIP.png)
-
-
-
-## Usage
-
-First, [install PyTorch 1.7.1](https://pytorch.org/get-started/locally/) and torchvision, as well as small additional dependencies. On a CUDA GPU machine, the following will do the trick:
-
-```bash
-$ conda install --yes -c pytorch pytorch=1.7.1 torchvision cudatoolkit=11.0
-$ pip install ftfy regex tqdm
-```
-
-Replace `cudatoolkit=11.0` above with the appropriate CUDA version on your machine or `cpuonly` when installing on a machine without a GPU.
-
-```python
-import torch
-import clip
-from PIL import Image
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load("ViT-B/32", device=device)
-
-image = preprocess(Image.open("CLIP.png")).unsqueeze(0).to(device)
-text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)
-
-with torch.no_grad():
-    image_features = model.encode_image(image)
-    text_features = model.encode_text(text)
-
-    logits_per_image, logits_per_text = model(image, text)
-    probs = logits_per_image.softmax(dim=-1).cpu().numpy()
-
-print("Label probs:", probs)  # prints: [[0.9927937  0.00421068 0.00299572]]
-```
-
-
-## API
-
-The CLIP module `clip` provides the following methods:
-
-#### `clip.available_models()`
-
-Returns the name(s) of the available CLIP models.
-
-#### `clip.load(name, device=..., jit=True)`
-
-Returns the model and the TorchVision transform needed by the model, specified by the model name returned by `clip.available_models()`. It will download the model as necessary. The device to run the model can be optionally specified, and the default is to use the first CUDA device if there is any, otherwise the CPU.
-
-When `jit` is `False`, a non-JIT version of the model will be loaded.
-
-#### `clip.tokenize(text: Union[str, List[str]], context_length=77)`
-
-Returns a LongTensor containing tokenized sequences of given text input(s). This can be used as the input to the model
-
----
-
-The model returned by `clip.load()` supports the following methods:
-
-#### `model.encode_image(image: Tensor)`
-
-Given a batch of images, returns the image features encoded by the vision portion of the CLIP model.
-
-#### `model.encode_text(text: Tensor)`
-
-Given a batch of text tokens, returns the text features encoded by the language portion of the CLIP model.
-
-#### `model(image: Tensor, text: Tensor)`
-
-Given a batch of images and a batch of text tokens, returns two Tensors, containing the logit scores corresponding to each image and text input. The values are cosine similarities between the corresponding image and text features, times 100.
-
-
-
-## More Examples
-
-### Zero-Shot Prediction
-
-The code below performs zero-shot prediction using CLIP, as shown in Appendix B in the paper. This example takes an image from the [CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), and predicts the most likely labels among the 100 textual labels from the dataset.
-
-```python
-import os
-import clip
-import torch
-from torchvision.datasets import CIFAR100
-
-# Load the model
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load('ViT-B/32', device)
-
-# Download the dataset
-cifar100 = CIFAR100(root=os.path.expanduser("~/.cache"), download=True, train=False)
-
-# Prepare the inputs
-image, class_id = cifar100[3637]
-image_input = preprocess(image).unsqueeze(0).to(device)
-text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in cifar100.classes]).to(device)
-
-# Calculate features
-with torch.no_grad():
-    image_features = model.encode_image(image_input)
-    text_features = model.encode_text(text_inputs)
-
-# Pick the top 5 most similar labels for the image
-image_features /= image_features.norm(dim=-1, keepdim=True)
-text_features /= text_features.norm(dim=-1, keepdim=True)
-similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
-values, indices = similarity[0].topk(5)
-
-# Print the result
-print("\nTop predictions:\n")
-for value, index in zip(values, indices):
-    print(f"{cifar100.classes[index]:>16s}: {100 * value.item():.2f}%")
-```
-
-The output will look like the following (the exact numbers may be slightly different depending on the compute device):
-
-```
-Top predictions:
-
-           snake: 65.31%
-          turtle: 12.29%
-    sweet_pepper: 3.83%
-          lizard: 1.88%
-       crocodile: 1.75%
-```
-
-Note that this example uses the `encode_image()` and `encode_text()` methods that return the encoded features of given inputs.
-
-
-### Linear-probe evaluation
-
-The example below uses [scikit-learn](https://scikit-learn.org/) to perform logistic regression on image features.
-
-```python
-import os
-import clip
-import torch
-
-import numpy as np
-from sklearn.linear_model import LogisticRegression
-from torch.utils.data import DataLoader
-from torchvision.datasets import CIFAR100
-from tqdm import tqdm
-
-# Load the model
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load('ViT-B/32', device)
-
-# Load the dataset
-root = os.path.expanduser("~/.cache")
-train = CIFAR100(root, download=True, train=True, transform=preprocess)
-test = CIFAR100(root, download=True, train=False, transform=preprocess)
-
-
-def get_features(dataset):
-    all_features = []
-    all_labels = []
-
-    with torch.no_grad():
-        for images, labels in tqdm(DataLoader(dataset, batch_size=100)):
-            features = model.encode_image(images.to(device))
-
-            all_features.append(features)
-            all_labels.append(labels)
-
-    return torch.cat(all_features).cpu().numpy(), torch.cat(all_labels).cpu().numpy()
-
-# Calculate the image features
-train_features, train_labels = get_features(train)
-test_features, test_labels = get_features(test)
-
-# Perform logistic regression
-classifier = LogisticRegression(random_state=0, C=0.316, max_iter=1000, verbose=1)
-classifier.fit(train_features, train_labels)
-
-# Evaluate using the logistic regression classifier
-predictions = classifier.predict(test_features)
-accuracy = np.mean((test_labels == predictions).astype(np.float)) * 100.
-print(f"Accuracy = {accuracy:.3f}")
-```
-
-Note that the `C` value should be determined via a hyperparameter sweep using a validation split.
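As a hedged aside on the API section above: the logits returned by `model(image, text)` can be approximated by hand from the two `encode_*` calls, since the README describes them as scaled cosine similarities. A sketch only, reusing `model`, `image`, and `text` from the Usage example (the fixed factor of 100 is an assumption; the released models actually learn a logit scale that is approximately 100):

```python
import torch

with torch.no_grad():
    img_f = model.encode_image(image)
    txt_f = model.encode_text(text)

# Normalize to unit length, then take dot products: cosine similarity
# scaled by ~100 should roughly match logits_per_image above.
img_f = img_f / img_f.norm(dim=-1, keepdim=True)
txt_f = txt_f / txt_f.norm(dim=-1, keepdim=True)
logits = 100.0 * img_f @ txt_f.T
```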
spaces/BreetheRun/mitchtech-vulcan-diffusion/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/mitchtech/vulcan-diffusion").launch()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp
DELETED
@@ -1,46 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-#include "box_iou_rotated.h"
-#include "box_iou_rotated_utils.h"
-
-namespace detectron2 {
-
-template <typename T>
-void box_iou_rotated_cpu_kernel(
-    const at::Tensor& boxes1,
-    const at::Tensor& boxes2,
-    at::Tensor& ious) {
-  auto widths1 = boxes1.select(1, 2).contiguous();
-  auto heights1 = boxes1.select(1, 3).contiguous();
-  auto widths2 = boxes2.select(1, 2).contiguous();
-  auto heights2 = boxes2.select(1, 3).contiguous();
-
-  at::Tensor areas1 = widths1 * heights1;
-  at::Tensor areas2 = widths2 * heights2;
-
-  auto num_boxes1 = boxes1.size(0);
-  auto num_boxes2 = boxes2.size(0);
-
-  for (int i = 0; i < num_boxes1; i++) {
-    for (int j = 0; j < num_boxes2; j++) {
-      ious[i * num_boxes2 + j] = single_box_iou_rotated<T>(
-          boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>());
-    }
-  }
-}
-
-at::Tensor box_iou_rotated_cpu(
-    const at::Tensor& boxes1,
-    const at::Tensor& boxes2) {
-  auto num_boxes1 = boxes1.size(0);
-  auto num_boxes2 = boxes2.size(0);
-  at::Tensor ious =
-      at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
-
-  box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious);
-
-  // reshape from 1d array to 2d array
-  auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
-  return ious.reshape(shape);
-}
-
-} // namespace detectron2
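A small Python sketch of the buffer layout used by the kernel above, to show why the final `reshape` is safe: it writes `ious[i * num_boxes2 + j]`, i.e. row-major order, so the flat buffer reshapes losslessly into an (N1, N2) matrix. Here `iou_fn` is a hypothetical stand-in for `single_box_iou_rotated`:

```python
import numpy as np

def pairwise_table(boxes1, boxes2, iou_fn):
    n1, n2 = len(boxes1), len(boxes2)
    ious = np.empty(n1 * n2, dtype=np.float32)
    for i in range(n1):
        for j in range(n2):
            # Same flat index the C++ kernel uses: row-major (i, j).
            ious[i * n2 + j] = iou_fn(boxes1[i], boxes2[j])
    # A row-major flat buffer reshapes to an (n1, n2) matrix in place.
    return ious.reshape(n1, n2)
```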
spaces/CVPR/LIVE/pybind11/pybind11/__init__.py
DELETED
@@ -1,13 +0,0 @@
-# -*- coding: utf-8 -*-
-from ._version import version_info, __version__  # noqa: F401 imported but unused
-
-
-def get_include(user=False):
-    import os
-    d = os.path.dirname(__file__)
-    if os.path.exists(os.path.join(d, "include")):
-        # Package is installed
-        return os.path.join(d, "include")
-    else:
-        # Package is from a source directory
-        return os.path.join(os.path.dirname(d), "include")
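`get_include()` above is typically consumed from a `setup.py` when building a pybind11 extension. A minimal sketch of that pattern (the module and source file names are placeholders):

```python
from setuptools import Extension, setup

import pybind11

ext = Extension(
    "example",                               # placeholder module name
    sources=["example.cpp"],                 # placeholder source file
    include_dirs=[pybind11.get_include()],   # the function defined above
    language="c++",
)
setup(name="example", ext_modules=[ext])
```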
spaces/CVPR/LIVE/pybind11/tools/clang/enumerations.py
DELETED
@@ -1,34 +0,0 @@
-#===- enumerations.py - Python Enumerations ------------------*- python -*--===#
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-"""
-Clang Enumerations
-==================
-
-This module provides static definitions of enumerations that exist in libclang.
-
-Enumerations are typically defined as a list of tuples. The exported values are
-typically munged into other types or classes at module load time.
-
-All enumerations are centrally defined in this file so they are all grouped
-together and easier to audit. And, maybe even one day this file will be
-automatically generated by scanning the libclang headers!
-"""
-
-# Maps to CXTokenKind. Note that libclang maintains a separate set of token
-# enumerations from the C++ API.
-TokenKinds = [
-    ('PUNCTUATION', 0),
-    ('KEYWORD', 1),
-    ('IDENTIFIER', 2),
-    ('LITERAL', 3),
-    ('COMMENT', 4),
-]
-
-__all__ = ['TokenKinds']
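The docstring's "munged into other types or classes at module load time" looks roughly like this on the consumer side. This is a sketch of the pattern only, not the actual cindex.py code:

```python
class TokenKind:
    """Named constant produced from a (name, id) tuple in TokenKinds."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        return f"TokenKind.{self.name}"


# At module load time, turn each tuple into a class attribute, yielding
# TokenKind.PUNCTUATION, TokenKind.KEYWORD, and so on.
for name, value in TokenKinds:
    setattr(TokenKind, name, TokenKind(name, value))
```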
spaces/CVPR/LIVE/thrust/thrust/iterator/transform_input_output_iterator.h
DELETED
@@ -1,163 +0,0 @@
-/*
- *  Copyright 2020 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*! \file thrust/iterator/transform_input_output_iterator.h
- *  \brief An iterator which adapts another iterator by applying transform
- *         functions when reading and writing dereferenced values.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/iterator/detail/transform_input_output_iterator.inl>
-
-namespace thrust
-{
-
-/*! \addtogroup iterators
- *  \{
- */
-
-/*! \addtogroup fancyiterator Fancy Iterators
- *  \ingroup iterators
- *  \{
- */
-
-/*! \p transform_input_output_iterator is a special kind of iterator which applies
- *  transform functions when reading from or writing to dereferenced values.
- *  This iterator is useful for algorithms that operate on a type that needs to
- *  be serialized/deserialized from values in another iterator, avoiding the
- *  need to materialize intermediate results in memory. This also enables the
- *  transform functions to be fused with the operations that read and write to
- *  the `transform_input_output_iterator`.
- *
- *  The following code snippet demonstrates how to create a
- *  \p transform_input_output_iterator which performs different transformations when
- *  reading from and writing to the iterator.
- *
- *  \code
- *  #include <thrust/iterator/transform_input_output_iterator.h>
- *  #include <thrust/device_vector.h>
- *
- *  int main()
- *  {
- *    const size_t size = 4;
- *    thrust::device_vector<float> v(size);
- *
- *    // Write 1.0f, 2.0f, 3.0f, 4.0f to vector
- *    thrust::sequence(v.begin(), v.end(), 1);
- *
- *    // Iterator that returns negated values and writes squared values
- *    auto iter = thrust::make_transform_input_output_iterator(v.begin(),
- *        thrust::negate<float>{}, thrust::square<float>{});
- *
- *    // Iterator negates values when reading
- *    std::cout << iter[0] << " ";  // -1.0f;
- *    std::cout << iter[1] << " ";  // -2.0f;
- *    std::cout << iter[2] << " ";  // -3.0f;
- *    std::cout << iter[3] << "\n"; // -4.0f;
- *
- *    // Write 1.0f, 2.0f, 3.0f, 4.0f to iterator
- *    thrust::sequence(iter, iter + size, 1);
- *
- *    // Values were squared before writing to vector
- *    std::cout << v[0] << " ";  // 1.0f;
- *    std::cout << v[1] << " ";  // 4.0f;
- *    std::cout << v[2] << " ";  // 9.0f;
- *    std::cout << v[3] << "\n"; // 16.0f;
- *
- *  }
- *  \endcode
- *
- *  \see make_transform_input_output_iterator
- */
-
-template <typename InputFunction, typename OutputFunction, typename Iterator>
-class transform_input_output_iterator
-  : public detail::transform_input_output_iterator_base<InputFunction, OutputFunction, Iterator>::type
-{
-
-  /*! \cond
-   */
-
-public:
-
-  typedef typename
-  detail::transform_input_output_iterator_base<InputFunction, OutputFunction, Iterator>::type
-  super_t;
-
-  friend class thrust::iterator_core_access;
-  /*! \endcond
-   */
-
-  /*! This constructor takes as argument a \c Iterator an \c InputFunction and an
-   *  \c OutputFunction and copies them to a new \p transform_input_output_iterator
-   *
-   *  \param io An \c Iterator pointing to where the input to \c InputFunction
-   *            will be read from and the result of \c OutputFunction will be written to
-   *  \param input_function An \c InputFunction to be executed on values read from the iterator
-   *  \param output_function An \c OutputFunction to be executed on values written to the iterator
-   */
-  __host__ __device__
-  transform_input_output_iterator(Iterator const& io, InputFunction input_function, OutputFunction output_function)
-    : super_t(io), input_function(input_function), output_function(output_function)
-  {
-  }
-
-  /*! \cond
-   */
-private:
-
-  __host__ __device__
-  typename super_t::reference dereference() const
-  {
-    return detail::transform_input_output_iterator_proxy<
-      InputFunction, OutputFunction, Iterator
-    >(this->base_reference(), input_function, output_function);
-  }
-
-  InputFunction input_function;
-  OutputFunction output_function;
-
-  /*! \endcond
-   */
-}; // end transform_input_output_iterator
-
-/*! \p make_transform_input_output_iterator creates a \p transform_input_output_iterator from
- *  an \c Iterator a \c InputFunction and a \c OutputFunction
- *
- *  \param io An \c Iterator pointing to where the input to \c InputFunction
- *            will be read from and the result of \c OutputFunction will be written to
- *  \param input_function An \c InputFunction to be executed on values read from the iterator
- *  \param output_function An \c OutputFunction to be executed on values written to the iterator
- *  \see transform_input_output_iterator
- */
-template <typename InputFunction, typename OutputFunction, typename Iterator>
-transform_input_output_iterator<InputFunction, OutputFunction, Iterator>
-__host__ __device__
-make_transform_input_output_iterator(Iterator io, InputFunction input_function, OutputFunction output_function)
-{
-    return transform_input_output_iterator<InputFunction, OutputFunction, Iterator>(io, input_function, output_function);
-} // end make_transform_input_output_iterator
-
-/*! \} // end fancyiterators
- */
-
-/*! \} // end iterators
- */
-
-} // end thrust
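For readers more at home in Python, the read/write behavior documented above maps onto a tiny wrapper with `__getitem__`/`__setitem__`. A sketch of the concept only, not a Thrust API:

```python
class TransformInputOutputView:
    """Applies input_fn when reading and output_fn when writing through."""

    def __init__(self, data, input_fn, output_fn):
        self.data, self.input_fn, self.output_fn = data, input_fn, output_fn

    def __getitem__(self, i):
        return self.input_fn(self.data[i])    # transform on read

    def __setitem__(self, i, value):
        self.data[i] = self.output_fn(value)  # transform on write


v = [1.0, 2.0, 3.0, 4.0]
view = TransformInputOutputView(v, lambda x: -x, lambda x: x * x)
print(view[0])   # -1.0: negated on read
view[0] = 3.0
print(v[0])      # 9.0: squared on write
```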
spaces/CVPR/LIVE/thrust/thrust/shuffle.h
DELETED
@@ -1,179 +0,0 @@
-/*
- *  Copyright 2008-2020 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*! \file shuffle.h
- *  \brief Reorders range by a uniform random permutation
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/cpp11_required.h>
-
-#if THRUST_CPP_DIALECT >= 2011
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-
-namespace thrust {
-
-/*! \addtogroup reordering
- *  \ingroup algorithms
- *
- *  \addtogroup shuffling
- *  \ingroup reordering
- *  \{
- */
-
-
-/*! \p shuffle reorders the elements <tt>[first, last)</tt> by a uniform pseudorandom permutation, defined by
- *  random engine \p g.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The beginning of the sequence to shuffle.
- *  \param last The end of the sequence to shuffle.
- *  \param g A UniformRandomBitGenerator
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam RandomIterator is a random access iterator
- *  \tparam URBG is a uniform random bit generator
- *
- *  The following code snippet demonstrates how to use \p shuffle to create a random permutation
- *  using the \p thrust::host execution policy for parallelization:
- *
- *  \code
- *  #include <thrust/shuffle.h>
- *  #include <thrust/random.h>
- *  #include <thrust/execution_policy.h>
- *  int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
- *  const int N = sizeof(A)/sizeof(int);
- *  thrust::default_random_engine g;
- *  thrust::shuffle(thrust::host, A, A + N, g);
- *  // A is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9}
- *  \endcode
- *
- *  \see \p shuffle_copy
- */
-template <typename DerivedPolicy, typename RandomIterator, typename URBG>
-__host__ __device__ void shuffle(
-    const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
-    RandomIterator first, RandomIterator last, URBG&& g);
-
-/*! \p shuffle reorders the elements <tt>[first, last)</tt> by a uniform pseudorandom permutation, defined by
- *  random engine \p g.
- *
- *  \param first The beginning of the sequence to shuffle.
- *  \param last The end of the sequence to shuffle.
- *  \param g A UniformRandomBitGenerator
- *
- *  \tparam RandomIterator is a random access iterator
- *  \tparam URBG is a uniform random bit generator
- *
- *  The following code snippet demonstrates how to use \p shuffle to create a random permutation.
- *
- *  \code
- *  #include <thrust/shuffle.h>
- *  #include <thrust/random.h>
- *  int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
- *  const int N = sizeof(A)/sizeof(int);
- *  thrust::default_random_engine g;
- *  thrust::shuffle(A, A + N, g);
- *  // A is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9}
- *  \endcode
- *
- *  \see \p shuffle_copy
- */
-template <typename RandomIterator, typename URBG>
-__host__ __device__ void shuffle(RandomIterator first, RandomIterator last,
-                                 URBG&& g);
-
-/*! shuffle_copy differs from shuffle only in that the reordered sequence is written to different output sequences, rather than in place.
- *  \p shuffle_copy reorders the elements <tt>[first, last)</tt> by a uniform pseudorandom permutation, defined by
- *  random engine \p g.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first The beginning of the sequence to shuffle.
- *  \param last The end of the sequence to shuffle.
- *  \param result Destination of shuffled sequence
- *  \param g A UniformRandomBitGenerator
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam RandomIterator is a random access iterator
- *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
- *  \tparam URBG is a uniform random bit generator
- *
- *  The following code snippet demonstrates how to use \p shuffle_copy to create a random permutation.
- *
- *  \code
- *  #include <thrust/shuffle.h>
- *  #include <thrust/random.h>
- *  #include <thrust/execution_policy.h>
- *  int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
- *  int result[10];
- *  const int N = sizeof(A)/sizeof(int);
- *  thrust::default_random_engine g;
- *  thrust::shuffle_copy(thrust::host, A, A + N, result, g);
- *  // result is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9}
- *  \endcode
- *
- *  \see \p shuffle
- */
-template <typename DerivedPolicy, typename RandomIterator,
-          typename OutputIterator, typename URBG>
-__host__ __device__ void shuffle_copy(
-    const thrust::detail::execution_policy_base<DerivedPolicy>& exec,
-    RandomIterator first, RandomIterator last, OutputIterator result, URBG&& g);
-
-/*! shuffle_copy differs from shuffle only in that the reordered sequence is written to different output sequences, rather than in place.
- *  \p shuffle_copy reorders the elements <tt>[first, last)</tt> by a uniform pseudorandom permutation, defined by
- *  random engine \p g.
- *
- *  \param first The beginning of the sequence to shuffle.
- *  \param last The end of the sequence to shuffle.
- *  \param result Destination of shuffled sequence
- *  \param g A UniformRandomBitGenerator
- *
- *  \tparam RandomIterator is a random access iterator
- *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
- *  \tparam URBG is a uniform random bit generator
- *
- *  The following code snippet demonstrates how to use \p shuffle_copy to create a random permutation.
- *
- *  \code
- *  #include <thrust/shuffle.h>
- *  #include <thrust/random.h>
- *  int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
|
163 |
-
* int result[10];
|
164 |
-
* const int N = sizeof(A)/sizeof(int);
|
165 |
-
* thrust::default_random_engine g;
|
166 |
-
* thrust::shuffle_copy(A, A + N, result, g);
|
167 |
-
* // result is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9}
|
168 |
-
* \endcode
|
169 |
-
*
|
170 |
-
* \see \p shuffle
|
171 |
-
*/
|
172 |
-
template <typename RandomIterator, typename OutputIterator, typename URBG>
|
173 |
-
__host__ __device__ void shuffle_copy(RandomIterator first, RandomIterator last,
|
174 |
-
OutputIterator result, URBG&& g);
|
175 |
-
|
176 |
-
} // namespace thrust
|
177 |
-
|
178 |
-
#include <thrust/detail/shuffle.inl>
|
179 |
-
#endif
|
spaces/CVPR/MonoScene/monoscene/flosp.py
DELETED
@@ -1,41 +0,0 @@
import torch
import torch.nn as nn


class FLoSP(nn.Module):
    """Features Line of Sight Projection (FLoSP) from MonoScene: lifts a 2D
    feature map to a 3D voxel grid by gathering, for each voxel, the feature
    of the pixel that the voxel projects onto."""

    def __init__(self, scene_size, dataset, project_scale):
        super().__init__()
        self.scene_size = scene_size
        self.dataset = dataset
        self.project_scale = project_scale

    def forward(self, x2d, projected_pix, fov_mask):
        c, h, w = x2d.shape

        # Flatten the feature map and append a zero column; voxels that fall
        # outside the camera frustum are routed to this sentinel column.
        src = x2d.view(c, -1)
        zeros_vec = torch.zeros(c, 1).type_as(src)
        src = torch.cat([src, zeros_vec], 1)

        # Convert each voxel's projected (x, y) pixel to a flat image index.
        pix_x, pix_y = projected_pix[:, 0], projected_pix[:, 1]
        img_indices = pix_y * w + pix_x
        img_indices[~fov_mask] = h * w  # out-of-view voxels -> zero feature
        img_indices = img_indices.expand(c, -1).long()  # c, HWD
        src_feature = torch.gather(src, 1, img_indices)

        # Reshape the gathered features into the (downscaled) scene volume;
        # NYU and KITTI store their scene axes in different orders.
        if self.dataset == "NYU":
            x3d = src_feature.reshape(
                c,
                self.scene_size[0] // self.project_scale,
                self.scene_size[2] // self.project_scale,
                self.scene_size[1] // self.project_scale,
            )
            x3d = x3d.permute(0, 1, 3, 2)
        elif self.dataset == "kitti":
            x3d = src_feature.reshape(
                c,
                self.scene_size[0] // self.project_scale,
                self.scene_size[1] // self.project_scale,
                self.scene_size[2] // self.project_scale,
            )

        return x3d
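As a quick illustration of the projection above, here is a minimal, self-contained sketch. Everything in it is invented for the example: the scene size, the 1/4 project_scale, and the random projected_pix and fov_mask tensors stand in for the values MonoScene's data pipeline would normally supply.

import torch

# Hypothetical sizes: an 8-channel 4x4 feature map lifted into a scene
# of assumed size (60, 60, 36) at 1/4 scale.
scene_size = (60, 60, 36)
project_scale = 4
flosp = FLoSP(scene_size, dataset="NYU", project_scale=project_scale)

c, h, w = 8, 4, 4
n_vox = (60 // 4) * (60 // 4) * (36 // 4)  # voxels at the projection scale

x2d = torch.randn(c, h, w)
projected_pix = torch.randint(0, 4, (n_vox, 2))  # fake per-voxel (x, y) pixels
fov_mask = torch.rand(n_vox) > 0.3               # fake in-frustum mask

x3d = flosp(x2d, projected_pix, fov_mask)
print(x3d.shape)  # torch.Size([8, 15, 15, 9]) after the NYU axis permutation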
spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/maskiou_head.py
DELETED
@@ -1,186 +0,0 @@
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import Conv2d, Linear, MaxPool2d, kaiming_init, normal_init
from mmcv.runner import force_fp32
from torch.nn.modules.utils import _pair

from mmdet.models.builder import HEADS, build_loss


@HEADS.register_module()
class MaskIoUHead(nn.Module):
    """Mask IoU Head.

    This head predicts the IoU of predicted masks and corresponding gt masks.
    """

    def __init__(self,
                 num_convs=4,
                 num_fcs=2,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 num_classes=80,
                 loss_iou=dict(type='MSELoss', loss_weight=0.5)):
        super(MaskIoUHead, self).__init__()
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.num_classes = num_classes
        self.fp16_enabled = False

        self.convs = nn.ModuleList()
        for i in range(num_convs):
            if i == 0:
                # concatenation of mask feature and mask prediction
                in_channels = self.in_channels + 1
            else:
                in_channels = self.conv_out_channels
            stride = 2 if i == num_convs - 1 else 1
            self.convs.append(
                Conv2d(
                    in_channels,
                    self.conv_out_channels,
                    3,
                    stride=stride,
                    padding=1))

        roi_feat_size = _pair(roi_feat_size)
        pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
        self.fcs = nn.ModuleList()
        for i in range(num_fcs):
            in_channels = (
                self.conv_out_channels *
                pooled_area if i == 0 else self.fc_out_channels)
            self.fcs.append(Linear(in_channels, self.fc_out_channels))

        self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
        self.relu = nn.ReLU()
        self.max_pool = MaxPool2d(2, 2)
        self.loss_iou = build_loss(loss_iou)

    def init_weights(self):
        for conv in self.convs:
            kaiming_init(conv)
        for fc in self.fcs:
            kaiming_init(
                fc,
                a=1,
                mode='fan_in',
                nonlinearity='leaky_relu',
                distribution='uniform')
        normal_init(self.fc_mask_iou, std=0.01)

    def forward(self, mask_feat, mask_pred):
        mask_pred = mask_pred.sigmoid()
        mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))

        x = torch.cat((mask_feat, mask_pred_pooled), 1)

        for conv in self.convs:
            x = self.relu(conv(x))
        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        mask_iou = self.fc_mask_iou(x)
        return mask_iou

    @force_fp32(apply_to=('mask_iou_pred', ))
    def loss(self, mask_iou_pred, mask_iou_targets):
        pos_inds = mask_iou_targets > 0
        if pos_inds.sum() > 0:
            loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
                                          mask_iou_targets[pos_inds])
        else:
            loss_mask_iou = mask_iou_pred.sum() * 0
        return dict(loss_mask_iou=loss_mask_iou)

    @force_fp32(apply_to=('mask_pred', ))
    def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
                    rcnn_train_cfg):
        """Compute target of mask IoU.

        The mask IoU target is the IoU of the predicted mask (inside a bbox)
        and the gt mask of the corresponding instance (the whole instance).
        The intersection area is computed inside the bbox. The gt mask area
        is computed in two steps: first we compute the gt area inside the
        bbox, then divide it by the ratio of the gt area inside the bbox to
        the gt area of the whole instance.

        Args:
            sampling_results (list[:obj:`SamplingResult`]): sampling results.
            gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
                of each image, with the same shape as the input image.
            mask_pred (Tensor): Predicted masks of each positive proposal,
                shape (num_pos, h, w).
            mask_targets (Tensor): Gt mask of each positive proposal,
                binary map of the shape (num_pos, h, w).
            rcnn_train_cfg (dict): Training config for R-CNN part.

        Returns:
            Tensor: mask iou target (length == num positive).
        """
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]

        # compute the area ratio of gt areas inside the proposals and
        # the whole instance
        area_ratios = map(self._get_area_ratio, pos_proposals,
                          pos_assigned_gt_inds, gt_masks)
        area_ratios = torch.cat(list(area_ratios))
        assert mask_targets.size(0) == area_ratios.size(0)

        mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
        mask_pred_areas = mask_pred.sum((-1, -2))

        # mask_pred and mask_targets are binary maps
        overlap_areas = (mask_pred * mask_targets).sum((-1, -2))

        # compute the mask area of the whole instance
        gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)

        mask_iou_targets = overlap_areas / (
            mask_pred_areas + gt_full_areas - overlap_areas)
        return mask_iou_targets

    def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
        """Compute the ratio of the gt mask area inside the proposal to the
        gt mask area of the whole instance."""
        num_pos = pos_proposals.size(0)
        if num_pos > 0:
            area_ratios = []
            proposals_np = pos_proposals.cpu().numpy()
            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
            # compute mask areas of gt instances (batch processing for speedup)
            gt_instance_mask_area = gt_masks.areas
            for i in range(num_pos):
                gt_mask = gt_masks[pos_assigned_gt_inds[i]]

                # crop the gt mask inside the proposal
                bbox = proposals_np[i, :].astype(np.int32)
                gt_mask_in_proposal = gt_mask.crop(bbox)

                ratio = gt_mask_in_proposal.areas[0] / (
                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
                area_ratios.append(ratio)
            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
                pos_proposals.device)
        else:
            area_ratios = pos_proposals.new_zeros((0, ))
        return area_ratios

    @force_fp32(apply_to=('mask_iou_pred', ))
    def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
        """Get the mask scores.

        mask_score = bbox_score * mask_iou
        """
        inds = range(det_labels.size(0))
        mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
        mask_scores = mask_scores.cpu().numpy()
        det_labels = det_labels.cpu().numpy()
        return [mask_scores[det_labels == i] for i in range(self.num_classes)]
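The target definition in get_targets is easier to follow with numbers. Below is a minimal numeric sketch of that computation for a single positive proposal; the 4x4 masks, the 0.5 binarization threshold, and the 0.8 area ratio are all invented for illustration.

import torch

# One positive proposal with hand-made 4x4 masks (purely illustrative).
mask_pred_prob = torch.tensor([[[0.9, 0.8, 0.1, 0.0],
                                [0.7, 0.9, 0.2, 0.0],
                                [0.1, 0.2, 0.1, 0.0],
                                [0.0, 0.0, 0.0, 0.0]]])
mask_target = torch.tensor([[[1., 1., 0., 0.],
                             [1., 1., 0., 0.],
                             [0., 0., 0., 0.],
                             [0., 0., 0., 0.]]])

# Suppose 80% of the instance's full gt mask falls inside the proposal box.
area_ratio = torch.tensor([0.8])

mask_pred = (mask_pred_prob > 0.5).float()         # binarize (assumed threshold)
pred_area = mask_pred.sum((-1, -2))                # 4 pixels
overlap = (mask_pred * mask_target).sum((-1, -2))  # 4 pixels
gt_full_area = mask_target.sum((-1, -2)) / (area_ratio + 1e-7)  # 4 / 0.8 = 5

iou_target = overlap / (pred_area + gt_full_area - overlap)
print(iou_target)  # tensor([0.8000]): IoU against the whole-instance mask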