parquet-converter commited on
Commit
a985852
·
1 Parent(s): 3bf624d

Update parquet files (step 71 of 397)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1368565466ki/Satdia/text/symbols.py +0 -39
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Alpha Test Psicologia.pdf.md +0 -110
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar A Complete Course on DSP with Solutions and Projects.md +0 -175
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Left 4 Dead on Xbox One and Experience the Thrill of Co-op Survival.md +0 -25
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Enter Password For The Encrypted File Setup Mudbox 2013 Crack NEW.md +0 -180
  6. spaces/1gistliPinn/ChatGPT4/Examples/Breakaway Broadcast Processor Asio.0.90.95 39.md +0 -40
  7. spaces/1gistliPinn/ChatGPT4/Examples/Dhoom 2 Hd Video Songs 1080p Torrent.md +0 -19
  8. spaces/1gistliPinn/ChatGPT4/Examples/Elven Love Crack Full Version Download LINK.md +0 -14
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cheto 8 Ball Pool Mod APK Play with AutoPlay Long Line and Prediction Features.md +0 -127
  10. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Brawl Stars APKure and Enjoy Epic 3v3 Battles.md +0 -137
  11. spaces/1phancelerku/anime-remove-background/Dream Home Design and Makeover How to Unlock All Features with MOD APK.md +0 -94
  12. spaces/1phancelerku/anime-remove-background/FIFA Soccer Mod Menu APK - Get Unlimited Money and Unlock All Features in FIFA Mobile.md +0 -98
  13. spaces/232labs/VToonify/vtoonify/model/stylegan/dataset.py +0 -40
  14. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_537238KB.py +0 -126
  15. spaces/A00001/bingothoo/src/components/learn-more.tsx +0 -39
  16. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/latent_diffusion/ddpm.py +0 -441
  17. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/base_model.py +0 -500
  18. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/models/diffusion/plms.py +0 -236
  19. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan.py +0 -730
  20. spaces/AP123/dreamgaussian/mesh.py +0 -394
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/Factory.d.ts +0 -5
  22. spaces/Aki004/herta-so-vits/vdecoder/hifigan/utils.py +0 -68
  23. spaces/AlexMason/anime-remove-background/README.md +0 -14
  24. spaces/AlgoveraAI/dcgan-crypto-punks/app.py +0 -150
  25. spaces/Ali-C137/Motivation-Letter-Generator/app.py +0 -24
  26. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/loop/utils.py +0 -11
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_discrete_ancestral.md +0 -22
  28. spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/run.sh +0 -10
  29. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/gfl.py +0 -16
  30. spaces/Anew1007/extras/tts_edge.py +0 -34
  31. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/README.md +0 -5
  32. spaces/Arnx/MusicGenXvAKN/audiocraft/data/__init__.py +0 -8
  33. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/bertwarper.py +0 -273
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/pretty.py +0 -994
  35. spaces/Audio-AGI/AudioSep/data/datamodules.py +0 -122
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/README.md +0 -6
  37. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/demo/README.md +0 -8
  38. spaces/Benson/text-generation/Examples/Apkgstore X.md +0 -92
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/core.py +0 -400
  40. spaces/BilalSardar/AutoML-Model-Training/README.md +0 -13
  41. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/uninitialized_copy.h +0 -22
  42. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/mismatch.h +0 -44
  43. spaces/CVPR/WALT/mmdet/core/utils/misc.py +0 -61
  44. spaces/CVPR/WALT/mmdet/models/roi_heads/shared_heads/res_layer.py +0 -77
  45. spaces/CVPR/lama-example/bin/mask_example.py +0 -14
  46. spaces/CarlDennis/Lovelive-VITS-JPZH/text/sanskrit.py +0 -62
  47. spaces/CikeyQI/meme-api/meme_generator/memes/fencing/__init__.py +0 -37
  48. spaces/ClipHamper/stable-diffusion-webui/README.md +0 -12
  49. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/image_list.py +0 -72
  50. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/utils.py +0 -424
spaces/1368565466ki/Satdia/text/symbols.py DELETED
@@ -1,39 +0,0 @@
1
- '''
2
- Defines the set of symbols used in text input to the model.
3
- '''
4
-
5
- '''# japanese_cleaners
6
- _pad = '_'
7
- _punctuation = ',.!?-'
8
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
9
- '''
10
-
11
- '''# japanese_cleaners2
12
- _pad = '_'
13
- _punctuation = ',.!?-~…'
14
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
15
- '''
16
-
17
- '''# korean_cleaners
18
- _pad = '_'
19
- _punctuation = ',.!?…~'
20
- _letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
21
- '''
22
-
23
- '''# chinese_cleaners
24
- _pad = '_'
25
- _punctuation = ',。!?—…'
26
- _letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
27
- '''
28
-
29
- # zh_ja_mixture_cleaners
30
- _pad = '_'
31
- _punctuation = ',.!?-~…'
32
- _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
33
-
34
-
35
- # Export all symbols:
36
- symbols = [_pad] + list(_punctuation) + list(_letters)
37
-
38
- # Special symbol ids
39
- SPACE_ID = symbols.index(" ")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Alpha Test Psicologia.pdf.md DELETED
@@ -1,110 +0,0 @@
1
- <br />
2
- <code>
3
- <h1>Alpha Test Psicologia: What You Need to Know</h1>
4
- <p>If you want to study psychology in Italy, you will need to pass a test called Alpha Test Psicologia. This test is designed to assess your skills and knowledge in various areas related to psychology, such as logic, reasoning, reading comprehension, language, science and culture. In this article, we will explain what Alpha Test Psicologia is, how to prepare for it, and how to use some of the best resources available to help you succeed.</p>
5
- <h2>Alpha Test Psicologia.pdf</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://byltly.com/2uKyJf">https://byltly.com/2uKyJf</a></b></p><br /><br />
6
- <h2>What is Alpha Test Psicologia?</h2>
7
- <p>Alpha Test Psicologia is a standardized test that is required for admission to most psychology courses in Italian universities. The test consists of 60 multiple-choice questions that cover four main sections: reasoning skills, text comprehension, Italian language skills, and knowledge and competencies acquired in school. Depending on the university you apply to, you may have to take either the TOLC-SU (Test On Line Scienze Umane) or the TOLC-PSI version of the test. The TOLC-SU has 10 questions on reasoning skills, 15 on text comprehension, 15 on Italian language skills, and 20 on knowledge and competencies. The TOLC-PSI has 15 questions on text comprehension, 10 on basic math, 10 on biology, 10 on verbal reasoning, and 15 on numerical reasoning.</p>
8
- <p>The test is administered by CISIA (Consorzio Interuniversitario Sistemi Integrati per l'Accesso) and can be taken online or at a university center. You can choose from several sessions throughout the year, starting from February until autumn. You can also take the test at home (TOLC@casa) if you have a suitable device and internet connection. The test lasts 100 minutes and has a maximum score of 60 points. Each correct answer gives you one point, each wrong answer subtracts 0.25 points, and each unanswered question gives you zero points.</p>
9
- <h2>How to Prepare for Alpha Test Psicologia?</h2>
10
- <p>Preparing for Alpha Test Psicologia can be challenging but rewarding. Here are some tips and strategies that can help you improve your performance:</p>
11
- <ul>
12
- <li>Start studying early. Don't wait until the last minute to review your topics and practice your skills. Give yourself enough time to learn new concepts, revise old ones, and identify your strengths and weaknesses.</li>
13
- <li>Plan your study schedule. Set realistic goals and deadlines for yourself. Divide your topics into manageable chunks and allocate enough time for each one. Use a calendar or a planner to keep track of your progress.</li>
14
- <li>Use a variety of resources. Don't rely on just one source of information or practice. Use different materials that suit your learning style and preferences. For example, you can use books, online courses, platforms, videos, podcasts, articles, etc.</li>
15
- <li>Practice with real tests. One of the best ways to prepare for Alpha Test Psicologia is to take simulated tests that mimic the format and content of the real test. This will help you familiarize yourself with the types of questions, time limits, scoring system, etc.</li>
16
- <li>Get feedback. After taking a practice test, review your answers carefully. Analyze your mistakes and understand why you got them wrong. Look for patterns or gaps in your knowledge or skills. Seek help from others if you need it.</li>
17
- </ul>
18
- <h2>How to Use Alpha Test Psicologia Books?</h2>
19
- <p>One of the most popular and effective resources for preparing for Alpha Test Psicologia are the books published by Alpha Test. These books are specifically designed to help you master all the topics and skills required by the test. They include:</p>
20
- <ul>
21
- <li>The manual: This book provides a comprehensive overview of all the subjects covered by the test. It explains the concepts clearly and concisely with examples and diagrams. It also includes tips and tricks for solving different types of questions.</li>
22
- <li>The exercise book: This book contains hundreds of exercises that allow you to practice your skills in each section of the test. It also includes detailed solutions that show you how to approach each problem step by step.</li>
23
- <li>The verification tests: This book contains 10 full-length tests that simulate the real test in terms of structure and difficulty. It also includes answers keys that let you check your score.</li>
24
- </ul>
25
- <p>Here are some examples of questions from these books:</p>
26
- <table>
27
- <tr><td><b>Manual</b></td><td><b>Exercise book</b></td><td><b>Verification tests</b></td></tr>
28
- <tr><td>In un gruppo di persone si è verificato il seguente fenomeno: più aumentava il numero dei partecipanti alla discussione e più diminuiva la qualità delle decisioni prese dal gruppo stesso.<br>A quale fenomeno psicosociale si fa riferimento?<br>A) Conformismo<br>B) Polarizzazione<br>C) Pensiero di gruppo<br>D) Coesione<br>E) Leadership<br><br>Risposta: C) Pensiero di gruppo</td><td>Nel seguente brano sono presenti alcuni errori ortografici.<br>"La psiche umana è un mistero che da secoli affascina filosofi e scienziati.<br>Molti sono stati i tentativi di spiegarne il funzionamento e le dinamiche.<br>Tra i più noti vi sono quelli di Freud e Jung che hanno dato vita alla psicanalisi."<br>Individua gli errori e correggili.<br>A) da secoli - da secoli<br>B) spiegarne - spiegarne<br>C) vi sono - ci sono<br>D) Freud - freud<br>E) Jung - iung<br><br>Risposta: C) vi sono - ci sono; D) Freud - freud</td><td>Qual è il sinonimo della parola "suscettibile"?<br>A) Sensibile<br>B) Irritabile<br>C) Suscettivo<br>D) Suscinto<br>E) Suscitabile<br><br>Risposta: A) Sensibile</td></tr>
29
- </table>
30
- <h2>How to Use Alpha Test Psicologia Online Courses?</h2>
31
- <p>Another great resource for preparing for Alpha Test Psicologia are the online courses offered by Alpha Test. These courses are intensive and specific programs that guide you through all the aspects of the test preparation process. They include:</p>
32
- <p>Alpha Test Psicologia TOLC-PSI - Manuale di preparazione[^1^]<br />
33
- Alpha Test Psicologia TOLC-PSI - Esercizi commentati<br />
34
- Alpha Test Psicologia TOLC-PSI - Prove di verifica<br />
35
- Alpha Test Psicologia TOLC-PSI - 4100 quiz<br />
36
- Alpha Test Cultura generale - 5100 quiz<br />
37
- Alpha Test PLUS Psicologia TOLC-PSI - Kit completo di preparazione con training online personalizzato<br />
38
- Alpha Test Psicologia TOLC-SU - Manuale di preparazione<br />
39
- Alpha Test Psicologia TOLC-SU - Esercizi commentati<br />
40
- Alpha Test Psicologia TOLC-SU - Prove di verifica<br />
41
- Alpha Test Psicologia TOLC-SU - 4100 quiz<br />
42
- Alpha Test Cultura generale - 5100 quiz per Psicologia TOLC-SU<br />
43
- Alpha Test PLUS Psicologia TOLC-SU - Kit completo di preparazione con training online personalizzato<br />
44
- Alpha Test Psicologia IMAT - Manuale di preparazione<br />
45
- Alpha Test Psicologia IMAT - Esercizi commentati<br />
46
- Alpha Test Psicologia IMAT - Prove di verifica<br />
47
- Alpha Test Psicologia IMAT - 4100 quiz<br />
48
- Alpha Test Cultura generale - 5100 quiz per Psicologia IMAT<br />
49
- Alpha Test PLUS Psicologia IMAT - Kit completo di preparazione con training online personalizzato<br />
50
- Alpha Test Psicologia Professioni sanitarie - Manuale di preparazione<br />
51
- Alpha Test Psicologia Professioni sanitarie - Esercizi commentati<br />
52
- Alpha Test Psicologia Professioni sanitarie - Prove di verifica<br />
53
- Alpha Test Psicologia Professioni sanitarie - 4100 quiz<br />
54
- Alpha Test Cultura generale - 5100 quiz per Psicologia Professioni sanitarie<br />
55
- Alpha Test PLUS Psicologia Professioni sanitarie - Kit completo di preparazione con training online personalizzato<br />
56
- Alpha Test Psicologia Bocconi - Manuale di preparazione<br />
57
- Alpha Test Psicologia Bocconi - Esercizi commentati<br />
58
- Alpha Test Psicologia Bocconi - Prove di verifica<br />
59
- Alpha Test Psicologia Bocconi - 4100 quiz<br />
60
- Alpha Test Cultura generale - 5100 quiz per Psicologia Bocconi<br />
61
- Alpha Test PLUS Psicologia Bocconi - Kit completo di preparazione con training online personalizzato<br />
62
- Alpha Test Psicologia Luiss - Manuale di preparazione<br />
63
- Alpha Test Psicologia Luiss - Esercizi commentati<br />
64
- Alpha Test Psicologia Luiss - Prove di verifica<br />
65
- Alpha Test Psicologia Luiss - 4100 quiz<br />
66
- Alpha Test Cultura generale - 5100 quiz per Psicologia Luiss<br />
67
- Alpha Test PLUS Psicologia Luiss - Kit completo di preparazione con training online personalizzato<br />
68
- Alpha Test Orientamento per la scelta della facoltà di psicologia <br />
69
- Alpha Test Orientamento per la scelta dell'università di psicologia <br />
70
- Alpha Test Orientamento per la scelta del corso di laurea in psicologia <br />
71
- Alpha Test Orientamento per la scelta della specializzazione in psicologia <br />
72
- Alpha Test Orientamento per la scelta della professione in psicologia <br />
73
- Come superare il test d'ammissione a psicologia con il metodo alpha test <br />
74
- Come studiare per il test d'ammissione a psicologia con il metodo alpha test <br />
75
- Come prepararsi al test d'ammissione a psicologia con il metodo alpha test <br />
76
- Come affrontare il test d'ammissione a psicologia con il metodo alpha test</p>
77
- <ul>
78
- <li>The on-demand lessons: These are recorded videos that explain all the topics covered by the test in a clear and engaging way. You can watch them whenever you want at your own pace.</li>
79
- <li>The live streams: These are interactive sessions that allow you to ask questions directly to the Alpha Test teachers. You can also participate in quizzes, polls, games, etc.</li>
80
- <li>The exercises: These are online activities that let you practice your skills with immediate feedback. You can also compare your results with other students.</li>
81
- <code>
82
- <h2>How to Use Alpha Test Psicologia Platforms?</h2>
83
- <p>In addition to the books and the online courses, Alpha Test also provides you with two platforms that can help you study and practice more effectively. They are:</p>
84
- <ul>
85
- <li>AlphaTestAcademy: This is a personalized platform that follows you step by step until the day of the test. It offers you a customized study plan based on your level and goals. It also gives you access to thousands of questions, videos, summaries, tips, etc.</li>
86
- <li>MyDesk: This is a virtual desk that allows you to organize and manage your materials and activities. You can create notes, bookmarks, highlights, etc. You can also track your progress and performance with graphs and statistics.</li>
87
- </ul>
88
- <p>Here are some examples of how these platforms work:</p>
89
- <table>
90
- <tr><td><b>AlphaTestAcademy</b></td><td><b>MyDesk</b></td></tr>
91
- <tr><td><img src="https://www.alphatest.it/images/academy/academy-1.jpg" alt="AlphaTestAcademy screenshot"></td><td><img src="https://www.alphatest.it/images/mydesk/mydesk-1.jpg" alt="MyDesk screenshot"></td></tr>
92
- <tr><td>This is the dashboard where you can see your study plan and your progress.</td><td>This is the library where you can access all your books and materials.</td></tr>
93
- <tr><td><img src="https://www.alphatest.it/images/academy/academy-2.jpg" alt="AlphaTestAcademy screenshot"></td><td><img src="https://www.alphatest.it/images/mydesk/mydesk-2.jpg" alt="MyDesk screenshot"></td></tr>
94
- <tr><td>This is the question bank where you can practice with different types of questions.</td><td>This is the note editor where you can create and edit your notes.</td></tr>
95
- <tr><td><img src="https://www.alphatest.it/images/academy/academy-3.jpg" alt="AlphaTestAcademy screenshot"></td><td><img src="https://www.alphatest.it/images/mydesk/mydesk-3.jpg" alt="MyDesk screenshot"></td></tr>
96
- <tr><td>This is the video library where you can watch and review the lessons.</td><td>This is the performance report where you can see your results and feedback.</td></tr>
97
- </table>
98
- <h2>Conclusion</h2>
99
- <p>Alpha Test Psicologia is a crucial test for anyone who wants to pursue a career in psychology in Italy. It requires a lot of preparation and practice, but it can also be a rewarding and enjoyable experience. By using the resources we have described in this article, such as books, online courses and platforms, you can improve your chances of success and achieve your goals. Don't hesitate to start your journey today and join the Alpha Test community!</p>
100
- <h3>Frequently Asked Questions</h3>
101
- <ul>
102
- <li>Q: How can I register for Alpha Test Psicologia?<br>A: You can register for Alpha Test Psicologia on the CISIA website (https://www.cisiaonline.it/). You will need to create an account, choose your preferred session and university, and pay a fee of 30 euros.</li>
103
- <li>Q: How can I get the Alpha Test Psicologia books?<br>A: You can get the Alpha Test Psicologia books on the Alpha Test website (https://www.alphatest.it/Libri-Alpha-Test/Test-Ammissione/psicologia/psicologia) or on Amazon. You can also find them in bookstores or libraries.</li>
104
- <li>Q: How can I access the Alpha Test Psicologia online courses?<br>A: You can access the Alpha Test Psicologia online courses on the Alpha Test website (https://www.alphatest.it/Corsi/psicologia). You will need to choose your preferred course and pay a fee that varies depending on the duration and features of the course.</li>
105
- <li>Q: How can I use the Alpha Test Psicologia platforms?<br>A: You can use the Alpha Test Psicologia platforms by logging in with your credentials on the Alpha Test website (https://www.alphatest.it/). You will need to have purchased or received a code for accessing either AlphaTestAcademy or MyDesk.</li>
106
- <li>Q: How can I contact Alpha Test for more information?<br>A: You can contact Alpha Test by phone (+39 02 7601 8368), email ([email protected]), or social media (Facebook, Instagram, YouTube).</li>
107
- </ul>
108
- </p> 0a6ba089eb<br />
109
- <br />
110
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar A Complete Course on DSP with Solutions and Projects.md DELETED
@@ -1,175 +0,0 @@
1
-
2
- <h1>Digital Signal Processing by Ramesh Babu 4th Edition PDF Free Download Rar</h1>
3
- <p>If you are looking for a comprehensive and easy-to-understand book on digital signal processing, you might want to check out Digital Signal Processing by Ramesh Babu 4th Edition. This book covers all the essential topics in digital signal processing, such as discrete-time signals and systems, z-transforms, discrete Fourier transforms, fast Fourier transforms, digital filters, and more. It also includes MATLAB programs and applications that illustrate the concepts and techniques discussed in the book.</p>
4
- <p>In this article, we will give you an overview of what digital signal processing is, why it is important, and who Ramesh Babu is. We will also tell you how you can download the book for free in PDF format from a RAR file. So, let's get started!</p>
5
- <h2>Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar</h2><br /><p><b><b>Download</b> ->>> <a href="https://byltly.com/2uKyja">https://byltly.com/2uKyja</a></b></p><br /><br />
6
- <h2>Introduction</h2>
7
- <h3>What is digital signal processing?</h3>
8
- <p>Digital signal processing (DSP) is the branch of engineering that deals with the analysis, manipulation, and synthesis of signals using digital techniques. A signal is any physical quantity that varies with time, space, or any other parameter. For example, sound, image, video, temperature, pressure, etc. are all signals.</p>
9
- <p>DSP involves converting analog signals into digital signals using devices called analog-to-digital converters (ADCs), processing them using mathematical algorithms and operations, and converting them back into analog signals using devices called digital-to-analog converters (DACs). DSP can be used for various purposes, such as filtering, compression, encryption, modulation, demodulation, detection, estimation, classification, etc.</p>
10
- <h3>Why is digital signal processing important?</h3>
11
- <p>DSP is important because it enables us to perform complex and sophisticated tasks on signals that would be impossible or impractical using analog techniques. For example, DSP can help us:</p>
12
- <ul>
13
- <li>Reduce noise and interference in signals</li>
14
- <li>Enhance the quality and resolution of signals</li>
15
- <li>Extract useful information from signals</li>
16
- <li>Compress signals to save storage space and bandwidth</li>
17
- <li>Encrypt signals to ensure security and privacy</li>
18
- <li>Modulate and demodulate signals to transmit and receive them over different channels</li>
19
- <li>Detect and estimate parameters of signals</li>
20
- <li>Classify signals into different categories</li>
21
- </ul>
22
- <p>DSP has applications in various fields and industries, such as telecommunications, multimedia, biomedical engineering, radar, sonar, speech recognition, image processing, computer vision, machine learning, etc.</p>
23
- <h3>Who is Ramesh Babu?</h3>
24
- <p>Ramesh Babu is a professor of electronics and communication engineering at Pondicherry Engineering College in India. He has over 30 years of teaching experience and has authored several books on DSP and related subjects. He has also published many research papers in national and international journals and conferences. He is a member of various professional bodies such as IEEE, IETE, ISTE, etc.</p>
25
- <p>Ramesh Babu is known for his clear and concise style of writing and explaining complex concepts in a simple manner. He uses numerous examples and exercises to reinforce the understanding of the readers. He also provides MATLAB programs and applications that demonstrate the practical aspects of DSP.</p>
26
- <p>Download DSP by Ramesh Babu 4th edition pdf for free<br />
27
- How to get Digital Signal Processing by Ramesh Babu 4th edition ebook<br />
28
- Ramesh Babu DSP 4th edition pdf free download link<br />
29
- Digital Signal Processing by Ramesh Babu 4th edition solutions manual pdf<br />
30
- DSP by Ramesh Babu 4th edition pdf online<br />
31
- Digital Signal Processing by Ramesh Babu 4th edition rar file download<br />
32
- Ramesh Babu DSP 4th edition pdf google drive<br />
33
- Digital Signal Processing by Ramesh Babu 4th edition book review<br />
34
- DSP by Ramesh Babu 4th edition pdf torrent<br />
35
- Digital Signal Processing by Ramesh Babu 4th edition lecture notes pdf<br />
36
- Ramesh Babu DSP 4th edition pdf flipkart<br />
37
- Digital Signal Processing by Ramesh Babu 4th edition mcq pdf<br />
38
- DSP by Ramesh Babu 4th edition pdf scribd<br />
39
- Digital Signal Processing by Ramesh Babu 4th edition projects pdf<br />
40
- Ramesh Babu DSP 4th edition pdf slideshare<br />
41
- Digital Signal Processing by Ramesh Babu 4th edition examples pdf<br />
42
- DSP by Ramesh Babu 4th edition pdf quora<br />
43
- Digital Signal Processing by Ramesh Babu 4th edition objective questions pdf<br />
44
- Ramesh Babu DSP 4th edition pdf reddit<br />
45
- Digital Signal Processing by Ramesh Babu 4th edition syllabus pdf<br />
46
- DSP by Ramesh Babu 4th edition pdf goodreads<br />
47
- Digital Signal Processing by Ramesh Babu 4th edition matlab code pdf<br />
48
- Ramesh Babu DSP 4th edition pdf github<br />
49
- Digital Signal Processing by Ramesh Babu 4th edition summary pdf<br />
50
- DSP by Ramesh Babu 4th edition pdf medium<br />
51
- Digital Signal Processing by Ramesh Babu 4th edition interview questions pdf<br />
52
- Ramesh Babu DSP 4th edition pdf youtube<br />
53
- Digital Signal Processing by Ramesh Babu 4th edition case studies pdf<br />
54
- DSP by Ramesh Babu 4th edition pdf stackoverflow<br />
55
- Digital Signal Processing by Ramesh Babu 4th edition research papers pdf<br />
56
- Ramesh Babu DSP 4th edition pdf kopykitab<br />
57
- Digital Signal Processing by Ramesh Babu 4th edition reference books pdf<br />
58
- DSP by Ramesh Babu 4th edition pdf amazon<br />
59
- Digital Signal Processing by Ramesh Babu 4th edition applications pdf<br />
60
- Ramesh Babu DSP 4th edition pdf zlibrary<br />
61
- Digital Signal Processing by Ramesh Babu 4th edition concepts pdf<br />
62
- DSP by Ramesh Babu 4th edition pdf libgen<br />
63
- Digital Signal Processing by Ramesh Babu 4th edition algorithms pdf<br />
64
- Ramesh Babu DSP 4th edition pdf b-ok.cc<br />
65
- Digital Signal Processing by Ramesh Babu 4th edition exercises pdf<br />
66
- DSP by Ramesh Babu 4th edition pdf academia.edu<br />
67
- Digital Signal Processing by Ramesh Babu 4th edition formulas pdf<br />
68
- Ramesh Babu DSP 4th edition pdf archive.org<br />
69
- Digital Signal Processing by Ramesh Babu 4th edition diagrams pdf<br />
70
- DSP by Ramesh Babu 4th edition pdf coursehero.com<br />
71
- Digital Signal Processing by Ramesh Babu 4th edition problems and solutions pdf<br />
72
- Ramesh Babu DSP 4th edition pdf worldcat.org</p>
73
- <h2>Features of the book</h2>
74
- <h3>Comprehensive coverage of topics</h3>
75
- and then proceeds to cover discrete-time signals and systems, z-transforms, discrete Fourier transforms, fast Fourier transforms, digital filters, and more. It also includes topics such as finite word length effects, multirate signal processing, wavelet transforms, and adaptive filters. The book provides a balanced treatment of both theory and practice, with an emphasis on applications and problem-solving.</p>
76
- <h3>Clear and concise explanations</h3>
77
- <p>The book explains the concepts and techniques of DSP in a clear and concise manner, using simple language and notation. The book avoids unnecessary mathematical derivations and proofs, and focuses on the intuition and understanding of the readers. The book also uses diagrams, tables, graphs, and figures to illustrate the points and enhance the readability of the text.</p>
78
- <h3>Numerous examples and exercises</h3>
79
- <p>The book provides numerous examples and exercises throughout the chapters to help the readers apply the concepts and techniques learned in the book. The examples are carefully chosen to cover a wide range of applications and scenarios, such as speech processing, image processing, biomedical signal processing, etc. The exercises are designed to test the comprehension and analytical skills of the readers, as well as to provide them with additional practice and feedback. The book also provides solutions to selected exercises at the end of the book.</p>
80
- <h3>MATLAB programs and applications</h3>
81
- <p>The book includes MATLAB programs and applications that complement the topics covered in the book. MATLAB is a popular software tool for numerical computation and visualization that is widely used by engineers and scientists. The book provides MATLAB codes for various DSP algorithms and operations, such as convolution, correlation, DFT, FFT, FIR filters, IIR filters, etc. The book also provides MATLAB applications that demonstrate the use of DSP in real-world problems, such as audio processing, image processing, speech processing, etc. The book also provides instructions on how to run the MATLAB programs and applications on a computer.</p>
82
- <h2>How to download the book for free?</h2>
83
- <h3>Steps to download the book in PDF format</h3>
84
- <p>If you want to download the book for free in PDF format, you can follow these steps:</p>
85
- <ol>
86
- <li>Go to this link: https://www.pdfdrive.com/digital-signal-processing-by-ramesh-babu-4th-edition-e158775719.html</li>
87
- <li>Click on the green button that says "Download (PDF)"</li>
88
- <li>Wait for a few seconds until the download starts automatically</li>
89
- <li>Save the file on your computer or device</li>
90
- </ol>
91
- <p>Congratulations! You have successfully downloaded the book in PDF format.</p>
92
- <h3>Steps to extract the book from RAR file</h3>
93
- <p>If you have downloaded the book in RAR file format, you will need to extract it before you can read it. A RAR file is a compressed file that contains one or more files inside it. To extract the book from RAR file format, you can follow these steps:</p>
94
- <ol>
95
- <li>Download and install a software program that can open RAR files, such as WinRAR or 7-Zip</li>
96
- <li>Right-click on the RAR file that contains the book</li>
97
- <li>Select "Extract Here" or "Extract to" from the menu</li>
98
- <li>Wait for a few seconds until the extraction is complete</li>
99
- <li>Open the folder that contains the extracted files</li>
100
- <li>Double-click on the file that has the extension ".pdf"</li>
101
- </ol>
102
- <p>Congratulations! You have successfully extracted the book from RAR file format.</p>
103
- <h3>Tips to avoid viruses and malware</h3>
104
- <p>While downloading any file from the internet, you should be careful about viruses and malware that can harm your computer or device. Here are some tips to avoid viruses and malware:</p>
105
- <ul>
106
- <li>Use a reliable antivirus software program on your computer or device</li>
107
- <li>Scan any file that you download before opening it</li>
108
- <li>Avoid clicking on suspicious links or pop-ups that claim to offer free downloads or prizes</li>
109
- <li>Avoid downloading files from unknown or untrusted sources or websites</li>
110
- <li>Check the file size and extension before downloading it</li>
111
- <li>Delete any file that looks suspicious or corrupted after downloading it</li>
112
- </ul>
113
- <p>By following these tips, you can protect your computer or device from viruses and malware.</p>
114
- <h2>Conclusion</h2>
115
- <h3>Summary of the main points</h3>
116
- why it is important, and who Ramesh Babu is. We have also told you how you can download the book for free in PDF format from a RAR file. We have also discussed the features of the book, such as comprehensive coverage of topics, clear and concise explanations, numerous examples and exercises, and MATLAB programs and applications.</p>
117
- <h3>Benefits of reading the book</h3>
118
- <p>By reading the book, you can learn the fundamentals and applications of DSP in a simple and effective way. You can also enhance your skills and knowledge in DSP and related subjects. You can also use the book as a reference or a guide for your academic or professional projects. The book can help you to:</p>
119
- <ul>
120
- <li>Understand the concepts and techniques of DSP</li>
121
- <li>Solve problems and challenges in DSP</li>
122
- <li>Implement DSP algorithms and operations using MATLAB</li>
123
- <li>Explore various applications of DSP in real-world scenarios</li>
124
- <li>Prepare for exams and interviews on DSP</li>
125
- </ul>
126
- <h3>Call to action</h3>
127
- <p>If you are interested in learning more about DSP and want to download the book for free, you can click on the link below and follow the steps that we have mentioned in this article. You can also share this article with your friends and colleagues who might find it useful. We hope you enjoy reading the book and learning from it. Happy reading!</p>
128
- <p><a href="https://www.pdfdrive.com/digital-signal-processing-by-ramesh-babu-4th-edition-e158775719.html">Download Digital Signal Processing by Ramesh Babu 4th Edition PDF Free Download Rar</a></p>
129
- <h2>Frequently Asked Questions</h2>
130
- <h3>What is the difference between analog and digital signals?</h3>
131
- <p>An analog signal is a continuous signal that can have any value within a range. A digital signal is a discrete signal that can have only two values: 0 or 1.</p>
132
- <h3>What are the advantages of digital signals over analog signals?</h3>
133
- <p>Digital signals have several advantages over analog signals, such as:</p>
134
- <ul>
135
- <li>They are easier to store, transmit, and process</li>
136
- <li>They are more resistant to noise and distortion</li>
137
- <li>They can be encrypted and compressed more efficiently</li>
138
- <li>They can be manipulated using mathematical operations</li>
139
- <li>They can carry more information per unit time</li>
140
- </ul>
141
- <h3>What are some examples of digital signals?</h3>
142
- <p>Some examples of digital signals are:</p>
143
- <ul>
144
- <li>Binary numbers</li>
145
- <li>Morse code</li>
146
- <li>Bar codes</li>
147
- <li>Digital audio</li>
148
- <li>Digital video</li>
149
- <li>Digital images</li>
150
- </ul>
151
- <h3>What are some applications of digital signal processing?</h3>
152
- <p>Some applications of digital signal processing are:</p>
153
- <ul>
154
- <li>Telecommunications: Modulation, demodulation, encoding, decoding, multiplexing, etc.</li>
155
- <li>Multimedia: Audio processing, image processing, video processing, compression, encryption, etc.</li>
156
- <li>Biomedical engineering: Electrocardiography, electroencephalography, magnetic resonance imaging, ultrasound imaging, etc.</li>
157
- <li>Radar: Signal detection, estimation, tracking, filtering, etc.</li>
158
- <li>Sonar: Echo location, underwater communication, navigation, etc.</li>
159
- <li>Speech recognition: Feature extraction, classification, synthesis, etc.</li>
160
- <li>Image processing: Edge detection, segmentation, enhancement, restoration, etc.</li>
161
- <li>Computer vision: Face recognition, object recognition, scene analysis, etc.</li>
162
- <li>Machine learning: Neural networks, deep learning, pattern recognition, etc.</li>
163
- </ul>
164
- <h3>What are some challenges or limitations of digital signal processing?</h3>
165
- <p>Some challenges or limitations of digital signal processing are:</p>
166
- <ul>
167
- <li>The need for high-speed and high-precision hardware devices such as ADCs and DACs</li>
168
- <li>The trade-off between accuracy and complexity of DSP algorithms and operations</li>
169
- <li>The difficulty of handling non-linear or chaotic signals</li>
170
- <li>The ethical and social issues related to privacy and security of digital signals</li>
171
- <li>The lack of standardization and interoperability of DSP systems and formats</li>
172
- </ul>
173
- </p> 0a6ba089eb<br />
174
- <br />
175
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Left 4 Dead on Xbox One and Experience the Thrill of Co-op Survival.md DELETED
@@ -1,25 +0,0 @@
1
- <br />
2
- <h1>How to Download Left 4 Dead on Xbox One</h1>
3
- <p>Left 4 Dead is a classic zombie shooter game that was released in 2008 for Xbox 360 and PC. It features four survivors who have to fight their way through hordes of infected creatures in various scenarios. The game is known for its cooperative gameplay, dynamic AI, and replay value.</p>
4
- <p>If you want to play Left 4 Dead on your Xbox One, you might be wondering how to download it. The good news is that the game is backward compatible, which means you can play it on your Xbox One without any problems. Here are the steps you need to follow to download Left 4 Dead on Xbox One.</p>
5
- <h2>left 4 dead download xbox one</h2><br /><p><b><b>Download Zip</b> &#9733; <a href="https://byltly.com/2uKvBR">https://byltly.com/2uKvBR</a></b></p><br /><br />
6
- <ol>
7
- <li>First, you need to have a digital copy of Left 4 Dead on your Xbox account. If you don't have one, you can buy it from the Xbox Store for $19.99. Alternatively, you can use a physical disc of Left 4 Dead if you have one.</li>
8
- <li>Next, you need to sign in to your Xbox One with the same account that has Left 4 Dead. If you are using a disc, insert it into your console.</li>
9
- <li>Then, go to My Games & Apps and find Left 4 Dead in the Ready to Install section. If you are using a disc, the game should start installing automatically.</li>
10
- <li>Finally, wait for the installation to finish and launch the game from My Games & Apps or the Home screen. Enjoy playing Left 4 Dead on your Xbox One!</li>
11
- </ol>
12
- <p>Left 4 Dead is a fun and thrilling game that you can play with your friends online or offline. It has four campaigns that offer different challenges and environments. You can also customize your game settings and difficulty level to suit your preferences. If you are looking for a zombie shooter game that will keep you on the edge of your seat, Left 4 Dead is a great choice.</p>
13
-
14
- <h2>Left 4 Dead Tips and Tricks</h2>
15
- <p>Left 4 Dead is a game that requires teamwork, strategy, and skill. Here are some tips and tricks that can help you survive the zombie apocalypse.</p>
16
- <ul>
17
- <li>Communicate with your teammates. Use voice chat or text chat to coordinate your actions and warn each other of dangers. You can also use the in-game commands to point out items, enemies, or directions.</li>
18
- <li>Stick together. Don't wander off alone or stray too far from your group. You are more vulnerable to attacks and less likely to get help if you are separated.</li>
19
- <li>Use your weapons wisely. Each weapon has its advantages and disadvantages. For example, shotguns are powerful at close range but have limited ammo and reload time. Rifles are accurate at long range but have low damage and recoil. Pistols are unlimited but weak and slow. Melee weapons are silent but risky. Choose the weapon that suits your playstyle and situation.</li>
20
- <li>Conserve your resources. Ammo, health kits, pills, and grenades are scarce and valuable. Don't waste them on unnecessary shots or healing. Save them for emergencies or critical moments.</li>
21
- <li>Know your enemies. There are different types of infected creatures in Left 4 Dead, each with their own abilities and weaknesses. For example, Boomers can vomit on you and attract more zombies, but they explode when killed. Hunters can pounce on you and pin you down, but they are vulnerable to melee attacks. Tanks can smash you with their fists, but they are slow and loud. Learn how to identify, avoid, or counter each enemy type.</li>
22
- </ul>
23
- <p>Left 4 Dead is a game that will test your skills and nerves. It is also a game that will reward you with fun and satisfaction. If you follow these tips and tricks, you will have a better chance of surviving the zombie apocalypse.</p> ddb901b051<br />
24
- <br />
25
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Enter Password For The Encrypted File Setup Mudbox 2013 Crack NEW.md DELETED
@@ -1,180 +0,0 @@
1
-
2
- <h1>Enter Password For The Encrypted File Setup Mudbox 2013 Crack</h1>
3
- <p>Have you ever downloaded a file that requires a password to open it? If so, you might have encountered a situation where you don't know the password or you forgot it. This can be very frustrating, especially if the file contains something important or valuable. In this article, we will show you how to find the password for the encrypted file setup Mudbox 2013 crack, a popular software for 3D sculpting and painting.</p>
4
- <h2>What is Mudbox 2013 and why do you need it?</h2>
5
- <p>Mudbox 2013 is a software application developed by Autodesk that allows you to create realistic and detailed 3D models using digital sculpting and painting tools. You can use Mudbox 2013 to design characters, environments, props, and other assets for games, movies, animations, and other media. You can also use Mudbox 2013 to edit and refine existing 3D models imported from other software such as Maya, 3ds Max, or ZBrush.</p>
6
- <h2>Enter Password For The Encrypted File Setup Mudbox 2013 Crack</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://byltly.com/2uKzhX">https://byltly.com/2uKzhX</a></b></p><br /><br />
7
- <h3>Mudbox 2013 features and benefits</h3>
8
- <p>Some of the features and benefits of using Mudbox 2013 are:</p>
9
- <ul>
10
- <li>It has a user-friendly interface that lets you work intuitively and efficiently.</li>
11
- <li>It has powerful sculpting tools that let you create organic shapes and fine details with ease.</li>
12
- <li>It has advanced painting tools that let you apply realistic textures and colors to your models.</li>
13
- <li>It has dynamic tessellation technology that automatically adds resolution to your models as you sculpt, without affecting performance.</li>
14
- <li>It has multi-touch support that lets you use gestures on touch-enabled devices to manipulate your models.</li>
15
- <li>It has interoperability with other Autodesk software that lets you exchange data seamlessly between applications.</li>
16
- <li>It has support for industry-standard formats such as OBJ, FBX, PSD, TIFF, JPEG, PNG, etc.</li>
17
- </ul>
18
- <h3>How to download and install Mudbox 2013</h3>
19
- <p>To download and install Mudbox 2013, you need to follow these steps:</p>
20
- <ol>
21
- <li>Visit the official website of Autodesk and sign up for an account if you don't have one already.</li>
22
- <li>Go to the product page of Mudbox 2013 and click on the "Download" button.</li>
23
- <li>Select your operating system (Windows or Mac) and your preferred language.</li>
24
- <li>Choose whether you want to download a free trial version or buy a full version of Mudbox 2013.</li>
25
- <li>If you choose to buy a full version, you will need to enter your payment details and confirm your order.</li>
26
- <li>If you choose to download a free trial version, you will need to enter your email address and agree to the terms of service.</li>
27
- <li>You will receive an email with a link to download the installer file of Mudbox 2013.</li>
28
- <li>Click on the link and save the installer file on your computer.</li>
29
- <li>Run the installer file and follow the instructions on the screen to complete the installation process.</li>
30
- <li>You will need to activate your product by entering your serial number and product key that you received in your email or order confirmation.</li>
31
- </ol>
32
- <h2>What is an encrypted file and why do you need a password?</h2>
33
- <p>An encrypted file is a file that has been converted into a secret code that can only be read by someone who knows the correct password. Encryption is a process of protecting data from unauthorized access or modification by using mathematical algorithms and keys. Encryption can be used for various purposes such as:</p>
34
- <ul>
35
- <li>Securing sensitive or confidential information such as personal data, financial transactions, medical records, etc.</li>
36
- <li>Preventing data theft or tampering by hackers, malware, viruses, etc.</li>
37
- <li>Compressing data to save storage space or bandwidth.</li>
38
- </ul>
39
- <p>A password is a secret word or phrase that is used to unlock an encrypted file. A password can be chosen by the person who encrypts the file or generated randomly by the encryption software. A password can be composed of letters, numbers, symbols, or any combination of them. A password can be simple or complex depending on its length and variety of characters. A simple password is easy to remember but also easy to guess or crack by others. A complex password is hard to remember but also hard to guess or crack by others.</p>
40
- <h3>What is encryption and how does it work?</h3>
41
- <p>Encryption is a process of transforming data into a secret code that can only be read by someone who knows the correct key. A key is a piece of information that is used to encrypt or decrypt data. A key can be symmetric or asymmetric depending on whether it is shared or not between the sender and receiver of data. A symmetric key is used for both encryption and decryption of data. An asymmetric key consists of two parts: a public key that is used for encryption of data and a private key that is used for decryption of data. The public key can be shared with anyone but the private key must be kept secret by its owner.</p>
42
- <p>How to unlock encrypted file setup Mudbox 2013 crack<br />
43
- Password recovery for encrypted file setup Mudbox 2013 crack<br />
44
- Decrypt file setup Mudbox 2013 crack without password<br />
45
- Download encrypted file setup Mudbox 2013 crack free<br />
46
- Encrypted file setup Mudbox 2013 crack keygen<br />
47
- Encrypted file setup Mudbox 2013 crack serial number<br />
48
- Encrypted file setup Mudbox 2013 crack activation code<br />
49
- Encrypted file setup Mudbox 2013 crack license key<br />
50
- Encrypted file setup Mudbox 2013 crack patch<br />
51
- Encrypted file setup Mudbox 2013 crack full version<br />
52
- Encrypted file setup Mudbox 2013 crack torrent<br />
53
- Encrypted file setup Mudbox 2013 crack rar<br />
54
- Encrypted file setup Mudbox 2013 crack zip<br />
55
- Encrypted file setup Mudbox 2013 crack iso<br />
56
- Encrypted file setup Mudbox 2013 crack exe<br />
57
- Encrypted file setup Mudbox 2013 crack dmg<br />
58
- Encrypted file setup Mudbox 2013 crack mac<br />
59
- Encrypted file setup Mudbox 2013 crack windows<br />
60
- Encrypted file setup Mudbox 2013 crack linux<br />
61
- Encrypted file setup Mudbox 2013 crack android<br />
62
- Encrypted file setup Mudbox 2013 crack ios<br />
63
- Encrypted file setup Mudbox 2013 crack online<br />
64
- Encrypted file setup Mudbox 2013 crack offline<br />
65
- Encrypted file setup Mudbox 2013 crack installer<br />
66
- Encrypted file setup Mudbox 2013 crack generator<br />
67
- Encrypted file setup Mudbox 2013 crack remover<br />
68
- Encrypted file setup Mudbox 2013 crack bypasser<br />
69
- Encrypted file setup Mudbox 2013 crack hacker<br />
70
- Encrypted file setup Mudbox 2013 crack breaker<br />
71
- Encrypted file setup Mudbox 2013 crack extractor<br />
72
- Encrypted file setup Mudbox 2013 crack opener<br />
73
- Encrypted file setup Mudbox 2013 crack viewer<br />
74
- Encrypted file setup Mudbox 2013 crack editor<br />
75
- Encrypted file setup Mudbox 2013 crack converter<br />
76
- Encrypted file setup Mudbox 2013 crack compressor<br />
77
- Encrypted file setup Mudbox 2013 crack decompressor<br />
78
- Encrypted file setup Mudbox 2013 crack splitter<br />
79
- Encrypted file setup Mudbox 2013 crack merger<br />
80
- Encrypted file setup Mudbox 2013 crack copier<br />
81
- Encrypted file setup Mudbox 2013 crack mover<br />
82
- Encrypted file setup Mudbox 2013 crack deleter<br />
83
- Encrypted file setup Mudbox 2013 crack renamer<br />
84
- Encrypted file setup Mudbox 2013 crack finder<br />
85
- Encrypted file setup Mudbox 2013 crack replacer<br />
86
- Encrypted file setup Mudbox 2013 crack checker<br />
87
- Encrypted file setup Mudbox 2013 crack verifier<br />
88
- Encrypted file setup Mudbox 2013 crack validator<br />
89
- Encrypted file setup Mudbox 2013 crack scanner<br />
90
- Encrypted file setup Mudbox 2013 crack cleaner<br />
91
- Encrypted file setup Mudbox 2013 crack fixer</p>
92
- <p>The basic steps of encryption are:</p>
93
- <ol>
94
- <li>The sender of data chooses an encryption algorithm (a set of rules) and a key (a piece of information) to encrypt data.</li>
95
- <li>The sender applies the encryption algorithm and the key to data (plaintext) to produce encrypted data (ciphertext).</li>
96
- <li>The sender sends the encrypted data (ciphertext) to the receiver of data through a communication channel (such as email, internet, etc.).</li>
97
- <li>The receiver of data uses the same encryption algorithm (or its inverse) and the same key (or its corresponding part) to decrypt data (ciphertext) back into its original form (plaintext).</li>
98
- </ol>
99
- <h3>How to encrypt and decrypt files with WinRAR</h3>
100
- <p>WinRAR is a software application that can create and extract compressed archive files such as RAR, ZIP, TAR, GZIP, etc. WinRAR can also encrypt and decrypt files with passwords using AES-256 encryption algorithm. AES-256 is one of the most secure encryption algorithms available today. To encrypt and decrypt files with WinRAR, you need to follow these steps:</p>
101
- <ol>
102
- <li>Download and install WinRAR from its official website if you don't have it already on your computer.</li>
103
- <li>Select one or more files that you want to encrypt or decrypt with WinRAR.</li>
104
- <li>Right-click on them and choose "Add to archive..." from the context menu.</li>
105
- <li>In the "Archive name" field, enter a name for your archive file (such as "Mudbox_2013_crack.rar").</li>
106
- <li>In the "Archive format" field, choose "RAR" as your archive format.</li>
107
- <li>In the "General" tab, click on "Set password..." button.</li>
108
- <li>In the "Enter password" field, enter a password that you want to use for encrypting or decrypting your files (such as "123456").</li>
109
- <li>In the "Reenter password" field, reenter your password for confirmation.</li>
110
- <li>In the "Encryption method" field, choose "AES-256" as your encryption method.</li>
111
- <li>If you want to encrypt both file names and contents in your archive file, check "Encrypt file names" option.</li>
112
- "Delete files after archiving" option.</li>
113
- <li>Click on "OK" button to create your archive file with encryption.</li>
114
- </ol>
115
- <p>To decrypt files with WinRAR, you need to follow these steps:</p>
116
- <ol>
117
- <li>Locate your archive file (such as "Mudbox_2013_crack.rar") on your computer.</li>
118
- <li>Double-click on it to open it with WinRAR.</li>
119
- <li>If your archive file is encrypted with a password, you will see a dialog box asking you to enter the password.</li>
120
- <li>In the "Enter password" field, enter the password that was used for encrypting your files (such as "123456").</li>
121
- <li>Click on "OK" button to access your files in the archive file.</li>
122
- <li>Select one or more files that you want to extract from the archive file.</li>
123
- <li>Click on "Extract to" button and choose a destination folder for your extracted files.</li>
124
- <li>Click on "OK" button to extract your files from the archive file with decryption.</li>
125
- </ol>
126
- <h2>How to find the password for the encrypted file setup Mudbox 2013 crack?</h2>
127
- <p>If you have downloaded an encrypted file setup Mudbox 2013 crack from an unknown source, you might not know the password for opening it. This can be a problem if you want to use the software for your 3D projects. Fortunately, there are some methods that can help you find the password for the encrypted file setup Mudbox 2013 crack. Here are two of them:</p>
128
- <h3>Method 1: Use a password recovery software</h3>
129
- <p>A password recovery software is a software application that can try to find the password for an encrypted file by using various techniques such as brute force, dictionary, mask, etc. A brute force technique tries all possible combinations of characters until it finds the correct password. A dictionary technique tries words from a predefined list of common passwords or words. A mask technique tries combinations of characters based on a pattern that you specify. A password recovery software can be fast or slow depending on the complexity of the password and the speed of your computer. To use a password recovery software to find the password for the encrypted file setup Mudbox 2013 crack, you need to follow these steps:</p>
130
- <h4>Step 1: Download and install RAR Password Refixer</h4>
131
- <p>RAR Password Refixer is one of the best password recovery software for RAR files. It can recover passwords for RAR files created by WinRAR and other RAR-compatible software. It supports all versions of RAR files and all types of encryption methods. It has four password attack types: Brute-force, Mask, Dictionary, and Smart. It has a user-friendly interface and a high recovery rate. You can download RAR Password Refixer from its official website: <a href="http://www.isumsoft.com/rar-password-refixer/">http://www.isumsoft.com/rar-password-refixer/</a>.</p>
132
- <h4>Step 2: Launch the software and import the encrypted file</h4>
133
- <p>After installing RAR Password Refixer on your computer, launch it and you will see its main interface. Click on the "Open" button and browse your computer to locate your encrypted file setup Mudbox 2013 crack (such as "Mudbox_2013_crack.rar"). Select it and click on "Open" button again to import it into the software.</p>
134
- <h4>Step 3: Choose a password attack type and set parameters</h4>
135
- <p>In the main interface of RAR Password Refixer, you will see four tabs: Brute-force, Mask, Dictionary, and Smart. Each tab represents a different password attack type that you can use to find the password for your encrypted file. You can choose one of them according to your situation and preference. For example, if you know nothing about the password, you can choose Brute-force attack type which will try all possible combinations of characters until it finds the correct password. If you know some information about the password such as its length, prefix, suffix, or character set, you can choose Mask attack type which will try combinations of characters based on a pattern that you specify. If you have a list of common passwords or words that might be used as passwords, you can choose Dictionary attack type which will try words from a predefined list or a custom list that you provide. If you want to use an intelligent algorithm that can analyze your encrypted file and find the most likely password, you can choose Smart attack type which will try passwords based on some rules and patterns.</p>
136
- the most likely password.</p>
137
- <h4>Step 4: Start the password recovery process and wait for the result</h4>
138
- <p>After setting the parameters for your chosen password attack type, click on the "Start" button to start the password recovery process. You will see a progress bar and some information such as current password, current speed, estimated time, etc. on the screen. You can pause or stop the process at any time by clicking on the "Pause" or "Stop" button. You can also save the process and resume it later by clicking on the "Save" or "Resume" button. The password recovery process can take from minutes to hours or even days depending on the complexity of the password and the speed of your computer. When the password is found, you will see a dialog box showing you the password and asking you to copy it or open your encrypted file with it. Click on "Copy" button to copy the password to your clipboard or click on "Open" button to open your encrypted file with WinRAR and enter the password.</p>
139
- <h3>Method 2: Use an online password cracker service</h3>
140
- <p>An online password cracker service is a website that can crack passwords for encrypted files by using cloud computing and distributed computing techniques. An online password cracker service can be fast or slow depending on the number of servers and users available at a given time. An online password cracker service can be free or paid depending on the features and limitations offered by the website. To use an online password cracker service to find the password for the encrypted file setup Mudbox 2013 crack, you need to follow these steps:</p>
141
- <h4>Step 1: Visit an online password cracker website</h4>
142
- <p>There are many online password cracker websites that can crack passwords for RAR files such as <a href="https://www.password-online.com/">https://www.password-online.com/</a>, <a href="https://www.crark.net/">https://www.crark.net/</a>, <a href="https://www.onlinehashcrack.com/rar-password-recovery-online.php">https://www.onlinehashcrack.com/rar-password-recovery-online.php</a>, etc. You can choose one of them according to your preference and availability. For example, we will use https://www.password-online.com/ as an example in this article.</p>
143
- <h4>Step 2: Upload the encrypted file and agree to the terms of service</h4>
144
- <p>After visiting https://www.password-online.com/, you will see its main interface. Click on the "Browse" button and browse your computer to locate your encrypted file setup Mudbox 2013 crack (such as "Mudbox_2013_crack.rar"). Select it and click on "Open" button to upload it to the website. You will see a message saying that your file has been uploaded successfully and asking you to agree to the terms of service. Read the terms of service carefully and check "I agree with terms of service" option if you agree with them. Click on "Next step" button to proceed.</p>
145
- <h4>Step 3: Wait for the password to be cracked and displayed</h4>
146
- <p>After agreeing to the terms of service, you will see a message saying that your file has been added to the queue and asking you to wait for your turn. You will also see some information such as file name, file size, encryption method, estimated time, etc. on the screen. You can refresh the page or check your email to see if your password has been cracked and displayed. The password cracking process can take from minutes to hours or even days depending on the complexity of the password and the number of servers and users available at a given time. When the password is cracked, you will see a message saying that your password has been found and displaying it on the screen. You can also receive an email with a link to download your decrypted file if you have provided your email address during the upload process.</p>
147
- <h2>Conclusion</h2>
148
- <p>In this article, we have shown you how to find the password for the encrypted file setup Mudbox 2013 crack by using two methods: a password recovery software and an online password cracker service. We have also provided step-by-step instructions and screenshots for each method. We hope that this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below.</p>
149
- <h2>FAQs</h2>
150
- <p>Here are some frequently asked questions and answers related to the topic of this article:</p>
151
- <h3>Q: Is it legal to crack passwords for encrypted files?</h3>
152
- <p>A: It depends on the situation and the jurisdiction. Generally speaking, it is legal to crack passwords for encrypted files that belong to you or that you have permission to access. However, it is illegal to crack passwords for encrypted files that belong to someone else or that you have no permission to access. It is also illegal to crack passwords for encrypted files that contain illegal or harmful content such as malware, viruses, child pornography, etc. Therefore, you should be careful and responsible when using password cracking methods and tools.</p>
153
- <h3>Q: Is it safe to use online password cracker services?</h3>
154
- <p>A: It depends on the website and the service. Some online password cracker services are reliable and trustworthy, while others are not. Some online password cracker services may steal your data, infect your computer with malware, or charge you hidden fees. Therefore, you should be careful and cautious when using online password cracker services. You should always read the terms of service, check the reviews and ratings, and use a secure connection when using online password cracker services.</p>
155
- <h3>Q: How can I prevent my files from being encrypted or cracked by others?</h3>
156
- <p>A: There are some measures that you can take to protect your files from being encrypted or cracked by others. Some of them are:</p>
157
- <ul>
158
- <li>Use strong passwords that are long, complex, and unique for your files. Avoid using common passwords or words that can be easily guessed or cracked by others.</li>
159
- <li>Use reliable encryption software that supports secure encryption algorithms and methods for your files. Avoid using outdated or weak encryption software that can be easily broken or bypassed by others.</li>
160
- <li>Backup your files regularly and store them in a safe place. Avoid losing your files or forgetting your passwords by keeping copies of your files and passwords in a secure location such as an external hard drive, a cloud storage service, or a password manager.</li>
161
- <li>Scan your computer and files regularly with antivirus software. Avoid getting infected by malware or viruses that can encrypt or damage your files by keeping your computer and files clean and updated with antivirus software.</li>
162
- </ul>
163
- <h3>Q: What are some other software applications that can create and extract encrypted archive files?</h3>
164
- <p>A: There are many other software applications that can create and extract encrypted archive files besides WinRAR. Some of them are:</p>
165
- <ul>
166
- <li>7-Zip: A free and open source software application that can create and extract compressed archive files such as 7z, ZIP, RAR, TAR, GZIP, etc. It can also encrypt and decrypt files with passwords using AES-256 encryption algorithm.</li>
167
- <li>PeaZip: A free and open source software application that can create and extract compressed archive files such as 7z, ZIP, RAR, TAR, GZIP, etc. It can also encrypt and decrypt files with passwords using various encryption algorithms such as AES-256, Blowfish, Serpent, Twofish, etc.</li>
168
- , etc. It can also encrypt and decrypt files with passwords using AES-256 encryption algorithm.</li>
169
- <li>WinAce: A commercial software application that can create and extract compressed archive files such as ACE, ZIP, RAR, TAR, GZIP, etc. It can also encrypt and decrypt files with passwords using various encryption algorithms such as AES-256, Blowfish, IDEA, etc.</li>
170
- </ul>
171
- <h3>Q: What are some other password recovery software applications that can crack passwords for encrypted files?</h3>
172
- <p>A: There are many other password recovery software applications that can crack passwords for encrypted files besides RAR Password Refixer. Some of them are:</p>
173
- <ul>
174
- <li>PassFab for RAR: A commercial software application that can crack passwords for RAR files created by WinRAR and other RAR-compatible software. It supports all versions of RAR files and all types of encryption methods. It has three password attack types: Brute-force, Brute-force with Mask, and Dictionary.</li>
175
- <li>KRyLack RAR Password Recovery: A commercial software application that can crack passwords for RAR files created by WinRAR and other RAR-compatible software. It supports all versions of RAR files and all types of encryption methods. It has three password attack types: Brute-force, Mask, and Dictionary.</li>
176
- <li>RAR Password Unlocker: A commercial software application that can crack passwords for RAR files created by WinRAR and other RAR-compatible software. It supports all versions of RAR files and all types of encryption methods. It has three password attack types: Brute-force, Brute-force with Mask, and Dictionary.</li>
177
- </ul>
178
- </p> 0a6ba089eb<br />
179
- <br />
180
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Breakaway Broadcast Processor Asio.0.90.95 39.md DELETED
@@ -1,40 +0,0 @@
1
- <h2>Breakaway broadcast processor asio.0.90.95 39</h2><br /><p><b><b>Download File</b> &#10003;&#10003;&#10003; <a href="https://imgfil.com/2uy0qy">https://imgfil.com/2uy0qy</a></b></p><br /><br />
2
-
3
- .51 KB
4
-
5
- 360 - English TV and Radio for Android by TV Player Ltd... Android Apps that run in the background. Type: Android, Category: Android Apps/Games, Size: 107 MB, Free Download
6
-
7
- Description: Download The Jump or Free Fall! for Android. If you like skiing, snowboarding or other extreme sports, this is the app for you! Set the best time in your life on your phone.
8
-
9
- Watch the world's best skiers and snowboarders in HD while you tap your screen to make it jump!
10
-
11
- Watch video and track your jumps in real time or challenge your friends to beat your time. Jump again and again, the more you jump the higher your score and the more points you can gain!
12
-
13
- The best part? You're jumping on your phone! Tapping your screen will make your phone jump, so if you want to take the picture or you just want to enjoy the view, you can still do it while you're in the air!
14
-
15
- How to play: - Touch your screen to jump- Tap the screen to jump higher- Tap to jump lower
16
-
17
- Features: - Watch videos and track your jumps- Live scores with a race with your friends and the world- Get behind your phone camera to take pictures while you are in the air- Compete against other players around the world
18
-
19
- Note: This game is free to play, but you can choose to purchase in-app items with real money. You can disable in-app purchases in your device's settings. For more info, see Terms of Use and Privacy Policy:
20
-
21
- Time by biggeelance.com for Android. Time for Android apk download free. In Time you're given the challenge of earning a million dollars in a day. Keep an eye on your resources to make sure that you can take on the 1,000 tasks given to you. Good luck.
22
-
23
- App name: Time for Android
24
-
25
- App size: 2.21 MB
26
-
27
- Developer: biggeelance.com
28
-
29
- Published: March 21, 2014
30
-
31
- Category: Games
32
-
33
- Version: 0.2.1
34
-
35
- Android Market link:
36
-
37
- Google Play link: 4fefd39f24<br />
38
- <br />
39
- <br />
40
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Dhoom 2 Hd Video Songs 1080p Torrent.md DELETED
@@ -1,19 +0,0 @@
1
-
2
- <h1>Dhoom 2: A Thrilling Heist Movie with Stunning Songs</h1>
3
- <p>Dhoom 2 is a 2006 Indian action thriller film directed by Sanjay Gadhvi and produced by Aditya Chopra and Yash Chopra. It is the second installment in the Dhoom series and a sequel to the 2004 film Dhoom. The film stars Hrithik Roshan, Abhishek Bachchan, Aishwarya Rai Bachchan, Uday Chopra and Bipasha Basu in the lead roles.</p>
4
- <p>The film follows the adventures of Mr. A (Roshan), a master thief who steals priceless artifacts from around the world and evades capture by the police. He teams up with Sunehri (Rai Bachchan), a beautiful but treacherous accomplice who has her own agenda. They are pursued by Jai Dixit (Bachchan), an honest cop who is determined to catch them, and his partner Ali (Chopra), a motorbike racer and comic relief.</p>
5
- <h2>Dhoom 2 Hd Video Songs 1080p Torrent</h2><br /><p><b><b>Download Zip</b> &rarr;&rarr;&rarr; <a href="https://imgfil.com/2uxZZF">https://imgfil.com/2uxZZF</a></b></p><br /><br />
6
- <p>Dhoom 2 was praised for its action sequences, cinematography, music and performances, especially by Roshan and Rai Bachchan. The film was a huge commercial success, becoming the highest-grossing Indian film of 2006 and one of the highest-grossing Bollywood films of all time. The film also won several awards, including five Filmfare Awards.</p>
7
- <p>One of the highlights of the film is its soundtrack, composed by Pritam, with lyrics by Sameer. The film features six songs that showcase the talents of singers like KK, Alisha Chinai, Sonu Nigam, Shreya Ghoshal and Vishal Dadlani. The songs are also accompanied by stunning visuals and choreography that enhance the appeal of the film.</p>
8
- <p>Some of the most popular songs from Dhoom 2 are:</p>
9
- <ul>
10
- <li>"Dhoom Again" - The title track of the film, sung by Vishal Dadlani and Dominique Cerejo, is a fast-paced rock song that captures the spirit of adventure and thrill of the film. The song features Roshan and Rai Bachchan dancing on top of a moving train in Brazil.</li>
11
- <li>"Crazy Kiya Re" - A seductive song sung by Sunidhi Chauhan, featuring Rai Bachchan as Sunehri who tries to woo Mr. A with her moves. The song is set in a nightclub in Rio de Janeiro.</li>
12
- <li>"Touch Me" - A romantic duet sung by KK and Alisha Chinai, featuring Roshan and Basu as Raj and Shonali, two undercover agents who fall in love while working on the case. The song is set in a beach resort in Goa.</li>
13
- <li>"Dil Laga Na" - A peppy song sung by Sukhbir, Soham Chakraborty, Jolly Mukherjee, Mahalaxmi Iyer and Suzanne D'Mello, featuring all the main characters celebrating their success after a heist. The song is set in a carnival in Rio de Janeiro.</li>
14
- <li>"My Name Is Ali" - A humorous song sung by Sonu Nigam and Bipasha Basu, featuring Chopra as Ali who tries to impress Basu as Monali, Shonali's twin sister. The song is set in a hotel room in Mumbai.</li>
15
- <li>"Dhoom Machale" - The theme song of the Dhoom series, sung by Naya (Tata Young in the original version), featuring clips from various scenes of the film. The song is played during the end credits of the film.</li>
16
- </ul>
17
- <p>If you are looking for a torrent to download Dhoom 2 HD video songs 1080p, you can try this link[^1^]. However, please note that downloading copyrighted content without permission is illegal and may result in legal consequences.</p> d5da3c52bf<br />
18
- <br />
19
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Elven Love Crack Full Version Download LINK.md DELETED
@@ -1,14 +0,0 @@
1
- <br />
2
- <h1>Elven Love: A Steam Game Review</h1>
3
- <p>Elven Love is a virtual reality game that lets you experience the ancient magic of the Wood and Moon Elves. You can explore their mystical and mysterious worlds, and witness their rare and sacred ritual of love. The game is designed for adults who want to immerse themselves in a fantasy adventure with beautiful graphics and sensual interactions.</p>
4
- <p>The game features two different races of elves, each with their own culture, history and personality. You can choose to play as a male or female human, and interact with the elves in various ways. You can talk to them, touch them, hug them, kiss them, and even make love to them. The game uses realistic physics and animations to create a lifelike experience.</p>
5
- <h2>Elven Love crack full version download</h2><br /><p><b><b>Download Zip</b> &rArr; <a href="https://imgfil.com/2uy0UR">https://imgfil.com/2uy0UR</a></b></p><br /><br />
6
- <p>Elven Love is available on Steam for $9.99 USD. You need a VR headset and controllers to play the game. The game supports HTC Vive, Oculus Rift and Windows Mixed Reality headsets. The game has positive reviews from users who praised its graphics, sound, gameplay and story. Some users also reported some bugs and glitches, but the developers are working on fixing them.</p>
7
- <p>If you are looking for a VR game that will transport you to a magical world of elves and romance, you might want to check out Elven Love on Steam[^1^]. It is a unique and captivating game that will make you feel the elven love on your skin!</p><p>The Wood Elves are the oldest race of elves, living in harmony with nature. They are skilled in archery, herbalism and magic. They are friendly and curious, but also cautious and secretive. They value freedom and peace, and avoid conflicts with other races. They have a deep bond with their forest home, and can communicate with animals and plants.</p>
8
- <p>The Moon Elves are the descendants of the Wood Elves who left their forest to explore the world. They are fascinated by the stars and the moon, and have developed a sophisticated astronomy and astrology. They are adept in illusion, enchantment and divination magic. They are elegant and refined, but also proud and arrogant. They seek knowledge and power, and often clash with other races. They have a strong connection with the lunar cycles, and can manipulate light and shadows.</p>
9
- <p>The game allows you to learn more about the elven lore and history through dialogues, books and objects. You can also discover hidden secrets and easter eggs in the game world. The game has multiple endings depending on your choices and actions. The game also has a sandbox mode where you can customize your appearance and the elven characters, and enjoy unlimited interactions with them.</p><p>Elven Love is a game that will appeal to fans of fantasy, romance and erotica. The game has stunning visuals and sound effects that create a realistic and immersive atmosphere. The game also has a captivating story and characters that will make you care about them and their fate. The game is not just about sex, but also about emotions and relationships.</p>
10
- <p>The game is not for everyone, however. The game contains explicit sexual content and nudity that some people might find offensive or uncomfortable. The game also requires a VR headset and controllers, which might be expensive or inaccessible for some players. The game also has some technical issues and bugs that might affect the performance and enjoyment of the game.</p>
11
- <p></p>
12
- <p>Overall, Elven Love is a game that offers a unique and memorable VR experience. It is a game that will make you feel the elven love on your skin!</p> d5da3c52bf<br />
13
- <br />
14
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cheto 8 Ball Pool Mod APK Play with AutoPlay Long Line and Prediction Features.md DELETED
@@ -1,127 +0,0 @@
1
-
2
- <h1>Cheto APK 8 Ball Pool: A Guide to the Best Aiming Tool for 8 Ball Pool</h1>
3
- <p>If you are a fan of 8 ball pool, you might have heard of Cheto APK 8 Ball Pool, a popular aiming tool that can help you improve your skills and win more games. But what is Cheto APK 8 Ball Pool exactly, and how can you use it effectively? In this article, we will answer these questions and more, so you can decide if Cheto APK 8 Ball Pool is right for you.</p>
4
- <h2>cheto apk 8 ball pool</h2><br /><p><b><b>Download</b> &#9989; <a href="https://urlin.us/2uT0vY">https://urlin.us/2uT0vY</a></b></p><br /><br />
5
- <h2>What is Cheto APK 8 Ball Pool?</h2>
6
- <h3>A brief introduction to the app and its features</h3>
7
- <p>Cheto APK 8 Ball Pool is an Android app that uses AI image recognition technology to display an extended line of guideline in real time while playing 8 ball pool. This feature allows you to aim more accurately and make more precise shots. Not only that, but Cheto APK 8 Ball Pool also supports advanced cushion shots, including bank shots and kick shots, so you can easily pocket the target ball into another pocket. Moreover, Cheto APK 8 Ball Pool also supports 3-lines guideline, which lets you play some fancy shots that can only be done by professional players. With these features, Cheto APK 8 Ball Pool can help you become a king of 8 ball pool in no time.</p>
8
- <h3>How to download and install Cheto APK 8 Ball Pool</h3>
9
- <p>To download and install Cheto APK 8 Ball Pool, you need to follow these steps:</p>
10
- <ol>
11
- <li>Go to [Cheto Aim Pool For 8 Bal Pool](^1^) or [Cheto Cheat for 8 Ball Pool](^2^) and click on the download button.</li>
12
- <li>Allow unknown sources on your device settings if prompted.</li>
13
- <li>Open the downloaded file and install it on your device.</li>
14
- <li>Launch the app and grant it the necessary permissions.</li>
15
- <li>Enjoy using Cheto APK 8 Ball Pool.</li>
16
- </ol>
17
- <h2>How to use Cheto APK 8 Ball Pool</h2>
18
- <h3>How to activate the auto-extend guideline feature</h3>
19
- <p>To activate the auto-extend guideline feature, you need to do the following:</p>
20
- <ol>
21
- <li>Open the app and tap on the settings icon.</li>
22
- <li>Toggle on the auto-extend guideline option.</li>
23
- <li>Go back to the main screen and tap on the play button.</li>
24
- <li>Select your preferred game mode and start playing.</li>
25
- <li>You will see a longer line of guideline on your screen that will help you aim better.</li>
26
- </ol>
27
- <h3>How to use the cushion shots and 3-lines features</h3>
28
- <p>To use the cushion shots and 3-lines features, you need to do the following:</p>
29
- <ol>
30
- <li>Open the app and tap on the settings icon.</li>
31
- <li>Toggle on the cushion shots or 3-lines option depending on what you want to use.</li>
32
- <li>Go back to the main screen and tap on the play button.</li>
33
- <li>Select your preferred game mode and start playing.</li>
34
- <li>You will see a cushion shot or 3-lines icon on your screen that will help you make advanced shots.</li>
35
- </ol>
36
- <h3>Tips and tricks for using Cheto APK 8 Ball Pool effectively</h3>
37
- <p>Here are some tips and tricks for using Cheto APK 8 Ball Pool effectively:</p>
38
- <ul>
39
- <li>Practice using the app in offline mode or with friends before playing in online tournaments or matches.</li>
40
- <li>Don't rely too much on the app and use your own judgment and skills as well.</li>
41
- <li>Be careful not to use the app too obviously or excessively, as it might raise suspicion from other players or moderators.</li>
42
- <li>Keep the app updated to avoid any bugs or glitches.</li>
43
- <li>Have fun and enjoy playing 8 ball pool with Cheto APK 8 Ball Pool.</li>
44
- </ul>
45
- <h2>Pros and cons of Cheto APK 8 Ball Pool</h2>
46
- <h3>The benefits of using Cheto APK 8 Ball Pool</h3>
47
- <p>Some of the benefits of using Cheto APK 8 Ball Pool are:</p>
48
- <ul>
49
- <li>It can help you improve your aiming and shooting skills in 8 ball pool.</li>
50
- <li>It can help you win more games and earn more coins and rewards.</li>
51
- <li>It can help you learn some advanced shots and techniques that can impress your opponents and friends.</li>
52
- <li>It can make 8 ball pool more fun and exciting for you.</li>
53
- </ul>
54
- <h3>The drawbacks and risks of using Cheto APK 8 Ball Pool</h3>
55
- <p>Some of the drawbacks and risks of using Cheto APK 8 Ball Pool are:</p>
56
- <p>cheto hack 8 ball pool pc download<br />
57
- how to install cheto hack 8 ball pool pc<br />
58
- cheto pc 8 ball pool free<br />
59
- cheto 8 ball pool mod apk<br />
60
- cheto 8 ball pool autoplay<br />
61
- cheto 8 ball pool prediction<br />
62
- cheto 8 ball pool gameloop<br />
63
- cheto 8 ball pool latest version<br />
64
- cheto 8 ball pool coins hack<br />
65
- cheto 8 ball pool long line hack<br />
66
- cheto 8 ball pool tips and tricks<br />
67
- cheto 8 ball pool linkvertise<br />
68
- cheto 8 ball pool youtube<br />
69
- cheto 8 ball pool tutorial<br />
70
- cheto 8 ball pool fix problem solve<br />
71
- cheto 8 ball pool update<br />
72
- cheto 8 ball pool cheat engine<br />
73
- cheto 8 ball pool unlimited money<br />
74
- cheto 8 ball pool online generator<br />
75
- cheto 8 ball pool no root<br />
76
- cheto 8 ball pool no ban<br />
77
- cheto 8 ball pool for android<br />
78
- cheto 8 ball pool for ios<br />
79
- cheto 8 ball pool for mac<br />
80
- cheto 8 ball pool for windows<br />
81
- cheto 8 ball pool reddit<br />
82
- cheto 8 ball pool review<br />
83
- cheto 8 ball pool forum<br />
84
- cheto 8 ball pool discord<br />
85
- cheto 8 ball pool telegram<br />
86
- cheto 8 ball pool support<br />
87
- cheto 8 ball pool customer service<br />
88
- cheto 8 ball pool refund policy<br />
89
- cheto 8 ball pool license key<br />
90
- cheto 8 ball pool activation code<br />
91
- cheto 8 ball pool crack file<br />
92
- cheto 8 ball pool obb file<br />
93
- cheto 8 ball pool data file<br />
94
- cheto 8 ball pool zip file download<br />
95
- cheto 8 ball pool rar file download<br />
96
- how to use free cheto 8 ball pool <br />
97
- how to get free coins with cheto 8 ball pool <br />
98
- how to win every game with cheto 8 ball pool <br />
99
- how to play with friends using cheto 8 ball pool <br />
100
- how to customize settings in cheto 8 ball pool <br />
101
- how to uninstall cheto hack from 8 ball pool <br />
102
- how to report bugs in cheto hack for 8 ball pool <br />
103
- how to contact developers of cheto hack for 8 ball pool <br />
104
- how to join beta testing of cheto hack for 8 ball pool <br />
105
- how to buy premium subscription of cheto hack for 8 ball pool </p>
106
- <ul>
107
- <li>It can make 8 ball pool less challenging and satisfying for you.</li>
108
- <li>It can make you dependent on the app and lose your own skills and confidence.</li>
109
- <li>It can get you banned or reported by other players or moderators if they detect that you are using the app.</li>
110
- <li>It can expose your device to malware or viruses if you download it from untrusted sources.</li>
111
- </ul>
112
- <h2>Conclusion</h2>
113
- <h3>A summary of the main points and a call to action</h3>
114
- <p>In conclusion, Cheto APK 8 Ball Pool is an aiming tool that can help you play 8 ball pool better. It has features such as auto-extend guideline, cushion shots, and 3-lines that can help you aim more accurately and make more precise shots. However, it also has some drawbacks and risks, such as making 8 ball pool less challenging, making you dependent on the app, getting you banned or reported, and exposing your device to malware or viruses. Therefore, you should use Cheto APK 8 Ball Pool wisely and responsibly, and not abuse it or rely on it too much. If you want to try Cheto APK 8 Ball Pool, you can download it from [Cheto Aim Pool For 8 Bal Pool] or [Cheto Cheat for 8 Ball Pool] and follow the instructions in this article. Have fun and enjoy playing 8 ball pool with Cheto APK 8 Ball Pool!</p>
115
- <h2>FAQs</h2>
116
- <h4>Is Cheto APK 8 Ball Pool legal?</h4>
117
- <p>No, Cheto APK 8 Ball Pool is not legal. It is a third-party app that violates the terms of service of 8 ball pool. Using it can get you banned or reported by other players or moderators. Use it at your own risk.</p>
118
- <h4>Is Cheto APK 8 Ball Pool safe?</h4>
119
- <p>Not necessarily. Cheto APK 8 Ball Pool is not available on the official Google Play Store, so you have to download it from other sources that may not be trustworthy. Downloading it from untrusted sources can expose your device to malware or viruses that can harm your device or steal your personal information. Be careful where you download it from.</p>
120
- <h4>Does Cheto APK 8 Ball Pool work on iOS devices?</h4>
121
- <p>No, Cheto APK 8 Ball Pool only works on Android devices. It is not compatible with iOS devices. If you have an iOS device, you will not be able to use Cheto APK 8 Ball Pool.</p>
122
- <h4>How much does Cheto APK 8 Ball Pool cost?</h4>
123
- <p>Cheto APK 8 Ball Pool is free to download and use. However, it may have some ads or in-app purchases that may require you to pay some money. You can choose whether to pay for them or not.</p>
124
- <h4>Where can I get more information about Cheto APK 8 Ball Pool?</h4>
125
- <p>You can get more information about Cheto APK 8 Ball Pool from the official website of the app, [Cheto Aim Pool For 8 Bal Pool] or [Cheto Cheat for 8 Ball Pool]. You can also check out some reviews, videos, or forums about the app online. However, be careful not to trust everything you read or watch, as some of them may be biased or misleading.</p> 197e85843d<br />
126
- <br />
127
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Brawl Stars APKure and Enjoy Epic 3v3 Battles.md DELETED
@@ -1,137 +0,0 @@
1
-
2
- <h1>Brawl Stars Apkure: How to Download and Play the Popular Mobile Game</h1>
3
- <p>If you are looking for a fast-paced, action-packed, and fun multiplayer game for your mobile device, you might want to check out Brawl Stars. Brawl Stars is a game developed by Supercell, the same company behind the hit games Clash of Clans and Clash Royale. In this game, you can choose from a variety of unique characters, called Brawlers, and compete in different game modes with players from around the world. You can also customize your Brawlers with skins, gadgets, and star powers, and join or create a club with your friends.</p>
4
- <h2>brawl stars apkure</h2><br /><p><b><b>DOWNLOAD</b> &#10040; <a href="https://urlin.us/2uSZ1P">https://urlin.us/2uSZ1P</a></b></p><br /><br />
5
- <p>But what if you can't access Brawl Stars from the official app store of your device? Maybe you live in a country where the game is not available, or maybe you want to try a different version of the game that has some features that are not in the official release. In that case, you might want to use apkure, a website that lets you download apk files of various apps and games for free. Apk files are the installation files for Android apps, and they can be used to install apps that are not in the Google Play Store.</p>
6
- <p>In this article, we will show you how to download Brawl Stars from apkure, and how to play the game after downloading it. We will also discuss the pros and cons of using apkure, and give you some tips and warnings for using it safely and legally. Let's get started!</p>
7
- <h2>How to Download Brawl Stars from Apkure</h2>
8
- <p>Downloading Brawl Stars from apkure is not very difficult, but you need to follow some steps carefully. Here is what you need to do:</p>
9
- <p>brawl stars apkure download<br />
10
- brawl stars apkure mod<br />
11
- brawl stars apkure hack<br />
12
- brawl stars apkure latest version<br />
13
- brawl stars apkure update<br />
14
- brawl stars apkure free gems<br />
15
- brawl stars apkure online<br />
16
- brawl stars apkure private server<br />
17
- brawl stars apkure unlimited money<br />
18
- brawl stars apkure no verification<br />
19
- brawl stars apkure android<br />
20
- brawl stars apkure ios<br />
21
- brawl stars apkure pc<br />
22
- brawl stars apkure windows<br />
23
- brawl stars apkure mac<br />
24
- brawl stars apkure chromebook<br />
25
- brawl stars apkure emulator<br />
26
- brawl stars apkure bluestacks<br />
27
- brawl stars apkure nox<br />
28
- brawl stars apkure ldplayer<br />
29
- brawl stars apkure gameplay<br />
30
- brawl stars apkure review<br />
31
- brawl stars apkure tips<br />
32
- brawl stars apkure tricks<br />
33
- brawl stars apkure guide<br />
34
- brawl stars apkure cheats<br />
35
- brawl stars apkure codes<br />
36
- brawl stars apkure redeem code<br />
37
- brawl stars apkure brawlers<br />
38
- brawl stars apkure skins<br />
39
- brawl stars apkure events<br />
40
- brawl stars apkure modes<br />
41
- brawl stars apkure maps<br />
42
- brawl stars apkure clubs<br />
43
- brawl stars apkure trophies<br />
44
- brawl stars apkure ranks<br />
45
- brawl stars apkure season pass<br />
46
- brawl stars apkure star points<br />
47
- brawl stars apkure power points<br />
48
- brawl stars apkure power league<br />
49
- brawl stars apkure championship challenge<br />
50
- brawl stars apkure esports<br />
51
- brawl stars apkure news<br />
52
- brawl stars apkure reddit<br />
53
- brawl stars apkure discord<br />
54
- brawl stars apkure youtube<br />
55
- brawl stars apkure twitch<br />
56
- brawl stars apkure facebook<br />
57
- brawl stars apkure twitter</p>
58
- <ol>
59
- <li>Go to <a href="(^1^)">apkure.com</a> on your browser. You can use any browser, but we recommend using Chrome or Firefox for better compatibility.</li>
60
- <li>On the homepage, type "Brawl Stars" in the search box and tap on the magnifying glass icon.</li>
61
- <li>You will see a list of results related to Brawl Stars. Look for the one that has the latest version number and the most downloads. Tap on it.</li>
62
- <li>You will be taken to a page that has more information about the app, such as its description, screenshots, ratings, reviews, etc. Scroll down until you see a green button that says "Download APK". Tap on it.</li>
63
- <li>A pop-up window will appear asking you to confirm your download. Tap on "OK". The download will start automatically.</li>
64
- <li>Once the download is complete, you will see a notification on your device. Tap on it to open the file.</li>
65
- <li>You will be asked to allow the installation of apps from unknown sources. This is necessary because apkure is not an official app store. Tap on "Settings" and enable the option that allows installing apps from unknown sources. You may need to enter your device's password or PIN if prompted.</li>
66
- <li>Go back to the file and tap on "Install". The installation will begin.</li>
67
- <li>Wait for a few seconds until the installation is finished. You will see a message that says "App installed". Tap on "Open" to launch Brawl Stars.</li>
68
- </ol>
69
- <h3>Pros and Cons of Using Apkure</h3>
70
- <p>Using apkure has some advantages and disadvantages that you should be aware of before downloading any app or game from it. Here are some of them:</p>
71
- <table>
72
- <tr><th>Pros</th><th>Cons</th></tr>
73
- <tr><td>- You <p>Here are some of the pros and cons of using apkure to download Brawl Stars:</p>
74
- <table>
75
- <tr><th>Pros</th><th>Cons</th></tr>
76
- <tr><td>- You can access apps and games that are not available in your region or device.</td><td>- You may encounter compatibility issues or bugs that affect the performance of the app or game.</td></tr>
77
- <tr><td>- You can try different versions of the app or game and enjoy features that are not in the official release.</td><td>- You may miss out on updates and security patches that are released by the official app store.</td></tr>
78
- <tr><td>- You can download apps and games for free without spending any money.</td><td>- You may violate the terms of service of the app or game developer and risk getting banned or suspended.</td></tr>
79
- </table>
80
- <h3>Tips and Warnings for Using Apkure Safely and Legally</h3>
81
- <p>While apkure can be a useful source of apps and games, it also comes with some risks and responsibilities. Here are some tips and warnings to help you use apkure safely and legally:</p>
82
- <ul>
83
- <li>Always check the ratings, reviews, and comments of the app or game before downloading it. Look for any signs of malware, viruses, or scams that could harm your device or account.</li>
84
- <li>Always scan the downloaded file with a reliable antivirus software before installing it. This will help you detect and remove any potential threats that could compromise your security or privacy.</li>
85
- <li>Always backup your data before installing any app or game from apkure. This will help you restore your device to its previous state in case something goes wrong during the installation or afterwards.</li>
86
- <li>Always respect the intellectual property rights of the app or game developer. Do not use apkure to download pirated, cracked, or modded apps or games that infringe on the developer's rights.</li>
87
- <li>Always follow the rules and guidelines of the app or game you are playing. Do not use apkure to gain an unfair advantage over other players or cheat in any way.</li>
88
- </ul>
89
- <h2>How to Play Brawl Stars After Downloading from Apkure</h2>
90
- <p>Now that you have downloaded Brawl Stars from apkure, you are ready to play the game and have some fun. Here are some tips on how to play Brawl Stars after downloading it from apkure:</p>
91
- <h3>How to Install and Launch the Game</h3>
92
- <p>If you have followed the steps above, you should have already installed Brawl Stars on your device. To launch the game, simply tap on its icon on your home screen or app drawer. You will see a loading screen with the Brawl Stars logo and some tips. Wait for a few seconds until the game loads completely.</p>
93
- <p>The first time you launch the game, you will be asked to choose a name for your profile. You can enter any name you like, as long as it is not offensive or inappropriate. You can also change your name later in the settings menu. After choosing a name, you will be taken to the tutorial mode, where you will learn the basics of the game.</p>
94
- <p>The tutorial mode will teach you how to move, shoot, aim, use your super ability, and collect power cubes. You will also learn about the different game modes, such as Showdown, Gem Grab, Brawl Ball, Bounty, Heist, Siege, Hot Zone, and Duo Showdown. You will also learn about events, quests, trophies, tokens, gems, coins, power points, star points, boxes, brawlers, skins, gadgets, star powers, clubs, friends, chat, leaderboard, shop, news, settings, and support. The tutorial mode will end when you complete your first Showdown match.</p>
95
- <h3>How to Choose and Unlock New Characters (Brawlers)</h3>
96
- <p>Brawl Stars has 49 characters that you can choose from, each with their own unique abilities and stats. These characters are called Brawlers, and they are divided into seven rarities: Common (8), Rare (6), Super Rare (6), Epic (9), Mythic (7), Legendary (7), and Chromatic (6). The higher the rarity of a Brawler, the harder it is to unlock them.</p>
97
- <p>You can unlock new Brawlers by opening boxes that you get from playing matches or completing quests. Boxes can contain coins, power points, gems, star points, gadgets, star powers, or new Brawlers. The chances of getting a new Brawler depend on their rarity and your luck. You can also buy new Brawlers from the shop using gems or star points when they are available.</p>
98
- <p>You can choose which Brawler you want to play with before starting a match. You can see all your unlocked B <p>You can choose which Brawler you want to play with before starting a match. You can see all your unlocked Brawlers in the Brawlers menu, where you can also upgrade them with power points and coins, equip them with gadgets and star powers, and change their skins. You can also see the stats, abilities, and tips for each Brawler by tapping on their portrait.</p>
99
- <p>Some Brawlers are better suited for certain game modes than others, so you should choose wisely depending on the situation. You can also switch your Brawler between matches if you want to try something different. You can also use the random button to let the game choose a Brawler for you.</p>
100
- <h3>How to Join or Create a Club and Play with Friends</h3>
101
- <p>Brawl Stars is more fun when you play with your friends or other players who share your interests and goals. You can join or create a club to chat, play, and compete with other club members. Clubs are like clans or guilds in other games, and they can have up to 100 members.</p>
102
- <p>To join or create a club, you need to go to the social menu and tap on the club tab. There, you will see a list of recommended clubs that you can join, or you can search for a specific club by name or tag. You can also create your own club by tapping on the create button and choosing a name, description, badge, type, trophy requirement, and region for your club. You can also invite your friends to join your club by sending them a link or a code.</p>
103
- <p>Once you are in a club, you can chat with other club members, see their profiles and stats, send them friend requests, invite them to play with you, or challenge them to friendly matches. You can also participate in club events and leagues, where you can earn rewards and fame for your club. You can also leave or switch clubs at any time if you want to.</p>
104
- <h3>How to Earn Trophies, Tokens, Gems, and Other Rewards</h3>
105
- <p>Brawl Stars has a lot of rewards that you can earn by playing the game and completing various tasks. Here are some of the main rewards that you can get:</p>
106
- <ul>
107
- <li>Trophies: Trophies are the main measure of your progress and skill in the game. You can earn trophies by winning matches in any game mode, or lose trophies by losing matches. The more trophies you have, the higher your rank and league will be. You can also unlock new game modes, events, and rewards by reaching certain trophy milestones.</li>
108
- <li>Tokens: Tokens are the main currency that you can use to open boxes. You can earn tokens by playing matches in any game mode, completing quests, ranking up your Brawlers, or watching ads. You can also get tokens from boxes or from the shop. You can use tokens to open brawl boxes, big boxes, or mega boxes, which contain coins, power points, gems, star points, gadgets, star powers, or new Brawlers.</li>
109
- <li>Gems: Gems are the premium currency that you can use to buy special offers, skins, boxes, coins, power points, star points, or brawl passes from the shop. You can get gems from boxes or from the shop using real money. You can also get gems from the brawl pass or from special events.</li>
110
- <li>Star Points: Star Points are a special currency that you can use to buy exclusive skins, boxes, or power points from the shop. You can get star points by ranking up your Brawlers to rank 10 or higher, reaching certain trophy milestones with your Brawlers, or completing certain quests.</li>
111
- <li>Brawl Pass: The brawl pass is a seasonal feature that gives you access to exclusive rewards and perks. You can get the brawl pass by spending gems or by completing certain quests. The brawl pass has two tracks: a free track and a premium track. The free track contains rewards that anyone can get by earning brawl pass tokens. The premium track contains more rewards that only brawl pass holders can get by earning brawl pass tokens. The brawl pass also gives you access to special quests and events that give you more rewards.</li>
112
- </ul>
113
- <h2>Conclusion</h2>
114
- <p>Brawl Stars is a fun and exciting game that you can play on your mobile device with players from around the world. You can choose from different characters with unique abilities and compete in various game modes with different objectives and rules. You can also customize your characters with skins, <p>Brawl Stars is a fun and exciting game that you can play on your mobile device with players from around the world. You can choose from different characters with unique abilities and compete in various game modes with different objectives and rules. You can also customize your characters with skins, gadgets, and star powers, and join or create a club with your friends.</p>
115
- <p>If you want to download Brawl Stars from apkure, you need to follow some steps carefully and be aware of the pros and cons of using this website. You also need to follow some tips and warnings to use apkure safely and legally. After downloading Brawl Stars from apkure, you can install and launch the game, choose and unlock new characters, join or create a club, and earn trophies, tokens, gems, and other rewards.</p>
116
- <p>We hope this article has helped you learn more about Brawl Stars and apkure. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you. And if you enjoyed this article, please share it with your friends who might be interested in Brawl Stars or apkure. Thank you for reading!</p>
117
- <h2>FAQs</h2>
118
- <p>Here are some frequently asked questions about Brawl Stars and apkure:</p>
119
- <h3>Is Brawl Stars free to play?</h3>
120
- <p>Yes, Brawl Stars is free to play. You can download and play the game without spending any money. However, the game also offers some optional in-app purchases that can enhance your gaming experience. You can buy gems, coins, power points, star points, brawl passes, skins, boxes, or special offers using real money. These purchases are not required to play the game, but they can help you unlock new content faster or customize your characters more.</p>
121
- <h3>Is apkure safe to use?</h3>
122
- <p>Apkure is not an official app store, so it does not have the same level of security and quality control as the Google Play Store or the Apple App Store. This means that there is a risk of downloading apps or games that contain malware, viruses, or scams that could harm your device or account. Therefore, you should always be careful when using apkure and check the ratings, reviews, and comments of the app or game before downloading it. You should also scan the downloaded file with a reliable antivirus software before installing it. And you should always backup your data before installing any app or game from apkure.</p>
123
- <h3>Is apkure legal to use?</h3>
124
- <p>Apkure is not illegal to use per se, but it may violate the terms of service of some app or game developers. Some developers may not allow their apps or games to be distributed outside of the official app stores or in certain regions or countries. Some developers may also not allow their apps or games to be modified or hacked in any way. By using apkure, you may be infringing on the intellectual property rights of the developers and risk getting banned or suspended from their services. Therefore, you should always respect the rights of the developers and follow the rules and guidelines of the app or game you are playing.</p>
125
- <h3>How do I update Brawl Stars after downloading it from apkure?</h3>
126
- <p>If you download Brawl Stars from apkure, you will not receive automatic updates from the official app store. This means that you may miss out on new features, bug fixes, security patches, or events that are released by Supercell. To update Brawl Stars after downloading it from apkure, you need to visit apkure again and look for the latest version of the game. Then you need to download and install it over the existing one. However, this may cause some issues with your data or account compatibility, so you should always backup your data before updating.</p>
127
- <h3>How do I uninstall Brawl Stars after downloading it from apkure?</h3>
128
- <p>If you want to uninstall Brawl Stars after downloading it from apkure, you can do so by following these steps:</p>
129
- <ol>
130
- <li>Go to your device's settings menu and tap on apps or applications.</li>
131
- <li>Look for Brawl Stars in the list of installed apps and tap on it.</li>
132
- <li>Tap on uninstall and confirm your action.</li>
133
- <li>Brawl Stars will be removed from your device.</li>
134
- </ol>
135
- <p>You can also delete the downloaded file from your device's storage if you want to free up some space.</p> 197e85843d<br />
136
- <br />
137
- <br />
spaces/1phancelerku/anime-remove-background/Dream Home Design and Makeover How to Unlock All Features with MOD APK.md DELETED
@@ -1,94 +0,0 @@
1
-
2
- <h1>Dream Home Design and Makeover Mod Apk: A Guide for Home Lovers</h1>
3
- <p>Do you love designing, decorating, and renovating homes? Do you want to unleash your creativity and create your own dream home? If yes, then you should try Dream Home Design and Makeover Mod Apk, a fun and addictive game that lets you transform ordinary houses into stunning masterpieces. In this game, you can choose from various projects, clients, styles, and items to create your own unique home design. You can also play puzzles to earn coins and gems that you can use to buy more items and unlock new features. With Dream Home Design and Makeover Mod Apk, you can enjoy unlimited money and gems, no ads, no root, high-quality graphics, and sound effects. In this article, we will show you how to download, install, play, and enjoy Dream Home Design and Makeover Mod Apk. We will also share some features, tips, tricks, pros, and cons of this game. So, if you are ready to become a home designer, read on!</p>
4
- <h2>dream home design and makeover mod apk</h2><br /><p><b><b>Download Zip</b> &#10027;&#10027;&#10027; <a href="https://jinyurl.com/2uNSJM">https://jinyurl.com/2uNSJM</a></b></p><br /><br />
5
- <h2>How to Download and Install Dream Home Design and Makeover Mod Apk</h2>
6
- <p>Downloading and installing Dream Home Design and Makeover Mod Apk is very easy. Just follow these simple steps:</p>
7
- <ol>
8
- <li>Find a reliable source for the mod apk file. You can search on Google or use the link below. Make sure that the file is safe, updated, and compatible with your device.</li>
9
- <li>Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources. This will allow you to install apps from sources other than the Google Play Store.</li>
10
- <li>Download and install the mod apk file. Once you have found the file, tap on it to start the download. After the download is complete, open the file manager app on your device and locate the file. Tap on it again to start the installation. Follow the instructions on the screen to complete the installation.</li>
11
- </ol>
12
- <p>Congratulations! You have successfully installed Dream Home Design and Makeover Mod Apk on your device. Now you can start playing the game.</p>
13
- <h2>How to Play Dream Home Design and Makeover Mod Apk</h2>
14
- <p>Playing Dream Home Design and Makeover Mod Apk is very simple. Just follow these steps:</p>
15
- <p>* dream home makeover unlimited money mod apk<br />
16
- * home design and renovation mod apk download<br />
17
- * dream house design game mod apk free<br />
18
- * home makeover and interior design mod apk<br />
19
- * dream home designer mod apk latest version<br />
20
- * home design makeover hack mod apk android<br />
21
- * dream home renovation game mod apk offline<br />
22
- * home design and decor mod apk unlimited gems<br />
23
- * dream house designer game mod apk online<br />
24
- * home makeover and decoration mod apk 2023<br />
25
- * dream home design game mod apk no ads<br />
26
- * home design makeover cheats mod apk ios<br />
27
- * dream house design and plan mod apk 3d<br />
28
- * home makeover and furniture design mod apk<br />
29
- * dream home designer game mod apk 2022<br />
30
- * home design makeover modded apk unlimited lives<br />
31
- * dream house design simulator mod apk pro<br />
32
- * home makeover and remodeling mod apk premium<br />
33
- * dream home design challenge mod apk full<br />
34
- * home design makeover cracked mod apk 2021<br />
35
- * dream house designer app mod apk unlocked<br />
36
- * home makeover and landscaping mod apk vip<br />
37
- * dream home design story mod apk new<br />
38
- * home design makeover patched mod apk 2020<br />
39
- * dream house designer hack mod apk 2019<br />
40
- * home makeover and gardening mod apk 2018<br />
41
- * dream home design studio mod apk old<br />
42
- * home design makeover updated mod apk 2017<br />
43
- * dream house designer cheat mod apk 2016<br />
44
- * home makeover and cleaning mod apk 2015<br />
45
- * dream home design ideas mod apk 2014<br />
46
- * home design makeover original mod apk 2013<br />
47
- * dream house designer tips mod apk 2012<br />
48
- * home makeover and painting mod apk 2011<br />
49
- * dream home design trends mod apk 2010<br />
50
- * home design makeover classic mod apk 2009<br />
51
- * dream house designer tricks mod apk 2008<br />
52
- * home makeover and flooring mod apk 2007<br />
53
- * dream home design styles mod apk 2006<br />
54
- * home design makeover modern mod apk 2005<br />
55
- * dream house designer guide mod apk 2004<br />
56
- * home makeover and lighting mod apk 2003<br />
57
- * dream home design themes mod apk 2002<br />
58
- * home design makeover vintage mod apk 2001<br />
59
- * dream house designer review mod apk 2000</p>
60
- <ol>
61
- <li>Choose a project and a client. When you open the game, you will see a map with different projects that you can choose from. Each project has a different client with different preferences, budget, style, etc. Tap and cons that you should be aware of. Here are some of the pros and cons of this game:</p>
62
- <table>
63
- <tr>
64
- <th>Pros</th>
65
- <th>Cons</th>
66
- </tr>
67
- <tr>
68
- <td><b>Fun and relaxing gameplay</b>. This game is fun and relaxing to play. You can enjoy designing, decorating, and renovating homes at your own pace. You can also play puzzles to challenge your brain and earn rewards.</td>
69
- <td><b>Requires internet connection and storage space</b>. This game requires internet connection to download, install, and play. It also requires storage space on your device to store the mod apk file and the game data. If you have a slow or unstable internet connection or a low storage space, you may experience some problems with this game.</td>
70
- </tr>
71
- <tr>
72
- <td><b>Creative and customizable design options</b>. This game offers creative and customizable design options that let you create your own dream home. You can choose from various projects, clients, styles, items, colors, etc. You can also preview your design and change it as you wish.</td>
73
- <td><b>May not be compatible with some devices or updates</b>. This game may not be compatible with some devices or updates. The mod apk file may not work properly on some devices or may cause some errors or crashes. The game may also not be updated regularly or may not support the latest features or updates of the original version.</td>
74
- </tr>
75
- <tr>
76
- <td><b>Free and easy to use mod apk</b>. This game is free and easy to use with the mod apk. You can enjoy unlimited money and gems, no ads, no root, high-quality graphics, and sound effects. You can also download and install the mod apk easily with the steps provided in this article.</td>
77
- <td><b>May have some bugs or glitches</b>. This game may have some bugs or glitches that affect the gameplay or the design. For example, some items may not appear correctly or some puzzles may not load properly. These bugs or glitches may be caused by the mod apk file or by the game itself.</td>
78
- </tr>
79
- </table>
80
- <p>These are just some of the pros and cons of Dream Home Design and Makeover Mod Apk. You may have your own opinions or experiences with this game. You can share them in the comments section below.</p>
81
- <h2>Conclusion</h2>
82
- <p>Dream Home Design and Makeover Mod Apk is a great game for home lovers who want to design their own dream home. It is fun, relaxing, creative, and customizable. It also has many features that make it more enjoyable than the original version. However, it also has some drawbacks that you should be aware of before playing it. In this article, we have shown you how to download, install, play, and enjoy Dream Home Design and Makeover Mod Apk. We have also shared some features, tips, tricks, pros, and cons of this game. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave them in the comments section below. Thank you for reading!</p>
83
- <h3>FAQs</h3>
84
- <p>Here are some frequently asked questions about Dream Home Design and Makeover Mod Apk:</p>
85
- <ol>
86
- <li><b>What is Dream Home Design and Makeover Mod Apk?</b><br>Dream Home Design and Makeover Mod Apk is a modified version of Dream Home Design and Makeover, a fun and addictive game that lets you design, decorate, and renovate homes. With this mod apk, you can enjoy unlimited money and gems, no ads, no root, high-quality graphics, and sound effects.</li>
87
- <li><b>How to download Dream Home Design and Makeover Mod Apk?</b><br>To download Dream Home Design and Makeover Mod Apk, you need to find a reliable source for the mod apk file. You can search on Google or use the link below. Then, you need to enable unknown sources on your device, download and install the mod apk file.</li>
88
- <li><b>How to play Dream Home Design and Makeover Mod Apk?</b><br>To play Dream Home Design and Makeover Mod Apk, you need to choose a project and a client, design your dream home with various tools and options, and complete puzzles to earn coins and gems.</li>
89
- <li><b>What are the features of Dream Home Design and Makeover Mod Apk?</b><br>Dream Home Design and Makeover Mod Apk has many features that make it more fun and enjoyable than the original version. Some of the features are unlimited money and gems, no ads, no root, high-quality graphics, and sound effects.</li>
90
- <li><b>What are the tips and tricks for Dream Home Design and Makeover Mod Apk?</b><br>Some of the tips and tricks for Dream Home Design and Makeover Mod Apk are to follow your client's preferences and budget, use hints and boosters wisely in puzzles, and experiment with different styles and colors.</li>
91
- <li><b>What are the pros and cons of Dream Home Design and Makeover Mod Apk?</b><br>Some of the pros of Dream Home Design and Makeover Mod Apk are its fun and relaxing gameplay, creative and customizable design options, and free and easy-to-use mod apk. Some of the cons are that it requires an internet connection and storage space, may not be compatible with some devices or updates, and may have some bugs or glitches.</li>
92
- </ol></p>
93
- <br />
94
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/FIFA Soccer Mod Menu APK - Get Unlimited Money and Unlock All Features in FIFA Mobile.md DELETED
@@ -1,98 +0,0 @@
1
-
2
- <h1>Download FIFA Mobile Mod Menu: How to Enjoy the Ultimate Soccer Experience on Your Mobile Device</h1>
3
- <p>If you are a fan of soccer games, you probably have heard of FIFA Mobile, the popular football simulation game developed by EA Sports. The game features real-world teams, players, and stadiums, allowing you to create your own football teams and compete against others online. But what if you want to have more fun and freedom in the game? What if you want to unlock all the features and modes without spending any money or time? That's where FIFA Mobile mod menu comes in.</p>
4
- <h2>download fifa mobile mod menu</h2><br /><p><b><b>Download File</b> &#10003;&#10003;&#10003; <a href="https://jinyurl.com/2uNUOY">https://jinyurl.com/2uNUOY</a></b></p><br /><br />
5
- <p>FIFA Mobile mod menu is a modified version of the game that gives you access to various cheats and hacks that can enhance your gaming experience. With this mod menu, you can unlock all players, teams, and modes, get unlimited money and gems, customize your settings, and enjoy faster and smoother gameplay. In this article, we will show you how to download and install FIFA Mobile mod menu, as well as some tips and tricks for playing the game with it. Let's get started!</p>
6
- <h2>Features of FIFA Mobile Mod Menu</h2>
7
- <p>FIFA Mobile mod menu offers a lot of features that can make your game more enjoyable and exciting. Here are some of them:</p>
8
- <ul>
9
- <li><b>Unlocked all players, teams, and modes</b>: You can choose from over 15,000 authentic soccer stars from over 600 teams, including world-class talent like Kylian Mbappé, Christian Pulisic, Vinicius Jr and Son Heung-min. You can also play in various modes, such as FIFA World Cup 2022™ mode, UEFA Champions League mode, Manager mode, Head-to-Head mode, VS Attack mode, and more.</li>
10
- <li><b>Unlimited money and gems</b>: You can get unlimited coins and gems that you can use to buy player items, upgrade your team, unlock new features, and more. You don't have to worry about running out of resources or spending real money on in-app purchases.</li>
11
- <li><b>Menu mod with various options</b>: You can access a menu mod that allows you to customize your preferences and settings. You can enable or disable cheats, such as freeze players, freeze goalkeeper, perfect skilled game, etc. You can also adjust the difficulty level, match duration, camera angle, sound effects, etc.</li>
12
- <li><b>Mod speed and perfect skill game</b>: You can enjoy faster and smoother gameplay with mod speed. You can also perform perfect skill moves and score goals with ease with perfect skill game.</li>
13
- </ul>
14
- <h2>How to Download and Install FIFA Mobile Mod Menu</h2>
15
- <p>Downloading and installing FIFA Mobile mod menu is easy and simple. Just follow these steps:</p>
16
- <ol>
17
- <li><b>Step 1</b>: Download the APK file from a trusted source. You can find many websites that offer FIFA Mobile mod menu APK files for free. However, be careful of fake or malicious links that may harm your device or steal your data. We recommend using 5Play, HappyMod, or APKYLO, as they are reliable sources that provide safe and updated APK files.</li>
18
- <li><b>Step 2</b>: Enable unknown sources on your device settings. Before you can install the APK file, you need to allow your device to install apps from unknown sources. To do this, go to your device settings, then security, then enable unknown sources. This will let you install apps that are not from the Google Play Store.</li>
19
- <li><b>Step 3</b>: Install the APK file and launch the game. After you have downloaded the APK file, locate it on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete. Once done, launch the game and enjoy the mod menu.</li>
20
- <li><b>Step 4</b>: Enjoy the mod menu and customize your preferences. When you open the game, you will see a mod menu icon on the top left corner of the screen. Tap on it to access the menu mod and choose the options you want to enable or disable. You can also change your settings anytime during the game by tapping on the icon again.</li>
21
- </ol>
22
- <h2>Tips and Tricks for Playing FIFA Mobile with Mod Menu</h2>
23
- <p>Playing FIFA Mobile with mod menu can be a lot of fun, but it can also be challenging if you don't know how to use it properly. Here are some tips and tricks that can help you get the most out of your game:</p>
24
- <ul>
25
- <li><b>Choose the best players and teams for your Ultimate Team</b>: With FIFA Mobile mod menu, you can unlock all players and teams in the game, but that doesn't mean you should use them randomly. You should still consider their ratings, skills, chemistry, and positions when building your Ultimate Team. You should also balance your team with attackers, midfielders, defenders, and goalkeepers. You can use the [FIFA Mobile Database] or [FUTHead] to find out more about the players and teams in the game.</li>
26
- <li><b>Use the advanced passing system to create more chances</b>: FIFA Mobile has an advanced passing system that allows you to control the direction, speed, and timing of your passes. You can use different types of passes, such as through balls, lobbed passes, driven passes, etc., to create more opportunities for scoring. You can also use the mod menu to enable perfect skilled game, which will make your passes more accurate and effective.</li>
27
- <li><b>Master the skill moves and tackle techniques</b>: FIFA Mobile has a variety of skill moves and tackle techniques that you can use to beat your opponents and win matches. You can use gestures, buttons, or virtual joysticks to perform skill moves, such as roulette, rainbow flick, heel-to-heel flick, etc. You can also use different types of tackles, such as slide tackle, standing tackle, jockeying, etc., to stop your opponents from scoring. You can use the mod menu to enable freeze players or freeze goalkeeper, which will make your skill moves and tackles easier and more successful.</li>
28
- <li><b>Compete in various modes and tournaments</b>: FIFA Mobile has a lot of modes and tournaments that you can play and enjoy with mod menu. You can play in FIFA World Cup 2022™ mode, UEFA Champions League mode, Manager mode, Head-to-Head mode, VS Attack mode, and more. You can also participate in various tournaments, such as Weekend Tournament, Division Rivals Tournament, Champions League Tournament, etc., to win rewards and trophies. You can use the mod menu to unlock all modes and tournaments without any restrictions or requirements.</li>
29
- </ul>
30
- <h2>Pros and Cons of FIFA Mobile Mod Menu</h2>
31
- <p>FIFA Mobile mod menu has its pros and cons that you should be aware of before downloading and installing it. Here are some of them:</p>
32
- <table>
33
- <tr><th>Pros</th><th>Cons</th></tr>
34
- <tr><td>- More fun, freedom, and customization: You can enjoy the game without any limitations or frustrations. You can unlock all features and modes, get unlimited resources, customize your settings, and have a smoother gameplay.</td><td>- Potential risks: Downloading and installing FIFA Mobile mod menu may expose your device to malware or viruses that may harm your device or steal your data. You should always download from trusted sources and scan your device regularly.</td></tr>
35
- <tr><td></td><td>- Compatibility issues: FIFA Mobile mod menu may not work well with some devices or versions of the game. You may experience crashes, glitches, errors, or lagging while playing the game. You should always check the compatibility of the mod menu with your device and game before installing it.</td></tr>
36
- <tr><td></td><td>- Ethical concerns: Using FIFA Mobile mod menu may be considered cheating or unfair by some players or developers. You may face bans or penalties if you use it online or in competitive modes. You should always respect the rules and regulations of the game and play responsibly.</td></tr>
37
- </table>
38
- <h1>Conclusion</h1>
39
- <p>FIFA Mobile is a great game for soccer fans who want to enjoy the ultimate soccer experience on their mobile devices. However, if you want to have more fun and freedom in the game, you can download and install FIFA Mobile mod menu, which gives you access to various cheats and hacks that can enhance your gaming experience. With FIFA Mobile mod menu, you can unlock all players, teams, and modes, get unlimited money and gems, customize your settings, and enjoy faster and smoother gameplay. However, you should also be aware of the potential risks, compatibility issues, and ethical concerns that come with using FIFA Mobile mod menu. You should always download from trusted sources, scan your device regularly, check the compatibility of the mod menu with your device and game, respect the rules and regulations of the game, and play responsibly. We hope this article has helped you learn how to download and install FIFA Mobile mod menu, as well as some tips and tricks for playing the game with it. Have fun and enjoy the game!</p>
40
- <h2>FAQs</h2>
41
- <p>Here are some frequently asked questions about FIFA Mobile mod menu:</p>
42
- <ol>
43
- <li><b>Is FIFA Mobile mod menu safe to use?</b>: FIFA Mobile mod menu is safe to use as long as you download it from trusted sources and scan your device regularly. However, there is always a risk of malware or viruses when downloading and installing any modded or hacked app. You should also be careful of fake or malicious links that may harm your device or steal your data.</li>
44
- <li><b>Will I get banned for using FIFA Mobile mod menu?</b>: There is a possibility of getting banned for using FIFA Mobile mod menu, especially if you use it online or in competitive modes. EA Sports has a strict policy against cheating or hacking in their games, and they may detect and penalize any players who use FIFA Mobile mod menu. You should always respect the rules and regulations of the game and play responsibly.</li>
45
- <li><b>How do I update FIFA Mobile mod menu?</b>: To update FIFA Mobile mod menu, you need to download and install the latest version of the APK file from the same source that you downloaded it from. You should also check the compatibility of the new version with your device and game before installing it.</li>
46
- <li><b>Can I use FIFA Mobile mod menu on iOS devices?</b>: No, FIFA Mobile mod menu is only available for Android devices. You cannot use it on iOS devices, such as iPhones or iPads.</li>
47
- <li><b>Can I play FIFA Mobile with my friends using mod menu?</b>: Yes, you can play FIFA Mobile with your friends using mod menu, but only if they also have the same mod menu installed on their devices. Otherwise, you may not be able to connect or play with them due to compatibility issues.</li>
48
- </ol></p>
49
- <p>download fifa mobile mod apk with menu<br />
50
- download fifa mobile mod menu unlimited money<br />
51
- download fifa mobile mod menu 18.1.03<br />
52
- download fifa mobile mod menu 5play<br />
53
- download fifa mobile mod menu happymod<br />
54
- download fifa mobile mod menu android<br />
55
- download fifa mobile mod menu ios<br />
56
- download fifa mobile mod menu no root<br />
57
- download fifa mobile mod menu latest version<br />
58
- download fifa mobile mod menu free<br />
59
- download fifa mobile mod menu 2022<br />
60
- download fifa mobile mod menu world cup mode<br />
61
- download fifa mobile mod menu unlocked all<br />
62
- download fifa mobile mod menu perfect skill game<br />
63
- download fifa mobile mod menu freeze players<br />
64
- download fifa mobile mod menu speed hack<br />
65
- download fifa mobile mod menu manager mode<br />
66
- download fifa mobile mod menu soccer icons<br />
67
- download fifa mobile mod menu realistic simulation<br />
68
- download fifa mobile mod menu offline<br />
69
- download fifa mobile mod menu online<br />
70
- download fifa mobile mod menu mega.nz<br />
71
- download fifa mobile mod menu mediafire<br />
72
- download fifa mobile mod menu apk pure<br />
73
- download fifa mobile mod menu rexdl<br />
74
- download fifa mobile mod menu revdl<br />
75
- download fifa mobile mod menu apk home<br />
76
- download fifa mobile mod menu an1.com<br />
77
- download fifa mobile mod menu apkdone.com<br />
78
- download fifa mobile mod menu apk4all.com<br />
79
- how to download fifa mobile mod menu<br />
80
- where to download fifa mobile mod menu<br />
81
- best site to download fifa mobile mod menu<br />
82
- safe way to download fifa mobile mod menu<br />
83
- easy way to download fifa mobile mod menu<br />
84
- fastest way to download fifa mobile mod menu<br />
85
- tips to download fifa mobile mod menu<br />
86
- guide to download fifa mobile mod menu<br />
87
- tutorial to download fifa mobile mod menu<br />
88
- video to download fifa mobile mod menu<br />
89
- review of fifa mobile mod menu download<br />
90
- rating of fifa mobile mod menu download<br />
91
- feedback of fifa mobile mod menu download<br />
92
- benefits of downloading fifa mobile mod menu <br />
93
- drawbacks of downloading fifa mobile mod menu <br />
94
- risks of downloading fifa mobile mod menu <br />
95
- alternatives to downloading fifa mobile mod menu <br />
96
- comparison of downloading fifa mobile mod menu</p>
97
- <br />
98
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/232labs/VToonify/vtoonify/model/stylegan/dataset.py DELETED
@@ -1,40 +0,0 @@
1
- from io import BytesIO
2
-
3
- import lmdb
4
- from PIL import Image
5
- from torch.utils.data import Dataset
6
-
7
-
8
- class MultiResolutionDataset(Dataset):
9
- def __init__(self, path, transform, resolution=256):
10
- self.env = lmdb.open(
11
- path,
12
- max_readers=32,
13
- readonly=True,
14
- lock=False,
15
- readahead=False,
16
- meminit=False,
17
- )
18
-
19
- if not self.env:
20
- raise IOError('Cannot open lmdb dataset', path)
21
-
22
- with self.env.begin(write=False) as txn:
23
- self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8'))
24
-
25
- self.resolution = resolution
26
- self.transform = transform
27
-
28
- def __len__(self):
29
- return self.length
30
-
31
- def __getitem__(self, index):
32
- with self.env.begin(write=False) as txn:
33
- key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8')
34
- img_bytes = txn.get(key)
35
-
36
- buffer = BytesIO(img_bytes)
37
- img = Image.open(buffer)
38
- img = self.transform(img)
39
-
40
- return img
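
A minimal usage sketch for the dataset class above (an assumption of this note, not code from the original space): it presumes an LMDB prepared with a utf-8 "length" key and per-image keys of the form f"{resolution}-{index:05d}", which is exactly what __getitem__ reads; the path and transform are placeholders.

# Hypothetical wiring of MultiResolutionDataset into a DataLoader; the path and
# transform are illustrative, the LMDB layout is described above.
from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
dataset = MultiResolutionDataset("data/images.lmdb", transform, resolution=256)
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)
batch = next(iter(loader))  # float tensor of shape [8, 3, 256, 256]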
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_537238KB.py DELETED
@@ -1,126 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from . import spec_utils
6
-
7
-
8
- class Conv2DBNActiv(nn.Module):
9
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
10
- super(Conv2DBNActiv, self).__init__()
11
- self.conv = nn.Sequential(
12
- nn.Conv2d(
13
- nin,
14
- nout,
15
- kernel_size=ksize,
16
- stride=stride,
17
- padding=pad,
18
- dilation=dilation,
19
- bias=False,
20
- ),
21
- nn.BatchNorm2d(nout),
22
- activ(),
23
- )
24
-
25
- def __call__(self, x):
26
- return self.conv(x)
27
-
28
-
29
- class SeperableConv2DBNActiv(nn.Module):
30
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
31
- super(SeperableConv2DBNActiv, self).__init__()
32
- self.conv = nn.Sequential(
33
- nn.Conv2d(
34
- nin,
35
- nin,
36
- kernel_size=ksize,
37
- stride=stride,
38
- padding=pad,
39
- dilation=dilation,
40
- groups=nin,
41
- bias=False,
42
- ),
43
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
44
- nn.BatchNorm2d(nout),
45
- activ(),
46
- )
47
-
48
- def __call__(self, x):
49
- return self.conv(x)
50
-
51
-
52
- class Encoder(nn.Module):
53
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
54
- super(Encoder, self).__init__()
55
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
56
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
57
-
58
- def __call__(self, x):
59
- skip = self.conv1(x)
60
- h = self.conv2(skip)
61
-
62
- return h, skip
63
-
64
-
65
- class Decoder(nn.Module):
66
- def __init__(
67
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
68
- ):
69
- super(Decoder, self).__init__()
70
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
71
- self.dropout = nn.Dropout2d(0.1) if dropout else None
72
-
73
- def __call__(self, x, skip=None):
74
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
75
- if skip is not None:
76
- skip = spec_utils.crop_center(skip, x)
77
- x = torch.cat([x, skip], dim=1)
78
- h = self.conv(x)
79
-
80
- if self.dropout is not None:
81
- h = self.dropout(h)
82
-
83
- return h
84
-
85
-
86
- class ASPPModule(nn.Module):
87
- def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
88
- super(ASPPModule, self).__init__()
89
- self.conv1 = nn.Sequential(
90
- nn.AdaptiveAvgPool2d((1, None)),
91
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
92
- )
93
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
94
- self.conv3 = SeperableConv2DBNActiv(
95
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
96
- )
97
- self.conv4 = SeperableConv2DBNActiv(
98
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
99
- )
100
- self.conv5 = SeperableConv2DBNActiv(
101
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
102
- )
103
- self.conv6 = SeperableConv2DBNActiv(
104
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
105
- )
106
- self.conv7 = SeperableConv2DBNActiv(
107
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
108
- )
109
- self.bottleneck = nn.Sequential(
110
- Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
111
- )
112
-
113
- def forward(self, x):
114
- _, _, h, w = x.size()
115
- feat1 = F.interpolate(
116
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
117
- )
118
- feat2 = self.conv2(x)
119
- feat3 = self.conv3(x)
120
- feat4 = self.conv4(x)
121
- feat5 = self.conv5(x)
122
- feat6 = self.conv6(x)
123
- feat7 = self.conv7(x)
124
- out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
125
- bottle = self.bottleneck(out)
126
- return bottle
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/components/learn-more.tsx DELETED
@@ -1,39 +0,0 @@
1
- import React from 'react'
2
- import { SourceAttribution } from '@/lib/bots/bing/types'
3
-
4
- export interface LearnMoreProps {
5
- sourceAttributions?: SourceAttribution[]
6
- }
7
-
8
- export function LearnMore({ sourceAttributions }: LearnMoreProps) {
9
- if (!sourceAttributions?.length) {
10
- return null
11
- }
12
-
13
- return (
14
- <div className="learn-more-root" role="list" aria-label="了解详细信息:">
15
- <div className="learn-more">了解详细信息:</div>
16
- <div className="attribution-container">
17
- <div className="attribution-items">
18
- {sourceAttributions.map((attribution, index) => {
19
- const { providerDisplayName, seeMoreUrl } = attribution
20
- const { host } = new URL(seeMoreUrl)
21
- return (
22
- <a
23
- key={index}
24
- className="attribution-item"
25
- target="_blank"
26
- role="listitem"
27
- href={seeMoreUrl}
28
- title={providerDisplayName}
29
- tabIndex={index}
30
- >
31
- {index + 1}. {host}
32
- </a>
33
- )
34
- })}
35
- </div>
36
- </div>
37
- </div>
38
- )
39
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/latent_diffusion/ddpm.py DELETED
@@ -1,441 +0,0 @@
1
- """
2
- wild mixture of
3
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
- https://github.com/CompVis/taming-transformers
6
- -- merci
7
- """
8
- import sys
9
- import os
10
-
11
- import torch
12
- import torch.nn as nn
13
- import numpy as np
14
- from contextlib import contextmanager
15
- from functools import partial
16
- from tqdm import tqdm
17
-
18
- from audioldm.utils import exists, default, count_params, instantiate_from_config
19
- from audioldm.latent_diffusion.ema import LitEma
20
- from audioldm.latent_diffusion.util import (
21
- make_beta_schedule,
22
- extract_into_tensor,
23
- noise_like,
24
- )
25
- import soundfile as sf
26
- import os
27
-
28
-
29
- __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
30
-
31
-
32
- def disabled_train(self, mode=True):
33
- """Overwrite model.train with this function to make sure train/eval mode
34
- does not change anymore."""
35
- return self
36
-
37
-
38
- def uniform_on_device(r1, r2, shape, device):
39
- return (r1 - r2) * torch.rand(*shape, device=device) + r2
40
-
41
-
42
- class DiffusionWrapper(nn.Module):
43
- def __init__(self, diff_model_config, conditioning_key):
44
- super().__init__()
45
- self.diffusion_model = instantiate_from_config(diff_model_config)
46
- self.conditioning_key = conditioning_key
47
- assert self.conditioning_key in [
48
- None,
49
- "concat",
50
- "crossattn",
51
- "hybrid",
52
- "adm",
53
- "film",
54
- ]
55
-
56
- def forward(
57
- self, x, t, c_concat: list = None, c_crossattn: list = None, c_film: list = None
58
- ):
59
- x = x.contiguous()
60
- t = t.contiguous()
61
-
62
- if self.conditioning_key is None:
63
- out = self.diffusion_model(x, t)
64
- elif self.conditioning_key == "concat":
65
- xc = torch.cat([x] + c_concat, dim=1)
66
- out = self.diffusion_model(xc, t)
67
- elif self.conditioning_key == "crossattn":
68
- cc = torch.cat(c_crossattn, 1)
69
- out = self.diffusion_model(x, t, context=cc)
70
- elif self.conditioning_key == "hybrid":
71
- xc = torch.cat([x] + c_concat, dim=1)
72
- cc = torch.cat(c_crossattn, 1)
73
- out = self.diffusion_model(xc, t, context=cc)
74
- elif (
75
- self.conditioning_key == "film"
76
- ): # The condition is assumed to be a global token, which will pass through a linear layer and be added to the time embedding for FiLM
77
- cc = c_film[0].squeeze(1) # only has one token
78
- out = self.diffusion_model(x, t, y=cc)
79
- elif self.conditioning_key == "adm":
80
- cc = c_crossattn[0]
81
- out = self.diffusion_model(x, t, y=cc)
82
- else:
83
- raise NotImplementedError()
84
-
85
- return out
86
-
87
-
88
- class DDPM(nn.Module):
89
- # classic DDPM with Gaussian diffusion, in image space
90
- def __init__(
91
- self,
92
- unet_config,
93
- timesteps=1000,
94
- beta_schedule="linear",
95
- loss_type="l2",
96
- ckpt_path=None,
97
- ignore_keys=[],
98
- load_only_unet=False,
99
- monitor="val/loss",
100
- use_ema=True,
101
- first_stage_key="image",
102
- latent_t_size=256,
103
- latent_f_size=16,
104
- channels=3,
105
- log_every_t=100,
106
- clip_denoised=True,
107
- linear_start=1e-4,
108
- linear_end=2e-2,
109
- cosine_s=8e-3,
110
- given_betas=None,
111
- original_elbo_weight=0.0,
112
- v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
113
- l_simple_weight=1.0,
114
- conditioning_key=None,
115
- parameterization="eps", # all assuming fixed variance schedules
116
- scheduler_config=None,
117
- use_positional_encodings=False,
118
- learn_logvar=False,
119
- logvar_init=0.0,
120
- ):
121
- super().__init__()
122
- assert parameterization in [
123
- "eps",
124
- "x0",
125
- ], 'currently only supporting "eps" and "x0"'
126
- self.parameterization = parameterization
127
- self.state = None
128
- # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
129
- self.cond_stage_model = None
130
- self.clip_denoised = clip_denoised
131
- self.log_every_t = log_every_t
132
- self.first_stage_key = first_stage_key
133
-
134
- self.latent_t_size = latent_t_size
135
- self.latent_f_size = latent_f_size
136
-
137
- self.channels = channels
138
- self.use_positional_encodings = use_positional_encodings
139
- self.model = DiffusionWrapper(unet_config, conditioning_key)
140
- count_params(self.model, verbose=True)
141
- self.use_ema = use_ema
142
- if self.use_ema:
143
- self.model_ema = LitEma(self.model)
144
- # print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
145
-
146
- self.use_scheduler = scheduler_config is not None
147
- if self.use_scheduler:
148
- self.scheduler_config = scheduler_config
149
-
150
- self.v_posterior = v_posterior
151
- self.original_elbo_weight = original_elbo_weight
152
- self.l_simple_weight = l_simple_weight
153
-
154
- if monitor is not None:
155
- self.monitor = monitor
156
-
157
- self.register_schedule(
158
- given_betas=given_betas,
159
- beta_schedule=beta_schedule,
160
- timesteps=timesteps,
161
- linear_start=linear_start,
162
- linear_end=linear_end,
163
- cosine_s=cosine_s,
164
- )
165
-
166
- self.loss_type = loss_type
167
-
168
- self.learn_logvar = learn_logvar
169
- self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
170
- if self.learn_logvar:
171
- self.logvar = nn.Parameter(self.logvar, requires_grad=True)
172
- else:
173
- self.logvar = nn.Parameter(self.logvar, requires_grad=False)
174
-
175
- self.logger_save_dir = None
176
- self.logger_project = None
177
- self.logger_version = None
178
- self.label_indices_total = None
179
- # Defaults so the system can always find a metric value for checkpointing
180
- self.metrics_buffer = {
181
- "val/kullback_leibler_divergence_sigmoid": 15.0,
182
- "val/kullback_leibler_divergence_softmax": 10.0,
183
- "val/psnr": 0.0,
184
- "val/ssim": 0.0,
185
- "val/inception_score_mean": 1.0,
186
- "val/inception_score_std": 0.0,
187
- "val/kernel_inception_distance_mean": 0.0,
188
- "val/kernel_inception_distance_std": 0.0,
189
- "val/frechet_inception_distance": 133.0,
190
- "val/frechet_audio_distance": 32.0,
191
- }
192
- self.initial_learning_rate = None
193
-
194
- def get_log_dir(self):
195
- if (
196
- self.logger_save_dir is None
197
- and self.logger_project is None
198
- and self.logger_version is None
199
- ):
200
- return os.path.join(
201
- self.logger.save_dir, self.logger._project, self.logger.version
202
- )
203
- else:
204
- return os.path.join(
205
- self.logger_save_dir, self.logger_project, self.logger_version
206
- )
207
-
208
- def set_log_dir(self, save_dir, project, version):
209
- self.logger_save_dir = save_dir
210
- self.logger_project = project
211
- self.logger_version = version
212
-
213
- def register_schedule(
214
- self,
215
- given_betas=None,
216
- beta_schedule="linear",
217
- timesteps=1000,
218
- linear_start=1e-4,
219
- linear_end=2e-2,
220
- cosine_s=8e-3,
221
- ):
222
- if exists(given_betas):
223
- betas = given_betas
224
- else:
225
- betas = make_beta_schedule(
226
- beta_schedule,
227
- timesteps,
228
- linear_start=linear_start,
229
- linear_end=linear_end,
230
- cosine_s=cosine_s,
231
- )
232
- alphas = 1.0 - betas
233
- alphas_cumprod = np.cumprod(alphas, axis=0)
234
- alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
235
-
236
- (timesteps,) = betas.shape
237
- self.num_timesteps = int(timesteps)
238
- self.linear_start = linear_start
239
- self.linear_end = linear_end
240
- assert (
241
- alphas_cumprod.shape[0] == self.num_timesteps
242
- ), "alphas have to be defined for each timestep"
243
-
244
- to_torch = partial(torch.tensor, dtype=torch.float32)
245
-
246
- self.register_buffer("betas", to_torch(betas))
247
- self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
248
- self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))
249
-
250
- # calculations for diffusion q(x_t | x_{t-1}) and others
251
- self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
252
- self.register_buffer(
253
- "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
254
- )
255
- self.register_buffer(
256
- "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
257
- )
258
- self.register_buffer(
259
- "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
260
- )
261
- self.register_buffer(
262
- "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
263
- )
264
-
265
- # calculations for posterior q(x_{t-1} | x_t, x_0)
266
- posterior_variance = (1 - self.v_posterior) * betas * (
267
- 1.0 - alphas_cumprod_prev
268
- ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
269
- # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
270
- self.register_buffer("posterior_variance", to_torch(posterior_variance))
271
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
272
- self.register_buffer(
273
- "posterior_log_variance_clipped",
274
- to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
275
- )
276
- self.register_buffer(
277
- "posterior_mean_coef1",
278
- to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
279
- )
280
- self.register_buffer(
281
- "posterior_mean_coef2",
282
- to_torch(
283
- (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
284
- ),
285
- )
286
-
287
- if self.parameterization == "eps":
288
- lvlb_weights = self.betas**2 / (
289
- 2
290
- * self.posterior_variance
291
- * to_torch(alphas)
292
- * (1 - self.alphas_cumprod)
293
- )
294
- elif self.parameterization == "x0":
295
- lvlb_weights = (
296
- 0.5
297
- * np.sqrt(torch.Tensor(alphas_cumprod))
298
- / (2.0 * 1 - torch.Tensor(alphas_cumprod))
299
- )
300
- else:
301
- raise NotImplementedError("mu not supported")
302
- # TODO how to choose this term
303
- lvlb_weights[0] = lvlb_weights[1]
304
- self.register_buffer("lvlb_weights", lvlb_weights, persistent=False)
305
- assert not torch.isnan(self.lvlb_weights).all()
306
-
307
- @contextmanager
308
- def ema_scope(self, context=None):
309
- if self.use_ema:
310
- self.model_ema.store(self.model.parameters())
311
- self.model_ema.copy_to(self.model)
312
- if context is not None:
313
- # print(f"{context}: Switched to EMA weights")
314
- pass
315
- try:
316
- yield None
317
- finally:
318
- if self.use_ema:
319
- self.model_ema.restore(self.model.parameters())
320
- if context is not None:
321
- # print(f"{context}: Restored training weights")
322
- pass
323
-
324
- def q_mean_variance(self, x_start, t):
325
- """
326
- Get the distribution q(x_t | x_0).
327
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
328
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
329
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
330
- """
331
- mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
332
- variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
333
- log_variance = extract_into_tensor(
334
- self.log_one_minus_alphas_cumprod, t, x_start.shape
335
- )
336
- return mean, variance, log_variance
337
-
338
- def predict_start_from_noise(self, x_t, t, noise):
339
- return (
340
- extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
341
- - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
342
- * noise
343
- )
344
-
345
- def q_posterior(self, x_start, x_t, t):
346
- posterior_mean = (
347
- extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
348
- + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
349
- )
350
- posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
351
- posterior_log_variance_clipped = extract_into_tensor(
352
- self.posterior_log_variance_clipped, t, x_t.shape
353
- )
354
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
355
-
356
- def p_mean_variance(self, x, t, clip_denoised: bool):
357
- model_out = self.model(x, t)
358
- if self.parameterization == "eps":
359
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
360
- elif self.parameterization == "x0":
361
- x_recon = model_out
362
- if clip_denoised:
363
- x_recon.clamp_(-1.0, 1.0)
364
-
365
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
366
- x_start=x_recon, x_t=x, t=t
367
- )
368
- return model_mean, posterior_variance, posterior_log_variance
369
-
370
- @torch.no_grad()
371
- def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
372
- b, *_, device = *x.shape, x.device
373
- model_mean, _, model_log_variance = self.p_mean_variance(
374
- x=x, t=t, clip_denoised=clip_denoised
375
- )
376
- noise = noise_like(x.shape, device, repeat_noise)
377
- # no noise when t == 0
378
- nonzero_mask = (
379
- (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))).contiguous()
380
- )
381
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
382
-
383
- @torch.no_grad()
384
- def p_sample_loop(self, shape, return_intermediates=False):
385
- device = self.betas.device
386
- b = shape[0]
387
- img = torch.randn(shape, device=device)
388
- intermediates = [img]
389
- for i in tqdm(
390
- reversed(range(0, self.num_timesteps)),
391
- desc="Sampling t",
392
- total=self.num_timesteps,
393
- ):
394
- img = self.p_sample(
395
- img,
396
- torch.full((b,), i, device=device, dtype=torch.long),
397
- clip_denoised=self.clip_denoised,
398
- )
399
- if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
400
- intermediates.append(img)
401
- if return_intermediates:
402
- return img, intermediates
403
- return img
404
-
405
- @torch.no_grad()
406
- def sample(self, batch_size=16, return_intermediates=False):
407
- channels = self.channels
408
- shape = (batch_size, channels, self.latent_t_size, self.latent_f_size)
409
- return self.p_sample_loop(shape, return_intermediates=return_intermediates)
410
-
411
- def q_sample(self, x_start, t, noise=None):
412
- noise = default(noise, lambda: torch.randn_like(x_start))
413
- return (
414
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
415
- + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
416
- * noise
417
- )
418
-
419
- def forward(self, x, *args, **kwargs):
420
- t = torch.randint(
421
- 0, self.num_timesteps, (x.shape[0],), device=self.device
422
- ).long()
423
- return self.p_losses(x, t, *args, **kwargs)
424
-
425
- def get_input(self, batch, k):
426
- # fbank, log_magnitudes_stft, label_indices, fname, waveform, clip_label, text = batch
427
- fbank, log_magnitudes_stft, label_indices, fname, waveform, text = batch
428
- ret = {}
429
-
430
- ret["fbank"] = (
431
- fbank.unsqueeze(1).to(memory_format=torch.contiguous_format).float()
432
- )
433
- ret["stft"] = log_magnitudes_stft.to(
434
- memory_format=torch.contiguous_format
435
- ).float()
436
- # ret["clip_label"] = clip_label.to(memory_format=torch.contiguous_format).float()
437
- ret["waveform"] = waveform.to(memory_format=torch.contiguous_format).float()
438
- ret["text"] = list(text)
439
- ret["fname"] = fname
440
-
441
- return ret[k]
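
A self-contained sketch of the closed form that register_schedule and q_sample above implement, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise (an illustration by assumption, not code from the repo; the schedule below mirrors the default linear_start=1e-4 and linear_end=2e-2 over 1000 steps, and the latent shape is made up).

# Standalone illustration of q_sample and its inversion for the "eps"
# parameterization; nothing here depends on the DDPM class itself.
import torch

timesteps = 1000
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, timesteps) ** 2  # assumed "linear" schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(4, 8, 256, 16)                 # fake latent batch (B, C, T, F)
t = torch.randint(0, timesteps, (4,))
noise = torch.randn_like(x0)

sqrt_ac = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)
sqrt_1m = (1.0 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
x_t = sqrt_ac * x0 + sqrt_1m * noise            # same closed form as DDPM.q_sample

# predict_start_from_noise inverts this exactly when given the true noise:
x0_rec = (x_t - sqrt_1m * noise) / sqrt_ac
print(torch.allclose(x0_rec, x0, atol=1e-4))    # True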
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/models/base_model.py DELETED
@@ -1,500 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- from typing import Dict
4
-
5
- import torch
6
- import torch.nn as nn
7
-
8
- from .utils import mean_with_lens, repeat_tensor
9
-
10
-
11
- class CaptionModel(nn.Module):
12
- """
13
- Encoder-decoder captioning model.
14
- """
15
-
16
- pad_idx = 0
17
- start_idx = 1
18
- end_idx = 2
19
- max_length = 20
20
-
21
- def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs):
22
- super().__init__()
23
- self.encoder = encoder
24
- self.decoder = decoder
25
- self.vocab_size = decoder.vocab_size
26
- self.train_forward_keys = ["cap", "cap_len", "ss_ratio"]
27
- self.inference_forward_keys = ["sample_method", "max_length", "temp"]
28
- freeze_encoder = kwargs.get("freeze_encoder", False)
29
- if freeze_encoder:
30
- for param in self.encoder.parameters():
31
- param.requires_grad = False
32
- self.check_decoder_compatibility()
33
-
34
- def check_decoder_compatibility(self):
35
- compatible_decoders = [x.__class__.__name__ for x in self.compatible_decoders]
36
- assert isinstance(self.decoder, self.compatible_decoders), \
37
- f"{self.decoder.__class__.__name__} is incompatible with " \
38
- f"{self.__class__.__name__}, please use decoder in {compatible_decoders} "
39
-
40
- @classmethod
41
- def set_index(cls, start_idx, end_idx):
42
- cls.start_idx = start_idx
43
- cls.end_idx = end_idx
44
-
45
- def forward(self, input_dict: Dict):
46
- """
47
- input_dict: {
48
- (required)
49
- mode: train/inference,
50
- spec,
51
- spec_len,
52
- fc,
53
- attn,
54
- attn_len,
55
- [sample_method: greedy],
56
- [temp: 1.0] (in case of no teacher forcing)
57
-
58
- (optional, mode=train)
59
- cap,
60
- cap_len,
61
- ss_ratio,
62
-
63
- (optional, mode=inference)
64
- sample_method: greedy/beam,
65
- max_length,
66
- temp,
67
- beam_size (optional, sample_method=beam),
68
- n_best (optional, sample_method=beam),
69
- }
70
- """
71
- # encoder_input_keys = ["spec", "spec_len", "fc", "attn", "attn_len"]
72
- # encoder_input = { key: input_dict[key] for key in encoder_input_keys }
73
- encoder_output_dict = self.encoder(input_dict)
74
- if input_dict["mode"] == "train":
75
- forward_dict = {
76
- "mode": "train", "sample_method": "greedy", "temp": 1.0
77
- }
78
- for key in self.train_forward_keys:
79
- forward_dict[key] = input_dict[key]
80
- forward_dict.update(encoder_output_dict)
81
- output = self.train_forward(forward_dict)
82
- elif input_dict["mode"] == "inference":
83
- forward_dict = {"mode": "inference"}
84
- default_args = { "sample_method": "greedy", "max_length": self.max_length, "temp": 1.0 }
85
- for key in self.inference_forward_keys:
86
- if key in input_dict:
87
- forward_dict[key] = input_dict[key]
88
- else:
89
- forward_dict[key] = default_args[key]
90
-
91
- if forward_dict["sample_method"] == "beam":
92
- forward_dict["beam_size"] = input_dict.get("beam_size", 3)
93
- forward_dict["n_best"] = input_dict.get("n_best", False)
94
- forward_dict["n_best_size"] = input_dict.get("n_best_size", forward_dict["beam_size"])
95
- elif forward_dict["sample_method"] == "dbs":
96
- forward_dict["beam_size"] = input_dict.get("beam_size", 6)
97
- forward_dict["group_size"] = input_dict.get("group_size", 3)
98
- forward_dict["diversity_lambda"] = input_dict.get("diversity_lambda", 0.5)
99
- forward_dict["group_nbest"] = input_dict.get("group_nbest", True)
100
-
101
- forward_dict.update(encoder_output_dict)
102
- output = self.inference_forward(forward_dict)
103
- else:
104
- raise Exception("mode should be either 'train' or 'inference'")
105
-
106
- return output
107
-
108
- def prepare_output(self, input_dict):
109
- output = {}
110
- batch_size = input_dict["fc_emb"].size(0)
111
- if input_dict["mode"] == "train":
112
- max_length = input_dict["cap"].size(1) - 1
113
- elif input_dict["mode"] == "inference":
114
- max_length = input_dict["max_length"]
115
- else:
116
- raise Exception("mode should be either 'train' or 'inference'")
117
- device = input_dict["fc_emb"].device
118
- output["seq"] = torch.full((batch_size, max_length), self.end_idx,
119
- dtype=torch.long)
120
- output["logit"] = torch.empty(batch_size, max_length,
121
- self.vocab_size).to(device)
122
- output["sampled_logprob"] = torch.zeros(batch_size, max_length)
123
- output["embed"] = torch.empty(batch_size, max_length,
124
- self.decoder.d_model).to(device)
125
- return output
126
-
127
- def train_forward(self, input_dict):
128
- if input_dict["ss_ratio"] != 1: # scheduled sampling training
129
- input_dict["mode"] = "train"
130
- return self.stepwise_forward(input_dict)
131
- output = self.seq_forward(input_dict)
132
- self.train_process(output, input_dict)
133
- return output
134
-
135
- def seq_forward(self, input_dict):
136
- raise NotImplementedError
137
-
138
- def train_process(self, output, input_dict):
139
- pass
140
-
141
- def inference_forward(self, input_dict):
142
- if input_dict["sample_method"] == "beam":
143
- return self.beam_search(input_dict)
144
- elif input_dict["sample_method"] == "dbs":
145
- return self.diverse_beam_search(input_dict)
146
- return self.stepwise_forward(input_dict)
147
-
148
- def stepwise_forward(self, input_dict):
149
- """Step-by-step decoding"""
150
- output = self.prepare_output(input_dict)
151
- max_length = output["seq"].size(1)
152
- # start sampling
153
- for t in range(max_length):
154
- input_dict["t"] = t
155
- self.decode_step(input_dict, output)
156
- if input_dict["mode"] == "inference": # decide whether to stop when sampling
157
- unfinished_t = output["seq"][:, t] != self.end_idx
158
- if t == 0:
159
- unfinished = unfinished_t
160
- else:
161
- unfinished *= unfinished_t
162
- output["seq"][:, t][~unfinished] = self.end_idx
163
- if unfinished.sum() == 0:
164
- break
165
- self.stepwise_process(output)
166
- return output
167
-
168
- def decode_step(self, input_dict, output):
169
- """Decoding operation of timestep t"""
170
- decoder_input = self.prepare_decoder_input(input_dict, output)
171
- # feed to the decoder to get logit
172
- output_t = self.decoder(decoder_input)
173
- logit_t = output_t["logit"]
174
- # assert logit_t.ndim == 3
175
- if logit_t.size(1) == 1:
176
- logit_t = logit_t.squeeze(1)
177
- embed_t = output_t["embed"].squeeze(1)
178
- elif logit_t.size(1) > 1:
179
- logit_t = logit_t[:, -1, :]
180
- embed_t = output_t["embed"][:, -1, :]
181
- else:
182
- raise Exception("no logit output")
183
- # sample the next input word and get the corresponding logit
184
- sampled = self.sample_next_word(logit_t,
185
- method=input_dict["sample_method"],
186
- temp=input_dict["temp"])
187
-
188
- output_t.update(sampled)
189
- output_t["t"] = input_dict["t"]
190
- output_t["logit"] = logit_t
191
- output_t["embed"] = embed_t
192
- self.stepwise_process_step(output, output_t)
193
-
194
- def prepare_decoder_input(self, input_dict, output):
195
- """Prepare the inp ut dict for the decoder"""
196
- raise NotImplementedError
197
-
198
- def stepwise_process_step(self, output, output_t):
199
- """Postprocessing (save output values) after each timestep t"""
200
- t = output_t["t"]
201
- output["logit"][:, t, :] = output_t["logit"]
202
- output["seq"][:, t] = output_t["word"]
203
- output["sampled_logprob"][:, t] = output_t["probs"]
204
- output["embed"][:, t, :] = output_t["embed"]
205
-
206
- def stepwise_process(self, output):
207
- """Postprocessing after the whole step-by-step autoregressive decoding"""
208
- pass
209
-
210
- def sample_next_word(self, logit, method, temp):
211
- """Sample the next word, given probs output by the decoder"""
212
- logprob = torch.log_softmax(logit, dim=1)
213
- if method == "greedy":
214
- sampled_logprob, word = torch.max(logprob.detach(), 1)
215
- elif method == "gumbel":
216
- def sample_gumbel(shape, eps=1e-20):
217
- U = torch.rand(shape).to(logprob.device)
218
- return -torch.log(-torch.log(U + eps) + eps)
219
- def gumbel_softmax_sample(logit, temperature):
220
- y = logit + sample_gumbel(logit.size())
221
- return torch.log_softmax(y / temperature, dim=-1)
222
- _logprob = gumbel_softmax_sample(logprob, temp)
223
- _, word = torch.max(_logprob.data, 1)
224
- sampled_logprob = logprob.gather(1, word.unsqueeze(-1))
225
- else:
226
- logprob = logprob / temp
227
- if method.startswith("top"):
228
- top_num = float(method[3:])
229
- if 0 < top_num < 1: # top-p sampling
230
- probs = torch.softmax(logit, dim=1)
231
- sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
232
- _cumsum = sorted_probs.cumsum(1)
233
- mask = _cumsum < top_num
234
- mask = torch.cat([torch.ones_like(mask[:,:1]), mask[:,:-1]], 1)
235
- sorted_probs = sorted_probs * mask.to(sorted_probs)
236
- sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
237
- logprob.scatter_(1, sorted_indices, sorted_probs.log())
238
- else: # top-k sampling
239
- k = int(top_num)
240
- tmp = torch.empty_like(logprob).fill_(float('-inf'))
241
- topk, indices = torch.topk(logprob, k, dim=1)
242
- tmp = tmp.scatter(1, indices, topk)
243
- logprob = tmp
244
- word = torch.distributions.Categorical(logits=logprob.detach()).sample()
245
- sampled_logprob = logprob.gather(1, word.unsqueeze(-1)).squeeze(1)
246
- word = word.detach().long()
247
- # sampled_logprob: [N,], word: [N,]
248
- return {"word": word, "probs": sampled_logprob}
249
-
250
- def beam_search(self, input_dict):
251
- output = self.prepare_output(input_dict)
252
- max_length = input_dict["max_length"]
253
- beam_size = input_dict["beam_size"]
254
- if input_dict["n_best"]:
255
- n_best_size = input_dict["n_best_size"]
256
- batch_size, max_length = output["seq"].size()
257
- output["seq"] = torch.full((batch_size, n_best_size, max_length),
258
- self.end_idx, dtype=torch.long)
259
-
260
- temp = input_dict["temp"]
261
- # instance-by-instance beam search
262
- for i in range(output["seq"].size(0)):
263
- output_i = self.prepare_beamsearch_output(input_dict)
264
- input_dict["sample_idx"] = i
265
- for t in range(max_length):
266
- input_dict["t"] = t
267
- output_t = self.beamsearch_step(input_dict, output_i)
268
- #######################################
269
- # merge with previous beam and select the current max prob beam
270
- #######################################
271
- logit_t = output_t["logit"]
272
- if logit_t.size(1) == 1:
273
- logit_t = logit_t.squeeze(1)
274
- elif logit_t.size(1) > 1:
275
- logit_t = logit_t[:, -1, :]
276
- else:
277
- raise Exception("no logit output")
278
- logprob_t = torch.log_softmax(logit_t, dim=1)
279
- logprob_t = torch.log_softmax(logprob_t / temp, dim=1)
280
- logprob_t = output_i["topk_logprob"].unsqueeze(1) + logprob_t
281
- if t == 0: # for the first step, all k seq will have the same probs
282
- topk_logprob, topk_words = logprob_t[0].topk(
283
- beam_size, 0, True, True)
284
- else: # unroll and find top logprob, and their unrolled indices
285
- topk_logprob, topk_words = logprob_t.view(-1).topk(
286
- beam_size, 0, True, True)
287
- topk_words = topk_words.cpu()
288
- output_i["topk_logprob"] = topk_logprob
289
- # output_i["prev_words_beam"] = topk_words // self.vocab_size # [beam_size,]
290
- output_i["prev_words_beam"] = torch.div(topk_words, self.vocab_size,
291
- rounding_mode='trunc')
292
- output_i["next_word"] = topk_words % self.vocab_size # [beam_size,]
293
- if t == 0:
294
- output_i["seq"] = output_i["next_word"].unsqueeze(1)
295
- else:
296
- output_i["seq"] = torch.cat([
297
- output_i["seq"][output_i["prev_words_beam"]],
298
- output_i["next_word"].unsqueeze(1)], dim=1)
299
-
300
- # add finished beams to results
301
- is_end = output_i["next_word"] == self.end_idx
302
- if t == max_length - 1:
303
- is_end.fill_(1)
304
-
305
- for beam_idx in range(beam_size):
306
- if is_end[beam_idx]:
307
- final_beam = {
308
- "seq": output_i["seq"][beam_idx].clone(),
309
- "score": output_i["topk_logprob"][beam_idx].item()
310
- }
311
- final_beam["score"] = final_beam["score"] / (t + 1)
312
- output_i["done_beams"].append(final_beam)
313
- output_i["topk_logprob"][is_end] -= 1000
314
-
315
- self.beamsearch_process_step(output_i, output_t)
316
-
317
- self.beamsearch_process(output, output_i, input_dict)
318
- return output
319
-
320
- def prepare_beamsearch_output(self, input_dict):
321
- beam_size = input_dict["beam_size"]
322
- device = input_dict["fc_emb"].device
323
- output = {
324
- "topk_logprob": torch.zeros(beam_size).to(device),
325
- "seq": None,
326
- "prev_words_beam": None,
327
- "next_word": None,
328
- "done_beams": [],
329
- }
330
- return output
331
-
332
- def beamsearch_step(self, input_dict, output_i):
333
- decoder_input = self.prepare_beamsearch_decoder_input(input_dict, output_i)
334
- output_t = self.decoder(decoder_input)
335
- output_t["t"] = input_dict["t"]
336
- return output_t
337
-
338
- def prepare_beamsearch_decoder_input(self, input_dict, output_i):
339
- raise NotImplementedError
340
-
341
- def beamsearch_process_step(self, output_i, output_t):
342
- pass
343
-
344
- def beamsearch_process(self, output, output_i, input_dict):
345
- i = input_dict["sample_idx"]
346
- done_beams = sorted(output_i["done_beams"], key=lambda x: -x["score"])
347
- if input_dict["n_best"]:
348
- done_beams = done_beams[:input_dict["n_best_size"]]
349
- for out_idx, done_beam in enumerate(done_beams):
350
- seq = done_beam["seq"]
351
- output["seq"][i][out_idx, :len(seq)] = seq
352
- else:
353
- seq = done_beams[0]["seq"]
354
- output["seq"][i][:len(seq)] = seq
355
-
356
- def diverse_beam_search(self, input_dict):
357
-
358
- def add_diversity(seq_table, logprob, t, divm, diversity_lambda, bdash):
359
- local_time = t - divm
360
- unaug_logprob = logprob.clone()
361
-
362
- if divm > 0:
363
- change = torch.zeros(logprob.size(-1))
364
- for prev_choice in range(divm):
365
- prev_decisions = seq_table[prev_choice][..., local_time]
366
- for prev_labels in range(bdash):
367
- change.scatter_add_(0, prev_decisions[prev_labels], change.new_ones(1))
368
-
369
- change = change.to(logprob.device)
370
- logprob = logprob - repeat_tensor(change, bdash) * diversity_lambda
371
-
372
- return logprob, unaug_logprob
373
-
374
- output = self.prepare_output(input_dict)
375
- group_size = input_dict["group_size"]
376
- batch_size = output["seq"].size(0)
377
- beam_size = input_dict["beam_size"]
378
- bdash = beam_size // group_size
379
- input_dict["bdash"] = bdash
380
- diversity_lambda = input_dict["diversity_lambda"]
381
- device = input_dict["fc_emb"].device
382
- max_length = input_dict["max_length"]
383
- temp = input_dict["temp"]
384
- group_nbest = input_dict["group_nbest"]
385
- batch_size, max_length = output["seq"].size()
386
- if group_nbest:
387
- output["seq"] = torch.full((batch_size, beam_size, max_length),
388
- self.end_idx, dtype=torch.long)
389
- else:
390
- output["seq"] = torch.full((batch_size, group_size, max_length),
391
- self.end_idx, dtype=torch.long)
392
-
393
-
394
- for i in range(batch_size):
395
- input_dict["sample_idx"] = i
396
- seq_table = [torch.LongTensor(bdash, 0) for _ in range(group_size)] # group_size x [bdash, 0]
397
- logprob_table = [torch.zeros(bdash).to(device) for _ in range(group_size)]
398
- done_beams_table = [[] for _ in range(group_size)]
399
-
400
- output_i = {
401
- "prev_words_beam": [None for _ in range(group_size)],
402
- "next_word": [None for _ in range(group_size)],
403
- "state": [None for _ in range(group_size)]
404
- }
405
-
406
- for t in range(max_length + group_size - 1):
407
- input_dict["t"] = t
408
- for divm in range(group_size):
409
- input_dict["divm"] = divm
410
- if t >= divm and t <= max_length + divm - 1:
411
- local_time = t - divm
412
- decoder_input = self.prepare_dbs_decoder_input(input_dict, output_i)
413
- output_t = self.decoder(decoder_input)
414
- output_t["divm"] = divm
415
- logit_t = output_t["logit"]
416
- if logit_t.size(1) == 1:
417
- logit_t = logit_t.squeeze(1)
418
- elif logit_t.size(1) > 1:
419
- logit_t = logit_t[:, -1, :]
420
- else:
421
- raise Exception("no logit output")
422
- logprob_t = torch.log_softmax(logit_t, dim=1)
423
- logprob_t = torch.log_softmax(logprob_t / temp, dim=1)
424
- logprob_t, unaug_logprob_t = add_diversity(seq_table, logprob_t, t, divm, diversity_lambda, bdash)
425
- logprob_t = logprob_table[divm].unsqueeze(-1) + logprob_t
426
- if local_time == 0: # for the first step, all k seq will have the same probs
427
- topk_logprob, topk_words = logprob_t[0].topk(
428
- bdash, 0, True, True)
429
- else: # unroll and find top logprob, and their unrolled indices
430
- topk_logprob, topk_words = logprob_t.view(-1).topk(
431
- bdash, 0, True, True)
432
- topk_words = topk_words.cpu()
433
- logprob_table[divm] = topk_logprob
434
- output_i["prev_words_beam"][divm] = topk_words // self.vocab_size # [bdash,]
435
- output_i["next_word"][divm] = topk_words % self.vocab_size # [bdash,]
436
- if local_time > 0:
437
- seq_table[divm] = seq_table[divm][output_i["prev_words_beam"][divm]]
438
- seq_table[divm] = torch.cat([
439
- seq_table[divm],
440
- output_i["next_word"][divm].unsqueeze(-1)], -1)
441
-
442
- is_end = seq_table[divm][:, t-divm] == self.end_idx
443
- assert seq_table[divm].shape[-1] == t - divm + 1
444
- if t == max_length + divm - 1:
445
- is_end.fill_(1)
446
- for beam_idx in range(bdash):
447
- if is_end[beam_idx]:
448
- final_beam = {
449
- "seq": seq_table[divm][beam_idx].clone(),
450
- "score": logprob_table[divm][beam_idx].item()
451
- }
452
- final_beam["score"] = final_beam["score"] / (t - divm + 1)
453
- done_beams_table[divm].append(final_beam)
454
- logprob_table[divm][is_end] -= 1000
455
- self.dbs_process_step(output_i, output_t)
456
- done_beams_table = [sorted(done_beams_table[divm], key=lambda x: -x["score"])[:bdash] for divm in range(group_size)]
457
- if group_nbest:
458
- done_beams = sum(done_beams_table, [])
459
- else:
460
- done_beams = [group_beam[0] for group_beam in done_beams_table]
461
- for _, done_beam in enumerate(done_beams):
462
- output["seq"][i, _, :len(done_beam["seq"])] = done_beam["seq"]
463
-
464
- return output
465
-
466
- def prepare_dbs_decoder_input(self, input_dict, output_i):
467
- raise NotImplementedError
468
-
469
- def dbs_process_step(self, output_i, output_t):
470
- pass
471
-
472
-
473
- class CaptionSequenceModel(nn.Module):
474
-
475
- def __init__(self, model, seq_output_size):
476
- super().__init__()
477
- self.model = model
478
- if model.decoder.d_model != seq_output_size:
479
- self.output_transform = nn.Linear(model.decoder.d_model, seq_output_size)
480
- else:
481
- self.output_transform = lambda x: x
482
-
483
- def forward(self, input_dict):
484
- output = self.model(input_dict)
485
-
486
- if input_dict["mode"] == "train":
487
- lens = input_dict["cap_len"] - 1
488
- # seq_outputs: [N, d_model]
489
- elif input_dict["mode"] == "inference":
490
- if "sample_method" in input_dict and input_dict["sample_method"] == "beam":
491
- return output
492
- seq = output["seq"]
493
- lens = torch.where(seq == self.model.end_idx, torch.zeros_like(seq), torch.ones_like(seq)).sum(dim=1)
494
- else:
495
- raise Exception("mode should be either 'train' or 'inference'")
496
- seq_output = mean_with_lens(output["embed"], lens)
497
- seq_output = self.output_transform(seq_output)
498
- output["seq_output"] = seq_output
499
- return output
500
-
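For context, the CaptionSequenceModel wrapper above pools decoder embeddings with mean_with_lens, a helper imported elsewhere in this file and not visible in this diff. Assuming it simply averages each sequence's embeddings over its valid length, a minimal stand-alone sketch could look like the following (the names here are illustrative, not the repository's actual implementation):

import torch

def mean_with_lens_sketch(embed, lens):
    # embed: [N, T, d_model], lens: [N]; average each row over its first lens[i] steps
    n, t, _ = embed.shape
    mask = (torch.arange(t, device=embed.device).unsqueeze(0) < lens.unsqueeze(1)).unsqueeze(-1).float()
    return (embed * mask).sum(dim=1) / lens.clamp(min=1).unsqueeze(-1).float()

emb = torch.randn(2, 5, 8)
lens = torch.tensor([3, 5])
print(mean_with_lens_sketch(emb, lens).shape)  # torch.Size([2, 8])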
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/models/diffusion/plms.py DELETED
@@ -1,236 +0,0 @@
1
- """SAMPLING ONLY."""
2
-
3
- import torch
4
- import numpy as np
5
- from tqdm import tqdm
6
- from functools import partial
7
-
8
- from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
9
-
10
-
11
- class PLMSSampler(object):
12
- def __init__(self, model, schedule="linear", **kwargs):
13
- super().__init__()
14
- self.model = model
15
- self.ddpm_num_timesteps = model.num_timesteps
16
- self.schedule = schedule
17
-
18
- def register_buffer(self, name, attr):
19
- if type(attr) == torch.Tensor:
20
- if attr.device != torch.device("cuda"):
21
- attr = attr.to(torch.device("cuda"))
22
- setattr(self, name, attr)
23
-
24
- def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
25
- if ddim_eta != 0:
26
- raise ValueError('ddim_eta must be 0 for PLMS')
27
- self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
28
- num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
29
- alphas_cumprod = self.model.alphas_cumprod
30
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
31
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
32
-
33
- self.register_buffer('betas', to_torch(self.model.betas))
34
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
35
- self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
36
-
37
- # calculations for diffusion q(x_t | x_{t-1}) and others
38
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
39
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
40
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
41
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
42
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
43
-
44
- # ddim sampling parameters
45
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
46
- ddim_timesteps=self.ddim_timesteps,
47
- eta=ddim_eta,verbose=verbose)
48
- self.register_buffer('ddim_sigmas', ddim_sigmas)
49
- self.register_buffer('ddim_alphas', ddim_alphas)
50
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
51
- self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
52
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
53
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
54
- 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
55
- self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
56
-
57
- @torch.no_grad()
58
- def sample(self,
59
- S,
60
- batch_size,
61
- shape,
62
- conditioning=None,
63
- callback=None,
64
- normals_sequence=None,
65
- img_callback=None,
66
- quantize_x0=False,
67
- eta=0.,
68
- mask=None,
69
- x0=None,
70
- temperature=1.,
71
- noise_dropout=0.,
72
- score_corrector=None,
73
- corrector_kwargs=None,
74
- verbose=True,
75
- x_T=None,
76
- log_every_t=100,
77
- unconditional_guidance_scale=1.,
78
- unconditional_conditioning=None,
79
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
80
- **kwargs
81
- ):
82
- if conditioning is not None:
83
- if isinstance(conditioning, dict):
84
- cbs = conditioning[list(conditioning.keys())[0]].shape[0]
85
- if cbs != batch_size:
86
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
87
- else:
88
- if conditioning.shape[0] != batch_size:
89
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
90
-
91
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
92
- # sampling
93
- C, H, W = shape
94
- size = (batch_size, C, H, W)
95
- print(f'Data shape for PLMS sampling is {size}')
96
-
97
- samples, intermediates = self.plms_sampling(conditioning, size,
98
- callback=callback,
99
- img_callback=img_callback,
100
- quantize_denoised=quantize_x0,
101
- mask=mask, x0=x0,
102
- ddim_use_original_steps=False,
103
- noise_dropout=noise_dropout,
104
- temperature=temperature,
105
- score_corrector=score_corrector,
106
- corrector_kwargs=corrector_kwargs,
107
- x_T=x_T,
108
- log_every_t=log_every_t,
109
- unconditional_guidance_scale=unconditional_guidance_scale,
110
- unconditional_conditioning=unconditional_conditioning,
111
- )
112
- return samples, intermediates
113
-
114
- @torch.no_grad()
115
- def plms_sampling(self, cond, shape,
116
- x_T=None, ddim_use_original_steps=False,
117
- callback=None, timesteps=None, quantize_denoised=False,
118
- mask=None, x0=None, img_callback=None, log_every_t=100,
119
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
120
- unconditional_guidance_scale=1., unconditional_conditioning=None,):
121
- device = self.model.betas.device
122
- b = shape[0]
123
- if x_T is None:
124
- img = torch.randn(shape, device=device)
125
- else:
126
- img = x_T
127
-
128
- if timesteps is None:
129
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
130
- elif timesteps is not None and not ddim_use_original_steps:
131
- subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
132
- timesteps = self.ddim_timesteps[:subset_end]
133
-
134
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
135
- time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
136
- total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
137
- print(f"Running PLMS Sampling with {total_steps} timesteps")
138
-
139
- iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
140
- old_eps = []
141
-
142
- for i, step in enumerate(iterator):
143
- index = total_steps - i - 1
144
- ts = torch.full((b,), step, device=device, dtype=torch.long)
145
- ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
146
-
147
- if mask is not None:
148
- assert x0 is not None
149
- img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
150
- img = img_orig * mask + (1. - mask) * img
151
-
152
- outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
153
- quantize_denoised=quantize_denoised, temperature=temperature,
154
- noise_dropout=noise_dropout, score_corrector=score_corrector,
155
- corrector_kwargs=corrector_kwargs,
156
- unconditional_guidance_scale=unconditional_guidance_scale,
157
- unconditional_conditioning=unconditional_conditioning,
158
- old_eps=old_eps, t_next=ts_next)
159
- img, pred_x0, e_t = outs
160
- old_eps.append(e_t)
161
- if len(old_eps) >= 4:
162
- old_eps.pop(0)
163
- if callback: callback(i)
164
- if img_callback: img_callback(pred_x0, i)
165
-
166
- if index % log_every_t == 0 or index == total_steps - 1:
167
- intermediates['x_inter'].append(img)
168
- intermediates['pred_x0'].append(pred_x0)
169
-
170
- return img, intermediates
171
-
172
- @torch.no_grad()
173
- def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
174
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
175
- unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
176
- b, *_, device = *x.shape, x.device
177
-
178
- def get_model_output(x, t):
179
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
180
- e_t = self.model.apply_model(x, t, c)
181
- else:
182
- x_in = torch.cat([x] * 2)
183
- t_in = torch.cat([t] * 2)
184
- c_in = torch.cat([unconditional_conditioning, c])
185
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
186
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
187
-
188
- if score_corrector is not None:
189
- assert self.model.parameterization == "eps"
190
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
191
-
192
- return e_t
193
-
194
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
195
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
196
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
197
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
198
-
199
- def get_x_prev_and_pred_x0(e_t, index):
200
- # select parameters corresponding to the currently considered timestep
201
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
202
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
203
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
204
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
205
-
206
- # current prediction for x_0
207
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
208
- if quantize_denoised:
209
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
210
- # direction pointing to x_t
211
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
212
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
213
- if noise_dropout > 0.:
214
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
215
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
216
- return x_prev, pred_x0
217
-
218
- e_t = get_model_output(x, t)
219
- if len(old_eps) == 0:
220
- # Pseudo Improved Euler (2nd order)
221
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
222
- e_t_next = get_model_output(x_prev, t_next)
223
- e_t_prime = (e_t + e_t_next) / 2
224
- elif len(old_eps) == 1:
225
- # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
226
- e_t_prime = (3 * e_t - old_eps[-1]) / 2
227
- elif len(old_eps) == 2:
228
- # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
229
- e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
230
- elif len(old_eps) >= 3:
231
- # 4th order Pseudo Linear Multistep (Adams-Bashforth)
232
- e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
233
-
234
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
235
-
236
- return x_prev, pred_x0, e_t
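The end of p_sample_plms above blends the current noise prediction with up to three buffered ones using Adams-Bashforth style weights, and plms_sampling keeps that buffer at most three entries long. Below is a self-contained sketch of just that bookkeeping, with random tensors standing in for model outputs; it is not the sampler itself:

import torch

def combine_eps(e_t, old_eps):
    # mirrors the multistep branches above
    if len(old_eps) == 0:
        return e_t  # the real first step also makes a second model call (pseudo improved Euler)
    if len(old_eps) == 1:
        return (3 * e_t - old_eps[-1]) / 2
    if len(old_eps) == 2:
        return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
    return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24

old_eps = []
for step in range(6):
    e_t = torch.randn(1, 4, 8, 8)          # stand-in for a predicted eps
    e_t_prime = combine_eps(e_t, old_eps)  # what the x_prev update consumes
    old_eps.append(e_t)
    if len(old_eps) >= 4:
        old_eps.pop(0)                     # keep only the three most recent predictions
    print(step, tuple(e_t_prime.shape))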
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/image_degradation/bsrgan.py DELETED
@@ -1,730 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- # --------------------------------------------
4
- # Super-Resolution
5
- # --------------------------------------------
6
- #
7
- # Kai Zhang ([email protected])
8
- # https://github.com/cszn
9
- # From 2019/03--2021/08
10
- # --------------------------------------------
11
- """
12
-
13
- import numpy as np
14
- import cv2
15
- import torch
16
-
17
- from functools import partial
18
- import random
19
- from scipy import ndimage
20
- import scipy
21
- import scipy.stats as ss
22
- from scipy.interpolate import interp2d
23
- from scipy.linalg import orth
24
- import albumentations
25
-
26
- import ldm.modules.image_degradation.utils_image as util
27
-
28
-
29
- def modcrop_np(img, sf):
30
- '''
31
- Args:
32
- img: numpy image, WxH or WxHxC
33
- sf: scale factor
34
- Return:
35
- cropped image
36
- '''
37
- w, h = img.shape[:2]
38
- im = np.copy(img)
39
- return im[:w - w % sf, :h - h % sf, ...]
40
-
41
-
42
- """
43
- # --------------------------------------------
44
- # anisotropic Gaussian kernels
45
- # --------------------------------------------
46
- """
47
-
48
-
49
- def analytic_kernel(k):
50
- """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
51
- k_size = k.shape[0]
52
- # Calculate the big kernels size
53
- big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
54
- # Loop over the small kernel to fill the big one
55
- for r in range(k_size):
56
- for c in range(k_size):
57
- big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
58
- # Crop the edges of the big kernel to ignore very small values and increase run time of SR
59
- crop = k_size // 2
60
- cropped_big_k = big_k[crop:-crop, crop:-crop]
61
- # Normalize to 1
62
- return cropped_big_k / cropped_big_k.sum()
63
-
64
-
65
- def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
66
- """ generate an anisotropic Gaussian kernel
67
- Args:
68
- ksize : e.g., 15, kernel size
69
- theta : [0, pi], rotation angle range
70
- l1 : [0.1,50], scaling of eigenvalues
71
- l2 : [0.1,l1], scaling of eigenvalues
72
- If l1 = l2, will get an isotropic Gaussian kernel.
73
- Returns:
74
- k : kernel
75
- """
76
-
77
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
78
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
79
- D = np.array([[l1, 0], [0, l2]])
80
- Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
81
- k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
82
-
83
- return k
84
-
85
-
86
- def gm_blur_kernel(mean, cov, size=15):
87
- center = size / 2.0 + 0.5
88
- k = np.zeros([size, size])
89
- for y in range(size):
90
- for x in range(size):
91
- cy = y - center + 1
92
- cx = x - center + 1
93
- k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
94
-
95
- k = k / np.sum(k)
96
- return k
97
-
98
-
99
- def shift_pixel(x, sf, upper_left=True):
100
- """shift pixel for super-resolution with different scale factors
101
- Args:
102
- x: WxHxC or WxH
103
- sf: scale factor
104
- upper_left: shift direction
105
- """
106
- h, w = x.shape[:2]
107
- shift = (sf - 1) * 0.5
108
- xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
109
- if upper_left:
110
- x1 = xv + shift
111
- y1 = yv + shift
112
- else:
113
- x1 = xv - shift
114
- y1 = yv - shift
115
-
116
- x1 = np.clip(x1, 0, w - 1)
117
- y1 = np.clip(y1, 0, h - 1)
118
-
119
- if x.ndim == 2:
120
- x = interp2d(xv, yv, x)(x1, y1)
121
- if x.ndim == 3:
122
- for i in range(x.shape[-1]):
123
- x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
124
-
125
- return x
126
-
127
-
128
- def blur(x, k):
129
- '''
130
- x: image, NxcxHxW
131
- k: kernel, Nx1xhxw
132
- '''
133
- n, c = x.shape[:2]
134
- p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
135
- x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
136
- k = k.repeat(1, c, 1, 1)
137
- k = k.view(-1, 1, k.shape[2], k.shape[3])
138
- x = x.view(1, -1, x.shape[2], x.shape[3])
139
- x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
140
- x = x.view(n, c, x.shape[2], x.shape[3])
141
-
142
- return x
143
-
144
-
145
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
146
- """"
147
- # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
148
- # Kai Zhang
149
- # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
150
- # max_var = 2.5 * sf
151
- """
152
- # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
153
- lambda_1 = min_var + np.random.rand() * (max_var - min_var)
154
- lambda_2 = min_var + np.random.rand() * (max_var - min_var)
155
- theta = np.random.rand() * np.pi # random theta
156
- noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
157
-
158
- # Set COV matrix using Lambdas and Theta
159
- LAMBDA = np.diag([lambda_1, lambda_2])
160
- Q = np.array([[np.cos(theta), -np.sin(theta)],
161
- [np.sin(theta), np.cos(theta)]])
162
- SIGMA = Q @ LAMBDA @ Q.T
163
- INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
164
-
165
- # Set expectation position (shifting kernel for aligned image)
166
- MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
167
- MU = MU[None, None, :, None]
168
-
169
- # Create meshgrid for Gaussian
170
- [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
171
- Z = np.stack([X, Y], 2)[:, :, :, None]
172
-
173
- # Calculate Gaussian for every pixel of the kernel
174
- ZZ = Z - MU
175
- ZZ_t = ZZ.transpose(0, 1, 3, 2)
176
- raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
177
-
178
- # shift the kernel so it will be centered
179
- # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
180
-
181
- # Normalize the kernel and return
182
- # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
183
- kernel = raw_kernel / np.sum(raw_kernel)
184
- return kernel
185
-
186
-
187
- def fspecial_gaussian(hsize, sigma):
188
- hsize = [hsize, hsize]
189
- siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
190
- std = sigma
191
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
192
- arg = -(x * x + y * y) / (2 * std * std)
193
- h = np.exp(arg)
194
- h[h < np.finfo(float).eps * h.max()] = 0  # np.finfo: scipy.finfo is not available in recent scipy
195
- sumh = h.sum()
196
- if sumh != 0:
197
- h = h / sumh
198
- return h
199
-
200
-
201
- def fspecial_laplacian(alpha):
202
- alpha = max([0, min([alpha, 1])])
203
- h1 = alpha / (alpha + 1)
204
- h2 = (1 - alpha) / (alpha + 1)
205
- h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
206
- h = np.array(h)
207
- return h
208
-
209
-
210
- def fspecial(filter_type, *args, **kwargs):
211
- '''
212
- python code from:
213
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
214
- '''
215
- if filter_type == 'gaussian':
216
- return fspecial_gaussian(*args, **kwargs)
217
- if filter_type == 'laplacian':
218
- return fspecial_laplacian(*args, **kwargs)
219
-
220
-
221
- """
222
- # --------------------------------------------
223
- # degradation models
224
- # --------------------------------------------
225
- """
226
-
227
-
228
- def bicubic_degradation(x, sf=3):
229
- '''
230
- Args:
231
- x: HxWxC image, [0, 1]
232
- sf: down-scale factor
233
- Return:
234
- bicubicly downsampled LR image
235
- '''
236
- x = util.imresize_np(x, scale=1 / sf)
237
- return x
238
-
239
-
240
- def srmd_degradation(x, k, sf=3):
241
- ''' blur + bicubic downsampling
242
- Args:
243
- x: HxWxC image, [0, 1]
244
- k: hxw, double
245
- sf: down-scale factor
246
- Return:
247
- downsampled LR image
248
- Reference:
249
- @inproceedings{zhang2018learning,
250
- title={Learning a single convolutional super-resolution network for multiple degradations},
251
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
252
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
253
- pages={3262--3271},
254
- year={2018}
255
- }
256
- '''
257
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
258
- x = bicubic_degradation(x, sf=sf)
259
- return x
260
-
261
-
262
- def dpsr_degradation(x, k, sf=3):
263
- ''' bicubic downsampling + blur
264
- Args:
265
- x: HxWxC image, [0, 1]
266
- k: hxw, double
267
- sf: down-scale factor
268
- Return:
269
- downsampled LR image
270
- Reference:
271
- @inproceedings{zhang2019deep,
272
- title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
273
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
274
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
275
- pages={1671--1681},
276
- year={2019}
277
- }
278
- '''
279
- x = bicubic_degradation(x, sf=sf)
280
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
281
- return x
282
-
283
-
284
- def classical_degradation(x, k, sf=3):
285
- ''' blur + downsampling
286
- Args:
287
- x: HxWxC image, [0, 1]/[0, 255]
288
- k: hxw, double
289
- sf: down-scale factor
290
- Return:
291
- downsampled LR image
292
- '''
293
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
294
- # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
295
- st = 0
296
- return x[st::sf, st::sf, ...]
297
-
298
-
299
- def add_sharpening(img, weight=0.5, radius=50, threshold=10):
300
- """USM sharpening. borrowed from real-ESRGAN
301
- Input image: I; Blurry image: B.
302
- 1. K = I + weight * (I - B)
303
- 2. Mask = 1 if abs(I - B) > threshold, else: 0
304
- 3. Blur mask:
305
- 4. Out = Mask * K + (1 - Mask) * I
306
- Args:
307
- img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
308
- weight (float): Sharp weight. Default: 1.
309
- radius (float): Kernel size of Gaussian blur. Default: 50.
310
- threshold (int):
311
- """
312
- if radius % 2 == 0:
313
- radius += 1
314
- blur = cv2.GaussianBlur(img, (radius, radius), 0)
315
- residual = img - blur
316
- mask = np.abs(residual) * 255 > threshold
317
- mask = mask.astype('float32')
318
- soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
319
-
320
- K = img + weight * residual
321
- K = np.clip(K, 0, 1)
322
- return soft_mask * K + (1 - soft_mask) * img
323
-
324
-
325
- def add_blur(img, sf=4):
326
- wd2 = 4.0 + sf
327
- wd = 2.0 + 0.2 * sf
328
- if random.random() < 0.5:
329
- l1 = wd2 * random.random()
330
- l2 = wd2 * random.random()
331
- k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
332
- else:
333
- k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
334
- img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
335
-
336
- return img
337
-
338
-
339
- def add_resize(img, sf=4):
340
- rnum = np.random.rand()
341
- if rnum > 0.8: # up
342
- sf1 = random.uniform(1, 2)
343
- elif rnum < 0.7: # down
344
- sf1 = random.uniform(0.5 / sf, 1)
345
- else:
346
- sf1 = 1.0
347
- img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
348
- img = np.clip(img, 0.0, 1.0)
349
-
350
- return img
351
-
352
-
353
- # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
354
- # noise_level = random.randint(noise_level1, noise_level2)
355
- # rnum = np.random.rand()
356
- # if rnum > 0.6: # add color Gaussian noise
357
- # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
358
- # elif rnum < 0.4: # add grayscale Gaussian noise
359
- # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
360
- # else: # add noise
361
- # L = noise_level2 / 255.
362
- # D = np.diag(np.random.rand(3))
363
- # U = orth(np.random.rand(3, 3))
364
- # conv = np.dot(np.dot(np.transpose(U), D), U)
365
- # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
366
- # img = np.clip(img, 0.0, 1.0)
367
- # return img
368
-
369
- def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
370
- noise_level = random.randint(noise_level1, noise_level2)
371
- rnum = np.random.rand()
372
- if rnum > 0.6: # add color Gaussian noise
373
- img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
374
- elif rnum < 0.4: # add grayscale Gaussian noise
375
- img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
376
- else: # add noise
377
- L = noise_level2 / 255.
378
- D = np.diag(np.random.rand(3))
379
- U = orth(np.random.rand(3, 3))
380
- conv = np.dot(np.dot(np.transpose(U), D), U)
381
- img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
382
- img = np.clip(img, 0.0, 1.0)
383
- return img
384
-
385
-
386
- def add_speckle_noise(img, noise_level1=2, noise_level2=25):
387
- noise_level = random.randint(noise_level1, noise_level2)
388
- img = np.clip(img, 0.0, 1.0)
389
- rnum = random.random()
390
- if rnum > 0.6:
391
- img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
392
- elif rnum < 0.4:
393
- img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
394
- else:
395
- L = noise_level2 / 255.
396
- D = np.diag(np.random.rand(3))
397
- U = orth(np.random.rand(3, 3))
398
- conv = np.dot(np.dot(np.transpose(U), D), U)
399
- img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
400
- img = np.clip(img, 0.0, 1.0)
401
- return img
402
-
403
-
404
- def add_Poisson_noise(img):
405
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
406
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
407
- if random.random() < 0.5:
408
- img = np.random.poisson(img * vals).astype(np.float32) / vals
409
- else:
410
- img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
411
- img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
412
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
413
- img += noise_gray[:, :, np.newaxis]
414
- img = np.clip(img, 0.0, 1.0)
415
- return img
416
-
417
-
418
- def add_JPEG_noise(img):
419
- quality_factor = random.randint(30, 95)
420
- img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
421
- result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
422
- img = cv2.imdecode(encimg, 1)
423
- img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
424
- return img
425
-
426
-
427
- def random_crop(lq, hq, sf=4, lq_patchsize=64):
428
- h, w = lq.shape[:2]
429
- rnd_h = random.randint(0, h - lq_patchsize)
430
- rnd_w = random.randint(0, w - lq_patchsize)
431
- lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
432
-
433
- rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
434
- hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
435
- return lq, hq
436
-
437
-
438
- def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
439
- """
440
- This is the degradation model of BSRGAN from the paper
441
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
442
- ----------
443
- img: HxWxC, [0, 1], its size should be larger than (lq_patchsize*sf) x (lq_patchsize*sf)
444
- sf: scale factor
445
- isp_model: camera ISP model
446
- Returns
447
- -------
448
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
449
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
450
- """
451
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
452
- sf_ori = sf
453
-
454
- h1, w1 = img.shape[:2]
455
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
456
- h, w = img.shape[:2]
457
-
458
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
459
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
460
-
461
- hq = img.copy()
462
-
463
- if sf == 4 and random.random() < scale2_prob: # downsample1
464
- if np.random.rand() < 0.5:
465
- img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
466
- interpolation=random.choice([1, 2, 3]))
467
- else:
468
- img = util.imresize_np(img, 1 / 2, True)
469
- img = np.clip(img, 0.0, 1.0)
470
- sf = 2
471
-
472
- shuffle_order = random.sample(range(7), 7)
473
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
474
- if idx1 > idx2: # keep downsample3 last
475
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
476
-
477
- for i in shuffle_order:
478
-
479
- if i == 0:
480
- img = add_blur(img, sf=sf)
481
-
482
- elif i == 1:
483
- img = add_blur(img, sf=sf)
484
-
485
- elif i == 2:
486
- a, b = img.shape[1], img.shape[0]
487
- # downsample2
488
- if random.random() < 0.75:
489
- sf1 = random.uniform(1, 2 * sf)
490
- img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
491
- interpolation=random.choice([1, 2, 3]))
492
- else:
493
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
494
- k_shifted = shift_pixel(k, sf)
495
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
496
- img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
497
- img = img[0::sf, 0::sf, ...] # nearest downsampling
498
- img = np.clip(img, 0.0, 1.0)
499
-
500
- elif i == 3:
501
- # downsample3
502
- img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
503
- img = np.clip(img, 0.0, 1.0)
504
-
505
- elif i == 4:
506
- # add Gaussian noise
507
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
508
-
509
- elif i == 5:
510
- # add JPEG noise
511
- if random.random() < jpeg_prob:
512
- img = add_JPEG_noise(img)
513
-
514
- elif i == 6:
515
- # add processed camera sensor noise
516
- if random.random() < isp_prob and isp_model is not None:
517
- with torch.no_grad():
518
- img, hq = isp_model.forward(img.copy(), hq)
519
-
520
- # add final JPEG compression noise
521
- img = add_JPEG_noise(img)
522
-
523
- # random crop
524
- img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
525
-
526
- return img, hq
527
-
528
-
529
- # todo no isp_model?
530
- def degradation_bsrgan_variant(image, sf=4, isp_model=None):
531
- """
532
- This is the degradation model of BSRGAN from the paper
533
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
534
- ----------
535
- sf: scale factor
536
- isp_model: camera ISP model
537
- Returns
538
- -------
539
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
540
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
541
- """
542
- image = util.uint2single(image)
543
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
544
- sf_ori = sf
545
-
546
- h1, w1 = image.shape[:2]
547
- image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
548
- h, w = image.shape[:2]
549
-
550
- hq = image.copy()
551
-
552
- if sf == 4 and random.random() < scale2_prob: # downsample1
553
- if np.random.rand() < 0.5:
554
- image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
555
- interpolation=random.choice([1, 2, 3]))
556
- else:
557
- image = util.imresize_np(image, 1 / 2, True)
558
- image = np.clip(image, 0.0, 1.0)
559
- sf = 2
560
-
561
- shuffle_order = random.sample(range(7), 7)
562
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
563
- if idx1 > idx2: # keep downsample3 last
564
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
565
-
566
- for i in shuffle_order:
567
-
568
- if i == 0:
569
- image = add_blur(image, sf=sf)
570
-
571
- elif i == 1:
572
- image = add_blur(image, sf=sf)
573
-
574
- elif i == 2:
575
- a, b = image.shape[1], image.shape[0]
576
- # downsample2
577
- if random.random() < 0.75:
578
- sf1 = random.uniform(1, 2 * sf)
579
- image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
580
- interpolation=random.choice([1, 2, 3]))
581
- else:
582
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
583
- k_shifted = shift_pixel(k, sf)
584
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
585
- image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
586
- image = image[0::sf, 0::sf, ...] # nearest downsampling
587
- image = np.clip(image, 0.0, 1.0)
588
-
589
- elif i == 3:
590
- # downsample3
591
- image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
592
- image = np.clip(image, 0.0, 1.0)
593
-
594
- elif i == 4:
595
- # add Gaussian noise
596
- image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
597
-
598
- elif i == 5:
599
- # add JPEG noise
600
- if random.random() < jpeg_prob:
601
- image = add_JPEG_noise(image)
602
-
603
- # elif i == 6:
604
- # # add processed camera sensor noise
605
- # if random.random() < isp_prob and isp_model is not None:
606
- # with torch.no_grad():
607
- # img, hq = isp_model.forward(img.copy(), hq)
608
-
609
- # add final JPEG compression noise
610
- image = add_JPEG_noise(image)
611
- image = util.single2uint(image)
612
- example = {"image":image}
613
- return example
614
-
615
-
616
- # TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...
617
- def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
618
- """
619
- This is an extended degradation model by combining
620
- the degradation models of BSRGAN and Real-ESRGAN
621
- ----------
622
- img: HxWxC, [0, 1], its size should be larger than (lq_patchsize*sf) x (lq_patchsize*sf)
623
- sf: scale factor
624
- use_shuffle: the degradation shuffle
625
- use_sharp: sharpening the img
626
- Returns
627
- -------
628
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
629
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
630
- """
631
-
632
- h1, w1 = img.shape[:2]
633
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
634
- h, w = img.shape[:2]
635
-
636
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
637
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
638
-
639
- if use_sharp:
640
- img = add_sharpening(img)
641
- hq = img.copy()
642
-
643
- if random.random() < shuffle_prob:
644
- shuffle_order = random.sample(range(13), 13)
645
- else:
646
- shuffle_order = list(range(13))
647
- # local shuffle for noise, JPEG is always the last one
648
- shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
649
- shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
650
-
651
- poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
652
-
653
- for i in shuffle_order:
654
- if i == 0:
655
- img = add_blur(img, sf=sf)
656
- elif i == 1:
657
- img = add_resize(img, sf=sf)
658
- elif i == 2:
659
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
660
- elif i == 3:
661
- if random.random() < poisson_prob:
662
- img = add_Poisson_noise(img)
663
- elif i == 4:
664
- if random.random() < speckle_prob:
665
- img = add_speckle_noise(img)
666
- elif i == 5:
667
- if random.random() < isp_prob and isp_model is not None:
668
- with torch.no_grad():
669
- img, hq = isp_model.forward(img.copy(), hq)
670
- elif i == 6:
671
- img = add_JPEG_noise(img)
672
- elif i == 7:
673
- img = add_blur(img, sf=sf)
674
- elif i == 8:
675
- img = add_resize(img, sf=sf)
676
- elif i == 9:
677
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
678
- elif i == 10:
679
- if random.random() < poisson_prob:
680
- img = add_Poisson_noise(img)
681
- elif i == 11:
682
- if random.random() < speckle_prob:
683
- img = add_speckle_noise(img)
684
- elif i == 12:
685
- if random.random() < isp_prob and isp_model is not None:
686
- with torch.no_grad():
687
- img, hq = isp_model.forward(img.copy(), hq)
688
- else:
689
- print('check the shuffle!')
690
-
691
- # resize to desired size
692
- img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
693
- interpolation=random.choice([1, 2, 3]))
694
-
695
- # add final JPEG compression noise
696
- img = add_JPEG_noise(img)
697
-
698
- # random crop
699
- img, hq = random_crop(img, hq, sf, lq_patchsize)
700
-
701
- return img, hq
702
-
703
-
704
- if __name__ == '__main__':
705
- print("hey")
706
- img = util.imread_uint('utils/test.png', 3)
707
- print(img)
708
- img = util.uint2single(img)
709
- print(img)
710
- img = img[:448, :448]
711
- h = img.shape[0] // 4
712
- print("resizing to", h)
713
- sf = 4
714
- deg_fn = partial(degradation_bsrgan_variant, sf=sf)
715
- for i in range(20):
716
- print(i)
717
- img_lq = deg_fn(img)["image"]  # degradation_bsrgan_variant returns a dict
718
- print(img_lq)
719
- img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
720
- print(img_lq.shape)
721
- print("bicubic", img_lq_bicubic.shape)
722
- print(img.shape)  # img is the high-quality reference here (img_hq was undefined)
723
- lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
724
- interpolation=0)
725
- lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
726
- interpolation=0)
727
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img)], axis=1)
728
- util.imsave(img_concat, str(i) + '.png')
729
-
730
-
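Several degradations above (add_blur, gen_kernel) draw anisotropic Gaussian blur kernels. The following is a numpy/scipy-only sketch of that construction, independent of the module's own helpers: an orthogonal matrix V, eigenvalues l1/l2, covariance V D V^-1, then a normalized PDF grid.

import numpy as np
import scipy.stats as ss

def anisotropic_gaussian_sketch(ksize=15, theta=np.pi / 4, l1=6.0, l2=1.0):
    v = np.array([np.cos(theta), np.sin(theta)])
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])       # orthogonal (a reflection), so V_inv = V
    Sigma = V @ np.diag([l1, l2]) @ np.linalg.inv(V)  # anisotropic covariance
    center = ksize / 2.0 + 0.5
    k = np.zeros((ksize, ksize))
    for y in range(ksize):
        for x in range(ksize):
            k[y, x] = ss.multivariate_normal.pdf([x - center + 1, y - center + 1], mean=[0, 0], cov=Sigma)
    return k / k.sum()

k = anisotropic_gaussian_sketch()
print(k.shape, round(float(k.sum()), 6))  # (15, 15) 1.0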
 
spaces/AP123/dreamgaussian/mesh.py DELETED
@@ -1,394 +0,0 @@
1
- import os
2
- import cv2
3
- import numpy as np
4
- import trimesh
5
-
6
- import torch
7
- import torch.nn.functional as F
8
-
9
- def dot(x, y):
10
- return torch.sum(x * y, -1, keepdim=True)
11
-
12
-
13
- def length(x, eps=1e-20):
14
- return torch.sqrt(torch.clamp(dot(x, x), min=eps))
15
-
16
-
17
- def safe_normalize(x, eps=1e-20):
18
- return x / length(x, eps)
19
-
20
-
21
- class Mesh:
22
- def __init__(
23
- self,
24
- v=None,
25
- f=None,
26
- vn=None,
27
- fn=None,
28
- vt=None,
29
- ft=None,
30
- albedo=None,
31
- device=None,
32
- ):
33
- self.device = device
34
- self.v = v
35
- self.vn = vn
36
- self.vt = vt
37
- self.f = f
38
- self.fn = fn
39
- self.ft = ft
40
- # only support a single albedo
41
- self.albedo = albedo
42
-
43
- self.ori_center = 0
44
- self.ori_scale = 1
45
-
46
- @classmethod
47
- def load(cls, path=None, resize=True, **kwargs):
48
- # assume init with kwargs
49
- if path is None:
50
- mesh = cls(**kwargs)
51
- # obj supports face uv
52
- elif path.endswith(".obj"):
53
- mesh = cls.load_obj(path, **kwargs)
54
- # trimesh only supports vertex uv, but can load more formats
55
- else:
56
- mesh = cls.load_trimesh(path, **kwargs)
57
-
58
- print(f"[Mesh loading] v: {mesh.v.shape}, f: {mesh.f.shape}")
59
- # auto-normalize
60
- if resize:
61
- mesh.auto_size()
62
- # auto-fix normal
63
- if mesh.vn is None:
64
- mesh.auto_normal()
65
- print(f"[Mesh loading] vn: {mesh.vn.shape}, fn: {mesh.fn.shape}")
66
- # auto-fix texture
67
- if mesh.vt is None:
68
- mesh.auto_uv(cache_path=path)
69
- print(f"[Mesh loading] vt: {mesh.vt.shape}, ft: {mesh.ft.shape}")
70
-
71
- return mesh
72
-
73
- # load from obj file
74
- @classmethod
75
- def load_obj(cls, path, albedo_path=None, device=None, init_empty_tex=False):
76
- assert os.path.splitext(path)[-1] == ".obj"
77
-
78
- mesh = cls()
79
-
80
- # device
81
- if device is None:
82
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
83
-
84
- mesh.device = device
85
-
86
- # try to find texture from mtl file
87
- if albedo_path is None:
88
- mtl_path = path.replace(".obj", ".mtl")
89
- if os.path.exists(mtl_path):
90
- with open(mtl_path, "r") as f:
91
- lines = f.readlines()
92
- for line in lines:
93
- split_line = line.split()
94
- # empty line
95
- if len(split_line) == 0:
96
- continue
97
- prefix = split_line[0]
98
- # NOTE: simply use the first map_Kd as albedo!
99
- if "map_Kd" in prefix:
100
- albedo_path = os.path.join(os.path.dirname(path), split_line[1])
101
- print(f"[load_obj] use texture from: {albedo_path}")
102
- break
103
-
104
- if init_empty_tex or albedo_path is None or not os.path.exists(albedo_path):
105
- # init an empty texture
106
- print(f"[load_obj] init empty albedo!")
107
- # albedo = np.random.rand(1024, 1024, 3).astype(np.float32)
108
- albedo = np.ones((1024, 1024, 3), dtype=np.float32) * np.array(
109
- [0.5, 0.5, 0.5]
110
- ) # default color
111
- else:
112
- albedo = cv2.imread(albedo_path, cv2.IMREAD_UNCHANGED)
113
- albedo = cv2.cvtColor(albedo, cv2.COLOR_BGR2RGB)
114
- albedo = albedo.astype(np.float32) / 255
115
- print(f"[load_obj] load texture: {albedo.shape}")
116
-
117
- # import matplotlib.pyplot as plt
118
- # plt.imshow(albedo)
119
- # plt.show()
120
-
121
- mesh.albedo = torch.tensor(albedo, dtype=torch.float32, device=device)
122
-
123
- # load obj
124
- with open(path, "r") as f:
125
- lines = f.readlines()
126
-
127
- def parse_f_v(fv):
128
- # pass in a vertex term of a face, return {v, vt, vn} (-1 if not provided)
129
- # supported forms:
130
- # f v1 v2 v3
131
- # f v1/vt1 v2/vt2 v3/vt3
132
- # f v1/vt1/vn1 v2/vt2/vn2 v3/vt3/vn3
133
- # f v1//vn1 v2//vn2 v3//vn3
134
- xs = [int(x) - 1 if x != "" else -1 for x in fv.split("/")]
135
- xs.extend([-1] * (3 - len(xs)))
136
- return xs[0], xs[1], xs[2]
137
-
138
- # NOTE: we ignore usemtl, and assume the mesh ONLY uses one material (first in mtl)
139
- vertices, texcoords, normals = [], [], []
140
- faces, tfaces, nfaces = [], [], []
141
- for line in lines:
142
- split_line = line.split()
143
- # empty line
144
- if len(split_line) == 0:
145
- continue
146
- # v/vn/vt
147
- prefix = split_line[0].lower()
148
- if prefix == "v":
149
- vertices.append([float(v) for v in split_line[1:]])
150
- elif prefix == "vn":
151
- normals.append([float(v) for v in split_line[1:]])
152
- elif prefix == "vt":
153
- val = [float(v) for v in split_line[1:]]
154
- texcoords.append([val[0], 1.0 - val[1]])
155
- elif prefix == "f":
156
- vs = split_line[1:]
157
- nv = len(vs)
158
- v0, t0, n0 = parse_f_v(vs[0])
159
- for i in range(nv - 2): # triangulate (assume vertices are ordered)
160
- v1, t1, n1 = parse_f_v(vs[i + 1])
161
- v2, t2, n2 = parse_f_v(vs[i + 2])
162
- faces.append([v0, v1, v2])
163
- tfaces.append([t0, t1, t2])
164
- nfaces.append([n0, n1, n2])
165
-
166
- mesh.v = torch.tensor(vertices, dtype=torch.float32, device=device)
167
- mesh.vt = (
168
- torch.tensor(texcoords, dtype=torch.float32, device=device)
169
- if len(texcoords) > 0
170
- else None
171
- )
172
- mesh.vn = (
173
- torch.tensor(normals, dtype=torch.float32, device=device)
174
- if len(normals) > 0
175
- else None
176
- )
177
-
178
- mesh.f = torch.tensor(faces, dtype=torch.int32, device=device)
179
- mesh.ft = (
180
- torch.tensor(tfaces, dtype=torch.int32, device=device)
181
- if texcoords is not None
182
- else None
183
- )
184
- mesh.fn = (
185
- torch.tensor(nfaces, dtype=torch.int32, device=device)
186
- if normals is not None
187
- else None
188
- )
189
-
190
- return mesh
191
-
192
- @classmethod
193
- def load_trimesh(cls, path, device=None):
194
- mesh = cls()
195
-
196
- # device
197
- if device is None:
198
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
199
-
200
- mesh.device = device
201
-
202
- # use trimesh to load glb, assume only has one single RootMesh...
203
- _data = trimesh.load(path)
204
- if isinstance(_data, trimesh.Scene):
205
- mesh_keys = list(_data.geometry.keys())
206
- assert (
207
- len(mesh_keys) == 1
208
- ), f"{path} contains more than one meshes, not supported!"
209
- _mesh = _data.geometry[mesh_keys[0]]
210
-
211
- elif isinstance(_data, trimesh.Trimesh):
212
- _mesh = _data
213
-
214
- else:
215
- raise NotImplementedError(f"type {type(_data)} not supported!")
216
-
217
- # TODO: exception handling if no material
218
- _material = _mesh.visual.material
219
- if isinstance(_material, trimesh.visual.material.PBRMaterial):
220
- texture = np.array(_material.baseColorTexture).astype(np.float32) / 255
221
- elif isinstance(_material, trimesh.visual.material.SimpleMaterial):
222
- texture = (
223
- np.array(_material.to_pbr().baseColorTexture).astype(np.float32) / 255
224
- )
225
- else:
226
- raise NotImplementedError(f"material type {type(_material)} not supported!")
227
-
228
- print(f"[load_obj] load texture: {texture.shape}")
229
- mesh.albedo = torch.tensor(texture, dtype=torch.float32, device=device)
230
-
231
- vertices = _mesh.vertices
232
- texcoords = _mesh.visual.uv
233
- texcoords[:, 1] = 1 - texcoords[:, 1]
234
- normals = _mesh.vertex_normals
235
-
236
- # trimesh only support vertex uv...
237
- faces = tfaces = nfaces = _mesh.faces
238
-
239
- mesh.v = torch.tensor(vertices, dtype=torch.float32, device=device)
240
- mesh.vt = (
241
- torch.tensor(texcoords, dtype=torch.float32, device=device)
242
- if len(texcoords) > 0
243
- else None
244
- )
245
- mesh.vn = (
246
- torch.tensor(normals, dtype=torch.float32, device=device)
247
- if len(normals) > 0
248
- else None
249
- )
250
-
251
- mesh.f = torch.tensor(faces, dtype=torch.int32, device=device)
252
- mesh.ft = (
253
- torch.tensor(tfaces, dtype=torch.int32, device=device)
254
- if texcoords is not None
255
- else None
256
- )
257
- mesh.fn = (
258
- torch.tensor(nfaces, dtype=torch.int32, device=device)
259
- if normals is not None
260
- else None
261
- )
262
-
263
- return mesh
264
-
265
- # aabb
266
- def aabb(self):
267
- return torch.min(self.v, dim=0).values, torch.max(self.v, dim=0).values
268
-
269
- # unit size
270
- @torch.no_grad()
271
- def auto_size(self):
272
- vmin, vmax = self.aabb()
273
- self.ori_center = (vmax + vmin) / 2
274
- self.ori_scale = 1.2 / torch.max(vmax - vmin).item() # to ~ [-0.6, 0.6]
275
- self.v = (self.v - self.ori_center) * self.ori_scale
276
-
277
- def auto_normal(self):
278
- i0, i1, i2 = self.f[:, 0].long(), self.f[:, 1].long(), self.f[:, 2].long()
279
- v0, v1, v2 = self.v[i0, :], self.v[i1, :], self.v[i2, :]
280
-
281
- face_normals = torch.cross(v1 - v0, v2 - v0)
282
-
283
- # Splat face normals to vertices
284
- vn = torch.zeros_like(self.v)
285
- vn.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)
286
- vn.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)
287
- vn.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)
288
-
289
- # Normalize, replace zero (degenerated) normals with some default value
290
- vn = torch.where(
291
- dot(vn, vn) > 1e-20,
292
- vn,
293
- torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device),
294
- )
295
- vn = safe_normalize(vn)
296
-
297
- self.vn = vn
298
- self.fn = self.f
299
-
300
- def auto_uv(self, cache_path=None):
301
- # try to load cache
302
- if cache_path is not None:
303
- cache_path = cache_path.replace(".obj", "_uv.npz")
304
-
305
- if cache_path is not None and os.path.exists(cache_path):
306
- data = np.load(cache_path)
307
- vt_np, ft_np = data["vt"], data["ft"]
308
- else:
309
- import xatlas
310
-
311
- v_np = self.v.detach().cpu().numpy()
312
- f_np = self.f.detach().int().cpu().numpy()
313
- atlas = xatlas.Atlas()
314
- atlas.add_mesh(v_np, f_np)
315
- chart_options = xatlas.ChartOptions()
316
- # chart_options.max_iterations = 4
317
- atlas.generate(chart_options=chart_options)
318
- vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]
319
-
320
- # save to cache
321
- if cache_path is not None:
322
- np.savez(cache_path, vt=vt_np, ft=ft_np)
323
-
324
- vt = torch.from_numpy(vt_np.astype(np.float32)).to(self.device)
325
- ft = torch.from_numpy(ft_np.astype(np.int32)).to(self.device)
326
-
327
- self.vt = vt
328
- self.ft = ft
329
-
330
- def to(self, device):
331
- self.device = device
332
- for name in ["v", "f", "vn", "fn", "vt", "ft", "albedo"]:
333
- tensor = getattr(self, name)
334
- if tensor is not None:
335
- setattr(self, name, tensor.to(device))
336
- return self
337
-
338
- # write to ply file (only geom)
339
- def write_ply(self, path):
340
- assert path.endswith(".ply")
341
-
342
- v_np = self.v.detach().cpu().numpy()
343
- f_np = self.f.detach().cpu().numpy()
344
-
345
- _mesh = trimesh.Trimesh(vertices=v_np, faces=f_np)
346
- _mesh.export(path)
347
-
348
- # write to obj file
349
- def write(self, path):
350
- mtl_path = path.replace(".obj", ".mtl")
351
- albedo_path = path.replace(".obj", "_albedo.png")
352
-
353
- v_np = self.v.detach().cpu().numpy()
354
- vt_np = self.vt.detach().cpu().numpy() if self.vt is not None else None
355
- vn_np = self.vn.detach().cpu().numpy() if self.vn is not None else None
356
- f_np = self.f.detach().cpu().numpy()
357
- ft_np = self.ft.detach().cpu().numpy() if self.ft is not None else None
358
- fn_np = self.fn.detach().cpu().numpy() if self.fn is not None else None
359
-
360
- with open(path, "w") as fp:
361
- fp.write(f"mtllib {os.path.basename(mtl_path)} \n")
362
-
363
- for v in v_np:
364
- fp.write(f"v {v[0]} {v[1]} {v[2]} \n")
365
-
366
- if vt_np is not None:
367
- for v in vt_np:
368
- fp.write(f"vt {v[0]} {1 - v[1]} \n")
369
-
370
- if vn_np is not None:
371
- for v in vn_np:
372
- fp.write(f"vn {v[0]} {v[1]} {v[2]} \n")
373
-
374
- fp.write(f"usemtl defaultMat \n")
375
- for i in range(len(f_np)):
376
- fp.write(
377
- f'f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1 if ft_np is not None else ""}/{fn_np[i, 0] + 1 if fn_np is not None else ""} \
378
- {f_np[i, 1] + 1}/{ft_np[i, 1] + 1 if ft_np is not None else ""}/{fn_np[i, 1] + 1 if fn_np is not None else ""} \
379
- {f_np[i, 2] + 1}/{ft_np[i, 2] + 1 if ft_np is not None else ""}/{fn_np[i, 2] + 1 if fn_np is not None else ""} \n'
380
- )
381
-
382
- with open(mtl_path, "w") as fp:
383
- fp.write(f"newmtl defaultMat \n")
384
- fp.write(f"Ka 1 1 1 \n")
385
- fp.write(f"Kd 1 1 1 \n")
386
- fp.write(f"Ks 0 0 0 \n")
387
- fp.write(f"Tr 1 \n")
388
- fp.write(f"illum 1 \n")
389
- fp.write(f"Ns 0 \n")
390
- fp.write(f"map_Kd {os.path.basename(albedo_path)} \n")
391
-
392
- albedo = self.albedo.detach().cpu().numpy()
393
- albedo = (albedo * 255).astype(np.uint8)
394
- cv2.imwrite(albedo_path, cv2.cvtColor(albedo, cv2.COLOR_RGB2BGR))
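The Mesh class above is essentially an OBJ/GLB loader with normalization and export helpers. Since the file is deleted in this commit, here is only an illustrative trimesh-based sketch of the same normalize-then-export idea, not a drop-in replacement for the class:

import numpy as np
import trimesh

# one triangle standing in for loaded geometry
vertices = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])

# rescale to roughly [-0.6, 0.6], mirroring Mesh.auto_size above
center = (vertices.max(0) + vertices.min(0)) / 2
scale = 1.2 / (vertices.max(0) - vertices.min(0)).max()
vertices = (vertices - center) * scale

trimesh.Trimesh(vertices=vertices, faces=faces).export("triangle.ply")  # geometry-only, like write_ply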
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
1
- import Buttons from './Buttons';
2
-
3
- export default function ButtonsFactory(
4
- config?: Buttons.IConfig
5
- ): Buttons;
 
spaces/Aki004/herta-so-vits/vdecoder/hifigan/utils.py DELETED
@@ -1,68 +0,0 @@
1
- import glob
2
- import os
3
- import matplotlib
4
- import torch
5
- from torch.nn.utils import weight_norm
6
- # matplotlib.use("Agg")
7
- import matplotlib.pylab as plt
8
-
9
-
10
- def plot_spectrogram(spectrogram):
11
- fig, ax = plt.subplots(figsize=(10, 2))
12
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
13
- interpolation='none')
14
- plt.colorbar(im, ax=ax)
15
-
16
- fig.canvas.draw()
17
- plt.close()
18
-
19
- return fig
20
-
21
-
22
- def init_weights(m, mean=0.0, std=0.01):
23
- classname = m.__class__.__name__
24
- if classname.find("Conv") != -1:
25
- m.weight.data.normal_(mean, std)
26
-
27
-
28
- def apply_weight_norm(m):
29
- classname = m.__class__.__name__
30
- if classname.find("Conv") != -1:
31
- weight_norm(m)
32
-
33
-
34
- def get_padding(kernel_size, dilation=1):
35
- return int((kernel_size*dilation - dilation)/2)
36
-
37
-
38
- def load_checkpoint(filepath, device):
39
- assert os.path.isfile(filepath)
40
- print("Loading '{}'".format(filepath))
41
- checkpoint_dict = torch.load(filepath, map_location=device)
42
- print("Complete.")
43
- return checkpoint_dict
44
-
45
-
46
- def save_checkpoint(filepath, obj):
47
- print("Saving checkpoint to {}".format(filepath))
48
- torch.save(obj, filepath)
49
- print("Complete.")
50
-
51
-
52
- def del_old_checkpoints(cp_dir, prefix, n_models=2):
53
- pattern = os.path.join(cp_dir, prefix + '????????')
54
- cp_list = glob.glob(pattern) # get checkpoint paths
55
- cp_list = sorted(cp_list)# sort by iter
56
- if len(cp_list) > n_models: # if more than n_models models are found
57
- for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models
58
- open(cp, 'w').close()# empty file contents
59
- os.unlink(cp)# delete file (move to trash when using Colab)
60
-
61
-
62
- def scan_checkpoint(cp_dir, prefix):
63
- pattern = os.path.join(cp_dir, prefix + '????????')
64
- cp_list = glob.glob(pattern)
65
- if len(cp_list) == 0:
66
- return None
67
- return sorted(cp_list)[-1]
68
-
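scan_checkpoint and del_old_checkpoints above assume file names of the form prefix plus an 8-character step suffix. A minimal sketch of that rotation behaviour on dummy files in a temporary directory (no torch needed), just to make the glob pattern concrete:

import glob
import os
import tempfile

prefix = "g_"
with tempfile.TemporaryDirectory() as cp_dir:
    for step in (100, 200, 300):  # three fake checkpoints with 8-digit suffixes
        open(os.path.join(cp_dir, f"{prefix}{step:08d}"), "w").close()

    pattern = os.path.join(cp_dir, prefix + "????????")
    cp_list = sorted(glob.glob(pattern))
    print("latest:", os.path.basename(cp_list[-1]))   # what scan_checkpoint would return

    for cp in cp_list[:-2]:                           # what del_old_checkpoints(n_models=2) would delete
        os.unlink(cp)
    print("kept:", [os.path.basename(p) for p in sorted(glob.glob(pattern))])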
 
spaces/AlexMason/anime-remove-background/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Anime Remove Background
3
- emoji: 🪄🖼️
4
- colorFrom: indigo
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.1.4
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- duplicated_from: skytnt/anime-remove-background
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlgoveraAI/dcgan-crypto-punks/app.py DELETED
@@ -1,150 +0,0 @@
1
- from datetime import datetime, timedelta
2
- from decimal import Decimal
3
- import gradio as gr
4
- import matplotlib.pyplot as plt
5
- from pathlib import Path
6
- import pickle
7
- import os
8
- import time
9
- import torch
10
- import torchvision.transforms as T
11
-
12
- from ocean_lib.ocean.ocean import Ocean
13
- from ocean_lib.config import Config
14
- from ocean_lib.models.compute_input import ComputeInput
15
- from ocean_lib.web3_internal.wallet import Wallet
16
-
17
-
18
-
19
- def compute(private_key):
20
-
21
- start_time = time.time()
22
-
23
- network = 'mumbai'
24
- config = Config(f'configs/config-{network}.ini')
25
- network_url = Path(config.network_url[8:])
26
- network_url = network_url.parents[0] / os.getenv('INFURA_KEY')
27
- config.set('eth-network', 'network', "https://" + str(network_url))
28
- ocean = Ocean(config)
29
-
30
- wallet = Wallet(ocean.web3, str(private_key).strip(), config.block_confirmations, config.transaction_timeout)
31
-
32
- address = wallet.address
33
-
34
- data_did = {
35
- "rinkeby": "did:op:064abd2c7f8d5c3cacdbf43a687194d50008889130dbc4403d4b973797da7081",
36
- "mumbai": "did:op:2def598c3ab3732dec5db8f33af028429a547b101cf6e7ec4ac9eba547a5507d"
37
- }
38
-
39
- data_asset = ocean.assets.resolve(data_did[config.network_name])
40
- data_token = ocean.get_datatoken(data_asset.datatokens[0]['address'])
41
- data_did = data_asset.did
42
-
43
- assert data_token.balanceOf(wallet.address) > 0, "need to buy data token"
44
-
45
- algo_did = {
46
- "rinkeby": "did:op:b6df860d4db1405293768985600db2de317d8f0035aa1805c0724ae678f23477",
47
- "mumbai": "did:op:0a618a6c19ea44a189598f71aaefc98a516592cf90c64fd1f81f3f840f7b5b92"
48
- }
49
-
50
- algo_asset = ocean.assets.resolve(algo_did[config.network_name])
51
- algo_token = ocean.get_datatoken(algo_asset.datatokens[0]['address'])
52
- algo_did = algo_asset.did
53
-
54
- assert algo_token.balanceOf(wallet.address) > 0, "need to buy data token"
55
-
56
- compute_service = data_asset.services[0]
57
- algo_service = algo_asset.services[0]
58
- free_c2d_env = ocean.compute.get_free_c2d_environment(compute_service.service_endpoint)
59
-
60
- data_compute_input = ComputeInput(data_asset, compute_service)
61
- algo_compute_input = ComputeInput(algo_asset, algo_service)
62
-
63
- if config.network_name == 'rinkeby' or config.network_name == 'mainnet':
64
- assert alice_wallet.web3.eth.get_balance(alice_wallet.address) > 0, "need ETH"
65
- # elif config.network_name == 'mumbai' or config.network_name == 'polygon':
66
- # assert ocean.MATIC_token.balanceOf(alice_wallet.address) > 0, "need MATIC"
67
-
68
- # Pay for dataset and algo for 1 day
69
- datasets, algorithm = ocean.assets.pay_for_compute_service(
70
- datasets=[data_compute_input],
71
- algorithm_data=algo_compute_input,
72
- consume_market_order_fee_address=wallet.address,
73
- wallet=wallet,
74
- compute_environment=free_c2d_env["id"],
75
- valid_until=int((datetime.utcnow() + timedelta(days=1)).timestamp()),
76
- consumer_address=free_c2d_env["consumerAddress"],
77
- )
78
- assert datasets, "pay for dataset unsuccessful"
79
- assert algorithm, "pay for algorithm unsuccessful"
80
-
81
- # Start compute job
82
- job_id = ocean.compute.start(
83
- consumer_wallet=wallet,
84
- dataset=datasets[0],
85
- compute_environment=free_c2d_env["id"],
86
- algorithm=algorithm,
87
- )
88
-
89
- # Wait until job is done
90
- succeeded = False
91
- for _ in range(0, 200):
92
- status = ocean.compute.status(data_asset, compute_service, job_id, wallet)
93
- print('===========', status['statusText'])
94
-
95
- if status.get("dateFinished") and Decimal(status["dateFinished"]) > 0:
96
- succeeded = True
97
- break
98
- time.sleep(5)
99
-
100
- print('hii')
101
-
102
- # Retrieve algorithm output and log files
103
- result = ocean.compute.compute_job_result_logs(
104
- data_asset, compute_service, job_id, wallet
105
- )[0]
106
-
107
- print('===================0', result)
108
-
109
- tensor = pickle.loads(result)
110
-
111
- print('===================1', tensor)
112
-
113
- img = T.ToPILImage()(tensor.permute(2,0,1))
114
-
115
- print('===================2', img)
116
-
117
- elapsed_time = time.time() - start_time
118
-
119
- print('===================3', elapsed_time)
120
-
121
- return address, img
122
-
123
- description = (
124
- "This demo serves a generative model from the Ocean marketplace. "
125
- )
126
-
127
- article = (
128
- "<p style='text-align: center'>"
129
- "<a href='https://metahub.algovera.ai/' target='_blank'>Algovera Metahub</a> | "
130
- "<a href='https://market.oceanprotocol.com/' target='_blank'>Ocean Marketplace</a> | "
131
- "<a href='https://docs.algovera.ai/blog/2022/04/05/Tutorial%20for%20using%20token-gated%20apps%20on%20HuggingFace' target='_blank'>How-to-use Tutorial</a> | "
132
- "<a href='https://www.algovera.ai' target='_blank'>Algovera Website</a>"
133
- "</p>"
134
- )
135
-
136
- interface = gr.Interface(
137
- compute,
138
- [
139
- gr.inputs.Textbox(label="Private Key"),
140
- ],
141
- [
142
- gr.outputs.Textbox(label="Public Key"),
143
- gr.outputs.Image(label="Output Image")
144
- ],
145
- title="Generative Model from the Ocean marketplace",
146
- description=description,
147
- article=article,
148
- )
149
-
150
- interface.launch()
 
spaces/Ali-C137/Motivation-Letter-Generator/app.py DELETED
@@ -1,24 +0,0 @@
1
- # Motivation-Letter-Generator
2
-
3
- from transformers import AutoModelForCausalLM, AutoTokenizer, AutoTokenizer, AutoModelForSeq2SeqLM, set_seed, pipeline
4
- import gradio as gr
5
-
6
- ### need more GPU power to call T0pp
7
-
8
- model = AutoModelForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B', use_cache=True)
9
- tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B')
10
-
11
- set_seed(424242)
12
-
13
- def generate(Name, Position, Organization, max_length=500, top_k=1, temperature=0.9, repetition_penalty = 2.0):
14
- prompt = f"i'm {Name} and i want to write a motivation letter to an employer about the position of {Position} at {Organization} mentioning the hard skills and soft skills i have acquired"
15
- input_ids = tokenizer(prompt, return_tensors="pt").to(0)
16
- sample = model.generate(**input_ids, max_length=max_length, top_k=top_k, temperature=temperature, repetition_penalty = repetition_penalty)
17
- return tokenizer.decode(sample[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
18
-
19
- title = "Motivation Letter Generator"
20
- article = "For now this still a toy demo and no good results will came out. PS: if you have enough resources try using stronger models !"
21
-
22
- gr = gr.Interface(fn = generate, inputs=["text", "text", "text"], outputs="text", title=title, article=article)
23
-
24
- gr.launch()
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/loop/utils.py DELETED
@@ -1,11 +0,0 @@
1
- import numpy as np
2
-
3
-
4
- def N_to_reso(n_voxels, bbox):
5
- xyz_min, xyz_max = bbox
6
- dim = len(xyz_min)
7
- voxel_size = ((xyz_max - xyz_min).prod() / n_voxels).pow(1 / dim)
8
- return ((xyz_max - xyz_min) / voxel_size).long().tolist()
9
-
10
- def cal_n_samples(reso, step_ratio=0.5):
11
- return int(np.linalg.norm(reso)/step_ratio)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/dpm_discrete_ancestral.md DELETED
@@ -1,22 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # DPM Discrete Scheduler with ancestral sampling inspired by Karras et. al paper
14
-
15
- ## Overview
16
-
17
- Inspired by [Karras et. al](https://arxiv.org/abs/2206.00364). Scheduler ported from @crowsonkb's https://github.com/crowsonkb/k-diffusion library:
18
-
19
- All credit for making this scheduler work goes to [Katherine Crowson](https://github.com/crowsonkb/)
20
-
21
- ## KDPM2AncestralDiscreteScheduler
22
- [[autodoc]] KDPM2AncestralDiscreteScheduler
 
spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/run.sh DELETED
@@ -1,10 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- work_path=$(dirname $0)
4
- PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
5
- python -m torch.distributed.launch --nproc_per_node=8 \
6
- tools/train.py ${work_path}/config.py \
7
- --launcher pytorch \
8
- --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \
9
- --work-dir ${work_path}/ckpt \
10
- 2>&1 | tee -a ${work_path}/log.txt
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/gfl.py DELETED
@@ -1,16 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .single_stage import SingleStageDetector
3
-
4
-
5
- @DETECTORS.register_module()
6
- class GFL(SingleStageDetector):
7
-
8
- def __init__(self,
9
- backbone,
10
- neck,
11
- bbox_head,
12
- train_cfg=None,
13
- test_cfg=None,
14
- pretrained=None):
15
- super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
16
- test_cfg, pretrained)
 
spaces/Anew1007/extras/tts_edge.py DELETED
@@ -1,34 +0,0 @@
1
- import io
2
- import edge_tts
3
- import asyncio
4
-
5
-
6
- def get_voices():
7
- voices = asyncio.run(edge_tts.list_voices())
8
- return voices
9
-
10
-
11
- async def _iterate_chunks(audio):
12
- async for chunk in audio.stream():
13
- if chunk["type"] == "audio":
14
- yield chunk["data"]
15
-
16
-
17
- async def _async_generator_to_list(async_gen):
18
- result = []
19
- async for item in async_gen:
20
- result.append(item)
21
- return result
22
-
23
-
24
- def generate_audio(text: str, voice: str, rate: int) -> bytes:
25
- sign = '+' if rate > 0 else '-'
26
- rate = f'{sign}{abs(rate)}%'
27
- audio = edge_tts.Communicate(text=text, voice=voice, rate=rate)
28
- chunks = asyncio.run(_async_generator_to_list(_iterate_chunks(audio)))
29
- buffer = io.BytesIO()
30
-
31
- for chunk in chunks:
32
- buffer.write(chunk)
33
-
34
- return buffer.getvalue()
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/README.md DELETED
@@ -1,5 +0,0 @@
1
- # superboogav2
2
-
3
- For a description, please see the comments in this Pull Request:
4
-
5
- https://github.com/oobabooga/text-generation-webui/pull/3272
 
spaces/Arnx/MusicGenXvAKN/audiocraft/data/__init__.py DELETED
@@ -1,8 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # flake8: noqa
8
- from . import audio, audio_dataset
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/bertwarper.py DELETED
@@ -1,273 +0,0 @@
1
- # ------------------------------------------------------------------------
2
- # Grounding DINO
3
- # url: https://github.com/IDEA-Research/GroundingDINO
4
- # Copyright (c) 2023 IDEA. All Rights Reserved.
5
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
- # ------------------------------------------------------------------------
7
-
8
- import torch
9
- import torch.nn.functional as F
10
- import torch.utils.checkpoint as checkpoint
11
- from torch import Tensor, nn
12
- from torchvision.ops.boxes import nms
13
- from transformers import BertConfig, BertModel, BertPreTrainedModel
14
- from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
15
-
16
-
17
- class BertModelWarper(nn.Module):
18
- def __init__(self, bert_model):
19
- super().__init__()
20
- # self.bert = bert_modelc
21
-
22
- self.config = bert_model.config
23
- self.embeddings = bert_model.embeddings
24
- self.encoder = bert_model.encoder
25
- self.pooler = bert_model.pooler
26
-
27
- self.get_extended_attention_mask = bert_model.get_extended_attention_mask
28
- self.invert_attention_mask = bert_model.invert_attention_mask
29
- self.get_head_mask = bert_model.get_head_mask
30
-
31
- def forward(
32
- self,
33
- input_ids=None,
34
- attention_mask=None,
35
- token_type_ids=None,
36
- position_ids=None,
37
- head_mask=None,
38
- inputs_embeds=None,
39
- encoder_hidden_states=None,
40
- encoder_attention_mask=None,
41
- past_key_values=None,
42
- use_cache=None,
43
- output_attentions=None,
44
- output_hidden_states=None,
45
- return_dict=None,
46
- ):
47
- r"""
48
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
49
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
50
- the model is configured as a decoder.
51
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
52
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
53
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
54
-
55
- - 1 for tokens that are **not masked**,
56
- - 0 for tokens that are **masked**.
57
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
58
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
59
-
60
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
61
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
62
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
63
- use_cache (:obj:`bool`, `optional`):
64
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
65
- decoding (see :obj:`past_key_values`).
66
- """
67
- output_attentions = (
68
- output_attentions if output_attentions is not None else self.config.output_attentions
69
- )
70
- output_hidden_states = (
71
- output_hidden_states
72
- if output_hidden_states is not None
73
- else self.config.output_hidden_states
74
- )
75
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
76
-
77
- if self.config.is_decoder:
78
- use_cache = use_cache if use_cache is not None else self.config.use_cache
79
- else:
80
- use_cache = False
81
-
82
- if input_ids is not None and inputs_embeds is not None:
83
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
84
- elif input_ids is not None:
85
- input_shape = input_ids.size()
86
- batch_size, seq_length = input_shape
87
- elif inputs_embeds is not None:
88
- input_shape = inputs_embeds.size()[:-1]
89
- batch_size, seq_length = input_shape
90
- else:
91
- raise ValueError("You have to specify either input_ids or inputs_embeds")
92
-
93
- device = input_ids.device if input_ids is not None else inputs_embeds.device
94
-
95
- # past_key_values_length
96
- past_key_values_length = (
97
- past_key_values[0][0].shape[2] if past_key_values is not None else 0
98
- )
99
-
100
- if attention_mask is None:
101
- attention_mask = torch.ones(
102
- ((batch_size, seq_length + past_key_values_length)), device=device
103
- )
104
- if token_type_ids is None:
105
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
106
-
107
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
108
- # ourselves in which case we just need to make it broadcastable to all heads.
109
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
110
- attention_mask, input_shape, device
111
- )
112
-
113
- # If a 2D or 3D attention mask is provided for the cross-attention
114
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
115
- if self.config.is_decoder and encoder_hidden_states is not None:
116
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
117
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
118
- if encoder_attention_mask is None:
119
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
120
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
121
- else:
122
- encoder_extended_attention_mask = None
123
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
124
- # import ipdb; ipdb.set_trace()
125
-
126
- # Prepare head mask if needed
127
- # 1.0 in head_mask indicate we keep the head
128
- # attention_probs has shape bsz x n_heads x N x N
129
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
130
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
131
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
132
-
133
- embedding_output = self.embeddings(
134
- input_ids=input_ids,
135
- position_ids=position_ids,
136
- token_type_ids=token_type_ids,
137
- inputs_embeds=inputs_embeds,
138
- past_key_values_length=past_key_values_length,
139
- )
140
-
141
- encoder_outputs = self.encoder(
142
- embedding_output,
143
- attention_mask=extended_attention_mask,
144
- head_mask=head_mask,
145
- encoder_hidden_states=encoder_hidden_states,
146
- encoder_attention_mask=encoder_extended_attention_mask,
147
- past_key_values=past_key_values,
148
- use_cache=use_cache,
149
- output_attentions=output_attentions,
150
- output_hidden_states=output_hidden_states,
151
- return_dict=return_dict,
152
- )
153
- sequence_output = encoder_outputs[0]
154
- pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
155
-
156
- if not return_dict:
157
- return (sequence_output, pooled_output) + encoder_outputs[1:]
158
-
159
- return BaseModelOutputWithPoolingAndCrossAttentions(
160
- last_hidden_state=sequence_output,
161
- pooler_output=pooled_output,
162
- past_key_values=encoder_outputs.past_key_values,
163
- hidden_states=encoder_outputs.hidden_states,
164
- attentions=encoder_outputs.attentions,
165
- cross_attentions=encoder_outputs.cross_attentions,
166
- )
167
-
168
-
169
- class TextEncoderShell(nn.Module):
170
- def __init__(self, text_encoder):
171
- super().__init__()
172
- self.text_encoder = text_encoder
173
- self.config = self.text_encoder.config
174
-
175
- def forward(self, **kw):
176
- # feed into text encoder
177
- return self.text_encoder(**kw)
178
-
179
-
180
- def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):
181
- """Generate attention mask between each pair of special tokens
182
- Args:
183
- input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
184
- special_tokens_mask (list): special tokens mask.
185
- Returns:
186
- torch.Tensor: attention mask between each special tokens.
187
- """
188
- input_ids = tokenized["input_ids"]
189
- bs, num_token = input_ids.shape
190
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
191
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
192
- for special_token in special_tokens_list:
193
- special_tokens_mask |= input_ids == special_token
194
-
195
- # idxs: each row is a list of indices of special tokens
196
- idxs = torch.nonzero(special_tokens_mask)
197
-
198
- # generate attention mask and positional ids
199
- attention_mask = (
200
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
201
- )
202
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
203
- previous_col = 0
204
- for i in range(idxs.shape[0]):
205
- row, col = idxs[i]
206
- if (col == 0) or (col == num_token - 1):
207
- attention_mask[row, col, col] = True
208
- position_ids[row, col] = 0
209
- else:
210
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
211
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
212
- 0, col - previous_col, device=input_ids.device
213
- )
214
-
215
- previous_col = col
216
-
217
- # # padding mask
218
- # padding_mask = tokenized['attention_mask']
219
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
220
-
221
- return attention_mask, position_ids.to(torch.long)
222
-
223
-
224
- def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):
225
- """Generate attention mask between each pair of special tokens
226
- Args:
227
- input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
228
- special_tokens_mask (list): special tokens mask.
229
- Returns:
230
- torch.Tensor: attention mask between each special tokens.
231
- """
232
- input_ids = tokenized["input_ids"]
233
- bs, num_token = input_ids.shape
234
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
235
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
236
- for special_token in special_tokens_list:
237
- special_tokens_mask |= input_ids == special_token
238
-
239
- # idxs: each row is a list of indices of special tokens
240
- idxs = torch.nonzero(special_tokens_mask)
241
-
242
- # generate attention mask and positional ids
243
- attention_mask = (
244
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
245
- )
246
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
247
- cate_to_token_mask_list = [[] for _ in range(bs)]
248
- previous_col = 0
249
- for i in range(idxs.shape[0]):
250
- row, col = idxs[i]
251
- if (col == 0) or (col == num_token - 1):
252
- attention_mask[row, col, col] = True
253
- position_ids[row, col] = 0
254
- else:
255
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
256
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
257
- 0, col - previous_col, device=input_ids.device
258
- )
259
- c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
260
- c2t_maski[previous_col + 1 : col] = True
261
- cate_to_token_mask_list[row].append(c2t_maski)
262
- previous_col = col
263
-
264
- cate_to_token_mask_list = [
265
- torch.stack(cate_to_token_mask_listi, dim=0)
266
- for cate_to_token_mask_listi in cate_to_token_mask_list
267
- ]
268
-
269
- # # padding mask
270
- # padding_mask = tokenized['attention_mask']
271
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
272
-
273
- return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/pretty.py DELETED
@@ -1,994 +0,0 @@
1
- import builtins
2
- import collections
3
- import dataclasses
4
- import inspect
5
- import os
6
- import sys
7
- from array import array
8
- from collections import Counter, UserDict, UserList, defaultdict, deque
9
- from dataclasses import dataclass, fields, is_dataclass
10
- from inspect import isclass
11
- from itertools import islice
12
- from types import MappingProxyType
13
- from typing import (
14
- TYPE_CHECKING,
15
- Any,
16
- Callable,
17
- DefaultDict,
18
- Dict,
19
- Iterable,
20
- List,
21
- Optional,
22
- Sequence,
23
- Set,
24
- Tuple,
25
- Union,
26
- )
27
-
28
- from pip._vendor.rich.repr import RichReprResult
29
-
30
- try:
31
- import attr as _attr_module
32
-
33
- _has_attrs = hasattr(_attr_module, "ib")
34
- except ImportError: # pragma: no cover
35
- _has_attrs = False
36
-
37
- from . import get_console
38
- from ._loop import loop_last
39
- from ._pick import pick_bool
40
- from .abc import RichRenderable
41
- from .cells import cell_len
42
- from .highlighter import ReprHighlighter
43
- from .jupyter import JupyterMixin, JupyterRenderable
44
- from .measure import Measurement
45
- from .text import Text
46
-
47
- if TYPE_CHECKING:
48
- from .console import (
49
- Console,
50
- ConsoleOptions,
51
- HighlighterType,
52
- JustifyMethod,
53
- OverflowMethod,
54
- RenderResult,
55
- )
56
-
57
-
58
- def _is_attr_object(obj: Any) -> bool:
59
- """Check if an object was created with attrs module."""
60
- return _has_attrs and _attr_module.has(type(obj))
61
-
62
-
63
- def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]:
64
- """Get fields for an attrs object."""
65
- return _attr_module.fields(type(obj)) if _has_attrs else []
66
-
67
-
68
- def _is_dataclass_repr(obj: object) -> bool:
69
- """Check if an instance of a dataclass contains the default repr.
70
-
71
- Args:
72
- obj (object): A dataclass instance.
73
-
74
- Returns:
75
- bool: True if the default repr is used, False if there is a custom repr.
76
- """
77
- # Digging in to a lot of internals here
78
- # Catching all exceptions in case something is missing on a non CPython implementation
79
- try:
80
- return obj.__repr__.__code__.co_filename == dataclasses.__file__
81
- except Exception: # pragma: no coverage
82
- return False
83
-
84
-
85
- _dummy_namedtuple = collections.namedtuple("_dummy_namedtuple", [])
86
-
87
-
88
- def _has_default_namedtuple_repr(obj: object) -> bool:
89
- """Check if an instance of namedtuple contains the default repr
90
-
91
- Args:
92
- obj (object): A namedtuple
93
-
94
- Returns:
95
- bool: True if the default repr is used, False if there's a custom repr.
96
- """
97
- obj_file = None
98
- try:
99
- obj_file = inspect.getfile(obj.__repr__)
100
- except (OSError, TypeError):
101
- # OSError handles case where object is defined in __main__ scope, e.g. REPL - no filename available.
102
- # TypeError trapped defensively, in case of object without filename slips through.
103
- pass
104
- default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__)
105
- return obj_file == default_repr_file
106
-
107
-
108
- def _ipy_display_hook(
109
- value: Any,
110
- console: Optional["Console"] = None,
111
- overflow: "OverflowMethod" = "ignore",
112
- crop: bool = False,
113
- indent_guides: bool = False,
114
- max_length: Optional[int] = None,
115
- max_string: Optional[int] = None,
116
- max_depth: Optional[int] = None,
117
- expand_all: bool = False,
118
- ) -> Union[str, None]:
119
- # needed here to prevent circular import:
120
- from .console import ConsoleRenderable
121
-
122
- # always skip rich generated jupyter renderables or None values
123
- if _safe_isinstance(value, JupyterRenderable) or value is None:
124
- return None
125
-
126
- console = console or get_console()
127
-
128
- with console.capture() as capture:
129
- # certain renderables should start on a new line
130
- if _safe_isinstance(value, ConsoleRenderable):
131
- console.line()
132
- console.print(
133
- value
134
- if _safe_isinstance(value, RichRenderable)
135
- else Pretty(
136
- value,
137
- overflow=overflow,
138
- indent_guides=indent_guides,
139
- max_length=max_length,
140
- max_string=max_string,
141
- max_depth=max_depth,
142
- expand_all=expand_all,
143
- margin=12,
144
- ),
145
- crop=crop,
146
- new_line_start=True,
147
- end="",
148
- )
149
- # strip trailing newline, not usually part of a text repr
150
- # I'm not sure if this should be prevented at a lower level
151
- return capture.get().rstrip("\n")
152
-
153
-
154
- def _safe_isinstance(
155
- obj: object, class_or_tuple: Union[type, Tuple[type, ...]]
156
- ) -> bool:
157
- """isinstance can fail in rare cases, for example types with no __class__"""
158
- try:
159
- return isinstance(obj, class_or_tuple)
160
- except Exception:
161
- return False
162
-
163
-
164
- def install(
165
- console: Optional["Console"] = None,
166
- overflow: "OverflowMethod" = "ignore",
167
- crop: bool = False,
168
- indent_guides: bool = False,
169
- max_length: Optional[int] = None,
170
- max_string: Optional[int] = None,
171
- max_depth: Optional[int] = None,
172
- expand_all: bool = False,
173
- ) -> None:
174
- """Install automatic pretty printing in the Python REPL.
175
-
176
- Args:
177
- console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
178
- overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
179
- crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
180
- indent_guides (bool, optional): Enable indentation guides. Defaults to False.
181
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
182
- Defaults to None.
183
- max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
184
- max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
185
- expand_all (bool, optional): Expand all containers. Defaults to False.
186
- max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
187
- """
188
- from pip._vendor.rich import get_console
189
-
190
- console = console or get_console()
191
- assert console is not None
192
-
193
- def display_hook(value: Any) -> None:
194
- """Replacement sys.displayhook which prettifies objects with Rich."""
195
- if value is not None:
196
- assert console is not None
197
- builtins._ = None # type: ignore[attr-defined]
198
- console.print(
199
- value
200
- if _safe_isinstance(value, RichRenderable)
201
- else Pretty(
202
- value,
203
- overflow=overflow,
204
- indent_guides=indent_guides,
205
- max_length=max_length,
206
- max_string=max_string,
207
- max_depth=max_depth,
208
- expand_all=expand_all,
209
- ),
210
- crop=crop,
211
- )
212
- builtins._ = value # type: ignore[attr-defined]
213
-
214
- if "get_ipython" in globals():
215
- ip = get_ipython() # type: ignore[name-defined]
216
- from IPython.core.formatters import BaseFormatter
217
-
218
- class RichFormatter(BaseFormatter): # type: ignore[misc]
219
- pprint: bool = True
220
-
221
- def __call__(self, value: Any) -> Any:
222
- if self.pprint:
223
- return _ipy_display_hook(
224
- value,
225
- console=get_console(),
226
- overflow=overflow,
227
- indent_guides=indent_guides,
228
- max_length=max_length,
229
- max_string=max_string,
230
- max_depth=max_depth,
231
- expand_all=expand_all,
232
- )
233
- else:
234
- return repr(value)
235
-
236
- # replace plain text formatter with rich formatter
237
- rich_formatter = RichFormatter()
238
- ip.display_formatter.formatters["text/plain"] = rich_formatter
239
- else:
240
- sys.displayhook = display_hook
241
-
242
-
243
- class Pretty(JupyterMixin):
244
- """A rich renderable that pretty prints an object.
245
-
246
- Args:
247
- _object (Any): An object to pretty print.
248
- highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
249
- indent_size (int, optional): Number of spaces in indent. Defaults to 4.
250
- justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
251
- overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
252
- no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
253
- indent_guides (bool, optional): Enable indentation guides. Defaults to False.
254
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
255
- Defaults to None.
256
- max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
257
- max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
258
- expand_all (bool, optional): Expand all containers. Defaults to False.
259
- margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
260
- insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
261
- """
262
-
263
- def __init__(
264
- self,
265
- _object: Any,
266
- highlighter: Optional["HighlighterType"] = None,
267
- *,
268
- indent_size: int = 4,
269
- justify: Optional["JustifyMethod"] = None,
270
- overflow: Optional["OverflowMethod"] = None,
271
- no_wrap: Optional[bool] = False,
272
- indent_guides: bool = False,
273
- max_length: Optional[int] = None,
274
- max_string: Optional[int] = None,
275
- max_depth: Optional[int] = None,
276
- expand_all: bool = False,
277
- margin: int = 0,
278
- insert_line: bool = False,
279
- ) -> None:
280
- self._object = _object
281
- self.highlighter = highlighter or ReprHighlighter()
282
- self.indent_size = indent_size
283
- self.justify: Optional["JustifyMethod"] = justify
284
- self.overflow: Optional["OverflowMethod"] = overflow
285
- self.no_wrap = no_wrap
286
- self.indent_guides = indent_guides
287
- self.max_length = max_length
288
- self.max_string = max_string
289
- self.max_depth = max_depth
290
- self.expand_all = expand_all
291
- self.margin = margin
292
- self.insert_line = insert_line
293
-
294
- def __rich_console__(
295
- self, console: "Console", options: "ConsoleOptions"
296
- ) -> "RenderResult":
297
- pretty_str = pretty_repr(
298
- self._object,
299
- max_width=options.max_width - self.margin,
300
- indent_size=self.indent_size,
301
- max_length=self.max_length,
302
- max_string=self.max_string,
303
- max_depth=self.max_depth,
304
- expand_all=self.expand_all,
305
- )
306
- pretty_text = Text.from_ansi(
307
- pretty_str,
308
- justify=self.justify or options.justify,
309
- overflow=self.overflow or options.overflow,
310
- no_wrap=pick_bool(self.no_wrap, options.no_wrap),
311
- style="pretty",
312
- )
313
- pretty_text = (
314
- self.highlighter(pretty_text)
315
- if pretty_text
316
- else Text(
317
- f"{type(self._object)}.__repr__ returned empty string",
318
- style="dim italic",
319
- )
320
- )
321
- if self.indent_guides and not options.ascii_only:
322
- pretty_text = pretty_text.with_indent_guides(
323
- self.indent_size, style="repr.indent"
324
- )
325
- if self.insert_line and "\n" in pretty_text:
326
- yield ""
327
- yield pretty_text
328
-
329
- def __rich_measure__(
330
- self, console: "Console", options: "ConsoleOptions"
331
- ) -> "Measurement":
332
- pretty_str = pretty_repr(
333
- self._object,
334
- max_width=options.max_width,
335
- indent_size=self.indent_size,
336
- max_length=self.max_length,
337
- max_string=self.max_string,
338
- max_depth=self.max_depth,
339
- expand_all=self.expand_all,
340
- )
341
- text_width = (
342
- max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
343
- )
344
- return Measurement(text_width, text_width)
345
-
346
-
347
- def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
348
- return (
349
- f"defaultdict({_object.default_factory!r}, {{",
350
- "})",
351
- f"defaultdict({_object.default_factory!r}, {{}})",
352
- )
353
-
354
-
355
- def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
356
- return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})")
357
-
358
-
359
- _BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
360
- os._Environ: lambda _object: ("environ({", "})", "environ({})"),
361
- array: _get_braces_for_array,
362
- defaultdict: _get_braces_for_defaultdict,
363
- Counter: lambda _object: ("Counter({", "})", "Counter()"),
364
- deque: lambda _object: ("deque([", "])", "deque()"),
365
- dict: lambda _object: ("{", "}", "{}"),
366
- UserDict: lambda _object: ("{", "}", "{}"),
367
- frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
368
- list: lambda _object: ("[", "]", "[]"),
369
- UserList: lambda _object: ("[", "]", "[]"),
370
- set: lambda _object: ("{", "}", "set()"),
371
- tuple: lambda _object: ("(", ")", "()"),
372
- MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
373
- }
374
- _CONTAINERS = tuple(_BRACES.keys())
375
- _MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
376
-
377
-
378
- def is_expandable(obj: Any) -> bool:
379
- """Check if an object may be expanded by pretty print."""
380
- return (
381
- _safe_isinstance(obj, _CONTAINERS)
382
- or (is_dataclass(obj))
383
- or (hasattr(obj, "__rich_repr__"))
384
- or _is_attr_object(obj)
385
- ) and not isclass(obj)
386
-
387
-
388
- @dataclass
389
- class Node:
390
- """A node in a repr tree. May be atomic or a container."""
391
-
392
- key_repr: str = ""
393
- value_repr: str = ""
394
- open_brace: str = ""
395
- close_brace: str = ""
396
- empty: str = ""
397
- last: bool = False
398
- is_tuple: bool = False
399
- is_namedtuple: bool = False
400
- children: Optional[List["Node"]] = None
401
- key_separator: str = ": "
402
- separator: str = ", "
403
-
404
- def iter_tokens(self) -> Iterable[str]:
405
- """Generate tokens for this node."""
406
- if self.key_repr:
407
- yield self.key_repr
408
- yield self.key_separator
409
- if self.value_repr:
410
- yield self.value_repr
411
- elif self.children is not None:
412
- if self.children:
413
- yield self.open_brace
414
- if self.is_tuple and not self.is_namedtuple and len(self.children) == 1:
415
- yield from self.children[0].iter_tokens()
416
- yield ","
417
- else:
418
- for child in self.children:
419
- yield from child.iter_tokens()
420
- if not child.last:
421
- yield self.separator
422
- yield self.close_brace
423
- else:
424
- yield self.empty
425
-
426
- def check_length(self, start_length: int, max_length: int) -> bool:
427
- """Check the length fits within a limit.
428
-
429
- Args:
430
- start_length (int): Starting length of the line (indent, prefix, suffix).
431
- max_length (int): Maximum length.
432
-
433
- Returns:
434
- bool: True if the node can be rendered within max length, otherwise False.
435
- """
436
- total_length = start_length
437
- for token in self.iter_tokens():
438
- total_length += cell_len(token)
439
- if total_length > max_length:
440
- return False
441
- return True
442
-
443
- def __str__(self) -> str:
444
- repr_text = "".join(self.iter_tokens())
445
- return repr_text
446
-
447
- def render(
448
- self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
449
- ) -> str:
450
- """Render the node to a pretty repr.
451
-
452
- Args:
453
- max_width (int, optional): Maximum width of the repr. Defaults to 80.
454
- indent_size (int, optional): Size of indents. Defaults to 4.
455
- expand_all (bool, optional): Expand all levels. Defaults to False.
456
-
457
- Returns:
458
- str: A repr string of the original object.
459
- """
460
- lines = [_Line(node=self, is_root=True)]
461
- line_no = 0
462
- while line_no < len(lines):
463
- line = lines[line_no]
464
- if line.expandable and not line.expanded:
465
- if expand_all or not line.check_length(max_width):
466
- lines[line_no : line_no + 1] = line.expand(indent_size)
467
- line_no += 1
468
-
469
- repr_str = "\n".join(str(line) for line in lines)
470
- return repr_str
471
-
472
-
473
- @dataclass
474
- class _Line:
475
- """A line in repr output."""
476
-
477
- parent: Optional["_Line"] = None
478
- is_root: bool = False
479
- node: Optional[Node] = None
480
- text: str = ""
481
- suffix: str = ""
482
- whitespace: str = ""
483
- expanded: bool = False
484
- last: bool = False
485
-
486
- @property
487
- def expandable(self) -> bool:
488
- """Check if the line may be expanded."""
489
- return bool(self.node is not None and self.node.children)
490
-
491
- def check_length(self, max_length: int) -> bool:
492
- """Check this line fits within a given number of cells."""
493
- start_length = (
494
- len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
495
- )
496
- assert self.node is not None
497
- return self.node.check_length(start_length, max_length)
498
-
499
- def expand(self, indent_size: int) -> Iterable["_Line"]:
500
- """Expand this line by adding children on their own line."""
501
- node = self.node
502
- assert node is not None
503
- whitespace = self.whitespace
504
- assert node.children
505
- if node.key_repr:
506
- new_line = yield _Line(
507
- text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
508
- whitespace=whitespace,
509
- )
510
- else:
511
- new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
512
- child_whitespace = self.whitespace + " " * indent_size
513
- tuple_of_one = node.is_tuple and len(node.children) == 1
514
- for last, child in loop_last(node.children):
515
- separator = "," if tuple_of_one else node.separator
516
- line = _Line(
517
- parent=new_line,
518
- node=child,
519
- whitespace=child_whitespace,
520
- suffix=separator,
521
- last=last and not tuple_of_one,
522
- )
523
- yield line
524
-
525
- yield _Line(
526
- text=node.close_brace,
527
- whitespace=whitespace,
528
- suffix=self.suffix,
529
- last=self.last,
530
- )
531
-
532
- def __str__(self) -> str:
533
- if self.last:
534
- return f"{self.whitespace}{self.text}{self.node or ''}"
535
- else:
536
- return (
537
- f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
538
- )
539
-
540
-
541
- def _is_namedtuple(obj: Any) -> bool:
542
- """Checks if an object is most likely a namedtuple. It is possible
543
- to craft an object that passes this check and isn't a namedtuple, but
544
- there is only a minuscule chance of this happening unintentionally.
545
-
546
- Args:
547
- obj (Any): The object to test
548
-
549
- Returns:
550
- bool: True if the object is a namedtuple. False otherwise.
551
- """
552
- try:
553
- fields = getattr(obj, "_fields", None)
554
- except Exception:
555
- # Being very defensive - if we cannot get the attr then its not a namedtuple
556
- return False
557
- return isinstance(obj, tuple) and isinstance(fields, tuple)
558
-
559
-
560
- def traverse(
561
- _object: Any,
562
- max_length: Optional[int] = None,
563
- max_string: Optional[int] = None,
564
- max_depth: Optional[int] = None,
565
- ) -> Node:
566
- """Traverse object and generate a tree.
567
-
568
- Args:
569
- _object (Any): Object to be traversed.
570
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
571
- Defaults to None.
572
- max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
573
- Defaults to None.
574
- max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
575
- Defaults to None.
576
-
577
- Returns:
578
- Node: The root of a tree structure which can be used to render a pretty repr.
579
- """
580
-
581
- def to_repr(obj: Any) -> str:
582
- """Get repr string for an object, but catch errors."""
583
- if (
584
- max_string is not None
585
- and _safe_isinstance(obj, (bytes, str))
586
- and len(obj) > max_string
587
- ):
588
- truncated = len(obj) - max_string
589
- obj_repr = f"{obj[:max_string]!r}+{truncated}"
590
- else:
591
- try:
592
- obj_repr = repr(obj)
593
- except Exception as error:
594
- obj_repr = f"<repr-error {str(error)!r}>"
595
- return obj_repr
596
-
597
- visited_ids: Set[int] = set()
598
- push_visited = visited_ids.add
599
- pop_visited = visited_ids.remove
600
-
601
- def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
602
- """Walk the object depth first."""
603
-
604
- obj_id = id(obj)
605
- if obj_id in visited_ids:
606
- # Recursion detected
607
- return Node(value_repr="...")
608
-
609
- obj_type = type(obj)
610
- children: List[Node]
611
- reached_max_depth = max_depth is not None and depth >= max_depth
612
-
613
- def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
614
- for arg in rich_args:
615
- if _safe_isinstance(arg, tuple):
616
- if len(arg) == 3:
617
- key, child, default = arg
618
- if default == child:
619
- continue
620
- yield key, child
621
- elif len(arg) == 2:
622
- key, child = arg
623
- yield key, child
624
- elif len(arg) == 1:
625
- yield arg[0]
626
- else:
627
- yield arg
628
-
629
- try:
630
- fake_attributes = hasattr(
631
- obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
632
- )
633
- except Exception:
634
- fake_attributes = False
635
-
636
- rich_repr_result: Optional[RichReprResult] = None
637
- if not fake_attributes:
638
- try:
639
- if hasattr(obj, "__rich_repr__") and not isclass(obj):
640
- rich_repr_result = obj.__rich_repr__()
641
- except Exception:
642
- pass
643
-
644
- if rich_repr_result is not None:
645
- push_visited(obj_id)
646
- angular = getattr(obj.__rich_repr__, "angular", False)
647
- args = list(iter_rich_args(rich_repr_result))
648
- class_name = obj.__class__.__name__
649
-
650
- if args:
651
- children = []
652
- append = children.append
653
-
654
- if reached_max_depth:
655
- if angular:
656
- node = Node(value_repr=f"<{class_name}...>")
657
- else:
658
- node = Node(value_repr=f"{class_name}(...)")
659
- else:
660
- if angular:
661
- node = Node(
662
- open_brace=f"<{class_name} ",
663
- close_brace=">",
664
- children=children,
665
- last=root,
666
- separator=" ",
667
- )
668
- else:
669
- node = Node(
670
- open_brace=f"{class_name}(",
671
- close_brace=")",
672
- children=children,
673
- last=root,
674
- )
675
- for last, arg in loop_last(args):
676
- if _safe_isinstance(arg, tuple):
677
- key, child = arg
678
- child_node = _traverse(child, depth=depth + 1)
679
- child_node.last = last
680
- child_node.key_repr = key
681
- child_node.key_separator = "="
682
- append(child_node)
683
- else:
684
- child_node = _traverse(arg, depth=depth + 1)
685
- child_node.last = last
686
- append(child_node)
687
- else:
688
- node = Node(
689
- value_repr=f"<{class_name}>" if angular else f"{class_name}()",
690
- children=[],
691
- last=root,
692
- )
693
- pop_visited(obj_id)
694
- elif _is_attr_object(obj) and not fake_attributes:
695
- push_visited(obj_id)
696
- children = []
697
- append = children.append
698
-
699
- attr_fields = _get_attr_fields(obj)
700
- if attr_fields:
701
- if reached_max_depth:
702
- node = Node(value_repr=f"{obj.__class__.__name__}(...)")
703
- else:
704
- node = Node(
705
- open_brace=f"{obj.__class__.__name__}(",
706
- close_brace=")",
707
- children=children,
708
- last=root,
709
- )
710
-
711
- def iter_attrs() -> Iterable[
712
- Tuple[str, Any, Optional[Callable[[Any], str]]]
713
- ]:
714
- """Iterate over attr fields and values."""
715
- for attr in attr_fields:
716
- if attr.repr:
717
- try:
718
- value = getattr(obj, attr.name)
719
- except Exception as error:
720
- # Can happen, albeit rarely
721
- yield (attr.name, error, None)
722
- else:
723
- yield (
724
- attr.name,
725
- value,
726
- attr.repr if callable(attr.repr) else None,
727
- )
728
-
729
- for last, (name, value, repr_callable) in loop_last(iter_attrs()):
730
- if repr_callable:
731
- child_node = Node(value_repr=str(repr_callable(value)))
732
- else:
733
- child_node = _traverse(value, depth=depth + 1)
734
- child_node.last = last
735
- child_node.key_repr = name
736
- child_node.key_separator = "="
737
- append(child_node)
738
- else:
739
- node = Node(
740
- value_repr=f"{obj.__class__.__name__}()", children=[], last=root
741
- )
742
- pop_visited(obj_id)
743
- elif (
744
- is_dataclass(obj)
745
- and not _safe_isinstance(obj, type)
746
- and not fake_attributes
747
- and _is_dataclass_repr(obj)
748
- ):
749
- push_visited(obj_id)
750
- children = []
751
- append = children.append
752
- if reached_max_depth:
753
- node = Node(value_repr=f"{obj.__class__.__name__}(...)")
754
- else:
755
- node = Node(
756
- open_brace=f"{obj.__class__.__name__}(",
757
- close_brace=")",
758
- children=children,
759
- last=root,
760
- empty=f"{obj.__class__.__name__}()",
761
- )
762
-
763
- for last, field in loop_last(
764
- field for field in fields(obj) if field.repr
765
- ):
766
- child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
767
- child_node.key_repr = field.name
768
- child_node.last = last
769
- child_node.key_separator = "="
770
- append(child_node)
771
-
772
- pop_visited(obj_id)
773
- elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):
774
- push_visited(obj_id)
775
- class_name = obj.__class__.__name__
776
- if reached_max_depth:
777
- # If we've reached the max depth, we still show the class name, but not its contents
778
- node = Node(
779
- value_repr=f"{class_name}(...)",
780
- )
781
- else:
782
- children = []
783
- append = children.append
784
- node = Node(
785
- open_brace=f"{class_name}(",
786
- close_brace=")",
787
- children=children,
788
- empty=f"{class_name}()",
789
- )
790
- for last, (key, value) in loop_last(obj._asdict().items()):
791
- child_node = _traverse(value, depth=depth + 1)
792
- child_node.key_repr = key
793
- child_node.last = last
794
- child_node.key_separator = "="
795
- append(child_node)
796
- pop_visited(obj_id)
797
- elif _safe_isinstance(obj, _CONTAINERS):
798
- for container_type in _CONTAINERS:
799
- if _safe_isinstance(obj, container_type):
800
- obj_type = container_type
801
- break
802
-
803
- push_visited(obj_id)
804
-
805
- open_brace, close_brace, empty = _BRACES[obj_type](obj)
806
-
807
- if reached_max_depth:
808
- node = Node(value_repr=f"{open_brace}...{close_brace}")
809
- elif obj_type.__repr__ != type(obj).__repr__:
810
- node = Node(value_repr=to_repr(obj), last=root)
811
- elif obj:
812
- children = []
813
- node = Node(
814
- open_brace=open_brace,
815
- close_brace=close_brace,
816
- children=children,
817
- last=root,
818
- )
819
- append = children.append
820
- num_items = len(obj)
821
- last_item_index = num_items - 1
822
-
823
- if _safe_isinstance(obj, _MAPPING_CONTAINERS):
824
- iter_items = iter(obj.items())
825
- if max_length is not None:
826
- iter_items = islice(iter_items, max_length)
827
- for index, (key, child) in enumerate(iter_items):
828
- child_node = _traverse(child, depth=depth + 1)
829
- child_node.key_repr = to_repr(key)
830
- child_node.last = index == last_item_index
831
- append(child_node)
832
- else:
833
- iter_values = iter(obj)
834
- if max_length is not None:
835
- iter_values = islice(iter_values, max_length)
836
- for index, child in enumerate(iter_values):
837
- child_node = _traverse(child, depth=depth + 1)
838
- child_node.last = index == last_item_index
839
- append(child_node)
840
- if max_length is not None and num_items > max_length:
841
- append(Node(value_repr=f"... +{num_items - max_length}", last=True))
842
- else:
843
- node = Node(empty=empty, children=[], last=root)
844
-
845
- pop_visited(obj_id)
846
- else:
847
- node = Node(value_repr=to_repr(obj), last=root)
848
- node.is_tuple = _safe_isinstance(obj, tuple)
849
- node.is_namedtuple = _is_namedtuple(obj)
850
- return node
851
-
852
- node = _traverse(_object, root=True)
853
- return node
854
-
855
-
856
- def pretty_repr(
857
- _object: Any,
858
- *,
859
- max_width: int = 80,
860
- indent_size: int = 4,
861
- max_length: Optional[int] = None,
862
- max_string: Optional[int] = None,
863
- max_depth: Optional[int] = None,
864
- expand_all: bool = False,
865
- ) -> str:
866
- """Prettify repr string by expanding on to new lines to fit within a given width.
867
-
868
- Args:
869
- _object (Any): Object to repr.
870
- max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
871
- indent_size (int, optional): Number of spaces to indent. Defaults to 4.
872
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
873
- Defaults to None.
874
- max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
875
- Defaults to None.
876
- max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.
877
- Defaults to None.
878
- expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
879
-
880
- Returns:
881
- str: A possibly multi-line representation of the object.
882
- """
883
-
884
- if _safe_isinstance(_object, Node):
885
- node = _object
886
- else:
887
- node = traverse(
888
- _object, max_length=max_length, max_string=max_string, max_depth=max_depth
889
- )
890
- repr_str: str = node.render(
891
- max_width=max_width, indent_size=indent_size, expand_all=expand_all
892
- )
893
- return repr_str
894
-
895
-
896
- def pprint(
897
- _object: Any,
898
- *,
899
- console: Optional["Console"] = None,
900
- indent_guides: bool = True,
901
- max_length: Optional[int] = None,
902
- max_string: Optional[int] = None,
903
- max_depth: Optional[int] = None,
904
- expand_all: bool = False,
905
- ) -> None:
906
- """A convenience function for pretty printing.
907
-
908
- Args:
909
- _object (Any): Object to pretty print.
910
- console (Console, optional): Console instance, or None to use default. Defaults to None.
911
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
912
- Defaults to None.
913
- max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
914
- max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
915
- indent_guides (bool, optional): Enable indentation guides. Defaults to True.
916
- expand_all (bool, optional): Expand all containers. Defaults to False.
917
- """
918
- _console = get_console() if console is None else console
919
- _console.print(
920
- Pretty(
921
- _object,
922
- max_length=max_length,
923
- max_string=max_string,
924
- max_depth=max_depth,
925
- indent_guides=indent_guides,
926
- expand_all=expand_all,
927
- overflow="ignore",
928
- ),
929
- soft_wrap=True,
930
- )
931
-
932
-
933
- if __name__ == "__main__": # pragma: no cover
934
-
935
- class BrokenRepr:
936
- def __repr__(self) -> str:
937
- 1 / 0
938
- return "this will fail"
939
-
940
- from typing import NamedTuple
941
-
942
- class StockKeepingUnit(NamedTuple):
943
- name: str
944
- description: str
945
- price: float
946
- category: str
947
- reviews: List[str]
948
-
949
- d = defaultdict(int)
950
- d["foo"] = 5
951
- data = {
952
- "foo": [
953
- 1,
954
- "Hello World!",
955
- 100.123,
956
- 323.232,
957
- 432324.0,
958
- {5, 6, 7, (1, 2, 3, 4), 8},
959
- ],
960
- "bar": frozenset({1, 2, 3}),
961
- "defaultdict": defaultdict(
962
- list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
963
- ),
964
- "counter": Counter(
965
- [
966
- "apple",
967
- "orange",
968
- "pear",
969
- "kumquat",
970
- "kumquat",
971
- "durian" * 100,
972
- ]
973
- ),
974
- "atomic": (False, True, None),
975
- "namedtuple": StockKeepingUnit(
976
- "Sparkling British Spring Water",
977
- "Carbonated spring water",
978
- 0.9,
979
- "water",
980
- ["its amazing!", "its terrible!"],
981
- ),
982
- "Broken": BrokenRepr(),
983
- }
984
- data["foo"].append(data) # type: ignore[attr-defined]
985
-
986
- from pip._vendor.rich import print
987
-
988
- # print(Pretty(data, indent_guides=True, max_string=20))
989
-
990
- class Thing:
991
- def __repr__(self) -> str:
992
- return "Hello\x1b[38;5;239m World!"
993
-
994
- print(Pretty(Thing()))
 
spaces/Audio-AGI/AudioSep/data/datamodules.py DELETED
@@ -1,122 +0,0 @@
1
- from typing import Dict, List, Optional, NoReturn
2
- import torch
3
- import lightning.pytorch as pl
4
- from torch.utils.data import DataLoader
5
- from data.audiotext_dataset import AudioTextDataset
6
-
7
-
8
- class DataModule(pl.LightningDataModule):
9
- def __init__(
10
- self,
11
- train_dataset: object,
12
- batch_size: int,
13
- num_workers: int
14
- ):
15
- r"""Data module. To get one batch of data:
16
-
17
- code-block:: python
18
-
19
- data_module.setup()
20
-
21
- for batch_data_dict in data_module.train_dataloader():
22
- print(batch_data_dict.keys())
23
- break
24
-
25
- Args:
26
- train_dataset: Dataset object
27
- batch_size: int
28
- num_workers: int
29
-
30
- """
31
- super().__init__()
32
- self._train_dataset = train_dataset
33
- self.num_workers = num_workers
34
- self.batch_size = batch_size
35
- self.collate_fn = collate_fn
36
-
37
-
38
- def prepare_data(self):
39
- # download, split, etc...
40
- # only called on 1 GPU/TPU in distributed
41
- pass
42
-
43
- def setup(self, stage: Optional[str] = None) -> NoReturn:
44
- r"""called on every device."""
45
-
46
- # make assignments here (val/train/test split)
47
- # called on every process in DDP
48
-
49
- # SegmentSampler is used for selecting segments for training.
50
- # On multiple devices, each SegmentSampler samples a part of mini-batch
51
- # data.
52
- self.train_dataset = self._train_dataset
53
-
54
-
55
- def train_dataloader(self) -> torch.utils.data.DataLoader:
56
- r"""Get train loader."""
57
- train_loader = DataLoader(
58
- dataset=self.train_dataset,
59
- batch_size=self.batch_size,
60
- collate_fn=self.collate_fn,
61
- num_workers=self.num_workers,
62
- pin_memory=True,
63
- persistent_workers=False,
64
- shuffle=True
65
- )
66
-
67
- return train_loader
68
-
69
- def val_dataloader(self):
70
- # val_split = Dataset(...)
71
- # return DataLoader(val_split)
72
- pass
73
-
74
- def test_dataloader(self):
75
- # test_split = Dataset(...)
76
- # return DataLoader(test_split)
77
- pass
78
-
79
- def teardown(self):
80
- # clean up after fit or test
81
- # called on every process in DDP
82
- pass
83
-
84
-
85
- def collate_fn(list_data_dict):
86
- r"""Collate mini-batch data to inputs and targets for training.
87
-
88
- Args:
89
- list_data_dict: e.g., [
90
- {
91
- 'text': 'a sound of dog',
92
- 'waveform': (1, samples),
93
- 'modality': 'audio_text'
94
- }
95
- ...
96
- ]
97
- Returns:
98
- data_dict: e.g.
99
- 'audio_text': {
100
- 'text': ['a sound of dog', ...]
101
- 'waveform': (batch_size, 1, samples)
102
- }
103
- """
104
-
105
- at_list_data_dict = [data_dict for data_dict in list_data_dict if data_dict['modality']=='audio_text']
106
-
107
- at_data_dict = {}
108
-
109
- if len(at_list_data_dict) > 0:
110
- for key in at_list_data_dict[0].keys():
111
- at_data_dict[key] = [at_data_dict[key] for at_data_dict in at_list_data_dict]
112
- if key == 'waveform':
113
- at_data_dict[key] = torch.stack(at_data_dict[key])
114
- elif key == 'text':
115
- at_data_dict[key] = [text for text in at_data_dict[key]]
116
-
117
-
118
- data_dict = {
119
- 'audio_text': at_data_dict
120
- }
121
-
122
- return data_dict
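The `collate_fn` deleted above gathers the `audio_text` samples from a mini-batch list, stacks their waveforms, and keeps the text strings as a list. A rough illustration of that transformation, with made-up sample values:

```python
import torch

# Hypothetical mini-batch of two audio-text samples, shaped like the
# dicts described in the deleted collate_fn docstring.
samples = [
    {"text": "a sound of dog", "waveform": torch.zeros(1, 16000), "modality": "audio_text"},
    {"text": "rain on a roof", "waveform": torch.zeros(1, 16000), "modality": "audio_text"},
]

batch = collate_fn(samples)  # collate_fn as defined in the deleted module
print(batch["audio_text"]["text"])            # ['a sound of dog', 'rain on a roof']
print(batch["audio_text"]["waveform"].shape)  # torch.Size([2, 1, 16000])
```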
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/README.md DELETED
@@ -1,6 +0,0 @@
1
- This directory provides definitions for a few common models, dataloaders, scheduler,
2
- and optimizers that are often used in training.
3
- The definition of these objects are provided in the form of lazy instantiation:
4
- their arguments can be edited by users before constructing the objects.
5
-
6
- They can be imported, or loaded by `model_zoo.get_config` API in users' own configs.
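The deleted README refers to detectron2's lazy-config workflow, where config files are loaded as editable nodes and only instantiated later. A hedged sketch of that pattern (the config path below is illustrative):

```python
# Sketch of loading a lazy config and editing its arguments before
# instantiation; the exact config path is illustrative.
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/common/optim.py")  # returns an editable config node
cfg.SGD.lr = 0.01                                 # arguments can be changed before building
# instantiate(cfg.SGD) would then construct the optimizer once its
# remaining arguments (e.g. the model parameters) are supplied.
```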
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/demo/README.md DELETED
@@ -1,8 +0,0 @@
1
-
2
- ## Detectron2 Demo
3
-
4
- We provide a command line tool to run a simple demo of builtin configs.
5
- The usage is explained in [GETTING_STARTED.md](../GETTING_STARTED.md).
6
-
7
- See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-)
8
- for a high-quality demo generated with this tool.
 
spaces/Benson/text-generation/Examples/Apkgstore X.md DELETED
@@ -1,92 +0,0 @@
1
- <br />
2
- <h1>Rush Royale Mod APK dinero ilimitado y joyas 2023</h1>
3
- <p>¿Estás buscando un juego de torre de defensa divertido y adictivo con elementos de recogida de cartas? ¿Quieres competir contra otros jugadores o formar equipo con ellos en un reino de fantasía de magia y caos? ¿Quieres desbloquear todas las tarjetas y actualizaciones sin gastar dinero real o ver anuncios? Si usted respondió sí a cualquiera de estas preguntas, entonces usted debe tratar de Rush Royale Mod APK Unlimited Money and Gems 2023! </p>
4
- <h2>¿Qué es Rush Royale? </h2>
5
- <p>Rush Royale es un juego de defensa de torres de fantasía móvil desarrollado por IT Territory y publicado por My.Games. Está disponible en rr.my.games y en Google Play Store. En este juego, coleccionas unidades, montas una baraja para la defensa de la base y te preparas para un juego TD lleno de acción, aventura y diversión sin fin. </p>
6
- <h2>apkgstore x</h2><br /><p><b><b>Download</b> &#10003; <a href="https://bltlly.com/2v6J2n">https://bltlly.com/2v6J2n</a></b></p><br /><br />
7
- <h3>Un juego de defensa de la torre con elementos de recogida de cartas</h3>
8
- <p>El modo de juego de Rush Royale es simple pero desafiante. Empiezas con una cuadrícula de 3x5 para colocar tus unidades y 100 unidades de maná. El maná se utiliza para convocar nuevas unidades o mejorar las existentes. Las unidades que invocas son aleatorias según tu baraja, al igual que su ubicación en la cuadrícula. Puede combinar dos unidades idénticas del mismo rango para crear una nueva unidad aleatoria con una tasa de fuego mejorada. Sin embargo, la fusión también puede resultar en una pérdida de potencia si la nueva unidad tiene una potencia menor que las unidades combinadas. Tienes que equilibrar las unidades de fusión para hacer espacio para más, o mantenerlas separadas para mantener su poder. </p>
9
-
10
- <h3>Un modo PvP y cooperativo con diferentes mecánicas de juego</h3>
11
- <p>Rush Royale tiene dos modos de juego principales: PvP y Cooperativo. En el modo PvP, compites contra otro jugador en tiempo real. Tienes que defender tu propia base mientras envías monstruos a la base de tu oponente matándolos a tu lado. El primer jugador en perder todos sus puntos de salud pierde el juego. Puedes ganar trofeos ganando partidos y progresando a través de diferentes arenas. También puedes ganar recompensas como cartas, cofres, monedas, gemas y entradas. </p>
12
- <p>En el modo cooperativo, haces equipo con otro jugador para defender un castillo contra oleadas de monstruos. Compartes la misma red y mana pool con tu pareja. Tienes que cooperar y comunicarte con tu pareja para sobrevivir el mayor tiempo posible. Puedes ganar recompensas como cartas, cofres, monedas, gemas y tickets en función del número de olas que despejes. </p>
13
- <h3>Una variedad de unidades, héroes y facciones para elegir</h3>
14
- <p>Rush Royale tiene una rica y diversa colección de unidades, héroes y facciones para adaptarse a su estilo de juego y estrategia. Hay cuatro facciones en el juego: Orden, Caos, Vacío y Naturaleza. Cada facción tiene su propio tema, color y unidades. Puedes mezclar y combinar unidades de diferentes facciones para crear tu propia baraja única. También puedes actualizar tus unidades para aumentar su poder y desbloquear nuevas habilidades. </p>
15
- <p></p>
16
- <p>También hay héroes que puedes elegir para dirigir tu ejército. Los héroes tienen habilidades especiales que se pueden activar una vez que se cargan. Algunos héroes pueden mejorar tus unidades, algunos pueden desacreditar a los enemigos y algunos pueden infligir daño masivo. Puedes desbloquear nuevos héroes completando misiones o comprándolas con gemas. También puedes actualizar a tus héroes para mejorar sus habilidades y estadísticas. </p>
17
- <h2>¿Por qué usar Rush Royale Mod APK? </h2>
18
-
19
- <p>Es por eso que usted debe utilizar Rush Royale Mod APK dinero ilimitado y Gems 2023. Esta es una versión modificada del juego original que te da dinero ilimitado y gemas para gastar como quieras. Puede desbloquear todas las tarjetas y actualizaciones sin moler o gastar dinero real. También puede disfrutar del juego sin anuncios ni restricciones. Con Rush Royale Mod APK, usted puede tener más diversión y libertad en el juego. </p>
20
- <h3>Para obtener dinero y gemas ilimitadas</h3>
21
- <p>El principal beneficio de usar Rush Royale Mod APK es que se obtiene dinero ilimitado y gemas para usar en el juego. El dinero y las gemas son las dos principales monedas en Rush Royale que se utilizan para diversos fines. El dinero se utiliza para comprar nuevas cartas de la tienda, mejorar sus unidades y héroes, cofres abiertos, y entrar en torneos. Las gemas se utilizan para comprar cofres premium, ofertas especiales, pieles de héroes y entradas. </p>
22
- <p>Con dinero y gemas ilimitadas, puedes comprar lo que quieras sin preocuparte por quedarte sin recursos. Puede desbloquear todas las tarjetas y mejoras en el juego sin esperar o moler. También puedes abrir tantos cofres como quieras para obtener más cartas y recompensas. Puedes participar en cualquier torneo que desees sin pagar ninguna cuota de inscripción. Puedes personalizar a tu héroe con cualquier piel que te guste sin gastar ninguna joya. </p>
23
- <h3>Para desbloquear todas las tarjetas y actualizaciones</h3>
24
- <p>Otro beneficio de usar Rush Royale Mod APK es que se puede desbloquear todas las tarjetas y mejoras en el juego sin ningún tipo de molestia. Las cartas son el elemento central de Rush Royale ya que determinan las habilidades y el rendimiento de tus unidades en la batalla. Hay más de 100 cartas en el juego divididas en cuatro facciones: Orden, Caos, Vacío y Naturaleza. Cada carta tiene un arma diferente, estilo, rango, poder, velocidad de fuego, coste de maná, habilidad y rareza. </p>
25
-
26
- <h3>Para disfrutar del juego sin anuncios ni restricciones</h3>
27
- <p>Un beneficio final de usar Rush Royale Mod APK es que se puede disfrutar del juego sin anuncios o restricciones que pueden arruinar su experiencia de juego. Los anuncios son molestos pop-ups que aparecen al azar en el juego o cuando intenta reclamar algunas recompensas. Interrumpen tu juego y te obligan a observarlo durante unos segundos o minutos antes de que puedas continuar jugando. </p>
28
- <p>Las restricciones son limitaciones que le impiden acceder a algunas funciones o contenido del juego a menos que pague dinero real o vea anuncios. Por ejemplo, tienes un número limitado de cofres gratis por día que puedes abrir para obtener algunas cartas y recompensas. También tienes un número limitado de entradas gratuitas por día para participar en torneos donde puedes competir por premios más grandes. </p>
29
- <p>Con Rush Royale Mod APK, no tienes que lidiar con ningún anuncio o restricciones en el juego. Usted puede jugar el juego sin problemas y sin interrupciones por cualquier molesto pop-ups o banners. También puedes acceder a todas las características y contenido del juego sin pagar dinero real ni ver ningún anuncio. </p>
30
- <h2>¿Cómo descargar e instalar Rush Royale Mod APK? </h 2>Cómo descargar e instalar Rush Royale Mod APK? </h2>
31
- <p>Si desea descargar e instalar Rush Royale Mod APK dinero ilimitado y joyas 2023, usted tiene que seguir estos sencillos pasos:</p>
32
- <h3>Siga estos pasos para descargar el archivo apk mod</h3>
33
- <ol>
34
- <li>Haga clic en este enlace para ir a la página de descarga de Rush Royale Mod APK.</li>
35
- <li>Espere unos segundos hasta que aparezca el botón de descarga. </li>
36
- <li>Haga clic en el botón de descarga y elija una ubicación para guardar el archivo en su dispositivo. </li>
37
- <li>Espere a que termine la descarga. El tamaño del archivo es de unos 150 MB.</li>
38
- </ol>
39
- <h3>Siga estos pasos para instalar el archivo apk mod</h3>
40
- <ol>
41
- <li>Vaya a la ubicación donde guardó el archivo y toque en él para abrirlo. </li>
42
-
43
- <li>Toque en "Instalar" y espere a que la instalación se complete. </li>
44
- <li>Toque en "Abrir" y disfrutar del juego con dinero y gemas ilimitadas. </li>
45
- </ol>
46
- <h3>Disfruta del juego con dinero y gemas ilimitadas</h3>
47
- <p>Felicidades! Usted ha descargado e instalado con éxito Rush Royale Mod APK Unlimited Money and Gems 2023. Ahora puedes disfrutar del juego con todas las características y contenido desbloqueado. Puedes comprar cualquier carta que quieras, actualizar cualquier unidad o héroe que quieras, abrir cualquier cofre que quieras, entrar en cualquier torneo que quieras y personalizar cualquier piel de héroe que quieras. También puedes jugar sin anuncios ni restricciones. ¡Diviértete y domina la Isla de Rhandum! </p>
48
- <h2>¿Cuáles son algunos consejos y trucos para ganar en Rush Royale? </h2>
49
- <p>Rush Royale es un juego que requiere habilidad, estrategia y suerte. Tienes que usar tus unidades, héroes y habilidades sabiamente para defender tu base y atacar la base de tu oponente. También tienes que adaptarte a la naturaleza aleatoria del juego y lidiar con situaciones inesperadas. Aquí hay algunos consejos y trucos que pueden ayudarte a ganar en Rush Royale:</p>
50
- <h3>Utilice una cubierta equilibrada con daño de punto, AOE, regeneración de maná y control de multitudes</h3>
51
- <p>Una cubierta equilibrada es una cubierta que tiene una buena mezcla de unidades que pueden causar daño puntual, daño de área de efecto, regeneración de maná y control de multitudes. Las unidades de daño puntual son unidades que pueden atacar y dañar a un solo enemigo a la vez, como Archer o Bombardier. Las unidades AOE son unidades que pueden dañar a múltiples enemigos en un área determinada, como Fire Mage o Plague Doctor. Las unidades de regeneración de maná son unidades que pueden restaurar tu maná con el tiempo, como Dryad o Shaman. Las unidades de control de multitudes son unidades que pueden afectar el movimiento o el comportamiento de los enemigos, como Frost Mage o Harlequin.</p>
52
-
53
- <h3>Combinar unidades sabiamente y actualizarlas cuando sea necesario</h3>
54
- <p>Las unidades de fusión es un mecánico clave en Rush Royale que puede hacer o romper su juego. Las unidades de fusión pueden crear nuevas unidades aleatorias con una tasa de fuego mejorada, pero también puede resultar en una pérdida de potencia si la nueva unidad tiene una potencia menor que las unidades combinadas. Tienes que combinar las unidades sabiamente y estratégicamente para optimizar el espacio de la red y la potencia de la unidad. </p>
55
- <p>Algunos consejos generales para las unidades de fusión son:</p>
56
- <ul>
57
- <li>Fusionar unidades de rango bajo primero, ya que tienen menor potencia y velocidad de fuego que las unidades de alto rango. </li>
58
- <li>Fusionar unidades de baja potencia primero, ya que tienen menor impacto que las unidades de alta potencia. </li>
59
- <li>Fusionar unidades duplicadas primero, ya que no tienen sinergia entre sí. </li>
60
- <li>Fusionar diferentes unidades de facción primero, ya que no tienen ninguna bonificación de facción entre sí. </li>
61
- <li>Las unidades de regeneración de mana de fusión duran, ya que son útiles para mantener su suministro de maná. </li>
62
- </ul>
63
- <p>Actualizar unidades es otro mecánico clave en Rush Royale que puede mejorar la potencia y el rendimiento de su unidad. Mejorar las unidades aumenta su poder y desbloquea nuevas habilidades que pueden hacerlas más efectivas en combate. Tienes que actualizar las unidades cuando sea necesario para mantenerte al día con la creciente dificultad del juego. </p>
64
- <p>Algunos consejos generales para las unidades de actualización son:</p>
65
- <ul>
66
- <li>Actualice primero las unidades de alto rango, ya que tienen mayor potencia y velocidad de fuego que las unidades de bajo rango. </li>
67
- <li <li>Actualice primero las unidades de alta potencia, ya que tienen mayor impacto que las unidades de baja potencia. </li>
68
- <li>Mejora las unidades con habilidades útiles primero, ya que pueden darte una ventaja en la batalla. </li>
69
- <li>Mejora las unidades con bonificación de facción primero, ya que pueden aumentar tu poder general y sinergia. </li>
70
- <li>Actualice las unidades de manera uniforme, ya que tener una distribución de energía equilibrada puede ayudarlo a lidiar con diferentes situaciones. </li>
71
- </ul>
72
- <h3>Juega al modo cooperativo para obtener más recompensas y aprender de otros jugadores</h3>
73
-
74
- <p>El modo cooperativo es una gran manera de obtener más recompensas y aprender de otros jugadores. Puedes obtener más cartas y recompensas jugando al modo cooperativo que jugando en solitario. También puede aprender nuevas estrategias y consejos de su pareja o de ver su juego. También puedes hacer nuevos amigos y chatear con ellos en el juego. </p>
75
- <h2>Conclusión</h2>
76
- <p>Rush Royale es un divertido y adictivo juego de torre de defensa con la tarjeta de recogida de elementos. Puedes reunir unidades, montar un mazo y competir contra otros jugadores o formar equipo con ellos en un reino de fantasía de magia y caos. También puede desbloquear todas las características y el contenido en el juego sin gastar dinero real o ver anuncios mediante el uso de Rush Royale Mod APK Unlimited Money and Gems 2023. </p>
77
- <p>Rush Royale Mod APK le da dinero ilimitado y gemas para gastar como desee. Puedes comprar cualquier carta que quieras, actualizar cualquier unidad o héroe que quieras, abrir cualquier cofre que quieras, entrar en cualquier torneo que quieras y personalizar cualquier piel de héroe que quieras. También puedes disfrutar del juego sin anuncios ni restricciones que puedan limitar tu disfrute del juego. </p>
78
- <p>Descargar e instalar Rush Royale Mod APK hoy y conquistar la Isla de Rhandum con sus habilidades y estrategia. Diviértete y buena suerte! </p>
79
- <h2>Preguntas frecuentes</h2>
80
- <p>Aquí hay algunas preguntas frecuentes sobre Rush Royale Mod APK:</p>
81
- <h3> ¿Es Rush Royale Mod APK seguro de usar? </h3>
82
- <p>Sí, Rush Royale Mod APK es seguro de usar, siempre y cuando se descarga desde una fuente de confianza. Hemos probado el archivo apk mod y no encontramos virus o malware en él. Sin embargo, no podemos garantizar que funcionará en todos los dispositivos o que no causará ningún problema con su cuenta de juego. Úselo bajo su propio riesgo. </p>
83
- <h3>¿Voy a conseguir prohibido para el uso de Rush Royale Mod APK? </h3>
84
-
85
- <h3> ¿Cómo actualizo Rush Royale Mod APK? </h3>
86
- <p>Para actualizar Rush Royale Mod APK, usted tiene que descargar la última versión del archivo apk mod de la misma fuente donde descargó la versión anterior. Entonces, usted tiene que desinstalar la versión anterior del archivo apk mod e instalar la nueva versión del archivo apk mod. Puedes perder tu progreso o datos si no haces una copia de seguridad de tu juego antes de actualizarlo. </p>
87
- <h3>¿Puedo jugar Rush Royale Mod APK en línea con otros jugadores? </h3>
88
- <p>Sí, puedes jugar Rush Royale Mod APK en línea con otros jugadores en ambos modos PvP y Cooperativo. Sin embargo, puede encontrar algunos problemas o errores al jugar en línea con otros jugadores que están utilizando la versión original del juego o una versión diferente del archivo apk mod. Para evitar estos problemas o errores, le recomendamos que juegue en línea con otros jugadores que están utilizando la misma versión del archivo apk mod que usted. </p>
89
- <h3>¿Puedo solicitar una característica o informar de un error para Rush Royale Mod APK? </h3>
90
- <p>No somos los desarrolladores de Rush Royale Mod APK, por lo que no podemos agregar nuevas características o corregir cualquier error para ella. Solo estamos proporcionando el enlace para descargar el archivo apk mod de una fuente de confianza. Si usted tiene alguna solicitud o informes para Rush Royale Mod APK, usted tiene que ponerse en contacto con los desarrolladores originales o modders del archivo apk mod. </p> 64aa2da5cf<br />
91
- <br />
92
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/core.py DELETED
@@ -1,400 +0,0 @@
1
- from . import idnadata
2
- import bisect
3
- import unicodedata
4
- import re
5
- from typing import Union, Optional
6
- from .intranges import intranges_contain
7
-
8
- _virama_combining_class = 9
9
- _alabel_prefix = b'xn--'
10
- _unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
11
-
12
- class IDNAError(UnicodeError):
13
- """ Base exception for all IDNA-encoding related problems """
14
- pass
15
-
16
-
17
- class IDNABidiError(IDNAError):
18
- """ Exception when bidirectional requirements are not satisfied """
19
- pass
20
-
21
-
22
- class InvalidCodepoint(IDNAError):
23
- """ Exception when a disallowed or unallocated codepoint is used """
24
- pass
25
-
26
-
27
- class InvalidCodepointContext(IDNAError):
28
- """ Exception when the codepoint is not valid in the context it is used """
29
- pass
30
-
31
-
32
- def _combining_class(cp: int) -> int:
33
- v = unicodedata.combining(chr(cp))
34
- if v == 0:
35
- if not unicodedata.name(chr(cp)):
36
- raise ValueError('Unknown character in unicodedata')
37
- return v
38
-
39
- def _is_script(cp: str, script: str) -> bool:
40
- return intranges_contain(ord(cp), idnadata.scripts[script])
41
-
42
- def _punycode(s: str) -> bytes:
43
- return s.encode('punycode')
44
-
45
- def _unot(s: int) -> str:
46
- return 'U+{:04X}'.format(s)
47
-
48
-
49
- def valid_label_length(label: Union[bytes, str]) -> bool:
50
- if len(label) > 63:
51
- return False
52
- return True
53
-
54
-
55
- def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
56
- if len(label) > (254 if trailing_dot else 253):
57
- return False
58
- return True
59
-
60
-
61
- def check_bidi(label: str, check_ltr: bool = False) -> bool:
62
- # Bidi rules should only be applied if string contains RTL characters
63
- bidi_label = False
64
- for (idx, cp) in enumerate(label, 1):
65
- direction = unicodedata.bidirectional(cp)
66
- if direction == '':
67
- # String likely comes from a newer version of Unicode
68
- raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
69
- if direction in ['R', 'AL', 'AN']:
70
- bidi_label = True
71
- if not bidi_label and not check_ltr:
72
- return True
73
-
74
- # Bidi rule 1
75
- direction = unicodedata.bidirectional(label[0])
76
- if direction in ['R', 'AL']:
77
- rtl = True
78
- elif direction == 'L':
79
- rtl = False
80
- else:
81
- raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))
82
-
83
- valid_ending = False
84
- number_type = None # type: Optional[str]
85
- for (idx, cp) in enumerate(label, 1):
86
- direction = unicodedata.bidirectional(cp)
87
-
88
- if rtl:
89
- # Bidi rule 2
90
- if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
91
- raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
92
- # Bidi rule 3
93
- if direction in ['R', 'AL', 'EN', 'AN']:
94
- valid_ending = True
95
- elif direction != 'NSM':
96
- valid_ending = False
97
- # Bidi rule 4
98
- if direction in ['AN', 'EN']:
99
- if not number_type:
100
- number_type = direction
101
- else:
102
- if number_type != direction:
103
- raise IDNABidiError('Can not mix numeral types in a right-to-left label')
104
- else:
105
- # Bidi rule 5
106
- if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
107
- raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
108
- # Bidi rule 6
109
- if direction in ['L', 'EN']:
110
- valid_ending = True
111
- elif direction != 'NSM':
112
- valid_ending = False
113
-
114
- if not valid_ending:
115
- raise IDNABidiError('Label ends with illegal codepoint directionality')
116
-
117
- return True
118
-
119
-
120
- def check_initial_combiner(label: str) -> bool:
121
- if unicodedata.category(label[0])[0] == 'M':
122
- raise IDNAError('Label begins with an illegal combining character')
123
- return True
124
-
125
-
126
- def check_hyphen_ok(label: str) -> bool:
127
- if label[2:4] == '--':
128
- raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
129
- if label[0] == '-' or label[-1] == '-':
130
- raise IDNAError('Label must not start or end with a hyphen')
131
- return True
132
-
133
-
134
- def check_nfc(label: str) -> None:
135
- if unicodedata.normalize('NFC', label) != label:
136
- raise IDNAError('Label must be in Normalization Form C')
137
-
138
-
139
- def valid_contextj(label: str, pos: int) -> bool:
140
- cp_value = ord(label[pos])
141
-
142
- if cp_value == 0x200c:
143
-
144
- if pos > 0:
145
- if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
146
- return True
147
-
148
- ok = False
149
- for i in range(pos-1, -1, -1):
150
- joining_type = idnadata.joining_types.get(ord(label[i]))
151
- if joining_type == ord('T'):
152
- continue
153
- if joining_type in [ord('L'), ord('D')]:
154
- ok = True
155
- break
156
-
157
- if not ok:
158
- return False
159
-
160
- ok = False
161
- for i in range(pos+1, len(label)):
162
- joining_type = idnadata.joining_types.get(ord(label[i]))
163
- if joining_type == ord('T'):
164
- continue
165
- if joining_type in [ord('R'), ord('D')]:
166
- ok = True
167
- break
168
- return ok
169
-
170
- if cp_value == 0x200d:
171
-
172
- if pos > 0:
173
- if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
174
- return True
175
- return False
176
-
177
- else:
178
-
179
- return False
180
-
181
-
182
- def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
183
- cp_value = ord(label[pos])
184
-
185
- if cp_value == 0x00b7:
186
- if 0 < pos < len(label)-1:
187
- if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
188
- return True
189
- return False
190
-
191
- elif cp_value == 0x0375:
192
- if pos < len(label)-1 and len(label) > 1:
193
- return _is_script(label[pos + 1], 'Greek')
194
- return False
195
-
196
- elif cp_value == 0x05f3 or cp_value == 0x05f4:
197
- if pos > 0:
198
- return _is_script(label[pos - 1], 'Hebrew')
199
- return False
200
-
201
- elif cp_value == 0x30fb:
202
- for cp in label:
203
- if cp == '\u30fb':
204
- continue
205
- if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
206
- return True
207
- return False
208
-
209
- elif 0x660 <= cp_value <= 0x669:
210
- for cp in label:
211
- if 0x6f0 <= ord(cp) <= 0x06f9:
212
- return False
213
- return True
214
-
215
- elif 0x6f0 <= cp_value <= 0x6f9:
216
- for cp in label:
217
- if 0x660 <= ord(cp) <= 0x0669:
218
- return False
219
- return True
220
-
221
- return False
222
-
223
-
224
- def check_label(label: Union[str, bytes, bytearray]) -> None:
225
- if isinstance(label, (bytes, bytearray)):
226
- label = label.decode('utf-8')
227
- if len(label) == 0:
228
- raise IDNAError('Empty Label')
229
-
230
- check_nfc(label)
231
- check_hyphen_ok(label)
232
- check_initial_combiner(label)
233
-
234
- for (pos, cp) in enumerate(label):
235
- cp_value = ord(cp)
236
- if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
237
- continue
238
- elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
239
- try:
240
- if not valid_contextj(label, pos):
241
- raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
242
- _unot(cp_value), pos+1, repr(label)))
243
- except ValueError:
244
- raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
245
- _unot(cp_value), pos+1, repr(label)))
246
- elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
247
- if not valid_contexto(label, pos):
248
- raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
249
- else:
250
- raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
251
-
252
- check_bidi(label)
253
-
254
-
255
- def alabel(label: str) -> bytes:
256
- try:
257
- label_bytes = label.encode('ascii')
258
- ulabel(label_bytes)
259
- if not valid_label_length(label_bytes):
260
- raise IDNAError('Label too long')
261
- return label_bytes
262
- except UnicodeEncodeError:
263
- pass
264
-
265
- if not label:
266
- raise IDNAError('No Input')
267
-
268
- label = str(label)
269
- check_label(label)
270
- label_bytes = _punycode(label)
271
- label_bytes = _alabel_prefix + label_bytes
272
-
273
- if not valid_label_length(label_bytes):
274
- raise IDNAError('Label too long')
275
-
276
- return label_bytes
277
-
278
-
279
- def ulabel(label: Union[str, bytes, bytearray]) -> str:
280
- if not isinstance(label, (bytes, bytearray)):
281
- try:
282
- label_bytes = label.encode('ascii')
283
- except UnicodeEncodeError:
284
- check_label(label)
285
- return label
286
- else:
287
- label_bytes = label
288
-
289
- label_bytes = label_bytes.lower()
290
- if label_bytes.startswith(_alabel_prefix):
291
- label_bytes = label_bytes[len(_alabel_prefix):]
292
- if not label_bytes:
293
- raise IDNAError('Malformed A-label, no Punycode eligible content found')
294
- if label_bytes.decode('ascii')[-1] == '-':
295
- raise IDNAError('A-label must not end with a hyphen')
296
- else:
297
- check_label(label_bytes)
298
- return label_bytes.decode('ascii')
299
-
300
- try:
301
- label = label_bytes.decode('punycode')
302
- except UnicodeError:
303
- raise IDNAError('Invalid A-label')
304
- check_label(label)
305
- return label
306
-
307
-
308
- def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
309
- """Re-map the characters in the string according to UTS46 processing."""
310
- from .uts46data import uts46data
311
- output = ''
312
-
313
- for pos, char in enumerate(domain):
314
- code_point = ord(char)
315
- try:
316
- uts46row = uts46data[code_point if code_point < 256 else
317
- bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
318
- status = uts46row[1]
319
- replacement = None # type: Optional[str]
320
- if len(uts46row) == 3:
321
- replacement = uts46row[2] # type: ignore
322
- if (status == 'V' or
323
- (status == 'D' and not transitional) or
324
- (status == '3' and not std3_rules and replacement is None)):
325
- output += char
326
- elif replacement is not None and (status == 'M' or
327
- (status == '3' and not std3_rules) or
328
- (status == 'D' and transitional)):
329
- output += replacement
330
- elif status != 'I':
331
- raise IndexError()
332
- except IndexError:
333
- raise InvalidCodepoint(
334
- 'Codepoint {} not allowed at position {} in {}'.format(
335
- _unot(code_point), pos + 1, repr(domain)))
336
-
337
- return unicodedata.normalize('NFC', output)
338
-
339
-
340
- def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
341
- if isinstance(s, (bytes, bytearray)):
342
- try:
343
- s = s.decode('ascii')
344
- except UnicodeDecodeError:
345
- raise IDNAError('should pass a unicode string to the function rather than a byte string.')
346
- if uts46:
347
- s = uts46_remap(s, std3_rules, transitional)
348
- trailing_dot = False
349
- result = []
350
- if strict:
351
- labels = s.split('.')
352
- else:
353
- labels = _unicode_dots_re.split(s)
354
- if not labels or labels == ['']:
355
- raise IDNAError('Empty domain')
356
- if labels[-1] == '':
357
- del labels[-1]
358
- trailing_dot = True
359
- for label in labels:
360
- s = alabel(label)
361
- if s:
362
- result.append(s)
363
- else:
364
- raise IDNAError('Empty label')
365
- if trailing_dot:
366
- result.append(b'')
367
- s = b'.'.join(result)
368
- if not valid_string_length(s, trailing_dot):
369
- raise IDNAError('Domain too long')
370
- return s
371
-
372
-
373
- def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
374
- try:
375
- if isinstance(s, (bytes, bytearray)):
376
- s = s.decode('ascii')
377
- except UnicodeDecodeError:
378
- raise IDNAError('Invalid ASCII in A-label')
379
- if uts46:
380
- s = uts46_remap(s, std3_rules, False)
381
- trailing_dot = False
382
- result = []
383
- if not strict:
384
- labels = _unicode_dots_re.split(s)
385
- else:
386
- labels = s.split('.')
387
- if not labels or labels == ['']:
388
- raise IDNAError('Empty domain')
389
- if not labels[-1]:
390
- del labels[-1]
391
- trailing_dot = True
392
- for label in labels:
393
- s = ulabel(label)
394
- if s:
395
- result.append(s)
396
- else:
397
- raise IDNAError('Empty label')
398
- if trailing_dot:
399
- result.append('')
400
- return '.'.join(result)
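The module above implements IDNA 2008 validation plus Punycode conversion; `encode` and `decode` split a domain into labels, check each one, and round-trip through the `xn--` A-label form. A small example using the standalone `idna` package, which this vendored copy mirrors:

```python
# Round-trip example with the standalone `idna` package.
import idna

ascii_form = idna.encode("bücher.example")  # b'xn--bcher-kva.example'
unicode_form = idna.decode(ascii_form)      # 'bücher.example'
print(ascii_form, unicode_form)
```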
 
spaces/BilalSardar/AutoML-Model-Training/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: AutoML Model Training
3
- emoji: 💻
4
- colorFrom: gray
5
- colorTo: red
6
- sdk: streamlit
7
- sdk_version: 1.15.2
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/uninitialized_copy.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special version of this algorithm
22
-
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/mismatch.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the mismatch.h header
22
- // of the host and device systems. It should be #included in any
23
- // code which uses adl to dispatch mismatch
24
-
25
- #include <thrust/system/detail/sequential/mismatch.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/mismatch.h>
32
- #include <thrust/system/cuda/detail/mismatch.h>
33
- #include <thrust/system/omp/detail/mismatch.h>
34
- #include <thrust/system/tbb/detail/mismatch.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_MISMATCH_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/mismatch.h>
38
- #include __THRUST_HOST_SYSTEM_MISMATCH_HEADER
39
- #undef __THRUST_HOST_SYSTEM_MISMATCH_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_MISMATCH_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/mismatch.h>
42
- #include __THRUST_DEVICE_SYSTEM_MISMATCH_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_MISMATCH_HEADER
44
-
 
spaces/CVPR/WALT/mmdet/core/utils/misc.py DELETED
@@ -1,61 +0,0 @@
1
- from functools import partial
2
-
3
- import numpy as np
4
- import torch
5
- from six.moves import map, zip
6
-
7
- from ..mask.structures import BitmapMasks, PolygonMasks
8
-
9
-
10
- def multi_apply(func, *args, **kwargs):
11
- """Apply function to a list of arguments.
12
-
13
- Note:
14
- This function applies the ``func`` to multiple inputs and
15
- map the multiple outputs of the ``func`` into different
16
- list. Each list contains the same type of outputs corresponding
17
- to different inputs.
18
-
19
- Args:
20
- func (Function): A function that will be applied to a list of
21
- arguments
22
-
23
- Returns:
24
- tuple(list): A tuple containing multiple list, each list contains \
25
- a kind of returned results by the function
26
- """
27
- pfunc = partial(func, **kwargs) if kwargs else func
28
- map_results = map(pfunc, *args)
29
- return tuple(map(list, zip(*map_results)))
30
-
31
-
32
- def unmap(data, count, inds, fill=0):
33
- """Unmap a subset of items (data) back to the original set of items (of size
34
- count)"""
35
- if data.dim() == 1:
36
- ret = data.new_full((count, ), fill)
37
- ret[inds.type(torch.bool)] = data
38
- else:
39
- new_size = (count, ) + data.size()[1:]
40
- ret = data.new_full(new_size, fill)
41
- ret[inds.type(torch.bool), :] = data
42
- return ret
43
-
44
-
45
- def mask2ndarray(mask):
46
- """Convert Mask to ndarray.
47
-
48
- Args:
49
- mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
50
- torch.Tensor or np.ndarray): The mask to be converted.
51
-
52
- Returns:
53
- np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
54
- """
55
- if isinstance(mask, (BitmapMasks, PolygonMasks)):
56
- mask = mask.to_ndarray()
57
- elif isinstance(mask, torch.Tensor):
58
- mask = mask.detach().cpu().numpy()
59
- elif not isinstance(mask, np.ndarray):
60
- raise TypeError(f'Unsupported {type(mask)} data type')
61
- return mask
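`multi_apply` above maps a function over several parallel argument lists and transposes the per-call tuples into per-field lists. A small illustration reusing the deleted helper:

```python
# Illustration of multi_apply's transpose behaviour, reusing the
# helper defined in the deleted file.
def add_and_mul(a, b):
    return a + b, a * b

sums, products = multi_apply(add_and_mul, [1, 2, 3], [10, 20, 30])
print(sums)      # [11, 22, 33]
print(products)  # [10, 40, 90]
```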
 
spaces/CVPR/WALT/mmdet/models/roi_heads/shared_heads/res_layer.py DELETED
@@ -1,77 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import constant_init, kaiming_init
3
- from mmcv.runner import auto_fp16, load_checkpoint
4
-
5
- from mmdet.models.backbones import ResNet
6
- from mmdet.models.builder import SHARED_HEADS
7
- from mmdet.models.utils import ResLayer as _ResLayer
8
- from mmdet.utils import get_root_logger
9
-
10
-
11
- @SHARED_HEADS.register_module()
12
- class ResLayer(nn.Module):
13
-
14
- def __init__(self,
15
- depth,
16
- stage=3,
17
- stride=2,
18
- dilation=1,
19
- style='pytorch',
20
- norm_cfg=dict(type='BN', requires_grad=True),
21
- norm_eval=True,
22
- with_cp=False,
23
- dcn=None):
24
- super(ResLayer, self).__init__()
25
- self.norm_eval = norm_eval
26
- self.norm_cfg = norm_cfg
27
- self.stage = stage
28
- self.fp16_enabled = False
29
- block, stage_blocks = ResNet.arch_settings[depth]
30
- stage_block = stage_blocks[stage]
31
- planes = 64 * 2**stage
32
- inplanes = 64 * 2**(stage - 1) * block.expansion
33
-
34
- res_layer = _ResLayer(
35
- block,
36
- inplanes,
37
- planes,
38
- stage_block,
39
- stride=stride,
40
- dilation=dilation,
41
- style=style,
42
- with_cp=with_cp,
43
- norm_cfg=self.norm_cfg,
44
- dcn=dcn)
45
- self.add_module(f'layer{stage + 1}', res_layer)
46
-
47
- def init_weights(self, pretrained=None):
48
- """Initialize the weights in the module.
49
-
50
- Args:
51
- pretrained (str, optional): Path to pre-trained weights.
52
- Defaults to None.
53
- """
54
- if isinstance(pretrained, str):
55
- logger = get_root_logger()
56
- load_checkpoint(self, pretrained, strict=False, logger=logger)
57
- elif pretrained is None:
58
- for m in self.modules():
59
- if isinstance(m, nn.Conv2d):
60
- kaiming_init(m)
61
- elif isinstance(m, nn.BatchNorm2d):
62
- constant_init(m, 1)
63
- else:
64
- raise TypeError('pretrained must be a str or None')
65
-
66
- @auto_fp16()
67
- def forward(self, x):
68
- res_layer = getattr(self, f'layer{self.stage + 1}')
69
- out = res_layer(x)
70
- return out
71
-
72
- def train(self, mode=True):
73
- super(ResLayer, self).train(mode)
74
- if self.norm_eval:
75
- for m in self.modules():
76
- if isinstance(m, nn.BatchNorm2d):
77
- m.eval()
 
spaces/CVPR/lama-example/bin/mask_example.py DELETED
@@ -1,14 +0,0 @@
1
- import matplotlib.pyplot as plt
2
- from skimage import io
3
- from skimage.transform import resize
4
-
5
- from saicinpainting.evaluation.masks.mask import SegmentationMask
6
-
7
- im = io.imread('imgs/ex4.jpg')
8
- im = resize(im, (512, 1024), anti_aliasing=True)
9
- mask_seg = SegmentationMask(num_variants_per_mask=10)
10
- mask_examples = mask_seg.get_masks(im)
11
- for i, example in enumerate(mask_examples):
12
- plt.imshow(example)
13
- plt.show()
14
- plt.imsave(f'tmp/img_masks/{i}.png', example)
 
spaces/CarlDennis/Lovelive-VITS-JPZH/text/sanskrit.py DELETED
@@ -1,62 +0,0 @@
1
- import re
2
- from indic_transliteration import sanscript
3
-
4
-
5
- # List of (iast, ipa) pairs:
6
- _iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
7
- ('a', 'ə'),
8
- ('ā', 'aː'),
9
- ('ī', 'iː'),
10
- ('ū', 'uː'),
11
- ('ṛ', 'ɹ`'),
12
- ('ṝ', 'ɹ`ː'),
13
- ('ḷ', 'l`'),
14
- ('ḹ', 'l`ː'),
15
- ('e', 'eː'),
16
- ('o', 'oː'),
17
- ('k', 'k⁼'),
18
- ('k⁼h', 'kʰ'),
19
- ('g', 'g⁼'),
20
- ('g⁼h', 'gʰ'),
21
- ('ṅ', 'ŋ'),
22
- ('c', 'ʧ⁼'),
23
- ('ʧ⁼h', 'ʧʰ'),
24
- ('j', 'ʥ⁼'),
25
- ('ʥ⁼h', 'ʥʰ'),
26
- ('ñ', 'n^'),
27
- ('ṭ', 't`⁼'),
28
- ('t`⁼h', 't`ʰ'),
29
- ('ḍ', 'd`⁼'),
30
- ('d`⁼h', 'd`ʰ'),
31
- ('ṇ', 'n`'),
32
- ('t', 't⁼'),
33
- ('t⁼h', 'tʰ'),
34
- ('d', 'd⁼'),
35
- ('d⁼h', 'dʰ'),
36
- ('p', 'p⁼'),
37
- ('p⁼h', 'pʰ'),
38
- ('b', 'b⁼'),
39
- ('b⁼h', 'bʰ'),
40
- ('y', 'j'),
41
- ('ś', 'ʃ'),
42
- ('ṣ', 's`'),
43
- ('r', 'ɾ'),
44
- ('l̤', 'l`'),
45
- ('h', 'ɦ'),
46
- ("'", ''),
47
- ('~', '^'),
48
- ('ṃ', '^')
49
- ]]
50
-
51
-
52
- def devanagari_to_ipa(text):
53
- text = text.replace('ॐ', 'ओम्')
54
- text = re.sub(r'\s*।\s*$', '.', text)
55
- text = re.sub(r'\s*।\s*', ', ', text)
56
- text = re.sub(r'\s*॥', '.', text)
57
- text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
58
- for regex, replacement in _iast_to_ipa:
59
- text = re.sub(regex, replacement, text)
60
- text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
61
- [:-1]+'h'+x.group(1)+'*', text)
62
- return text
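`devanagari_to_ipa` first transliterates Devanagari to IAST via `indic_transliteration`, then applies the substitution table above. A hedged usage sketch (requires the `indic_transliteration` package; the helper is the one defined in the deleted file):

```python
# Hedged usage sketch; requires the indic_transliteration package and
# the devanagari_to_ipa helper defined in the deleted file.
text = "नमस्ते"
print(devanagari_to_ipa(text))  # e.g. 'nəməst⁼eː' with the table above
```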
 
spaces/CikeyQI/meme-api/meme_generator/memes/fencing/__init__.py DELETED
@@ -1,37 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from PIL.Image import Image as IMG
5
- from pil_utils import BuildImage
6
-
7
- from meme_generator import add_meme
8
- from meme_generator.utils import save_gif
9
-
10
- img_dir = Path(__file__).parent / "images"
11
-
12
-
13
- def fencing(images: List[BuildImage], texts, args):
14
- self_head = images[0].convert("RGBA").circle().resize((27, 27))
15
- user_head = images[1].convert("RGBA").circle().resize((27, 27))
16
- # fmt: off
17
- user_locs = [
18
- (57, 4), (55, 5), (58, 7), (57, 5), (53, 8), (54, 9),
19
- (64, 5), (66, 8), (70, 9), (73, 8), (81, 10), (77, 10),
20
- (72, 4), (79, 8), (50, 8), (60, 7), (67, 6), (60, 6), (50, 9)
21
- ]
22
- self_locs = [
23
- (10, 6), (3, 6), (32, 7), (22, 7), (13, 4), (21, 6),
24
- (30, 6), (22, 2), (22, 3), (26, 8), (23, 8), (27, 10),
25
- (30, 9), (17, 6), (12, 8), (11, 7), (8, 6), (-2, 10), (4, 9)
26
- ]
27
- # fmt: on
28
- frames: List[IMG] = []
29
- for i in range(19):
30
- frame = BuildImage.open(img_dir / f"{i}.png")
31
- frame.paste(user_head, user_locs[i], alpha=True)
32
- frame.paste(self_head, self_locs[i], alpha=True)
33
- frames.append(frame.image)
34
- return save_gif(frames, 0.05)
35
-
36
-
37
- add_meme("fencing", fencing, min_images=2, max_images=2, keywords=["击剑", "🤺"])
 
spaces/ClipHamper/stable-diffusion-webui/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Stable Diffusion Webui
3
- emoji: 🚀
4
- colorFrom: blue
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.44.4
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/image_list.py DELETED
@@ -1,72 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
- from __future__ import division
3
-
4
- import torch
5
-
6
-
7
- class ImageList(object):
8
- """
9
- Structure that holds a list of images (of possibly
10
- varying sizes) as a single tensor.
11
- This works by padding the images to the same size,
12
- and storing in a field the original sizes of each image
13
- """
14
-
15
- def __init__(self, tensors, image_sizes):
16
- """
17
- Arguments:
18
- tensors (tensor)
19
- image_sizes (list[tuple[int, int]])
20
- """
21
- self.tensors = tensors
22
- self.image_sizes = image_sizes
23
-
24
- def to(self, *args, **kwargs):
25
- cast_tensor = self.tensors.to(*args, **kwargs)
26
- return ImageList(cast_tensor, self.image_sizes)
27
-
28
-
29
- def to_image_list(tensors, size_divisible=0):
30
- """
31
- tensors can be an ImageList, a torch.Tensor or
32
- an iterable of Tensors. It can't be a numpy array.
33
- When tensors is an iterable of Tensors, it pads
34
- the Tensors with zeros so that they have the same
35
- shape
36
- """
37
- if isinstance(tensors, torch.Tensor) and size_divisible > 0:
38
- tensors = [tensors]
39
-
40
- if isinstance(tensors, ImageList):
41
- return tensors
42
- elif isinstance(tensors, torch.Tensor):
43
- # single tensor shape can be inferred
44
- if tensors.dim() == 3:
45
- tensors = tensors[None]
46
- assert tensors.dim() == 4
47
- image_sizes = [tensor.shape[-2:] for tensor in tensors]
48
- return ImageList(tensors, image_sizes)
49
- elif isinstance(tensors, (tuple, list)):
50
- max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
51
-
52
- # TODO Ideally, just remove this and let the model handle arbitrary
54
- # input sizes
54
- if size_divisible > 0:
55
- import math
56
-
57
- stride = size_divisible
58
- max_size = list(max_size)
59
- max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
60
- max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
61
- max_size = tuple(max_size)
62
-
63
- batch_shape = (len(tensors),) + max_size
64
- batched_imgs = tensors[0].new(*batch_shape).zero_()
65
- for img, pad_img in zip(tensors, batched_imgs):
66
- pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
67
-
68
- image_sizes = [im.shape[-2:] for im in tensors]
69
-
70
- return ImageList(batched_imgs, image_sizes)
71
- else:
72
- raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
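`to_image_list` zero-pads a batch of differently sized image tensors to a common shape, optionally rounding the spatial dimensions up to a stride multiple. A short illustration reusing the deleted helper:

```python
import torch

# Two CHW images of different spatial sizes.
imgs = [torch.rand(3, 480, 640), torch.rand(3, 512, 600)]

image_list = to_image_list(imgs, size_divisible=32)  # helper defined above
print(image_list.tensors.shape)  # torch.Size([2, 3, 512, 640])
print(image_list.image_sizes)    # [torch.Size([480, 640]), torch.Size([512, 600])]
```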
 
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/utils.py DELETED
@@ -1,424 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import io
9
- import json
10
- import logging
11
- import os
12
- import pickle
13
- import re
14
- import shutil
15
- import urllib
16
- import urllib.error
17
- import urllib.request
18
- from typing import Optional
19
- from urllib.parse import urlparse
20
-
21
- import numpy as np
22
- import pandas as pd
23
- import yaml
24
- from iopath.common.download import download
25
- from iopath.common.file_io import file_lock, g_pathmgr
26
- from video_llama.common.registry import registry
27
- from torch.utils.model_zoo import tqdm
28
- from torchvision.datasets.utils import (
29
- check_integrity,
30
- download_file_from_google_drive,
31
- extract_archive,
32
- )
33
-
34
-
35
- def now():
36
- from datetime import datetime
37
-
38
- return datetime.now().strftime("%Y%m%d%H%M")[:-1]
39
-
40
-
41
- def is_url(url_or_filename):
42
- parsed = urlparse(url_or_filename)
43
- return parsed.scheme in ("http", "https")
44
-
45
-
46
- def get_cache_path(rel_path):
47
- return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path))
48
-
49
-
50
- def get_abs_path(rel_path):
51
- return os.path.join(registry.get_path("library_root"), rel_path)
52
-
53
-
54
- def load_json(filename):
55
- with open(filename, "r") as f:
56
- return json.load(f)
57
-
58
-
59
- # The following are adapted from torchvision and vissl
60
- # torchvision: https://github.com/pytorch/vision
61
- # vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py
62
-
63
-
64
- def makedir(dir_path):
65
- """
66
- Create the directory if it does not exist.
67
- """
68
- is_success = False
69
- try:
70
- if not g_pathmgr.exists(dir_path):
71
- g_pathmgr.mkdirs(dir_path)
72
- is_success = True
73
- except BaseException:
74
- print(f"Error creating directory: {dir_path}")
75
- return is_success
76
-
77
-
78
- def get_redirected_url(url: str):
79
- """
80
- Given a URL, returns the URL it redirects to or the
81
- original URL in case of no indirection
82
- """
83
- import requests
84
-
85
- with requests.Session() as session:
86
- with session.get(url, stream=True, allow_redirects=True) as response:
87
- if response.history:
88
- return response.url
89
- else:
90
- return url
91
-
92
-
93
- def to_google_drive_download_url(view_url: str) -> str:
94
- """
95
- Utility function to transform a view URL of google drive
96
- to a download URL for google drive
97
- Example input:
98
- https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view
99
- Example output:
100
- https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp
101
- """
102
- splits = view_url.split("/")
103
- assert splits[-1] == "view"
104
- file_id = splits[-2]
105
- return f"https://drive.google.com/uc?export=download&id={file_id}"
106
-
107
-
108
- def download_google_drive_url(url: str, output_path: str, output_file_name: str):
109
- """
110
- Download a file from google drive
111
- Downloading an URL from google drive requires confirmation when
112
- the file of the size is too big (google drive notifies that
113
- anti-viral checks cannot be performed on such files)
114
- """
115
- import requests
116
-
117
- with requests.Session() as session:
118
-
119
- # First get the confirmation token and append it to the URL
120
- with session.get(url, stream=True, allow_redirects=True) as response:
121
- for k, v in response.cookies.items():
122
- if k.startswith("download_warning"):
123
- url = url + "&confirm=" + v
124
-
125
- # Then download the content of the file
126
- with session.get(url, stream=True, verify=True) as response:
127
- makedir(output_path)
128
- path = os.path.join(output_path, output_file_name)
129
- total_size = int(response.headers.get("Content-length", 0))
130
- with open(path, "wb") as file:
131
- from tqdm import tqdm
132
-
133
- with tqdm(total=total_size) as progress_bar:
134
- for block in response.iter_content(
135
- chunk_size=io.DEFAULT_BUFFER_SIZE
136
- ):
137
- file.write(block)
138
- progress_bar.update(len(block))
139
-
140
-
141
- def _get_google_drive_file_id(url: str) -> Optional[str]:
142
- parts = urlparse(url)
143
-
144
- if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
145
- return None
146
-
147
- match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
148
- if match is None:
149
- return None
150
-
151
- return match.group("id")
152
-
153
-
154
- def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
155
- with open(filename, "wb") as fh:
156
- with urllib.request.urlopen(
157
- urllib.request.Request(url, headers={"User-Agent": "vissl"})
158
- ) as response:
159
- with tqdm(total=response.length) as pbar:
160
- for chunk in iter(lambda: response.read(chunk_size), ""):
161
- if not chunk:
162
- break
163
- pbar.update(chunk_size)
164
- fh.write(chunk)
165
-
166
-
167
-def download_url(
-    url: str,
-    root: str,
-    filename: Optional[str] = None,
-    md5: Optional[str] = None,
-) -> None:
-    """Download a file from a url and place it in root.
-    Args:
-        url (str): URL to download file from
-        root (str): Directory to place downloaded file in
-        filename (str, optional): Name to save the file under.
-            If None, use the basename of the URL.
-        md5 (str, optional): MD5 checksum of the download. If None, do not check
-    """
-    root = os.path.expanduser(root)
-    if not filename:
-        filename = os.path.basename(url)
-    fpath = os.path.join(root, filename)
-
-    makedir(root)
-
-    # check if file is already present locally
-    if check_integrity(fpath, md5):
-        print("Using downloaded and verified file: " + fpath)
-        return
-
-    # expand redirect chain if needed
-    url = get_redirected_url(url)
-
-    # check if file is located on Google Drive
-    file_id = _get_google_drive_file_id(url)
-    if file_id is not None:
-        return download_file_from_google_drive(file_id, root, filename, md5)
-
-    # download the file
-    try:
-        print("Downloading " + url + " to " + fpath)
-        _urlretrieve(url, fpath)
-    except (urllib.error.URLError, IOError) as e:  # type: ignore[attr-defined]
-        if url[:5] == "https":
-            url = url.replace("https:", "http:")
-            print(
-                "Failed download. Trying https -> http instead."
-                " Downloading " + url + " to " + fpath
-            )
-            _urlretrieve(url, fpath)
-        else:
-            raise e
-
-    # check integrity of downloaded file
-    if not check_integrity(fpath, md5):
-        raise RuntimeError("File not found or corrupted.")
-
-
-def download_and_extract_archive(
-    url: str,
-    download_root: str,
-    extract_root: Optional[str] = None,
-    filename: Optional[str] = None,
-    md5: Optional[str] = None,
-    remove_finished: bool = False,
-) -> None:
-    download_root = os.path.expanduser(download_root)
-    if extract_root is None:
-        extract_root = download_root
-    if not filename:
-        filename = os.path.basename(url)
-
-    download_url(url, download_root, filename, md5)
-
-    archive = os.path.join(download_root, filename)
-    print("Extracting {} to {}".format(archive, extract_root))
-    extract_archive(archive, extract_root, remove_finished)
-
-
-def cache_url(url: str, cache_dir: str) -> str:
-    """
-    This implementation downloads the remote resource and caches it locally.
-    The resource will only be downloaded if not previously requested.
-    """
-    parsed_url = urlparse(url)
-    dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/")))
-    makedir(dirname)
-    filename = url.split("/")[-1]
-    cached = os.path.join(dirname, filename)
-    with file_lock(cached):
-        if not os.path.isfile(cached):
-            logging.info(f"Downloading {url} to {cached} ...")
-            cached = download(url, dirname, filename=filename)
-    logging.info(f"URL {url} cached in {cached}")
-    return cached
-
-
-# TODO (prigoyal): convert this into RAII-style API
-def create_file_symlink(file1, file2):
-    """
-    Simply create a symlink from file1 to file2.
-    Useful during model checkpointing to symlink to the
-    latest successful checkpoint.
-    """
-    try:
-        if g_pathmgr.exists(file2):
-            g_pathmgr.rm(file2)
-        g_pathmgr.symlink(file1, file2)
-    except Exception as e:
-        logging.info(f"Could NOT create symlink. Error: {e}")
-
-
-def save_file(data, filename, append_to_json=True, verbose=True):
-    """
-    Common i/o utility to handle saving data to various file formats.
-    Supported:
-        .pkl, .pickle, .npy, .json, .yaml
-    Specifically for .json, users have the option to either append (default)
-    or rewrite by passing in Boolean value to append_to_json.
-    """
-    if verbose:
-        logging.info(f"Saving data to file: {filename}")
-    file_ext = os.path.splitext(filename)[1]
-    if file_ext in [".pkl", ".pickle"]:
-        with g_pathmgr.open(filename, "wb") as fopen:
-            pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)
-    elif file_ext == ".npy":
-        with g_pathmgr.open(filename, "wb") as fopen:
-            np.save(fopen, data)
-    elif file_ext == ".json":
-        if append_to_json:
-            with g_pathmgr.open(filename, "a") as fopen:
-                fopen.write(json.dumps(data, sort_keys=True) + "\n")
-                fopen.flush()
-        else:
-            with g_pathmgr.open(filename, "w") as fopen:
-                fopen.write(json.dumps(data, sort_keys=True) + "\n")
-                fopen.flush()
-    elif file_ext == ".yaml":
-        with g_pathmgr.open(filename, "w") as fopen:
-            dump = yaml.dump(data)
-            fopen.write(dump)
-            fopen.flush()
-    else:
-        raise Exception(f"Saving {file_ext} is not supported yet")
-
-    if verbose:
-        logging.info(f"Saved data to file: {filename}")
-
-
-def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False):
-    """
-    Common i/o utility to handle loading data from various file formats.
-    Supported:
-        .pkl, .pickle, .npy, .json, .yaml, .csv, .txt
-    For the npy files, we support reading the files in mmap_mode.
-    If the mmap_mode of reading is not successful, we load data without the
-    mmap_mode.
-    """
-    if verbose:
-        logging.info(f"Loading data from file: {filename}")
-
-    file_ext = os.path.splitext(filename)[1]
-    if file_ext == ".txt":
-        with g_pathmgr.open(filename, "r") as fopen:
-            data = fopen.readlines()
-    elif file_ext in [".pkl", ".pickle"]:
-        with g_pathmgr.open(filename, "rb") as fopen:
-            data = pickle.load(fopen, encoding="latin1")
-    elif file_ext == ".npy":
-        if mmap_mode:
-            try:
-                with g_pathmgr.open(filename, "rb") as fopen:
-                    data = np.load(
-                        fopen,
-                        allow_pickle=allow_pickle,
-                        encoding="latin1",
-                        mmap_mode=mmap_mode,
-                    )
-            except ValueError as e:
-                logging.info(
-                    f"Could not mmap {filename}: {e}. Trying without g_pathmgr"
-                )
-                data = np.load(
-                    filename,
-                    allow_pickle=allow_pickle,
-                    encoding="latin1",
-                    mmap_mode=mmap_mode,
-                )
-                logging.info("Successfully loaded without g_pathmgr")
-            except Exception:
-                logging.info("Could not mmap without g_pathmgr. Trying without mmap")
-                with g_pathmgr.open(filename, "rb") as fopen:
-                    data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
-        else:
-            with g_pathmgr.open(filename, "rb") as fopen:
-                data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
-    elif file_ext == ".json":
-        with g_pathmgr.open(filename, "r") as fopen:
-            data = json.load(fopen)
-    elif file_ext == ".yaml":
-        with g_pathmgr.open(filename, "r") as fopen:
-            data = yaml.load(fopen, Loader=yaml.FullLoader)
-    elif file_ext == ".csv":
-        with g_pathmgr.open(filename, "r") as fopen:
-            data = pd.read_csv(fopen)
-    else:
-        raise Exception(f"Reading from {file_ext} is not supported yet")
-    return data
-
-
-def abspath(resource_path: str):
-    """
-    Make a path absolute, but take into account prefixes like
-    "http://" or "manifold://"
-    """
-    regex = re.compile(r"^\w+://")
-    if regex.match(resource_path) is None:
-        return os.path.abspath(resource_path)
-    else:
-        return resource_path
-
-
-def makedir(dir_path):
-    """
-    Create the directory if it does not exist.
-    """
-    is_success = False
-    try:
-        if not g_pathmgr.exists(dir_path):
-            g_pathmgr.mkdirs(dir_path)
-        is_success = True
-    except BaseException:
-        logging.info(f"Error creating directory: {dir_path}")
-    return is_success
-
-
-def is_url(input_url):
-    """
-    Check if an input string is a url. Look for http(s):// and ignore the case.
-    """
-    is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None
-    return is_url
-
-
-def cleanup_dir(dir):
-    """
-    Utility for deleting a directory. Useful for cleaning the storage space
-    that contains various training artifacts like checkpoints, data etc.
-    """
-    if os.path.exists(dir):
-        logging.info(f"Deleting directory: {dir}")
-        shutil.rmtree(dir)
-    logging.info(f"Deleted contents of directory: {dir}")
-
-
-def get_file_size(filename):
-    """
-    Given a file, get the size of the file in MB.
-    """
-    size_in_mb = os.path.getsize(filename) / float(1024**2)
-    return size_in_mb