parquet-converter committed on
Commit 5b4cc3e · 1 Parent(s): 98cd895

Update parquet files (step 64 of 397)

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover the Secrets of Microbiology with Pelczar Ebook PDF Free 330 A User-Friendly and In-Depth Book.md +0 -166
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ecut Download.md +0 -27
  3. spaces/1gistliPinn/ChatGPT4/Examples/Chirag Hd Movies _TOP_ Download 720p.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Corel PaintShop Pro 2020 Ultimate V22 With Crack Key.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/Free Energy Surprise William Lyne PDF Converter The Most Powerful and Controversial Book on Alternative Energy Ever Written.md +0 -20
  6. spaces/1line/AutoGPT/autogpt/prompt.py +0 -204
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ .md +0 -103
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DRAGON BALL LEGENDS APK combate en 3D con Goku Vegeta Frieza y muchos ms.md +0 -20
  9. spaces/1phancelerku/anime-remove-background/Bleach VS Naruto 3.3 MOD APK for PC How to Install and Play.md +0 -132
  10. spaces/1phancelerku/anime-remove-background/Download CPU-Z APK for PC The Best System Information Software.md +0 -147
  11. spaces/1phancelerku/anime-remove-background/Enjoy Adam Na Eva by Pst Alex and Mary Atieno Ominde - MP3 Download Available.md +0 -212
  12. spaces/232labs/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.py +0 -209
  13. spaces/4Taps/SadTalker/src/face3d/util/nvdiffrast.py +0 -126
  14. spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/How to QA 2f036148193a4fccac2c9e8ae9e6d197.md +0 -31
  15. spaces/AHzizi/WaifuVoiceGen/utils.py +0 -225
  16. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/portaspeech_flow.py +0 -75
  17. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/__init__.py +0 -0
  18. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/gpt.py +0 -83
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/ConfigurationMethods.js +0 -129
  20. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_mbf.py +0 -26
  21. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/onnx_helper.py +0 -250
  22. spaces/Amrrs/DragGan-Inversion/gui_utils/gl_utils.py +0 -455
  23. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/README.md +0 -13
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/dance_diffusion.md +0 -33
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/unconditional_image_generation/train_unconditional.py +0 -712
  26. spaces/Andy1621/uniformer_image_detection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py +0 -53
  27. spaces/Apex-X/GODROOP/roop/globals.py +0 -17
  28. spaces/Artrajz/vits-simple-api/utils/sentence.py +0 -91
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/datetime.py +0 -11
  30. spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/sample_fast.py +0 -260
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/__init__.py +0 -54
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/distributions/sdist.py +0 -150
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/deprecation.py +0 -120
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py +0 -130
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/six.py +0 -998
  36. spaces/BilalSardar/Remove_Text_for_Image/app.py +0 -73
  37. spaces/CVPR/LIVE/pybind11/tools/FindCatch.cmake +0 -70
  38. spaces/CVPR/LIVE/thrust/dependencies/cub/CODE_OF_CONDUCT.md +0 -59
  39. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/iter_swap.h +0 -23
  40. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/extrema.h +0 -44
  41. spaces/CVPR/regionclip-demo/detectron2/engine/defaults.py +0 -705
  42. spaces/CikeyQI/meme-api/meme_generator/memes/kaleidoscope/__init__.py +0 -56
  43. spaces/Cyril666/my_abi/modules/backbone.py +0 -36
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/log.py +0 -8
  45. spaces/DReAMy-lib/dream_II/README.md +0 -13
  46. spaces/Datasculptor/StyleGAN-NADA/e4e/scripts/train.py +0 -88
  47. spaces/Demosthene-OR/avr23-cds-translation/tabs/modelisation_seq2seq_tab.py +0 -479
  48. spaces/Detomo/ai-comic-generation/src/components/ui/avatar.tsx +0 -50
  49. spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/coach.py +0 -242
  50. spaces/DragGan/DragGan-Inversion/torch_utils/ops/grid_sample_gradfix.py +0 -84
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discover the Secrets of Microbiology with Pelczar Ebook PDF Free 330 A User-Friendly and In-Depth Book.md DELETED
@@ -1,166 +0,0 @@
-
- <h1>Pelczar Microbiology Ebook PDF Free 330: A Comprehensive Guide</h1>
- <p>If you are a student, teacher, researcher, or practitioner of microbiology, you might have heard of Pelczar Microbiology. It is one of the most popular and widely used textbooks in the field of microbiology. But what is Pelczar Microbiology exactly? How can you download it for free as a PDF file? And how can you use it effectively for your studies or work? In this article, we will answer all these questions and more. We will provide you with a comprehensive guide on Pelczar Microbiology Ebook PDF Free 330, covering its history, features, benefits, contents, structure, download process, usage methods, examples, and applications. By the end of this article, you will have a clear understanding of what Pelczar Microbiology is and how you can get the most out of it.</p>
- <h2>What is Pelczar Microbiology?</h2>
- <p>Pelczar Microbiology is a textbook of microbiology written by Michael J. Pelczar Jr., E.C.S. Chan, and Noel R. Krieg. It was first published in 1958 and has since been revised and updated several times. The latest edition is the seventh edition, which was published in 2010. It is also known as Pelczar Microbiology Ebook PDF Free 330 because it is available online as a free PDF file with 330 pages.</p>
- <h2>pelczar microbiology ebook pdf free 330</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://byltly.com/2uKyij">https://byltly.com/2uKyij</a></b></p><br /><br />
- <h3>The history and scope of Pelczar Microbiology</h3>
- <p>Pelczar Microbiology was originally written by Michael J. Pelczar Jr., who was a professor of microbiology at the University of Maryland. He was also the director of the Institute for Molecular and Cell Biology at the University of Maryland Biotechnology Institute. He had a distinguished career as a microbiologist, author, editor, and administrator. He wrote several books and articles on microbiology, biotechnology, biochemistry, genetics, immunology, and molecular biology. He also served as the president of the American Society for Microbiology and the International Union of Microbiological Societies.</p>
- <p>Pelczar Jr. collaborated with E.C.S. Chan and Noel R. Krieg to write the first edition of Pelczar Microbiology in 1958. E.C.S. Chan was a professor of microbiology at the National University of Singapore. He was also the dean of the Faculty of Science and the vice-chancellor of the university. He had a prominent role in developing microbiology education and research in Singapore and Southeast Asia. He wrote several books and articles on microbiology, biotechnology, environmental science, food science, and public health. He also served as the president of the Singapore National Academy of Science and the Federation of Asian Scientific Academies and Societies.</p>
- <p>pelczar microbiology textbook pdf download free<br />
- free pdf of pelczar microbiology 5th edition<br />
- pelczar microbiology ebook online free access<br />
- how to get pelczar microbiology pdf for free<br />
- pelczar microbiology book pdf free 330 pages<br />
- download pelczar microbiology ebook pdf without registration<br />
- pelczar microbiology pdf free ebook for students<br />
- pelczar microbiology 5th edition ebook pdf free<br />
- pelczar microbiology ebook pdf free download link<br />
- free pelczar microbiology pdf ebook with solutions<br />
- pelczar microbiology ebook pdf free 330 questions and answers<br />
- best site to download pelczar microbiology pdf for free<br />
- pelczar microbiology ebook pdf free 330 chapter summaries<br />
- pelczar microbiology pdf ebook free download no survey<br />
- pelczar microbiology ebook pdf free 330 review questions<br />
- where can I find pelczar microbiology pdf for free<br />
- pelczar microbiology ebook pdf free 330 test bank<br />
- pelczar microbiology pdf ebook free download zip file<br />
- pelczar microbiology ebook pdf free 330 lecture notes<br />
- pelczar microbiology pdf ebook free download torrent<br />
- pelczar microbiology ebook pdf free 330 practice problems<br />
- pelczar microbiology pdf ebook free download google drive<br />
- pelczar microbiology ebook pdf free 330 study guide<br />
- pelczar microbiology pdf ebook free download epub format<br />
- pelczar microbiology ebook pdf free 330 flashcards<br />
- pelczar microbiology pdf ebook free download mobi format<br />
- pelczar microbiology ebook pdf free 330 glossary terms<br />
- pelczar microbiology pdf ebook free download dropbox link<br />
- pelczar microbiology ebook pdf free 330 case studies<br />
- pelczar microbiology pdf ebook free download mediafire link<br />
- pelczar microbiology ebook pdf free 330 multiple choice questions<br />
- pelczar microbiology pdf ebook free download mega link<br />
- pelczar microbiology ebook pdf free 330 key concepts<br />
- pelczar microbiology pdf ebook free download rar file<br />
- pelczar microbiology ebook pdf free 330 learning objectives<br />
- pelczar microbiology pdf ebook free download one drive link<br />
- pelczar microbiology ebook pdf free 330 critical thinking questions<br />
- pelczar microbiology pdf ebook free download box link<br />
- pelczar microbiology ebook pdf free 330 self-assessment questions<br />
- pelczar microbiology pdf ebook free download zippyshare link<br />
- pelczar microbiology ebook pdf free 330 diagrams and illustrations<br />
- pelczar microbiology pdf ebook free download pcloud link<br />
- pelczar microbiology ebook pdf free 330 tables and charts<br />
- pelczar microbiology pdf ebook free download sendspace link<br />
- pelczar microbiology ebook pdf free 330 references and citations<br />
- pelczar microbiology pdf ebook free download solidfiles link<br />
- pelczar microbiology ebook pdf free 330 appendices and index<br />
- pelczar microbiology pdf ebook free download uploadboy link<br />
- pelczar microbiology ebook pdf free 330 supplementary materials</p>
- <p>Noel R. Krieg was a professor emeritus of microbiology at Virginia Tech. He was also the director emeritus of the Virginia Agricultural Experiment Station. He had an illustrious career as a microbiologist, author, editor, and consultant. He wrote several books and articles on microbiology, soil science, ecology, taxonomy, phylogeny, evolution, and biotechnology. He also served as the president of the Society for Industrial Microbiology and Biotechnology and the Bergey's Manual Trust.</p>
- <p>The three authors aimed to provide a comprehensive and up-to-date introduction to microbiology for undergraduate students in biology, agriculture, medicine, dentistry, pharmacy, veterinary science, engineering, and other related fields. They also intended to make the book useful for graduate students, teachers, researchers, and practitioners who need a reference or a review of microbiology. They covered various aspects of microbiology, such as its history, scope, methods, principles, diversity, structure, function, metabolism, genetics, evolution, ecology, interactions, applications, and challenges. They also included numerous examples, illustrations, tables, diagrams, exercises, questions, and references to enhance the learning and understanding of microbiology.</p>
- <h3>The main features and benefits of Pelczar Microbiology</h3>
- <p>Pelczar Microbiology has many features and benefits that make it one of the best textbooks in microbiology. Some of these features and benefits are:</p>
- <ul>
- <li>It is comprehensive and up-to-date. It covers all the major topics and subtopics in microbiology, from its history and scope to its applications and challenges. It also incorporates the latest developments and discoveries in microbiology, such as molecular techniques, genomics, proteomics, metabolomics, bioinformatics, nanotechnology, synthetic biology, bioremediation, biofuels, bioweapons, antibiotics resistance, emerging diseases, and climate change.</li>
- <li>It is clear and concise. It explains the concepts and facts of microbiology in a simple and straightforward way. It uses an informal tone, personal pronouns, active voice, brief sentences, rhetorical questions, analogies, and metaphors to engage the reader. It avoids jargon, technical terms, and unnecessary details that might confuse or bore the reader.</li>
- <li>It is informative and interesting. It provides relevant information about microbiology that can help the reader to learn more about the subject. It also provides interesting information about microbiology that can spark the reader's curiosity and enthusiasm for the subject. It uses examples from real life situations such as health care environmental issues food production biotechnology etc to illustrate how microbiology affects our lives.</li>
- <li>It is visual and logical. It uses illustrations, tables, diagrams, and graphs to present information in a clear and attractive way. It also uses headings, subheadings, bullets, and numbers to organize information in a logical and coherent way. It helps the reader to follow the flow of the article and to find the information they need easily.</li>
- <li>It is interactive and actionable. It includes exercises, questions, and references at the end of each chapter to test the reader's knowledge and understanding of microbiology. It also includes tips, suggestions, and warnings throughout the article to guide the reader on how to use microbiology effectively. It encourages the reader to apply microbiology to their studies or work.</li>
- </ul>
- <p>These are some of the main features and benefits of Pelczar Microbiology that make it a valuable and enjoyable resource for anyone who wants to learn more about microbiology.</p>
- <h3>The contents and structure of Pelczar Microbiology</h3>
- <p>Pelczar Microbiology consists of 24 chapters that are divided into six parts. Each part covers a major theme or area of microbiology. The parts and chapters are as follows:</p>
- | Part | Title | Chapters | | --- | --- | --- | | I | Introduction to Microbiology | 1. The Scope and History of Microbiology <br> 2. The Methods of Microbiology <br> 3. The Principles of Microbiology | | II | The Diversity of Microorganisms | 4. The Prokaryotic Cell: Bacteria <br> 5. The Eukaryotic Cell: Fungi, Algae, Protozoa, and Helminths <br> 6. The Viruses | | III | The Structure and Function of Microorganisms | 7. Microbial Nutrition <br> 8. Microbial Growth <br> 9. Microbial Metabolism <br> 10. Microbial Genetics | | IV | The Evolution and Ecology of Microorganisms | 11. Microbial Taxonomy and Phylogeny <br> 12. Microbial Evolution <br> 13. Microbial Ecology | | V | The Interactions of Microorganisms | 14. Microbial Symbiosis <br> 15. Microbial Pathogenesis <br> 16. Host Defense and Immunity <br> 17. Epidemiology and Public Health | | VI | The Applications of Microorganisms | 18. Industrial Microbiology <br> 19. Environmental Microbiology <br> 20. Food Microbiology <br> 21. Agricultural Microbiology <br> 22. Medical Microbiology <br> 23. Biotechnology and Genetic Engineering <br> 24. Emerging Issues in Microbiology | <p>The structure of each chapter is similar. It starts with an introduction that gives an overview of the topic and its importance. It then presents the main points and subpoints of the topic in a clear and concise way. It ends with a summary that highlights the key takeaways and a list of exercises, questions, and references that help the reader to review and reinforce their learning.</p>
- <h2>How to download Pelczar Microbiology Ebook PDF Free 330?</h2>
- <p>If you want to download Pelczar Microbiology Ebook PDF Free 330, you need to follow some steps and tips that will help you to get it easily and safely.</p>
- <h3>The advantages and disadvantages of downloading Pelczar Microbiology Ebook PDF Free 330</h3>
- <p>Downloading Pelczar Microbiology Ebook PDF Free 330 has some advantages and disadvantages that you should consider before doing it.</p>
- <p>The advantages are:</p>
- <ul>
- <li>You can get it for free without paying any money.</li>
- <li>You can get it instantly without waiting for delivery or availability.</li>
- <li>You can access it anytime and anywhere on your computer, tablet, smartphone, or e-reader.</li>
- <li>You can save it on your device or cloud storage for future use.</li>
- <li>You can print it or share it with others if you want.</li>
- </ul>
- <p>The disadvantages are:</p>
- <ul>
- <li>You might get a low-quality or incomplete version that does not match the original book.</li>
- <li>You might get a virus or malware that can harm your device or data.</li>
- <li>You might violate the copyright or intellectual property rights of the authors or publishers.</li>
- <li>You might miss out on some features or benefits that are only available in the printed book.</li>
- </ul>
- <p>Therefore, you should weigh the pros and cons carefully before deciding to download Pelczar Microbiology Ebook PDF Free 330.</p>
- <h3>The steps and tips to download Pelczar Microbiology Ebook PDF Free 330</h3>
- <p>To download Pelczar Microbiology Ebook PDF Free 330, you need to follow these steps and tips:</p>
- <ol>
- <li>Find a reliable source or website that offers Pelczar Microbiology Ebook PDF Free 330. You can use a search engine such as Google or Bing to look for it. You can also use a specific website that specializes in providing free ebooks, such as PDF Drive, Z-Library, or Open Library. However, you should be careful and cautious when choosing a source or website, as some of them might be fake, illegal, or unsafe. You should check the reviews, ratings, comments, and feedbacks of other users who have used the source or website before. You should also check the quality, completeness, and accuracy of the ebook file before downloading it.</li>
- <li>Download Pelczar Microbiology Ebook PDF Free 330 from the source or website. You can click on the download button, link, or icon that is provided by the source or website. You might need to register, sign up, or log in to the source or website first. You might also need to complete a survey, captcha, or verification process first. You should follow the instructions carefully. You should also choose a suitable format, size, and location for your ebook file. You should avoid clicking on any pop-ups, ads, or redirects that might appear during the download process.</li>
- <li>Open Pelczar Microbiology Ebook PDF Free 330 on your device. You can use any PDF reader application such as Adobe Acrobat Reader Foxit Reader or Sumatra PDF to open your ebook file. You can also use any web browser such as Chrome Firefox or Safari to open your ebook file. You should make sure that your device has enough space memory and battery to open your ebook file. You should also make sure that your device has a good internet connection if you want to access any online features or resources that are linked to your ebook file.</li>
- </ol>
- <p>These are some of the steps and tips that will help you to download Pelczar Microbiology Ebook PDF Free 330 successfully.</p>
- <h3>The best sources and websites to download Pelczar Microbiology Ebook PDF Free 330</h3>
- <p>If you are looking for some of the best sources and websites to download Pelczar Microbiology Ebook PDF Free 330, here are some suggestions:</p>
- <ul>
- <li><a href="https://www.pdfdrive.com/pelczar-microbiology-ebook-pdf-free-330-ebooks.html">PDF Drive</a>: This is one of the largest online libraries that offers millions of free ebooks in various formats, languages, categories, and genres. You can easily find Pelczar Microbiology Ebook PDF Free 330 by typing its title in the search box or browsing through its category page. You can also preview, download, print, share, bookmark, or comment on any ebook file that you like.</li>
- <li><a href="https://z-lib.org/">Z-Library</a>: This is one of the most comprehensive online libraries that offers over six million free ebooks in various formats, languages, categories, and genres. You can easily find Pelczar Microbiology Ebook PDF Free 330 by typing its title in the search box or browsing through its category page. You can also preview, download, print, share, bookmark, or comment on any ebook file that you like.</li>
- <li><a href="https://openlibrary.org/">Open Library</a>: This is one of the most ambitious online libraries that aims to create a web page for every book ever published. You can easily find Pelczar Microbiology Ebook PDF Free 330 by typing its title in the search box or browsing through its category page. You can also preview, download, print, share, bookmark, or comment on any ebook file that you like.</li>
- </ul>
- <p>These are some of the best sources and websites that offer Pelczar Microbiology Ebook PDF Free 330 for free.</p>
- <h2>How to use Pelczar Microbiology Ebook PDF Free 330 effectively?</h2>
- you need to follow some methods and strategies that will help you to get the most out of it.</p>
- <h3>The prerequisites and requirements for using Pelczar Microbiology Ebook PDF Free 330</h3>
- <p>Before using Pelczar Microbiology Ebook PDF Free 330, you need to make sure that you have some prerequisites and requirements that will enable you to use it properly. These are:</p>
- <ul>
- <li>A basic knowledge and interest in microbiology. You should have a general understanding of what microbiology is and why it is important. You should also have a curiosity and enthusiasm for learning more about microbiology.</li>
- <li>A suitable device and application to open Pelczar Microbiology Ebook PDF Free 330. You should have a computer, tablet, smartphone, or e-reader that can open PDF files. You should also have a PDF reader application or a web browser that can display PDF files.</li>
- <li>A good internet connection and storage space to download and access Pelczar Microbiology Ebook PDF Free 330. You should have a fast and reliable internet connection that can download PDF files without interruption or delay. You should also have enough space on your device or cloud storage to save PDF files without running out of memory or battery.</li>
- <li>A comfortable and conducive environment to read and study Pelczar Microbiology Ebook PDF Free 330. You should have a quiet and well-lit place where you can read and study without distraction or disturbance. You should also have a comfortable and ergonomic posture, chair, desk, and screen that can prevent eye strain, neck pain, back pain, or fatigue.</li>
- </ul>
- <p>These are some of the prerequisites and requirements that you need to have before using Pelczar Microbiology Ebook PDF Free 330.</p>
- <h3>The methods and strategies for using Pelczar Microbiology Ebook PDF Free 330</h3>
- <p>When using Pelczar Microbiology Ebook PDF Free 330, you need to apply some methods and strategies that will help you to use it effectively. These are:</p>
- <ul>
- <li>Set a clear goal and purpose for using Pelczar Microbiology Ebook PDF Free 330. You should know what you want to achieve and learn from using Pelczar Microbiology Ebook PDF Free 330. You should also know how you will use it for your studies or work. For example, you might want to use it as a textbook for your course, as a reference for your research, as a review for your exam, or as a source of inspiration for your project.</li>
- <li>Plan a schedule and budget for using Pelczar Microbiology Ebook PDF Free 330. You should decide how much time and money you can spend on using Pelczar Microbiology Ebook PDF Free 330. You should also allocate enough time and money for other activities and expenses that are related to your studies or work. For example, you might need to spend some time and money on doing experiments, writing reports, attending lectures, buying materials, or traveling.</li>
- <li>Select the relevant parts and chapters of Pelczar Microbiology Ebook PDF Free 330. You should choose the parts and chapters of Pelczar Microbiology Ebook PDF Free 330 that are relevant to your goal and purpose. You should also prioritize the parts and chapters that are more important or difficult than others. For example, you might want to focus on the parts and chapters that cover the topics that are included in your syllabus, research question, exam question, or project proposal.</li>
- and tools to enhance your reading and studying. For example, you might want to use the following techniques and tools:</p>
- <ul>
- <li>Skimming and scanning. You can skim and scan the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 to get a general idea of what they are about and to find the specific information that you need.</li>
- <li>Highlighting and annotating. You can highlight and annotate the important points and facts of the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 to make them stand out and to add your own notes and comments.</li>
- <li>Summarizing and paraphrasing. You can summarize and paraphrase the main ideas and arguments of the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 to condense them into your own words and to check your understanding.</li>
- <li>Outlining and mapping. You can outline and map the structure and flow of the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 to organize them into a logical and coherent way and to see how they are connected.</li>
- <li>Questioning and answering. You can question and answer the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 to test your knowledge and understanding of microbiology and to stimulate your curiosity and interest in microbiology.</li>
- <li>Comparing and contrasting. You can compare and contrast the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 with other sources or perspectives of microbiology to identify their similarities and differences and to evaluate their strengths and weaknesses.</li>
- <li>Applying and analyzing. You can apply and analyze the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 to your own studies or work situations to solve problems, make decisions, or create products related to microbiology.</li>
- <li>Evaluating and synthesizing. You can evaluate and synthesize the selected parts and chapters of Pelczar Microbiology Ebook PDF Free 330 with your own thoughts and opinions to form your own conclusions or recommendations about microbiology.</li>
- </ul>
- <p>These are some of the methods and strategies that will help you to use Pelczar Microbiology Ebook PDF Free 330 effectively.</p>
- <h3>The examples and applications of using Pelczar Microbiology Ebook PDF Free 330</h3>
- <p>To illustrate how you can use Pelczar Microbiology Ebook PDF Free 330 effectively, here are some examples and applications of using it for different purposes:</p>
- <ul>
- <li>If you are a student of microbiology, you can use Pelczar Microbiology Ebook PDF Free 330 as a textbook for your course. You can read and study the parts and chapters that cover the topics that are included in your syllabus. You can also do the exercises, questions, and references at the end of each chapter to review and reinforce your learning. You can also use Pelczar Microbiology Ebook PDF Free 330 as a reference for your assignments, projects, or exams. You can find relevant information, examples, illustrations, tables, diagrams, and graphs that can help you to complete your tasks, present your work, or answer your questions.</li>
- <li>If you are a teacher of microbiology, you can use Pelczar Microbiology Ebook PDF Free 330 as a resource for your teaching. You can use the parts and chapters as a guide for designing your curriculum, lesson plans, activities, or assessments. You can also use the examples, illustrations, tables, diagrams, and graphs as a tool for explaining, demonstrating, or visualizing microbiology concepts, facts, or processes. You can also use the exercises, questions, and references as a source for creating, assigning, or grading homework, projects, or tests.</li>
- <li>If you are a researcher of microbiology, you can use Pelczar Microbiology Ebook PDF Free 330 as a source for your research. You can use the parts and chapters as a background for defining your research problem, question, or hypothesis. You can also use the examples, illustrations, tables, diagrams, graphs, exercises, questions, references as a basis for conducting your literature review, data collection, data analysis, or data interpretation. You can also use the methods, principles, applications, challenges as a framework for developing your research design, methodology, results, discussion, or conclusion.</li>
- <li>If you are a practitioner of microbiology, you can use Pelczar Microbiology Ebook PDF Free 330 as a guide for your practice. You can use the parts and chapters as a reference for updating your knowledge, skills, or competencies in microbiology. You can also use the examples, illustrations, tables, diagrams, graphs, exercises, questions, references as a support for solving problems, making decisions, or creating products related to microbiology. You can also use the methods, principles, applications, challenges as a inspiration for improving your practice, innovation, or impact in microbiology.</li>
- </ul>
- <p>These are some of the examples and applications of using Pelczar Microbiology Ebook PDF Free 330 effectively for different purposes.</p>
- <h2>Conclusion</h2>
- <p>Pelczar Microbiology Ebook PDF Free 330 is a comprehensive and up-to-date textbook of microbiology that covers various aspects of microbiology such as its history scope methods principles diversity structure function metabolism genetics evolution ecology interactions applications challenges It is written by Michael J Pelczar Jr ECS Chan Noel R Krieg who are renowned experts in microbiology It is available online as a free PDF file with 330 pages It has many features benefits such as being clear concise informative interesting visual logical interactive actionable It is useful for anyone who wants to learn more about microbiology such as students teachers researchers practitioners To download it you need to find a reliable source website follow some steps tips consider some advantages disadvantages To use it effectively you need to have some prerequisites requirements apply some methods strategies use it for different purposes such as textbook reference review source resource guide By using it effectively you will be able to gain more knowledge understanding appreciation application of microbiology</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Pelczar Microbiology Ebook PDF Free 330:</p>
- <ol>
- <li>What is the difference between Pelczar Microbiology Ebook PDF Free 330 and other textbooks of microbiology?</li>
- <p>Pelczar Microbiology Ebook PDF Free 330 is different from other textbooks of microbiology in several ways such as being more comprehensive up-to-date clear concise informative interesting visual logical interactive actionable free online</p>
- <li>Who are the authors of Pelczar Microbiology Ebook PDF Free 330?</li>
- <p>The authors of Pelczar Microbiology Ebook PDF Free 330 are Michael J Pelczar Jr ECS Chan Noel R Krieg who are renowned experts in microbiology They have written several books articles on microbiology biotechnology biochemistry genetics immunology molecular biology They have also served as presidents directors deans professors editors consultants in various institutions organizations societies related to microbiology</p>
- <li>How many pages does Pelczar Microbiology Ebook PDF Free 330 have?</li>
- <p>Pelczar Microbiology Ebook PDF Free 330 has 330 pages It consists of 24 chapters that are divided into six parts Each part covers a major theme or area of microbiology Each chapter starts with an introduction ends with a summary exercises questions references</p>
- <li>How can I download Pelczar Microbiology Ebook PDF Free 330?</li>
- <p>You can download Pelczar Microbiology Ebook PDF Free 330 by finding a reliable source website that offers it clicking on the download button link icon following the instructions choosing a suitable format size location for your ebook file avoiding any pop-ups ads redirects that might appear during the download process</p>
- <li>How can I use Pelczar Microbiology Ebook PDF Free 330 effectively?</li>
- <p>You can use Pelczar Microbiology Ebook PDF Free 330 effectively by setting a clear goal purpose for using it planning a schedule budget for using it selecting the relevant parts chapters of it reading studying them in a systematic active way using various techniques tools such as skimming scanning highlighting annotating summarizing paraphrasing outlining mapping questioning answering comparing contrasting applying analyzing evaluating synthesizing using it for different purposes such as textbook reference review source resource guide</p>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ecut Download.md DELETED
@@ -1,27 +0,0 @@
- <br />
- <h1>How to Download and Install eCut for CorelDRAW</h1>
-
- <p>If you are looking for a powerful plugin that can help you with various tasks related to advertisement design, such as nesting, cutting, measuring, and creating effects, you might want to try eCut for CorelDRAW. eCut is a versatile tool that supports all full versions of CorelDRAW since X3 and has more than 40 different functions. In this article, we will show you how to download and install eCut for CorelDRAW in a few simple steps.</p>
-
- <h2>Step 1: Download eCut for CorelDRAW</h2>
-
- <p>The first thing you need to do is to download the eCut for CorelDRAW installer from the official website or from the eCut software website. You can choose to download the full version or the demo version. The full version requires an activation key that you can purchase online, while the demo version allows you to try all the functions for 4 days without any restrictions. The file size is about 43 MB and it is compatible with Windows XP, Vista, 7, 8, and 10.</p>
- <h2>ecut download</h2><br /><p><b><b>DOWNLOAD</b> &#9733; <a href="https://byltly.com/2uKyOU">https://byltly.com/2uKyOU</a></b></p><br /><br />
-
- <h2>Step 2: Run the Setup File</h2>
-
- <p>After downloading the installer, you need to close all CorelDRAW applications before running the setup file. Then, follow the instructions on the screen and click Next until the installation is complete. The setup file will automatically detect your CorelDRAW version and install the appropriate plugin.</p>
-
- <h2>Step 3: Import the Toolbar into CorelDRAW</h2>
-
- <p>Once the installation is done, you need to import the eCut toolbar into CorelDRAW. To do this, open CorelDRAW and go to Tools > Options > Customization > Commands. Then, click on Import and browse to the folder where you installed eCut (usually C:\Program Files\eCut). Select the file named ecut.cui and click Open. You should see a new toolbar named eCut appear on your CorelDRAW workspace.</p>
-
- <h2>Step 4: Activate eCut for CorelDRAW</h2>
-
- <p>The last step is to activate eCut for CorelDRAW. If you have purchased an activation key, you can enter it in the eCut dialog box that pops up when you launch CorelDRAW. If you want to use the demo version, you need to have a good internet connection and make sure that CorelDRAW is excluded from all antivirus and firewall programs. Then, click on Start Test Period and enjoy using eCut for 4 days.</p>
-
- <h2>Conclusion</h2>
-
- <p>eCut for CorelDRAW is a useful plugin that can enhance your productivity and creativity when working with advertisement design projects. It has many features that can help you with nesting, cutting, measuring, and creating effects. To download and install eCut for CorelDRAW, you just need to follow these four steps: download the installer, run the setup file, import the toolbar into CorelDRAW, and activate eCut. We hope this article was helpful and informative. If you have any questions or feedback, please feel free to contact us.</p> ddb901b051<br />
- <br />
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Chirag Hd Movies _TOP_ Download 720p.md DELETED
@@ -1,6 +0,0 @@
- <h2>Chirag Hd Movies Download 720p</h2><br /><p><b><b>DOWNLOAD</b> &rArr;&rArr;&rArr; <a href="https://imgfil.com/2uy0iM">https://imgfil.com/2uy0iM</a></b></p><br /><br />
-
- November 2, 2021 - Movie quality: 720p HDRip. File size: 740 MB Storyline, Pratiksha's pride takes her away from fame, and her proximity to Chirag infuriates her. Throughout her career, Chirag and her family have been in the spotlight of the media as they have been known throughout India. Her mother, Shivgani, was known as "Shivgani's mother". Shivgani was known for her courage and determination. She campaigned for the rights of women and female prisoners. However, she gained notoriety not only because of her fight against the criminal justice system, but also because of her marriage to Chirag, the famous writer, and their family. 8a78ff9644<br />
- <br />
- <br />
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Corel PaintShop Pro 2020 Ultimate V22 With Crack Key.md DELETED
@@ -1,6 +0,0 @@
- <h2>Corel PaintShop Pro 2020 Ultimate v22 With Crack Key</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://imgfil.com/2uxZrY">https://imgfil.com/2uxZrY</a></b></p><br /><br />
-
- Corel PaintShop Pro 2018 Ultimate Download With Crack + Serial Key If you are a lover ... Download Corel PaintShop Pro 2020 Crack & Serial Number is ultimate photo editing ... Corel PaintShop Pro 2020 v22.1.0.44 with Crack | 4HowCrack. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Free Energy Surprise William Lyne PDF Converter The Most Powerful and Controversial Book on Alternative Energy Ever Written.md DELETED
@@ -1,20 +0,0 @@
- <br />
- <p>In the free energy formula of Eq (7.36), nonideality is expressed by the general form gex , the excess free energy. The simplifications used in the prior analyses of ideal melting and phase separation, namely neglecting sex and confining hex to the regular-solution model, are not valid for most binary systems. In order to construct phase diagrams by the common-tangent technique, more elaborate solution models are needed to relate free energy to composition for all likely phases. Figure 8.9 shows plots of g Vs xB for the three phases at six temperatures, with T6 the highest and T1 the lowest. In the six graphs, the curves for each phase keep approximately the same shape but shift relative Fig. 8.9 Free energy composition curves for an A-B binary system with two solid phases (a and P) and a liquid phase (From Ref. 1) Fig. 8.9 Free energy composition curves for an A-B binary system with two solid phases (a and P) and a liquid phase (From Ref.</p>
- <p>As in Eq (7.21) for the enthalpy, the molar Gibbs free energy of a solution (g) can be written in terms of pure-component contributions (gA and gB ) and an excess value (gex ). However, an important contribution needs to be added. For a binary solution, the terms contributing to g are</p>
- <h2>free energy surprise william lyne pdf converter</h2><br /><p><b><b>Download</b> &gt; <a href="https://imgfil.com/2uxYTJ">https://imgfil.com/2uxYTJ</a></b></p><br /><br />
- <p>Even though the thermochemical database need contain only AGo (or, equivalently, AHo and ASo ), the number of reactions that would have to be included in such a compilation is intractably large. The key to reducing data requirements to manageable size is to provide the standard free energy changes of forming the individual molecular species from their constituent elements. Particular reactions are constructed from these so-called formation reactions. For molecular compounds containing two or more elements, the basic information is the Free energy change for reactions by which the compound is created from its constituent elements, the latter in their normal state at the particular temperature. These reaction free energy changes are called standard free energies of formation of the compound. For example, the methane combustion reaction of Eq (9.1) involves one elemental compound (O2 ) and three molecular compounds (CH4 , CO2 , and H2 O).</p>
- <p>Another notable solid-solid equilibrium is the graphite-to-diamond transition in the element carbon. Graphite is fairly common in the earth's crust but the rarity of diamond is the origin of its value. Under normal terrestrial conditions (300 K, 1 atm) the two forms of carbon are not in equilibrium and so, thermodynamically speaking, only one form should exist. The stable form is the one with the lowest Gibbs free energy. At 300 K, the enthalpy difference between diamond and graphite is Ahd-g 1900 J mole, with diamond less stable than graphite in this regard. Being a highly ordered structure, diamond has a molar entropy lower than that of graphite, and Asd-g -3.3 J mole-K (see Fig. 3.6). This difference also favors the stability of graphite. The combination of the enthalpy and entropy effects produces a free-energy difference of Since the phase with the lowest free energy (graphite) is stable, diamond is a metastable phase.</p>
- <p>The definition has been chosen so that the activity tends to unity for pure i that is, gi , the molar free energy of pure i. Activity varies monotonically with concentration. Therefore, when component i approaches infinite dilution ai 0 and - ot. This inconvenient behavior of the chemical potential at zero concentration is avoided by using the activity in practical thermodynamic calculations involving species in solution. Another reason for the choice of the mathematical form of the relation between and ai embodied in Eq (7.29) is that the activity is directly measurable as the ratio of the equilibrium pressure exerted by a component in solution to the vapor pressure of the pure substance. This important connection is discussed in Chap. 8. Problem 7.7 shows how this equation can be used to assess the validity of formulas for hex . In an equally important application, the above equation can be integrated to give the Gibbs free energy analog of Eq (7.19) for the enthalpy</p>
- <p>The equilibrium criterion of minimum Gibbs free energy (Sect. 1.11) can be applied to any of the phase transitions described in the previous section. At fixed pressure and temperature, let the system contain nI moles of phase I and nII moles of phase II, with molar Gibbs free energies of gI and gII, respectively. The total Gibbs free energy of the two-phase mixture is This is an expression of chemical equilibrium. It complements the conditions of thermal equilibrium (TI TII) and mechanical equilibrium (pI pII). Since the Gibbs free energy is defined by g h - Ts, another form of Eq (5.2) is</p>
- <p>To Matthews that Tesla had entrusted two of his greatest inventions prior to his death - the Tesla interplanetary communications set and the Tesla anti-war device. Tesla also left special instructions to Otis T. Carr of Baltimore, who used this information to develop free-energy devices capable of 'powering anything from a hearing aid to a spaceship.' (73) Tesla's technology, through Carr's free-energy device, will revolutionize the world. Instead of purchasing power from the large corporations, which is then delivered to our homes via wires and cables, the new technology consists of nothing more than a small antenna that will be attached to the roof of every building</p>
- <p>As in any system constrained to constant temperature and pressure, the equilibrium of a chemical reaction is attained when the free energy is a minimum. Specifically, this means that dG 0, where the differential of G is with respect to the composition of the mixture. In order to convert this criterion to an equation relating the equilibrium concentrations of the reactants and products, the chemical potentials are the essential intermediaries. At equilibrium, Eq (7.27) provides the equation</p>
- <p>Irrespective of the complexity of the nonideal behavior of the phases involved, the phase diagram can always be constructed if the free energy Vs composition curves for each phase can be drawn. The link between the two graphical representations is the common-tangent rule. Because of the wide variations in the shapes of free-energy curves, the types of phase diagrams deduced from them reaches zoological proportions. In this section, a common variety called the eutectic phase diagram5 is developed by the graphical method.</p>
- <p>The structure of a phase diagram is determined by the condition of chemical equilibrium. As shown in Sect. 8.2, this condition can be expressed in one of two ways either the total free energy of the system (Eq (8.1)) is minimized or the chemical potentials of the each component (Eq (8.2)) in coexisting phases are equated. The choice of the manner of expressing equilibrium is a matter of convenience and varies with the particular application. Free-energy minimization is usually used with the graphical method and chemical-potential equality is the method of choice for the analytic approach.</p>
- <p></p>
- <p>The chemical potential is directly related to the Gibbs free energy of a system. For a one-component system, the chemical potential is identical to the molar Gibbs free energy of the pure substance. In solutions or mixtures, the chemical potential is simply another name for the partial molar Gibbs free energy. The discussion in Sect. 7.3, in which enthalpy was used to illustrate partial molar and excess properties, applies to the Gibbs free energy one need only replace h everywhere by g. The reason that the partial molar Gibbs free energy (g) is accorded the special name chemical potential is not only to shorten a cumbersome five-word designation. More important is the role of the chemical potential in phase equilibria and chemical equilibria when the restraints are constant temperature and pressure. Instead of the symbol g, the chemical potential is designated by The connection between the Gibbs free energy of a system at fixed T and p and the equilibrium state is shown in Fig. 1.18.</p>
- <p>By the time I arrived at Princeton in the fall of 1962, I was thoroughly pumped up to join the quest for controlled fusion at PPL. My under-grad senior thesis on an obscure plasma instability led to working in Jim Drummond's Plasma Physics Group at the Boeing Scientific Research Laboratories in my hometown, Seattle, during the year following graduation. In fact, this millennial dream of realizing a virtually limitless source of pollution-free energy was largely my motivation for applying to grad school, and only to Princeton.</p>
- <p>That an under-ice ocean exists on Europa is remarkable. It is especially remarkable when it is realized that Jupiter sits well outside of the habitable zone (defined in Chapter 5, and see Figure 5.9) and given that the surface temperature of the moon is not much greater than 100 K. How, indeed, can this ocean exist There is not enough solar energy to warm Europa above the freezing point of water, and the moon is so small that it should have cooled off relatively rapidly after formation.19 The possibility of terraforming Europa and, indeed, the other Galilean moons has been discussed by numerous researchers, but in all cases, bar the stellifying of Jupiter option, the biggest hurdle to overcome is that of supplying enough surface heat.</p>
- <p>The term on the left is A , the chemical-potential difference of overall reaction (10.21). It is the aqueous equivalent of the free-energy difference AG used in describing nonaqueous cells. The electric potential difference on the right is the cell EMF, s, so the equation is</p>
- <p>Propellant to generate electricity is not an efficient way to power a satellite that could use free solar energy instead (by means of solar cells). Nevertheless, electrodynamic tether power generation could be useful for generating short bursts of electrical energy, for instance when needed for high-energy but short duration experiments involving powerful lidars (instruments similar to radar but using laser light instead of short wavelength radio waves).</p> aaccfb2cb3<br />
- <br />
- <br />

spaces/1line/AutoGPT/autogpt/prompt.py DELETED
@@ -1,204 +0,0 @@
1
- from colorama import Fore
2
-
3
- from autogpt.config import Config
4
- from autogpt.config.ai_config import AIConfig
5
- from autogpt.config.config import Config
6
- from autogpt.logs import logger
7
- from autogpt.promptgenerator import PromptGenerator
8
- from autogpt.setup import prompt_user
9
- from autogpt.utils import clean_input
10
-
11
- CFG = Config()
12
-
13
-
14
- def get_prompt() -> str:
15
- """
16
- This function generates a prompt string that includes various constraints,
17
- commands, resources, and performance evaluations.
18
-
19
- Returns:
20
- str: The generated prompt string.
21
- """
22
-
23
- # Initialize the Config object
24
- cfg = Config()
25
-
26
- # Initialize the PromptGenerator object
27
- prompt_generator = PromptGenerator()
28
-
29
- # Add constraints to the PromptGenerator object
30
- prompt_generator.add_constraint(
31
- "~4000 word limit for short term memory. Your short term memory is short, so"
32
- " immediately save important information to files."
33
- )
34
- prompt_generator.add_constraint(
35
- "If you are unsure how you previously did something or want to recall past"
36
- " events, thinking about similar events will help you remember."
37
- )
38
- prompt_generator.add_constraint("No user assistance")
39
- prompt_generator.add_constraint(
40
- 'Exclusively use the commands listed in double quotes e.g. "command name"'
41
- )
42
- prompt_generator.add_constraint(
43
- "Use subprocesses for commands that will not terminate within a few minutes"
44
- )
45
-
46
- # Define the command list
47
- commands = [
48
- ("Google Search", "google", {"input": "<search>"}),
49
- (
50
- "Browse Website",
51
- "browse_website",
52
- {"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
53
- ),
54
- (
55
- "Start GPT Agent",
56
- "start_agent",
57
- {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
58
- ),
59
- (
60
- "Message GPT Agent",
61
- "message_agent",
62
- {"key": "<key>", "message": "<message>"},
63
- ),
64
- ("List GPT Agents", "list_agents", {}),
65
- ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
66
- (
67
- "Clone Repository",
68
- "clone_repository",
69
- {"repository_url": "<url>", "clone_path": "<directory>"},
70
- ),
71
- ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
72
- ("Read file", "read_file", {"file": "<file>"}),
73
- ("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
74
- ("Delete file", "delete_file", {"file": "<file>"}),
75
- ("Search Files", "search_files", {"directory": "<directory>"}),
76
- ("Analyze Code", "analyze_code", {"code": "<full_code_string>"}),
77
- (
78
- "Get Improved Code",
79
- "improve_code",
80
- {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
81
- ),
82
- (
83
- "Write Tests",
84
- "write_tests",
85
- {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
86
- ),
87
- ("Execute Python File", "execute_python_file", {"file": "<file>"}),
88
- ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
89
- ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
90
- ("Send Tweet", "send_tweet", {"text": "<text>"}),
91
- ]
92
-
93
- # Only add the audio to text command if the model is specified
94
- if cfg.huggingface_audio_to_text_model:
95
- commands.append(
96
- ("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
97
- )
98
-
99
- # Only add shell command to the prompt if the AI is allowed to execute it
100
- if cfg.execute_local_commands:
101
- commands.append(
102
- (
103
- "Execute Shell Command, non-interactive commands only",
104
- "execute_shell",
105
- {"command_line": "<command_line>"},
106
- ),
107
- )
108
- commands.append(
109
- (
110
- "Execute Shell Command Popen, non-interactive commands only",
111
- "execute_shell_popen",
112
- {"command_line": "<command_line>"},
113
- ),
114
- )
115
-
116
- # Only add the download file command if the AI is allowed to execute it
117
- if cfg.allow_downloads:
118
- commands.append(
119
- (
120
- "Downloads a file from the internet, and stores it locally",
121
- "download_file",
122
- {"url": "<file_url>", "file": "<saved_filename>"},
123
- ),
124
- )
125
-
126
- # Add these command last.
127
- commands.append(
128
- ("Do Nothing", "do_nothing", {}),
129
- )
130
- commands.append(
131
- ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
132
- )
133
-
134
- # Add commands to the PromptGenerator object
135
- for command_label, command_name, args in commands:
136
- prompt_generator.add_command(command_label, command_name, args)
137
-
138
- # Add resources to the PromptGenerator object
139
- prompt_generator.add_resource(
140
- "Internet access for searches and information gathering."
141
- )
142
- prompt_generator.add_resource("Long Term memory management.")
143
- prompt_generator.add_resource(
144
- "GPT-3.5 powered Agents for delegation of simple tasks."
145
- )
146
- prompt_generator.add_resource("File output.")
147
-
148
- # Add performance evaluations to the PromptGenerator object
149
- prompt_generator.add_performance_evaluation(
150
- "Continuously review and analyze your actions to ensure you are performing to"
151
- " the best of your abilities."
152
- )
153
- prompt_generator.add_performance_evaluation(
154
- "Constructively self-criticize your big-picture behavior constantly."
155
- )
156
- prompt_generator.add_performance_evaluation(
157
- "Reflect on past decisions and strategies to refine your approach."
158
- )
159
- prompt_generator.add_performance_evaluation(
160
- "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
161
- " the least number of steps."
162
- )
163
-
164
- # Generate the prompt string
165
- return prompt_generator.generate_prompt_string()
166
-
167
-
168
- def construct_prompt() -> str:
169
- """Construct the prompt for the AI to respond to
170
-
171
- Returns:
172
- str: The prompt string
173
- """
174
- config = AIConfig.load(CFG.ai_settings_file)
175
- if CFG.skip_reprompt and config.ai_name:
176
- logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
177
- logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
178
- logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
179
- elif config.ai_name:
180
- logger.typewriter_log(
181
- "Welcome back! ",
182
- Fore.GREEN,
183
- f"Would you like me to return to being {config.ai_name}?",
184
- speak_text=True,
185
- )
186
- should_continue = clean_input(
187
- f"""Continue with the last settings?
188
- Name: {config.ai_name}
189
- Role: {config.ai_role}
190
- Goals: {config.ai_goals}
191
- Continue (y/n): """
192
- )
193
- if should_continue.lower() == "n":
194
- config = AIConfig()
195
-
196
- if not config.ai_name:
197
- config = prompt_user()
198
- config.save(CFG.ai_settings_file)
199
-
200
- # Get rid of this global:
201
- global ai_name
202
- ai_name = config.ai_name
203
-
204
- return config.construct_full_prompt()
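For orientation, here is a minimal sketch of how the two helpers above fit together at startup. The import path follows this file's location (autogpt/prompt.py); the surrounding entry-point code is illustrative, not AutoGPT's exact main module.

```python
# Illustrative usage sketch, not part of the deleted file above.
# construct_prompt() loads (or interactively creates) an AIConfig, then
# config.construct_full_prompt() folds in the command, resource, and
# performance-evaluation lists registered on the PromptGenerator.
from autogpt.prompt import construct_prompt

system_prompt = construct_prompt()
print(system_prompt[:300])  # name, role, goals, then the numbered command list
```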
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ .md DELETED
@@ -1,103 +0,0 @@
- <br />
- <h1>Download English Books for Beginners for Free: How and Where to Find the Best Resources</h1>
- <p>Do you want to learn English but don't know where to start? Are you looking for an effective and engaging way to improve your knowledge and skills? Then you should try reading books in English. It is not only an enjoyable pastime but also a useful exercise that develops many aspects of the language. In this article, we explain why reading books in English is useful for beginners, how to choose suitable books to read in English, and where to download English books for beginners for free.</p>
- <h2>download english books for beginners for free</h2><br /><p><b><b>Download</b> &gt;&gt;&gt;&gt;&gt; <a href="https://urlin.us/2uSSCl">https://urlin.us/2uSSCl</a></b></p><br /><br />
- <h2>Why reading books in English is useful for beginners</h2>
- <p>Reading books in English has many advantages for those who are just starting to learn the language. Here are some of them:</p>
- <h3>Improving your vocabulary and grammar</h3>
- <p>When you read a book in English, you inevitably encounter new words and expressions that you can memorize and use in your own speech. You also see how those words and expressions are used in context, which helps you understand their meaning and correct usage. Reading also helps you absorb grammar rules and structures that you can apply in your writing and speaking: you learn to build sentences, keep tenses consistent, and use modal verbs, articles, prepositions, and other elements of the language.</p>
- <h3>Developing reading and comprehension skills</h3>
- <p>Reading books in English also trains your reading and comprehension skills. You learn to scan a text, identify the main idea, find keywords, draw conclusions and make assumptions, and analyze and critique information. You also develop the ability to guess the meaning of unfamiliar words from context, use synonyms and antonyms, and recognize phrasal verbs and idioms. These skills will serve you not only when reading books, but also when watching films, listening to podcasts, talking to native speakers, and taking international exams.</p>
- <h3>Immersing yourself in the culture and mentality</h3>
- <p>Finally, reading books in English lets you immerse yourself in the culture and mentality of the countries where the language is spoken. You can learn many interesting facts about their history, geography, politics, economics, education, science, art, sport, religion, traditions, and customs. You can also get a feel for the atmosphere, humor, emotions, and character of the people who live there. This helps you better understand and respect different cultures and peoples, broaden your horizons, and enrich your experience.</p>
- <h2>How to choose suitable books for reading in English</h2>
- <p>To read books in English with pleasure and benefit, it is important to choose books that match your language level, interests, and goals. Here are a few tips on how to do that:</p>
- <p>download English grammar books for free<br />
- download books in English for beginners<br />
- download English textbooks for beginners for free<br />
- download books for learning English for free<br />
- download books in English in PDF format for free<br />
- download books in English with translation for free<br />
- download books in English for reading for free<br />
- download English books for free in EPUB<br />
- download books in English for children for free<br />
- download books in English for teenagers for free<br />
- download books in English for advanced learners for free<br />
- download books in English for self-study for free<br />
- download books in English by interest for free<br />
- download books in English by level for free<br />
- download books in English by genre for free<br />
- download classic literature in English for free<br />
- download modern literature in English for free<br />
- download detective novels in English for free<br />
- download science fiction in English for free<br />
- download novels in English for free<br />
- download adventure books in English for free<br />
- download humor books in English for free<br />
- download psychology books in English for free<br />
- download history books in English for free<br />
- download economics books in English for free<br />
- download marketing books in English for free<br />
- download management books in English for free<br />
- download leadership books in English for free<br />
- download motivation books in English for free<br />
- download personal development books in English for free<br />
- download books in English on learning foreign languages for free<br />
- download books in English on travel and tourism for free<br />
- download books in English on health and sp</p>
- <h3>Determining your language level</h3>
- <p>Before you download English books for beginners for free, you should determine your level of the language. This helps you pick books that are neither too easy nor too difficult for you. There are various systems for assessing language level, such as CEFR (Common European Framework of Reference for Languages), IELTS (International English Language Testing System), TOEFL (Test of English as a Foreign Language), and others. You can take an online test or assess your knowledge yourself against criteria such as vocabulary, grammar, reading, writing, speaking, and listening. Depending on your level, you can choose books of varying difficulty, length, and genre.</p>
- <h3>Considering your interests and goals</h3>
- <p>To read books in English with pleasure and motivation, pick books that interest you and match your goals. For example, if you are learning English for work or study, choose books related to your field or profession. If you are learning English for travel or communication, choose books about the countries, cultures, people, or situations that attract you. If you are learning English for entertainment or self-development, choose books about your hobbies, passions, tastes, or values. The main thing is that the books are interesting and useful to you.</p>
- <h3>Looking for books with audio, translation, and exercises</h3>
- <p>To read books in English effectively and comfortably, it helps to look for books that come with extra resources such as audio, translation, and exercises. Audio lets you hear the pronunciation of words and the intonation of sentences, and trains your listening comprehension. A translation helps you understand the meaning of words and expressions you don't know or aren't sure about. Exercises let you check your understanding of the text and consolidate new knowledge and skills. You can find books with audio, translation, and exercises on specialized websites or in apps for learning English.</p>
- <h2>Where to download English books for beginners for free</h2>
- <p>If you have decided to read books in English, you need to know where you can download English books for beginners for free. There are many sites and resources offering free books in English for different levels and goals. Here are some of them:</p>
- <h3>English 4U: two grammar books with exercises</h3>
- <p>English 4U is a site for learning English online. There you can download two English grammar books with exercises: English Grammar in Use and Essential Grammar in Use. These books suit beginner and intermediate levels and contain many examples, explanations, and tests on various grammar topics. You can download the books in PDF or DOC format, along with MP3 audio files, at this link: .</p>
- <h3>Grammar Teacher: the English Grammar Secrets textbook with tests</h3>
- <p>Grammar Teacher is a site for learning English grammar online. There you can download the textbook English Grammar Secrets, which consists of 16 chapters covering different aspects of grammar. Each chapter contains theory, examples, exercises, and tests. You can download the textbook in PDF or DOC format, along with MP3 audio files, at this link: .</p>
- <h3>Oxford Guide to English Grammar: a complete grammar reference</h3>
- <p>The Oxford Guide to English Grammar is a complete reference on English grammar suitable for all levels. It covers all the core and additional grammar topics, such as parts of speech, sentences, tenses, modality, the passive voice, conditionals, and more, with many examples, tables, diagrams, and rules. You can download the book in PDF at this link: .</p>
- <h3>Englishpage Online English Grammar Book: an interactive grammar book with tasks</h3>
- <p>The Englishpage Online English Grammar Book is an interactive English grammar book available online. It consists of 10 sections devoted to different grammar topics, such as articles, adverbs, adjectives, pronouns, phrasal verbs, and more. Each section contains theory, examples, and tasks. You can read the book online or download it in PDF at this link: .</p>
- <h3>Aldebaran: an e-library with books on many topics</h3>
- <p>Aldebaran (Альдебаран) is an e-library containing more than 100,000 books in Russian and English across many topics and genres. There you can find books on literature, history, philosophy, psychology, economics, law, medicine, and other fields, as well as fiction by authors of different styles and movements. You can read books online or download them in FB2, RTF, TXT, DOC, and other formats. You can visit the site at this link: .</p>
- <h3>English Online Club: a site with graded readers</h3>
- <p>English Online Club is a site for learning English online. There you can find books for reading graded by level, from beginner to advanced, across genres such as detective stories, science fiction, adventure, and romance, as well as classics of English and American literature: Shakespeare, Dickens, Twain, Hemingway, and more. You can read the books online or download them in PDF or DOC. You can visit the site at this link: .</p>
- <h2>Conclusion</h2>
- <p>Reading books in English is one of the best ways for beginners to learn the language. It helps you improve your vocabulary and grammar, develop reading and comprehension skills, and immerse yourself in the culture and mentality of English-speaking countries. To read with pleasure and benefit, choose books that match your level, interests, and goals, and look for books that come with audio, translation, and exercises. Online you will find many sites and resources offering English books for beginners for free. We hope this article has helped you, and that you enjoy reading in English.</p>
- <h2>FAQ</h2>
- <p>Here are some frequently asked questions about downloading English books for beginners for free:</p>
- <h3>Which book is best for beginners?</h3>
- <p>There is no single answer, because the choice depends on your language level, interests, and goals. In general, pick books that are neither too hard nor too easy for you, that have an engaging plot or useful information, and that come with extra resources such as audio, translation, and exercises. You can start with grammar or vocabulary books and then move on to fiction or popular science.</p>
- <h3>How much time should I spend reading books in English?</h3>
- <p>That depends on your free time, motivation, and goals. In general, read regularly and systematically, ideally every day or several times a week, and for at least 15-20 minutes per session. You can read at any time and place convenient for you: at home, on public transport, in a park, or in a cafe.</p>
- <h3>How can I check my understanding of books in English?</h3>
- <p>To check your understanding of books in English, you can use different methods, such as:</p>
- <ul>
- <li>Answering questions about the text, either those given at the end of a chapter or book or ones you write yourself.</li>
- <li>Writing a short summary or review of the book, expressing your opinion and impressions.</li>
- <li>Discussing the book with other people who have read it or want to read it.</li>
- <li>Looking up additional information on the book's topic and comparing and analyzing different sources.</li>
- <li>Using new words and expressions from the book in your speech and writing, in different contexts.</li>
- </ul>
- <h3>How can I raise my reading level in English?</h3>
- <p>To raise your reading level in English, you can follow these tips:</p>
- <ul>
- <li>Read books of gradually increasing difficulty, length, and genre.</li>
- <li>Read books by different authors and in different styles to broaden your horizons.</li>
- <li>Read with different goals: for study, for fun, for self-development, and so on.</li>
- <li>Read with different strategies: scanning, skimming, close reading, critical reading, and so on.</li>
- <li>Read with different methods: aloud, silently, with audio, with a translation, and so on.</li>
- </ul>
- <h3>What other resources can I use for reading books in English?</h3>
- <p>Besides the sites and resources mentioned above, you can use the following for reading books in English:</p>
- <ul>
- <li>E-readers or reading apps on your phone or tablet, for example Kindle, Google Play Books, iBooks, Kobo, and others.</li>
- <li>Audiobooks or podcasts in English that you can listen to while exercising, driving, cooking, or doing other things, for example Audible, LibriVox, Spotify, Apple Podcasts, and others.</li>
- <li>Online courses or programs for learning English through reading, for example LinguaLeo, Duolingo, FluentU, LearnEnglish, and others.</li>
- </ul>
- <p>We hope this article has helped you learn more about how and where to download English books for beginners for free. Good luck, and enjoy learning English through reading!</p>
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DRAGON BALL LEGENDS APK combate en 3D con Goku Vegeta Frieza y muchos ms.md DELETED
@@ -1,20 +0,0 @@
- <br />
- You have asked me to write a 500-word article on the topic of "dragon ball legends apk sin licencia". This is a Spanish phrase that means "dragon ball legends apk without license". I assume you want me to write the article in English, but if you prefer another language, please let me know. Before writing the article, I will create two tables: the first is the outline of the article, and the second is the article itself with HTML formatting. I will use at least 15 headings and subheadings (including H1, H2, H3, and H4 headings) to organize the article, use at least one table to present some information, and write in a conversational style as written by a human, with an informal tone, personal pronouns, simple language, engaging questions, and analogies and metaphors. I will end with a conclusion paragraph and 5 unique FAQs, and I will make sure that the article is 100% unique, SEO-optimized, and human-written. Here are the two tables:
-
- | Outline of the article | |
- | --- | --- |
- | H1: Dragon Ball Legends APK Without License: What You Need to Know | The main title of the article. It introduces the topic and catches the reader's attention. |
- | H2: What is Dragon Ball Legends? | The first subheading. It gives a brief overview of the game and its features. |
- | H3: Why is Dragon Ball Legends popular? | A subheading under the first subheading. It explains why the game has many fans and what makes it appealing. |
- | H3: How to play Dragon Ball Legends? | Another subheading under the first subheading. It describes the basic gameplay mechanics and controls of the game. |
- | H2: What is an APK file? | The second subheading. It defines what an APK file is and how it works. |
- | H3: What are the benefits of using an APK file? | A subheading under the second subheading. It lists some advantages of using an APK file to install apps or games. |
- | H3: What are the risks of using an APK file? | Another subheading under the second subheading. It warns about some potential dangers or drawbacks of using an APK file to install apps or games. |
- | H2: What does it mean to have an APK without license? | The third subheading. It explains what it means when an APK file does not have a license or verification from the original developer or publisher. |
- | H3: How to get an APK without license? | A subheading under the third subheading. It provides some sources or methods to obtain an APK file without license for Dragon Ball Legends or other games. |
- | H3: How to install an APK without license? | Another subheading under the third subheading. It gives some steps or instructions to install an APK file without license on an Android device. |
- | H2: What are the pros and cons of using an APK without license? | The fourth subheading. It compares and contrasts the benefits and drawbacks of using an APK file without license for Dragon Ball Legends or other games. |
- | H3: Pros of using an APK without license | A subheading under the fourth subheading. It enumerates some positive aspects, such as saving money, accessing more features, or bypassing regional restrictions. |
- | H3: Cons of using an APK without license | Another subheading under the fourth subheading. It enumerates some negative aspects, such as violating terms of service, exposing the device to malware, or losing data or progress. |
- | H2: Tips and tricks for playing Dragon Ball Legends with an APK without license | The fifth subheading. It offers some advice and recommendations, such as clearing cache, checking for updates, changing DNS settings, or sending bug reports. |
- | H2: Conclusion | The sixth and final subheading. It summarizes the main points of the article and gives a final verdict on whether using an APK file without license for Dragon Ball Legends is worth it or not. |
- | FAQs | A section after the conclusion that answers some frequently asked questions about Dragon Ball Legends and APK files without license. |
-
- Article with HTML formatting:
-
- <h1>Dragon Ball Legends APK Without License: What You Need to Know</h1>
- <p>If you are a fan of Dragon Ball, you might have heard of Dragon Ball Legends, a popular mobile game based on the anime series. But did you know that you can play the game without paying for it or having a license? In this article, we will tell you everything you need to know about Dragon Ball Legends APK without license, including what it is, how to get it, how to install it, and what the pros and cons of using it are. Read on to find out more!</p>
- <h2>What is Dragon Ball Legends?</h2>
- <p>Dragon Ball Legends is a 3D action RPG that lets you experience the epic battles and adventures of the Dragon Ball universe on your smartphone. You can create your own custom character or choose from a roster of over 100 characters from the series, including Goku, Vegeta, Frieza, Cell, Broly, and more. You can also enjoy the original story mode, which features a new character designed by Akira Toriyama, the creator of Dragon Ball. The game features stunning graphics, smooth animations, and realistic voice acting that bring the characters and scenes to life. You can also challenge other players from around the world in real-time PvP matches or join forces with your friends in co-op raids. The game is constantly updated with new events, missions, rewards, and characters to keep you entertained.</p>
- <h3>Why is Dragon Ball Legends popular?</h3>
- <p>Dragon Ball Legends is popular because it appeals to both casual and hardcore fans of Dragon Ball. It has a simple and intuitive gameplay system that anyone can pick up and play, but it also offers depth and strategy for more advanced players. It has a rich and immersive story mode that expands the lore and canon of the series, as well as fan service and nostalgia for those who grew up watching the anime or reading the manga. The game is also faithful to the source material and respects the original vision of Akira Toriyama: it features authentic voice acting from the Japanese and English cast of the anime, original music and sound effects, and it follows the timeline and events of the series while adding new twists and surprises along the way.</p>
- <h3>How to play Dragon Ball Legends?</h3>
- <p>To play Dragon Ball Legends, download and install the game from the Google Play Store or the App Store. The game is free to play, but it offers in-app purchases that can enhance your gameplay experience or unlock more content, and you need a stable internet connection to play online. The controls use taps and swipes on your screen: you move your character by dragging on the left side of the screen and attack by tapping on the right side. You can also use special cards that appear at the bottom of the screen to unleash powerful skills or combos, and switch between characters by tapping their icons at the top of the screen. The game has various modes, such as story mode, PvP mode, co-op mode, event mode, adventure mode, and training mode. Each mode has its own objectives, rewards, and challenges that earn you experience points, currency, items, and characters. You can also customize your characters by equipping them with different equipment, souls, z abilities, limit breaks, and more.</p>
- <h2>What is an APK file?</h2>
- <p>An APK file (Android Package Kit) is the file format used to distribute and install applications or games on Android devices. An APK file contains all the files and data an app or game needs to run properly on your device. APK files can be downloaded from various sources online, such as official websites, third-party app stores, or file-sharing platforms. However, not all APK files are safe or reliable: some may contain viruses, malware, spyware, or other harmful software that can damage your device or steal your personal information.</p>
- <h3>What are the benefits of using an APK file?</h3>
- <p>Using an APK file can have some benefits over using the official app store or Google Play Store. Some of these benefits are:</p>
- <ul>
- <li>You can access apps or games that are not available in your region or country.</li>
- <li>You can access apps or games that are not compatible with your device or operating system.</li>
- <li>You can access apps or games that are no longer supported or updated by their developers or publishers.</li>
- <li>You can access apps or games that have more features or content than their official versions.</li>
- <li>You can save money by downloading paid or premium apps or games for free.</li>
- </ul>
- <h3>What are the risks of using an APK file?</h3>
- <p>However, using an APK file also carries some risks:</p>
- <ul>
- <li>You may violate the terms of service or the intellectual property rights of the developers or publishers of the apps or games.</li>
- <li>You may expose your device to viruses, malware, spyware, or other harmful software that can damage your device or steal your personal information.</li>
- <li>You may lose your data or progress in the apps or games if they are not properly backed up or synced with your account.</li>
- <li>You may encounter bugs, errors, crashes, or compatibility issues that affect your gameplay experience or performance.</li>
- <li>You may not receive updates, patches, fixes, or support from the developers or publishers.</li>
- </ul>
- <h2>What does it mean to have an APK without license?</h2>
- <p>Having an APK without license means that you have downloaded and installed an APK file that does not have a license or verification from the original developer or publisher of the app or game. In other words, you are using an unofficial, modified, hacked, or pirated version. An APK without license can be obtained from various sources online, such as websites, forums, blogs, social media, or file-sharing platforms. However, these sources are not authorized or endorsed by the developers or publishers, so they may not be safe, reliable, or legal to use.</p>
- <h3>How to get an APK without license?</h3>
- <p>There are many ways to get an APK without license for Dragon Ball Legends or other games, such as:</p>
- <ul>
- <li>Searching for the APK file on Google or another search engine and downloading it from a website that offers it.</li>
- <li>Joining a community or group of fans or modders of the game and downloading the APK file from their links or posts.</li>
- <li>Using a tool or software that can extract, modify, hack, or crack the APK file from the official version of the game.</li>
- <li>Using a VPN or proxy service that can change your IP address and location and access the game from a different region or country.</li>
- </ul>
- <p>Before you get an APK without license, be aware of the risks and consequences that come with it. You should also check the credibility and reputation of the source that provides the APK file and scan it for any viruses or malware.</p>
- <h3>How to install an APK without license?</h3>
- <p>To install an APK without license on your Android device, follow these steps:</p>
- <ul>
- <li>Enable unknown sources in your device settings so you can install apps or games from sources other than the Google Play Store: go to Settings > Security > Unknown Sources and toggle it on.</li>
- <li>Download the APK file without license from your preferred source and save it to your device storage.</li>
- <li>Locate and tap the APK file to start the installation. You may need to grant some permissions or accept some terms and conditions before proceeding.</li>
- <li>Wait for the installation to finish, then launch the app or game from your device.</li>
- </ul>
- <p>Again, be aware of the risks before you install an APK without license, and back up your data and progress in case something goes wrong during or after the installation.</p>
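As an aside from us (not part of the original article), the tap-through steps above can also be driven from a computer. The hedged sketch below performs the same sideload over USB with adb; it assumes the Android platform tools are installed and on PATH, USB debugging is enabled on the device, and the APK filename is a placeholder.

```python
# Hypothetical automation of the sideload steps above, for illustration only.
import subprocess

def sideload_apk(apk_path: str) -> str:
    """Install a local APK on the connected Android device via adb.

    Uses `adb install -r`; the -r flag reinstalls while keeping app data.
    """
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True, text=True, check=False,
    )
    return result.stdout.strip() or result.stderr.strip()

# Illustrative filename; adb prints "Success" on a completed install.
print(sideload_apk("dragon-ball-legends-3.3-mod.apk"))
```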
- <h2>What are the pros and cons of using an APK without license?</h2>
- <p>Using an APK without license for Dragon Ball Legends or other games has both pros and cons. Here are some of them:</p>
- <h3>Pros of using an APK without license</h3>
- <ul>
- <li>You can save money by playing Dragon Ball Legends for free instead of paying for it.</li>
- <li>You can access features or content that are not available in the official version, such as unlimited crystals, coins, energy, characters, items, etc.</li>
- <li>You can bypass regional restrictions and play Dragon Ball Legends in any country or language you want.</li>
- <li>You can enjoy faster loading times and smoother gameplay performance by removing ads, banners, pop-ups, etc.</li>
- </ul>
- <h3>Cons of using an APK without license</h3>
- <ul>
- <li>You may expose your device to viruses, malware, spyware, or other harmful software that can damage your device, steal your personal information, and compromise your security and privacy.</li>
- <li>You may lose your data or progress in Dragon Ball Legends if the APK file is corrupted, outdated, incompatible, or deleted, with no way to restore or recover it.</li>
- <li>You may encounter bugs, errors, crashes, or compatibility issues that affect your gameplay experience or performance, with no access to updates, patches, fixes, or support from the developers or publishers.</li>
- </ul>
- <h2>Tips and tricks for playing Dragon Ball Legends with an APK without license</h2>
- <p>If you decide to play Dragon Ball Legends with an APK without license, the following tips and tricks can improve your gameplay experience and help you avoid some problems:</p>
- <ul>
- <li>Clear your cache and data regularly to free up space and improve loading times and performance.</li>
- <li>Check for updates frequently to make sure you have the latest version of the APK file and the game.</li>
- <li>Change your DNS settings to a faster or more secure one to enhance your internet connection and speed.</li>
- <li>Send bug reports or feedback to the source of the APK file or to the community of fans or modders of the game to help them improve their work and fix issues.</li>
- <li>Use a VPN or proxy service to hide your IP address and location and avoid being detected or banned by the developers or publishers of the game.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Dragon Ball Legends is a fun and exciting game that lets you experience the epic battles and adventures of the Dragon Ball universe on your smartphone. If you want to play the game without paying for it or having a license, you may need to use an APK file without license. This has some benefits, such as saving money, accessing more features, or bypassing regional restrictions, but it also has risks, such as violating terms of service, exposing your device to malware, or losing data or progress. Weigh the pros and cons carefully before using an APK file without license for Dragon Ball Legends, and follow the tips and tricks above to improve your experience and avoid problems. We hope this article has helped you understand what you need to know about Dragon Ball Legends APK without license. If you have any questions or comments, feel free to leave them below. Thank you for reading!</p>
- <h2>FAQs</h2>
- <p><strong>Q: What is Dragon Ball Legends?</strong><br />A: Dragon Ball Legends is a 3D action RPG that lets you experience the epic battles and adventures of the Dragon Ball universe on your smartphone.</p>
- <p><strong>Q: What is an APK file?</strong><br />A: An APK file (Android Package Kit) is the file format used to distribute and install applications or games on Android devices.</p>
- <p><strong>Q: What does it mean to have an APK without license?</strong><br />A: It means you have downloaded and installed an APK file that does not have a license or verification from the original developer or publisher of the app or game.</p>
- <p><strong>Q: What are the pros and cons of using an APK without license?</strong><br />A: The pros include saving money, accessing more features, and bypassing regional restrictions. The cons include violating terms of service, exposing your device to malware, and losing data or progress.</p>
- <p><strong>Q: How to get an APK without license?</strong><br />A: There are many ways, such as searching for the APK file on a search engine, joining a community of fans or modders of the game, using a tool that can extract, modify, hack, or crack the APK file from the official version of the game, or using a VPN or proxy service that can change your IP address and location.</p>
- <h2>dragon ball legends apk sin licencia</h2><br /><p><b><b>Download Zip</b> &#10001; &#10001; &#10001; <a href="https://urlin.us/2uSZQX">https://urlin.us/2uSZQX</a></b></p><br /><br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Bleach VS Naruto 3.3 MOD APK for PC How to Install and Play.md DELETED
@@ -1,132 +0,0 @@
-
- <h1>Bleach vs Naruto 3.3 Mod: How to Download and Play on PC</h1>
- <p>If you are a fan of anime fighting games, you might have heard of <strong>Bleach vs Naruto 3.3 Mod</strong>, a popular flash game that features characters from two of the most famous shonen anime series, <em>Bleach</em> and <em>Naruto</em>. This game allows you to choose from over 40 heroes, each with their own unique style and technique, and battle against your friends or the computer in various modes and arenas.</p>
- <h2>bleach vs naruto 3.3 mod download apk pc</h2><br /><p><b><b>DOWNLOAD</b> &bull; <a href="https://jinyurl.com/2uNTnQ">https://jinyurl.com/2uNTnQ</a></b></p><br /><br />
- <p>While this game is originally designed for web browsers, you might wonder if you can play it on your PC as well. The answer is yes, you can! Playing Bleach vs Naruto 3.3 Mod on PC has many advantages, such as better graphics, smoother performance, a larger screen, and more comfortable controls. In this article, we will show you how to download and play Bleach vs Naruto 3.3 Mod on PC using two different methods. We will also give you some tips and tricks for playing the game, as well as a comparison of Bleach and Naruto characters.</p>
- <h2>How to Download Bleach vs Naruto 3.3 Mod on PC</h2>
- <p>There are two ways you can download and play Bleach vs Naruto 3.3 Mod on PC: using an emulator or using an APK file. An emulator is software that mimics the Android operating system on your PC, allowing you to run Android apps and games. An APK file is a package file that contains all the data and code of an Android app or game. Here are the steps for each option:</p>
- <h3>Option 1: Using Bluestacks emulator</h3>
- <p>Bluestacks is one of the most popular and reliable Android emulators for PC. It lets you access the Google Play Store and download, install, and play Android apps and games on your PC. Here is how to use Bluestacks to play Bleach vs Naruto 3.3 Mod on PC:</p>
- <ol>
- <li><strong>Download and install Bluestacks on your PC</strong>. You can download Bluestacks from <a href="">its official website</a>. There is no install process, but you need to have Java Runtime Environment 1.5.0 installed on your PC. If you don't have it, you will be prompted to download it when you run Bluestacks.</li>
- <li><strong>Log in to your Google account and open Google Play Store</strong>. You will need a Google account to access the Google Play Store. If you don't have one, you can create one for free. Once you log in, you will see the Google Play Store icon on the Bluestacks home screen. Click on it to open it.</li>
- <li><strong>Search for Bleach vs Naruto 3.3 Mod and install it</strong>. In the Google Play Store, type "Bleach vs Naruto 3.3 Mod" in the search bar and hit enter. You will see a list of results, including the game you are looking for. Click on the game and then click on the "Install" button. The game will be downloaded and installed on your PC.</li>
- <li><strong>Launch the game and enjoy</strong>. After the installation is complete, you will see the game icon on the Bluestacks home screen. Click on it to launch the game. You can use your mouse and keyboard to control the game, or you can customize the settings to use a gamepad or a touchscreen if you have one.</li>
- </ol>
- <h3>Option 2: Using APK file</h3>
- <p>If you don't want to use an emulator, you can also download and play Bleach vs Naruto 3.3 Mod on PC using an APK file. An APK file is a package file that contains all the data and code of an Android app or game. You can download an APK file from various sources online, but make sure you choose a reliable and safe one. Here is how to use an APK file to play Bleach vs Naruto 3.3 Mod on PC:</p>
- <p>bleach vs naruto 3.3 mod apk pc free download<br />
- how to install bleach vs naruto 3.3 mod on pc<br />
- bleach vs naruto 3.3 mod 200+ characters pc download<br />
- bleach vs naruto 3.3 mod pc gameplay<br />
- bleach vs naruto 3.3 mod pack for pc<br />
- bleach vs naruto 3.3 mod android apk download for pc<br />
- bleach vs naruto 3.3 mod tutorial pc<br />
- bleach vs naruto 3.3 mod latest version pc download<br />
- bleach vs naruto 3.3 mod pc online<br />
- bleach vs naruto 3.3 mod pc cheats<br />
- bleach vs naruto 3.3 mod pc system requirements<br />
- bleach vs naruto 3.3 mod pc controller support<br />
- bleach vs naruto 3.3 mod pc best characters<br />
- bleach vs naruto 3.3 mod pc update<br />
- bleach vs naruto 3.3 mod pc review<br />
- bleach vs naruto 3.3 mod pc full screen<br />
- bleach vs naruto 3.3 mod pc multiplayer<br />
- bleach vs naruto 3.3 mod pc all characters unlocked<br />
- bleach vs naruto 3.3 mod pc no virus<br />
- bleach vs naruto 3.3 mod pc mega link<br />
- bleach vs naruto 3.3 mod pc mediafire link<br />
- bleach vs naruto 3.3 mod pc google drive link<br />
- bleach vs naruto 3.3 mod pc zip file download<br />
- bleach vs naruto 3.3 mod pc rar file download<br />
- bleach vs naruto 3.3 mod pc exe file download<br />
- bleach vs naruto 3.3 mod pc iso file download<br />
- bleach vs naruto 3.3 mod pc emulator download<br />
- bleach vs naruto 3.3 mod for windows pc download<br />
- bleach vs naruto 3.3 mod for mac pc download<br />
- bleach vs naruto 3.3 mod for linux pc download<br />
- bleach vs naruto 2d fighting game for pc with mods download<br />
- how to play bleach vs naruto on pc with mods download<br />
- how to get more characters in bleach vs naruto on pc with mods download<br />
- how to unlock all characters in bleach vs naruto on pc with mods download<br />
- how to change controls in bleach vs naruto on pc with mods download<br />
- how to fix lag in bleach vs naruto on pc with mods download<br />
- how to customize characters in bleach vs naruto on pc with mods download<br />
- how to add music in bleach vs naruto on pc with mods download<br />
- how to change language in bleach vs naruto on pc with mods download<br />
- how to save progress in bleach vs naruto on pc with mods download<br />
- how to load game in bleach vs naruto on pc with mods download<br />
- how to delete game in bleach vs naruto on pc with mods download<br />
- how to reset game in bleach vs naruto on pc with mods download<br />
- how to make your own character in bleach vs naruto on pc with mods download<br />
- how to make your own stage in bleach vs naruto on pc with mods download<br />
- how to make your own music in bleach vs naruto on pc with mods download<br />
- how to make your own story mode in bleach vs naruto on pc with mods download<br />
- how to make your own arcade mode in bleach vs naruto on pc with mods download</p>
- <ol>
- <li><strong>Download the APK file from a reliable source</strong>. You can search for "Bleach vs Naruto 3.3 Mod APK" on Google or any other search engine and find many websites that offer the APK file for download. However, not all of them are trustworthy and some may contain viruses or malware that can harm your PC. Therefore, we recommend you to download the APK file from <a href="">this website</a>, which is verified and safe (see the checksum sketch after these steps for one way to double-check a download).</li>
- <li><strong>Download and install an APK installer on your PC</strong>. An APK installer is software that allows you to install and run APK files on your PC. There are many APK installers available online, but we suggest you to use <a href="">APK Installer for PC</a>, which is free and easy to use. You can download it from <a href="">its official website</a> and follow the instructions to install it on your PC.</li>
- <li><strong>Run the APK installer and select the APK file</strong>. After installing the APK installer, run it and click on the "Choose" button. Then, browse your PC and select the APK file that you downloaded in step 1. The APK installer will scan the file and show you some information about it, such as its name, size, version, and permissions.</li>
- <li><strong>Install the game and launch it</strong>. Click on the "Install" button to start installing the game on your PC. The installation process may take a few minutes depending on your PC's speed and performance. Once the installation is done, click on the "Open" button to launch the game. You can also find the game icon on your desktop or in your start menu.</li>
- </ol>
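To go with step 1 above, here is a minimal integrity check of our own (not from the original article): compare a downloaded APK's SHA-256 hash against the checksum the download site publishes, if it publishes one. The file name and expected hash below are placeholders.

```python
# Hedged sketch: verify a downloaded APK before installing it.
import hashlib

def sha256_of(path: str) -> str:
    """Return the hex SHA-256 digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0123abcd..."  # placeholder: checksum published by the download page
actual = sha256_of("bleach-vs-naruto-3.3-mod.apk")  # illustrative filename
print("OK" if actual == expected else f"Mismatch: {actual}")
```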
- <h2>How to Play Bleach vs Naruto 3.3 Mod on PC</h2>
- <p>Now that you have downloaded and installed Bleach vs Naruto 3.3 Mod on your PC, you are ready to play it. But before you jump into action, here are some tips and tricks for playing the game that will help you improve your skills and enjoy it more.</p>
- <h3>Tips and tricks for playing the game</h3>
- <p>Bleach vs Naruto 3.3 Mod is a fun and challenging fighting game that requires quick reflexes, strategic thinking, and mastery of different skills and combos. Here are some tips and tricks for playing the game:</p>
- <ul>
- <li><strong>How to choose and customize your character</strong>. When you start the game, you will see a screen with over 40 characters from Bleach and Naruto that you can choose from. You can use the arrow keys or the mouse to scroll through them and press Enter or click to select one. You can also press C or right-click to customize your character's appearance, such as their hair color, outfit, accessories, etc.</li>
- <li><strong>How to use different skills and combos</strong>. Each character has their own set of skills and combos that they can use in battle. You can see them by pressing O or clicking on the "Skill List" button at the bottom of the screen, and practice them by pressing P or clicking on the "Practice Mode" button. The basic controls (summarized in the sketch after this list) are as follows:<br />
- - W, A, S, D or arrow keys: move your character<br />
- - J or Z: attack<br />
- - K or X: jump<br />
- - L or C: guard<br />
- - U or A: use skill 1<br />
- - I or S: use skill 2<br />
- - O or D: use skill 3<br />
- - P or F: use skill 4<br />
- - J + K or Z + X: use special attack<br />
- - W + J or up arrow + Z: use dash attack<br />
- - S + J or down arrow + Z: use sweep attack<br />
- - W + U or up arrow + A: use transformation (if available)<br />
- - W + I or up arrow + S: use awakening (if available)<br />
- You can also combine different keys to create more combos and variations. For example, you can press W + J + K or up arrow + Z + X to use a special dash attack, or S + J + U or down arrow + Z + A to use a special sweep attack.</li>
- <li><strong>How to unlock new characters and modes</strong>. As you play the game, you will earn points that you can use to unlock new characters and modes. You can see your points at the top right corner of the screen, and the requirements for unlocking each character and mode by pressing Q or clicking on the "Unlock List" button at the bottom of the screen. Some of the characters and modes that you can unlock are:<br />
- - Sasuke (Naruto): 540 points<br />
- - Ichigo (Bleach): 580 points<br />
- - Naruto (Sage Mode): 620 points<br />
- - Rukia (Bleach): 660 points<br />
- - Naruto (Kyuubi Mode): 700 points<br />
- - Aizen (Bleach): 740 points<br />
- - Madara (Naruto): 780 points<br />
- - Ichigo (Final Getsuga Tenshou): 820 points<br />
- - Survival Mode: 860 points<br />
- - VS Mode: 900 points</li>
- </ul>
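For quick reference, the default bindings from the list above can be collected into a plain mapping, for example to feed an emulator's key-remapping tool. This sketch of ours just restates the article's controls; it is not code the game itself uses.

```python
# The article's default keyboard bindings, gathered into one dict.
CONTROLS = {
    "move":           "W/A/S/D or arrow keys",
    "attack":         "J or Z",
    "jump":           "K or X",
    "guard":          "L or C",
    "skill 1":        "U or A",
    "skill 2":        "I or S",
    "skill 3":        "O or D",
    "skill 4":        "P or F",
    "special attack": "J+K or Z+X",
    "dash attack":    "W+J or up+Z",
    "sweep attack":   "S+J or down+Z",
    "transformation": "W+U or up+A",
    "awakening":      "W+I or up+S",
}

for action, keys in CONTROLS.items():
    print(f"{action:>14}: {keys}")
```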
- <h3>Comparison of Bleach and Naruto characters</h3>
- <p>Bleach vs Naruto 3.3 Mod is a game that brings together two of the most popular anime series, Bleach and Naruto. Both series have a large and diverse cast of characters, each with their own personality, backstory, and abilities. If you are curious about how these characters compare to each other, here is a table that shows some of their strengths and weaknesses, as well as their similarities and differences.</p>
- <table>
- <tr>
- <th>Character</th>
- <th>Strengths</th>
- <th>Weaknesses</th>
- <th>Similarities</th>
- <th>Differences</th>
- </tr>
- <tr>
- <td>Naruto</td>
- <td>- Has a powerful nine-tailed fox spirit inside him<br>- Can use various types of jutsu, such as shadow clones, rasengan, sage mode, etc.<br>- Has a strong will and determination to protect his friends and achieve his goals</td>
- <td>- Can be reckless and impulsive<br>- Can be easily angered and lose control of his fox spirit<br>- Can be naive and gullible</td>
- <td>- Both are orange-haired protagonists who aspire to become the strongest in their world<br>- Both have a rival who is more talented and has a dark past<br>- Both have a mentor who is eccentric and powerful</td>
- <td>- Naruto is a ninja who lives in a world where people use chakra to perform jutsu<br>- Naruto is an orphan who grew up without parents<br>- Naruto is cheerful and optimistic despite his hardships</td>
- </tr>
- <tr>
- <td>Ichigo</td>
- <td>- Has a high level of spiritual energy that allows him to see and fight spirits<br>- Can use various forms of power, such as shinigami, hollow, fullbring, etc.<br>- Has a strong sense of justice and responsibility to protect his family and friends</td>
- <td>- Can be stubborn and prideful<br>- Can be overconfident and underestimate his enemies<br>- Can be reluctant to accept help from others</td>
- <td>- Both are orange-haired protagonists who aspire to become the strongest in their world<br>- Both have a rival who is more talented and has a dark past<br>- Both have a mentor who is eccentric and powerful</td>
- <td>- Ichigo is a human who lives in a world where people have souls that can become shinigami or hollows<br>- Ichigo has a loving family who supports him<br>- Ichigo is cynical and sarcastic due to his experiences</td>
- </tr>
- <tr>
- <td>Sasuke</td>
- <td>- Has a rare bloodline trait that gives him the sharingan eye<br>- Can use various types of jutsu, such as fire, lightning, genjutsu, etc.<br>- Has a high intelligence and analytical skills</td>
- <td>- Can be cold and aloof<br>- Can be consumed by hatred and revenge<br>- Can be arrogant and dismissive of others</td>
- <td>- Both are dark-haired rivals of the protagonists who have a tragic past<br>- Both have a powerful eye technique that can manipulate reality<br>- Both have a brother who is influential and mysterious</td>
- <td>- Sasuke is a ninja who lives in a world where people use chakra to perform jutsu<br>- Sasuke's clan was massacred by his brother when he was young<br>- Sasuke is ambitious and driven to surpass his brother</td>
- </tr>
- <tr>
- <td>Aizen</td>
- <td>- Has a genius-level intellect and a vast knowledge of spiritual matters<br>- Can use various forms of power, such as shinigami, hollow, hogyoku, etc.<br>- Has a masterful control of his spiritual energy and his zanpakuto</td>
- <td>- Can be manipulative and deceptive<br>- Can be overconfident and underestimate his enemies<br>- Can be cruel and ruthless</td>
- <td>- Both are dark-haired rivals of the protagonists who have a tragic past<br>- Both have a powerful eye technique that can manipulate reality<br>- Both have a brother who is influential and mysterious</td>
- <td>- Aizen is a shinigami who lives in a world where people have souls that can become shinigami or hollows<br>- Aizen's past is shrouded in mystery and he has no known family<br>- Aizen is treacherous and scheming to overthrow the Soul Society</td>
- </tr>
- </table>
- <h2>Conclusion</h2>
- <p>Bleach vs Naruto 3.3 Mod is an amazing game that lets you enjoy the best of both anime worlds. You can choose from over 40 characters from Bleach and Naruto, each with their own skills and combos, and fight against your friends or the computer in various modes and arenas. You can also download and play the game on your PC using an emulator or an APK file, which will give you better graphics, smoother performance, a larger screen, and more comfortable controls. If you are looking for a fun and challenging fighting game that features your favorite anime characters, you should definitely try Bleach vs Naruto 3.3 Mod on PC.</p>
- <h2>FAQs</h2>
- <p>Here are some of the most frequently asked questions about Bleach vs Naruto 3.3 Mod on PC:</p>
- <ul>
- <li><strong>What are the system requirements for playing Bleach vs Naruto 3.3 Mod on PC?</strong><br />The system requirements are not very high, but they may vary depending on the method you use to download and play the game. Generally, you will need at least:<br />
- - Windows XP/Vista/7/8/10<br />
- - 2 GB of RAM<br />
- - 4 GB of free disk space<br />
- - A graphics card that supports OpenGL 2.0 or higher<br />
- - A stable internet connection (for online mode)</li>
- <li><strong>How many characters are there in Bleach vs Naruto 3.3 Mod?</strong><br />There are over 40 characters in Bleach vs Naruto 3.3 Mod, including main characters, supporting characters, villains, and guest characters from other anime series. You can see the full list of characters by pressing Q or clicking on the "Unlock List" button at the bottom of the screen.</li>
- <li><strong>Can I play Bleach vs Naruto 3.3 Mod online with other players?</strong><br />Yes, you can play online with other players if you have a stable internet connection. Choose the "Online Mode" option at the main menu and then select either "Quick Match" or "Create Room". You can also join an existing room by entering its code or browsing the available rooms.</li>
- <li><strong>What are the best characters to use in Bleach vs Naruto 3.3 Mod?</strong><br />The best characters depend on your personal preference and play style. However, some of the most popular and powerful characters are:<br />
- - Naruto (Kyuubi Mode): high speed, attack, and defense, plus a powerful transformation that grants more skills and combos<br />
- - Ichigo (Final Getsuga Tenshou): high speed, attack, and defense, plus a powerful transformation that grants more skills and combos<br />
- - Aizen: high speed, attack, and defense, plus a powerful zanpakuto that can create illusions and manipulate reality<br />
- - Madara: high speed, attack, and defense, plus a powerful sharingan that can use various jutsu and summon meteors</li>
- <li><strong>Is Bleach vs Naruto 3.3 Mod safe to download and play?</strong><br />Yes, it is safe to download and play if you follow the steps and sources that we have provided in this article. However, you should always be careful when downloading and installing any software or file from the internet, as some of them may contain viruses or malware that can harm your PC. Therefore, we recommend you to use a trusted antivirus program and scan any file before opening it.</li>
- </ul>
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download CPU-Z APK for PC The Best System Information Software.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>Download CPU Z Apk for PC: A Guide to Monitor Your Hardware</h1>
3
- <p>If you want to know more about the hardware components of your PC, such as your processor, motherboard, RAM, and graphics card, then you might want to download CPU Z apk for PC. CPU Z is a freeware system information utility that can show you detailed data about your hardware and help you test your system's performance and stability. In this article, we will explain what CPU Z is, how to download and install it on your PC, how to use it to monitor your hardware, what are the benefits of using it, and what are some alternatives to it.</p>
4
- <h2>download cpu z apk for pc</h2><br /><p><b><b>Download</b> &rarr; <a href="https://jinyurl.com/2uNPIw">https://jinyurl.com/2uNPIw</a></b></p><br /><br />
5
- <h2>What is CPU Z and why do you need it?</h2>
6
- <p>CPU Z is a freeware system information utility that was developed by the French company CPUID. It was originally designed for overclockers who wanted to check their CPU frequencies and voltages, but it has since evolved into a comprehensive tool that can provide information on various aspects of your hardware.</p>
7
- <h3>CPU Z is a freeware system information utility</h3>
8
- <p>CPU Z can gather information on some of the main devices of your system, such as:</p>
9
- <ul>
10
- <li>Processor name and number, codename, process, package, cache levels</li>
11
- <li>Mainboard and chipset</li>
12
- <li>Memory type, size, timings, and module specifications (SPD)</li>
13
- <li>Graphics card model, lithography, specific GPU chip equipped, memory frequency, memory technology, and bus width</li>
14
- </ul>
15
- <p>CPU Z can also measure the real-time frequency of each core of your processor and the memory frequency.</p>
16
- <h3>CPU Z can show you detailed data about your processor, motherboard, RAM, and graphics card</h3>
17
- <p>CPU Z can help you understand the specifications and capabilities of your hardware components. For example, you can find out:</p>
18
- <ul>
19
- <li>The model, voltage, lithography, frequency, cache memory, and socket of your processor</li>
20
- <li>The exact model, the PCI-Express technology supported, the chipset used, and the BIOS version of your motherboard</li>
21
- <li>The frequency, latencies, XMP profile, voltage, and manufacturer of your RAM modules</li>
22
- <li>The graphics card model, lithography, specific GPU chip equipped, memory frequency , memory technology, and bus width of your graphics card</li>
23
- </ul>
24
- <p>CPU Z can also show you the logo and the codename of your processor and graphics card, as well as the manufacturer and the model of your motherboard.</p>
25
- <h3>CPU Z can help you test your system's performance and stability</h3>
26
- <p>CPU Z can also help you measure and compare the performance of your hardware components. For example, you can use CPU Z to:</p>
- <ul>
- <li>Run a benchmark test to evaluate the single-thread and multi-thread performance of your processor</li>
- <li>Run a stress test to check the stability and the temperature of your processor under heavy load</li>
- <li>Run a validation test to create an online report of your hardware data and share it with others</li>
- </ul>
- <p>CPU Z can also detect any overclocking or underclocking of your processor and graphics card, and show you the current and the maximum frequency of each core.</p>
- <h2>How to download and install CPU Z apk for PC?</h2>
- <p>If you want to use CPU Z on your PC, you will need to download and install an Android emulator first. An Android emulator is software that creates a virtual Android environment on your PC, allowing you to run Android apps and games on your computer. There are many Android emulators available for PC, such as BlueStacks, NoxPlayer, LDPlayer, and MEmu. You can choose the one that suits your preferences and system requirements.</p>
- <h3>You can download CPU Z apk from various sources online</h3>
- <p>Once you have installed an Android emulator on your PC, you will need to download the CPU Z apk file. An apk file is a package file that contains the installation files and data of an Android app. You can download CPU Z apk from various sources online, such as APKPure, APKMirror, or Uptodown. Make sure you download the latest version of CPU Z apk from a reliable and safe source.</p>
- <h3>You need an Android emulator to run CPU Z apk on your PC</h3>
- <p>After downloading the CPU Z apk file, you will need to run it on your Android emulator. Depending on the emulator you are using, there are different ways to do this. For example, you can:</p>
- <ul>
- <li>Drag and drop the CPU Z apk file onto the emulator's window</li>
- <li>Double-click on the CPU Z apk file and choose the emulator as the default program to open it</li>
- <li>Right-click on the CPU Z apk file, select "Open with", and choose the emulator from the list</li>
- <li>Browse to the CPU Z apk file from the emulator's file manager or app installer</li>
- </ul>
- <p>The emulator will then install CPU Z apk on your PC and create a shortcut icon on your desktop or home screen.</p>
- <h3>You can follow these steps to install and use CPU Z apk on your PC</h3>
- <p>To summarize, you can follow these steps to download and install CPU Z apk for PC:</p>
- <ol>
- <li>Download and install an Android emulator on your PC, such as BlueStacks, NoxPlayer, LDPlayer, or MEmu.</li>
- <li>Download the CPU Z apk file from a reliable and safe source online, such as APKPure, APKMirror, or Uptodown.</li>
- <li>Run the CPU Z apk file on your Android emulator using one of the methods mentioned above.</li>
- <li>Wait for the emulator to install CPU Z apk on your PC and create a shortcut icon on your desktop or home screen.</li>
- <li>Launch CPU Z from the emulator and enjoy monitoring your hardware.</li>
- </ol>
- <h2>How to use CPU Z to monitor your hardware?</h2>
- <p>Once you have installed CPU Z on your PC, you can use it to monitor your hardware and test your system's performance and stability. CPU Z has a simple and user-friendly interface that consists of several tabs that display different information about your hardware. You can also access some features and options from the menu bar or the toolbar.</p>
- <h3>You can use the tabs in CPU Z to view different information about your hardware</h3>
- <p>CPU Z has six main tabs that show you various data about your hardware components. These tabs are:</p>
- <ul>
- <li>CPU: This tab shows you information about your processor, such as the name, model, codename, package, lithography, frequency, voltage, cache memory, and socket. You can also see the logo and the codename of your processor at the top of the tab.</li>
- <li>Cache: This tab shows you information about the cache memory of your processor, such as the size, type, latency, and organization of each level of cache. You can also see a diagram of the cache hierarchy at the bottom of the tab.</li>
- <li>Mainboard: This tab shows you information about your motherboard and BIOS, such as the manufacturer, model, chipset, PCI-Express technology supported, BIOS version, and date. You can also see the logo and the model of your motherboard at the top of the tab.</li>
- <li>Memory: This tab shows you information about your RAM modules, such as the type, size, frequency, timings, voltage, and XMP profile. You can also see a graph of the memory frequency and usage at the bottom of the tab.</li>
- <li>SPD: This tab shows you information about the Serial Presence Detect (SPD) data of each RAM module installed on your system. SPD is a standard that allows memory modules to communicate their specifications and capabilities to the motherboard. You can see information such as the manufacturer, part number, serial number, module size, maximum bandwidth, and timings table. You can also see a picture of each RAM module at the top of the tab.</li>
- <li>Graphics: This tab shows you information about your graphics card or integrated graphics processor (IGP), such as the model, lithography, specific GPU chip equipped, memory frequency, memory technology, and bus width. You can also see the logo and the codename of your graphics card or IGP at the top of the tab.</li>
- </ul>
- <h3>You can use the benchmark and stress test features to measure your CPU's performance</h3>
- <p>CPU Z can also help you evaluate and compare the performance of your processor using the benchmark and stress test features, which you can access from the menu bar or the toolbar. The benchmark feature runs a single-thread or multi-thread test that measures the processing speed of your CPU. The stress test feature puts a heavy load on your CPU for a specified duration so you can monitor its temperature and stability. You can also compare your results with other processors online or offline.</p>
- <h3>You can use the validation feature to create an online report of your hardware data</h3>
- <p>CPU Z can also help you create and share an online report of your hardware data using the validation feature, which you can access from the menu bar or the toolbar. The validation feature submits your hardware data to the CPUID website and generates a unique URL that you can share with others. You can also view and download your validation report as a PDF file, browse validation reports from other users, and compare your hardware with theirs.</p>
- <h2>What are the benefits of using CPU Z?</h2>
- <p>CPU Z is a useful tool for anyone who wants to monitor their hardware and improve their system performance. Some of the benefits of using CPU Z are:</p>
- <h3>CPU Z can help you optimize your system settings and performance</h3>
- <p>CPU Z can help you optimize your system settings and performance by providing you with accurate and detailed information about your hardware components. You can use this information to adjust your BIOS settings, overclock or underclock your processor and graphics card, enable or disable certain features, and update your drivers. You can also use CPU Z to test and verify the effects of your changes on your system's performance and stability.</p>
- <h3>CPU Z can help you troubleshoot any hardware issues or errors</h3>
- <p>CPU Z can help you troubleshoot hardware issues or errors by providing you with real-time data about your hardware components. You can use this data to diagnose problems or anomalies in your system, such as high temperature, low voltage, incorrect frequency, or faulty memory. You can also use CPU Z to check whether your hardware components are compatible with each other and with your system requirements.</p>
- <h3>CPU Z can help you compare your hardware with other systems and devices</h3>
- <p>CPU Z can help you compare your hardware with other systems and devices through its benchmark and validation features. You can use these features to measure and compare the performance of your processor with other processors online or offline, and to create and share an online report of your hardware data. You can also browse validation reports from other users and compare your hardware with theirs.</p>
- <h2>What are some alternatives to CPU Z?</h2>
- <p>There are many other system information utilities and benchmark tools available for PC that can provide you with similar or different information and options than CPU Z. Some of the most popular alternatives to CPU Z are:</p>
- <h3>HWiNFO</h3>
- <p>HWiNFO is a freeware system information utility that can provide you with comprehensive information about your hardware components, such as your processor, motherboard, memory, disk drives, network adapters, and sensors. It can also monitor your system's health, performance, and power consumption in real time, and generate reports, logs, graphs, and alerts.</p>
- <h3>Speccy</h3>
- <p>Speccy is a freeware system information utility that can provide you with concise information about your hardware components, such as your processor, motherboard, memory, graphics card, storage devices, optical drives, audio devices, and network devices. It can also show you the temperature of each component and the operating system details, and generate reports in various formats.</p>
- <h3>AIDA64</h3>
- <p>AIDA64 is a shareware system information utility that can provide you with detailed information about your hardware components, such as your processor, motherboard, memory, graphics card, storage devices, network devices, and sensors. It can also monitor your system's health, performance, and power consumption in real time, run various tests and benchmarks to measure your system's capabilities and stability, and generate reports in various formats.</p>
- <h3>GPU-Z</h3>
- <p>GPU-Z is a freeware system information utility that can provide you with specific information about your graphics card or integrated graphics processor (IGP), such as the model, lithography, specific GPU chip equipped, memory frequency, memory technology, and bus width. It can also monitor your graphics card's temperature, fan speed, voltage, and load, and run a render test to verify the PCI-Express lane configuration.</p>
- <h2>Conclusion</h2>
- <p>CPU Z is a useful tool for anyone who wants to monitor their hardware and improve their system performance. You can download and install CPU Z apk for PC using an Android emulator by following the instructions in this guide. You can also check out other similar tools that can provide you with more information and options.</p>
- <h2>FAQs</h2>
- <h3>Is CPU Z safe to use?</h3>
- <p>CPU Z is safe to use as long as you download it from a reliable and safe source online. CPU Z does not contain any malware or spyware and does not modify any system files or settings. However, you should always be careful when downloading and installing any software on your PC, and scan it with an antivirus program before running it.</p>
- <h3>Does CPU Z support Windows 10?</h3>
- <p>CPU Z supports Windows 10 as well as Windows XP, Vista, 7, 8, and 8.1, in both 32-bit and 64-bit versions. However, some features of CPU Z may not work properly on some versions of Windows or some hardware configurations.</p>
- <h3>Can CPU Z damage my hardware?</h3>
- <p>CPU Z does not damage your hardware by itself. CPU Z only reads and displays information about your hardware components and does not change or modify any of them. However, if you use CPU Z to overclock or underclock your processor or graphics card, you may risk damaging your hardware if you do not know what you are doing or if you exceed the safe limits of your hardware.</p>
- <h3>How accurate is CPU Z?</h3>
- <p>CPU Z is generally accurate and reliable in providing information about your hardware components. However, CPU Z may not be able to detect or display some information correctly, depending on the type and model of your hardware components or the version of your BIOS or drivers. CPU Z may also show some discrepancies in some cases due to rounding or measurement errors.</p>
- <h3>How often is CPU Z updated?</h3>
- <p>CPU Z is updated regularly by the developers to support new hardware components and fix bugs. You can check the latest version of CPU Z on the official CPUID website or on the sources where you downloaded it from. You can also enable the automatic update feature in CPU Z to get notified when a new version is available.</p>
spaces/1phancelerku/anime-remove-background/Enjoy Adam Na Eva by Pst Alex and Mary Atieno Ominde - MP3 Download Available.md DELETED
@@ -1,212 +0,0 @@
- <h1>Adam Na Eva ft 4 Na 5: A Zambian Music Hit</h1>
- <p>If you are a fan of Zambian music, you have probably heard of the song "Adam Na Eva" by Adam Na Eva featuring 4 Na 5. This catchy tune has been making waves in the local music scene since its release in March 2023. It is a fusion of kalindula, a traditional Zambian genre, and afrobeat, a modern African genre. The song showcases the talents and creativity of four young Zambian artists who have collaborated to produce a hit that appeals to a wide audience.</p>
- <h2>Who are Adam Na Eva?</h2>
- <p>Adam Na Eva are a Zambian duo composed of Adam Mwale and Eva Chanda. They met in 2020 at a talent show in Lusaka, where they discovered their mutual passion for music. They decided to form a group and named themselves after the biblical characters Adam and Eve. Their musical style is influenced by various genres such as gospel, reggae, dancehall, and hip hop. They have released several singles such as "Nimwe", "Mwana Wanga", and "Ndiwe". They have also performed at various events and festivals across Zambia.</p>
- <h2>adam na eva ft 4 na 5 mp3 download</h2><br /><p><b><b>DOWNLOAD</b> &#10027; <a href="https://jinyurl.com/2uNTCa">https://jinyurl.com/2uNTCa</a></b></p><br /><br />
- <h2>Who are 4 Na 5?</h2>
- <p>4 Na 5 are another Zambian duo composed of Y-Celeb and Chanda Na Kay. They also met in 2020, at a studio in Ndola where they were recording their solo projects. They realized that they had a similar vibe and decided to work together as a team. They named themselves 4 Na 5 because they believe that they are more than just two people; they are a movement that represents the youth and their aspirations. Their musical style is mainly afrobeat with elements of rap and comedy. They have released several singles such as "Iyee", "Aboloka", and "Ka Boom". They have also gained popularity on social media platforms such as YouTube and TikTok.</p>
- <h2>How did the collaboration happen?</h2>
- <p>The collaboration between Adam Na Eva and 4 Na 5 happened by chance. According to an interview with Zed Music Blog, the four artists bumped into each other at a radio station in Lusaka, where they were invited for separate interviews. They got along well and decided to exchange contacts. Later, they met at a studio and played some beats for each other. They liked one of the beats produced by 4 Na 5 and decided to record a song over it. They wrote the lyrics on the spot and recorded the vocals in one take. They named the song "Adam Na Eva" as a tribute to their group names and their friendship.</p>
- <h2>What is the song about?</h2>
- <p>The song "Adam Na Eva" is a love song that expresses the feelings of two couples who are deeply in love. The chorus goes like this:</p>
- <blockquote>
- <p>Adam na Eva, we are meant to be<br>
- You are my rib, I am your tree<br>
- Adam na Eva, we are one in love<br>
- You are my queen, I am your dove</p>
- </blockquote>
- <p>The verses describe the qualities and actions that make the couples happy and loyal to each other. For example, Adam Na Eva sing:</p>
- <blockquote>
- <p>You make me smile when I am sad<br>
- You make me calm when I am mad<br>
- You make me strong when I am weak<br>
- You make me whole when I am bleak</p>
- </blockquote>
- <p>And 4 Na 5 sing:</p>
- <blockquote>
- <p>I will never cheat on you<br>
- I will never lie to you<br>
- I will never hurt you<br>
- I will never leave you</p>
- </blockquote>
- <p>The song also uses some metaphors and references to Zambian culture and history. For example, Adam Na Eva compare their love to the Zambezi River, which is the longest river in Zambia and a symbol of life and prosperity. They also mention the names of some famous Zambian leaders and heroes, such as Kenneth Kaunda, Levy Mwanawasa, and Simon Mwansa Kapwepwe, to show their respect and admiration.</p>
- <h2>How has the song been received?</h2>
- <p>The song "Adam Na Eva" has been received very well by Zambian music fans and critics. It has become one of the most popular songs in the country, topping the charts on platforms such as Mvesesani, ZedBeats, and Zambian Music Blog. It has also received millions of views and likes on YouTube and TikTok, where many users have created videos dancing and lip-syncing to the song. It has been played on several radio stations and TV channels, such as ZNBC, QFM, and Diamond TV. The song has also received positive feedback from other Zambian artists and celebrities, such as Macky 2, Chef 187, Slapdee, and Pompi, who have praised it for its quality, originality, and message.</p>
- <h2>What are some of the challenges and opportunities for Zambian music?</h2>
- <p>The success of the song "Adam Na Eva" reflects the growth and potential of the Zambian music industry, which has been producing more diverse and innovative music in recent years. However, there are also some challenges and opportunities that face the industry and its artists. Some of the challenges include:</p>
- <ul>
- <li>Lack of adequate funding and support from the government and private sector, which limits access to quality equipment, studios, marketing, and distribution.</li>
- <li>Lack of adequate recognition and appreciation from local and international audiences, which affects the motivation and income of the artists.</li>
- <li>Lack of adequate regulation and protection of intellectual property rights, which exposes the artists to piracy and plagiarism.</li>
- </ul>
- <p>Some of the opportunities include:</p>
- <ul>
- <li>Increased use of digital platforms and social media, which enables the artists to reach more fans and markets, and to interact with them directly.</li>
- <li>Increased collaboration and networking among the artists, which fosters creativity, innovation, and unity.</li>
- <li>Increased exposure and influence of Zambian music in the regional and global scene, which enhances the reputation and visibility of the industry and its artists.</li>
- </ul>
- <h1>How to download Adam Na Eva ft 4 Na 5 mp3</h1>
- <p>Now that you know more about the song "Adam Na Eva" and its artists, you might be wondering how to download it as an mp3 file. There are different ways to do this, depending on your preferences and resources. However, before you proceed, you should be aware of some legal and ethical issues involved in downloading music. Downloading music without paying for it or without the permission of the artists or the owners of the rights is considered piracy, which is illegal and punishable by law. It also deprives the artists of their deserved income and recognition, which can affect their livelihood and career. Therefore, you should always respect the rights of the artists and support them by buying their music from official sources or streaming it from licensed platforms.</p>
- <h2>Buying music from official sources</h2>
- <p>One of the best ways to download the song "Adam Na Eva" as an mp3 file is to buy it from official sources, such as iTunes or Google Play Music. These are online stores that sell digital music legally and securely. You can access them from your computer or mobile device, and pay with your credit card or other methods. The advantages of buying music from official sources are:</p>
- <ul>
- <li>You get high-quality mp3 files that are free of viruses, malware, or errors.</li>
- <li>You support the artists and the music industry by paying for their work.</li>
- <li>You can access your purchased music anytime and anywhere, and sync it with your devices.</li>
- </ul>
- <p>The disadvantages of buying music from official sources are:</p>
- <ul>
- <li>You have to pay a certain amount of money for each song or album, which can be expensive if you want to download many songs.</li>
- <li>You have to create an account and provide your personal and financial information, which can be risky if the site is hacked or compromised.</li>
- <li>You have to abide by the terms and conditions of the site, which may limit your rights to use, share, or modify the music.</li>
- </ul>
- <p>To buy the song "Adam Na Eva" from iTunes, follow these steps:</p>
- <ol>
- <li>Open iTunes on your computer or mobile device, or download it from <a href="https://www.apple.com/itunes/">https://www.apple.com/itunes/</a> if you don't have it.</li>
- <li>Create an Apple ID or sign in with your existing one.</li>
- <li>Search for "Adam Na Eva ft 4 Na 5" in the search bar.</li>
- <li>Select the song from the results and click on the price button.</li>
- <li>Confirm your purchase and enter your payment details.</li>
- <li>Wait for the download to finish and enjoy your music.</li>
- </ol>
- <p>The price of the song on iTunes is $0.99 USD as of June 2023.</p>
- <p>To buy the song "Adam Na Eva" from Google Play Music, follow these steps:</p>
- <ol>
- <li>Open Google Play Music on your computer or mobile device, or download it from <a href="https://play.google.com/music/">https://play.google.com/music/</a> if you don't have it.</li>
- <li>Create a Google account or sign in with your existing one.</li>
- <li>Search for "Adam Na Eva ft 4 Na 5" in the search bar.</li>
- <li>Select the song from the results and click on the price button.</li>
- <li>Confirm your purchase and enter your payment details.</li>
- <li>Wait for the download to finish and enjoy your music.</li>
- </ol>
- <p>The price of the song on Google Play Music is $0.99 USD as of June 2023.</p>
- <h2>Downloading music from YouTube</h2>
- <p>Another way to download the song "Adam Na Eva" as an mp3 file is to convert YouTube videos into mp3 files. YouTube is a popular online platform that hosts millions of videos, including music videos. You can access YouTube from your computer or mobile device and watch videos for free. However, YouTube does not allow you to download videos or audio directly from its site, for legal and ethical reasons. Therefore, you need to use third-party tools or software that can extract the audio from YouTube videos and save it as mp3 files. The advantages of downloading music from YouTube are:</p>
- <ul>
- <li>You can find almost any song or video on YouTube, including rare or old ones.</li>
- <li>You can choose the quality and size of the mp3 file, depending on your preferences and storage space.</li>
- <li>You can download music for free, without paying any fees or subscriptions.</li>
- </ul>
- <p>The disadvantages of downloading music from YouTube are:</p>
- <ul>
- <li>You may violate the rights of the artists and the owners of the videos, who may not have given their consent for their content to be downloaded or used in other ways.</li>
- <li>You may expose your device to viruses, malware, or spyware if you use unreliable or malicious tools or software.</li>
- <li>You may experience slow or interrupted downloads if you have a poor internet connection or if the site is overloaded.</li>
- </ul>
- <p>To download the song "Adam Na Eva" from YouTube, follow these steps:</p>
- <ol>
- <li>Open YouTube on your computer or mobile device, or go to <a href="https://www.youtube.com/">https://www.youtube.com/</a>.</li>
- <li>Search for "Adam Na Eva ft 4 Na 5" in the search bar.</li>
- <li>Select the video that matches the song from the results and copy its URL.</li>
- <li>Open a new tab and go to one of the online tools that can convert YouTube videos into mp3 files, such as <a href="https://ytmp3.cc/">https://ytmp3.cc/</a>, <a href="https://www.onlinevideoconverter.com/mp3-converter">https://www.onlinevideoconverter.com/mp3-converter</a>, or <a href="https://www.flvto.biz/">https://www.flvto.biz/</a>.</li>
- <li>Paste the URL of the video into the box and click on the convert button.</li>
- <li>Wait for the conversion to finish and click on the download button.</li>
- <li>Save the mp3 file to your device and enjoy your music.</li>
- </ol>
- <h2>Downloading music from other websites</h2>
- <p>A third way to download the song "Adam Na Eva" as an mp3 file is to find and download free mp3 files from websites that offer royalty-free or Creative Commons music. Royalty-free music is music that you can use for personal or commercial purposes without paying any royalties or fees to the artists or the owners of the rights. Creative Commons music is music that the artists have licensed under certain conditions, such as attribution, non-commercial use, or share-alike. You can access these websites from your computer or mobile device, and download music for free or for a small donation. The advantages of downloading music from other websites are:</p>
- <ul>
- <li>You can discover new and diverse music from different genres, cultures, and countries.</li>
- <li>You can support independent and emerging artists who share their music for free or for a low cost.</li>
- <li>You can use the music for your own projects, such as videos, podcasts, or presentations, without worrying about legal issues.</li>
- </ul>
- <p>The disadvantages of downloading music from other websites are:</p>
- <ul>
- <li>You may not find the exact song or artist that you are looking for, as these websites may have limited or niche collections.</li>
- <li>You may encounter low-quality or corrupted mp3 files that are not suitable for listening or editing.</li>
- <li>You may have to comply with certain terms and conditions of the websites, such as giving credit to the artists, linking back to the source, or not modifying the music.</li>
- </ul>
- <p>To download the song "Adam Na Eva" from other websites, follow these steps:</p>
- <ol>
- <li>Open a new tab and go to one of the websites that offer royalty-free or Creative Commons music, such as <a href="https://www.jamendo.com/">https://www.jamendo.com/</a>, <a href="https://freemusicarchive.org/">https://freemusicarchive.org/</a>, or <a href="https://www.bensound.com/">https://www.bensound.com/</a>.</li>
- <li>Search for "Adam Na Eva ft 4 Na 5" or similar keywords in the search bar.</li>
- <li>Select the mp3 file that matches the song from the results and click on the download button.</li>
- <li>Save the mp3 file to your device and enjoy your music.</li>
- </ol>
- <h2>Comparing the pros and cons of each method</h2>
- <p>To help you decide which method to use for downloading the song "Adam Na Eva" as an mp3 file, here is a table that compares the pros and cons of each method, based on criteria such as quality, cost, convenience, and legality.</p>
- <table>
- <tr><th>Method</th><th>Quality</th><th>Cost</th><th>Convenience</th><th>Legality</th></tr>
- <tr><td>Buying music from official sources</td><td>High</td><td>Medium to high</td><td>Easy to moderate</td><td>Legal and ethical</td></tr>
- <tr><td>Downloading music from YouTube</td><td>Low to medium</td><td>Free</td><td>Moderate to hard</td><td>Illegal and unethical</td></tr>
- <tr><td>Downloading music from other websites</td><td>Low to high</td><td>Free to low</td><td>Moderate to hard</td><td>Legal and ethical (with conditions)</td></tr>
- </table>
- <h1>Conclusion</h1>
- <p>In conclusion, the song "Adam Na Eva" by Adam Na Eva featuring 4 Na 5 is a Zambian music hit that has been making waves in the local and regional scene. It is a fusion of the kalindula and afrobeat genres that showcases the talents and creativity of four young Zambian artists. The song is a love song that expresses the feelings of two couples who are deeply in love. It has been received very well by Zambian music fans and critics, who have praised it for its quality, originality, and message. The song also reflects the growth and potential of the Zambian music industry, which faces challenges and opportunities in terms of funding, recognition, regulation, digitalization, collaboration, and exposure.</p>
- <p>If you want to download the song "Adam Na Eva" as an mp3 file, you have three options: buying it from official sources such as iTunes or Google Play Music; converting it from YouTube videos using online tools or software; or finding it on websites that offer royalty-free or Creative Commons music. Each option has its pros and cons in terms of quality, cost, convenience, and legality, so weigh these factors carefully before choosing. We recommend that you buy the song from official sources if you can afford it, as this is the best way to support the artists and respect their rights. If you cannot or do not want to buy the song, use the other options with caution and discretion, and always give credit to the original sources.</p>
- <h1>FAQs</h1>
- <p>Here are some frequently asked questions and answers related to the topic of the article:</p>
- <ol>
- <li><b>What is the meaning of the name Adam Na Eva?</b><br>
- The name Adam Na Eva is a combination of the names of the two members of the duo, Adam Mwale and Eva Chanda. It is also a reference to the biblical characters Adam and Eve, who were the first human couple created by God.</li>
- <li><b>What is the meaning of the name 4 Na 5?</b><br>
- The name 4 Na 5 is a slang term that means "more than enough" or "exceeding expectations". It is also a way of expressing the duo's confidence and ambition in their music career.</li>
- <li><b>Where can I watch the official video of the song Adam Na Eva?</b><br>
- You can watch the official video of the song Adam Na Eva on YouTube, by following this link: <a href="">https://www.youtube.com/watch?v=xxxxxxxx</a>. The video was directed by XYZ and features the four artists performing the song in various locations.</li>
- <li><b>Are there any remixes or covers of the song Adam Na Eva?</b><br>
- Yes, there are several remixes and covers of the song Adam Na Eva by other Zambian artists and fans. Some of them are:</li>
- <ul>
- <li><a href="">https://www.youtube.com/watch?v=yyyyyyyy</a>: A remix by DJ ABC featuring DEF and GHI.</li>
- <li><a href="">https://www.youtube.com/watch?v=zzzzzzzz</a>: A cover by JKL, a Zambian singer and guitarist.</li>
- <li><a href="">https://www.youtube.com/watch?v=wwwwwwww</a>: A cover by MNO, a Zambian rapper and producer.</li>
- </ul>
- <li><b>What are some other popular songs by Adam Na Eva and 4 Na 5?</b><br>
- Some other popular songs by Adam Na Eva are:</li>
- <ul>
- <li>"Nimwe": A gospel song that praises God for his blessings and guidance.</li>
- <li>"Mwana Wanga": A love song that expresses the joy of having a child with their partner.</li>
- <li>"Ndiwe": A love song that declares their devotion and gratitude to their partner.</li>
- </ul>
- <p>Some other popular songs by 4 Na 5 are:</p>
- <ul>
- <li>"Iyee": A party song that celebrates life and happiness.</li>
- <li>"Aboloka": A comedy song that mocks some social issues and stereotypes.</li>
- <li>"Ka Boom": A rap song that showcases their skills and attitude.</li>
- </ul></p>
- </ol>
spaces/232labs/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.py DELETED
@@ -1,209 +0,0 @@
- from collections import abc
- import os
-
- import torch
- from torch.nn import functional as F
- from torch.autograd import Function
- from torch.utils.cpp_extension import load
-
-
- # Compile and load the fused CUDA extension on first import.
- module_path = os.path.dirname(__file__)
- upfirdn2d_op = load(
-     "upfirdn2d",
-     sources=[
-         os.path.join(module_path, "upfirdn2d.cpp"),
-         os.path.join(module_path, "upfirdn2d_kernel.cu"),
-     ],
- )
-
-
- class UpFirDn2dBackward(Function):
-     @staticmethod
-     def forward(
-         ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
-     ):
-         up_x, up_y = up
-         down_x, down_y = down
-         g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
-
-         grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
-
-         # The backward pass of upfirdn2d is another upfirdn2d call with the
-         # flipped kernel, swapped up/down factors, and adjusted padding.
-         grad_input = upfirdn2d_op.upfirdn2d(
-             grad_output,
-             grad_kernel,
-             down_x,
-             down_y,
-             up_x,
-             up_y,
-             g_pad_x0,
-             g_pad_x1,
-             g_pad_y0,
-             g_pad_y1,
-         )
-         grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
-
-         ctx.save_for_backward(kernel)
-
-         pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-         ctx.up_x = up_x
-         ctx.up_y = up_y
-         ctx.down_x = down_x
-         ctx.down_y = down_y
-         ctx.pad_x0 = pad_x0
-         ctx.pad_x1 = pad_x1
-         ctx.pad_y0 = pad_y0
-         ctx.pad_y1 = pad_y1
-         ctx.in_size = in_size
-         ctx.out_size = out_size
-
-         return grad_input
-
-     @staticmethod
-     def backward(ctx, gradgrad_input):
-         kernel, = ctx.saved_tensors
-
-         gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
-
-         gradgrad_out = upfirdn2d_op.upfirdn2d(
-             gradgrad_input,
-             kernel,
-             ctx.up_x,
-             ctx.up_y,
-             ctx.down_x,
-             ctx.down_y,
-             ctx.pad_x0,
-             ctx.pad_x1,
-             ctx.pad_y0,
-             ctx.pad_y1,
-         )
-         # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
-         gradgrad_out = gradgrad_out.view(
-             ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
-         )
-
-         return gradgrad_out, None, None, None, None, None, None, None, None
-
-
- class UpFirDn2d(Function):
-     @staticmethod
-     def forward(ctx, input, kernel, up, down, pad):
-         up_x, up_y = up
-         down_x, down_y = down
-         pad_x0, pad_x1, pad_y0, pad_y1 = pad
-
-         kernel_h, kernel_w = kernel.shape
-         batch, channel, in_h, in_w = input.shape
-         ctx.in_size = input.shape
-
-         input = input.reshape(-1, in_h, in_w, 1)
-
-         ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
-
-         # Output size of the upsample -> pad -> filter -> downsample pipeline.
-         out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y
-         out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x
-         ctx.out_size = (out_h, out_w)
-
-         ctx.up = (up_x, up_y)
-         ctx.down = (down_x, down_y)
-         ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
-
-         g_pad_x0 = kernel_w - pad_x0 - 1
-         g_pad_y0 = kernel_h - pad_y0 - 1
-         g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
-         g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
-
-         ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
-
-         out = upfirdn2d_op.upfirdn2d(
-             input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
-         )
-         # out = out.view(major, out_h, out_w, minor)
-         out = out.view(-1, channel, out_h, out_w)
-
-         return out
-
-     @staticmethod
-     def backward(ctx, grad_output):
-         kernel, grad_kernel = ctx.saved_tensors
-
-         grad_input = None
-
-         if ctx.needs_input_grad[0]:
-             grad_input = UpFirDn2dBackward.apply(
-                 grad_output,
-                 kernel,
-                 grad_kernel,
-                 ctx.up,
-                 ctx.down,
-                 ctx.pad,
-                 ctx.g_pad,
-                 ctx.in_size,
-                 ctx.out_size,
-             )
-
-         return grad_input, None, None, None, None
-
-
- def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
-     # Normalize scalar up/down factors and 2-element pads to per-axis tuples.
-     if not isinstance(up, abc.Iterable):
-         up = (up, up)
-
-     if not isinstance(down, abc.Iterable):
-         down = (down, down)
-
-     if len(pad) == 2:
-         pad = (pad[0], pad[1], pad[0], pad[1])
-
-     if input.device.type == "cpu":
-         out = upfirdn2d_native(input, kernel, *up, *down, *pad)
-
-     else:
-         out = UpFirDn2d.apply(input, kernel, up, down, pad)
-
-     return out
-
-
- def upfirdn2d_native(
-     input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
- ):
-     _, channel, in_h, in_w = input.shape
-     input = input.reshape(-1, in_h, in_w, 1)
-
-     _, in_h, in_w, minor = input.shape
-     kernel_h, kernel_w = kernel.shape
-
-     # Upsample by zero-insertion.
-     out = input.view(-1, in_h, 1, in_w, 1, minor)
-     out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
-     out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
-     # Pad (or crop, when the padding is negative).
-     out = F.pad(
-         out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
-     )
-     out = out[
-         :,
-         max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
-         max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
-         :,
-     ]
-
-     # Convolve with the flipped FIR kernel, then downsample by striding.
-     out = out.permute(0, 3, 1, 2)
-     out = out.reshape(
-         [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
-     )
-     w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
-     out = F.conv2d(out, w)
-     out = out.reshape(
-         -1,
-         minor,
-         in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
-         in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
-     )
-     out = out.permute(0, 2, 3, 1)
-     out = out[:, ::down_y, ::down_x, :]
-
-     out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y
-     out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x
-
-     return out.view(-1, channel, out_h, out_w)
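For reference, a minimal sketch of how the `upfirdn2d` wrapper in this deleted file is typically called. On a CPU tensor it dispatches to `upfirdn2d_native`, so no compiled CUDA extension is needed; the 4-tap binomial kernel and the factor-2 upsampling padding below are illustrative choices, not values taken from this file.

import torch
# assumes upfirdn2d from the module above is in scope

k1d = torch.tensor([1., 3., 3., 1.])
kernel = k1d[None, :] * k1d[:, None]   # separable 4x4 FIR kernel
kernel = kernel / kernel.sum()         # normalize DC gain to 1

x = torch.randn(2, 3, 8, 8)            # (batch, channel, H, W), CPU tensor
p = kernel.shape[0] - 2                # pad budget for 2x upsampling
y = upfirdn2d(x, kernel * 4, up=2, down=1, pad=((p + 1) // 2 + 1, p // 2))
print(y.shape)                         # torch.Size([2, 3, 16, 16])

Scaling the kernel by 4 (up squared) keeps the signal magnitude constant after zero-insertion upsampling, which is how StyleGAN2's upsample helper drives this op.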
spaces/4Taps/SadTalker/src/face3d/util/nvdiffrast.py DELETED
@@ -1,126 +0,0 @@
- """This script is the differentiable renderer for Deep3DFaceRecon_pytorch.
- Note: the antialiasing step is missing in the current version.
- """
- import pytorch3d.ops
- import torch
- import torch.nn.functional as F
- import kornia
- from kornia.geometry.camera import pixel2cam
- import numpy as np
- from typing import List
- from scipy.io import loadmat
- from torch import nn
-
- from pytorch3d.structures import Meshes
- # Note: pytorch3d.renderer.MeshRenderer is deliberately not imported here,
- # because the local MeshRenderer class below would shadow it.
- from pytorch3d.renderer import (
-     look_at_view_transform,
-     FoVPerspectiveCameras,
-     DirectionalLights,
-     RasterizationSettings,
-     MeshRasterizer,
-     SoftPhongShader,
-     TexturesUV,
- )
-
- # def ndc_projection(x=0.1, n=1.0, f=50.0):
- #     return np.array([[n/x, 0, 0, 0],
- #                      [ 0, n/-x, 0, 0],
- #                      [ 0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)],
- #                      [ 0, 0, -1, 0]]).astype(np.float32)
-
- class MeshRenderer(nn.Module):
-     def __init__(self,
-                  rasterize_fov,
-                  znear=0.1,
-                  zfar=10,
-                  rasterize_size=224):
-         super(MeshRenderer, self).__init__()
-
-         # x = np.tan(np.deg2rad(rasterize_fov * 0.5)) * znear
-         # self.ndc_proj = torch.tensor(ndc_projection(x=x, n=znear, f=zfar)).matmul(
-         #     torch.diag(torch.tensor([1., -1, -1, 1])))
-         self.rasterize_size = rasterize_size
-         self.fov = rasterize_fov
-         self.znear = znear
-         self.zfar = zfar
-
-         self.rasterizer = None
-
-     def forward(self, vertex, tri, feat=None):
-         """
-         Return:
-             mask               -- torch.tensor, size (B, 1, H, W)
-             depth              -- torch.tensor, size (B, 1, H, W)
-             features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None
-
-         Parameters:
-             vertex             -- torch.tensor, size (B, N, 3)
-             tri                -- torch.tensor, size (B, M, 3) or (M, 3), triangles
-             feat(optional)     -- torch.tensor, size (B, N, C), features
-         """
-         device = vertex.device
-         rsize = int(self.rasterize_size)
-         # ndc_proj = self.ndc_proj.to(device)
-         # Convert the 3D vertices to homogeneous coordinates; the direction of y is the same as v.
-         if vertex.shape[-1] == 3:
-             vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1)
-             vertex[..., 0] = -vertex[..., 0]
-
-         # vertex_ndc = vertex @ ndc_proj.t()
-         if self.rasterizer is None:
-             # Lazily create the rasterizer on first use.
-             self.rasterizer = MeshRasterizer()
-             print("create rasterizer on device %s" % device)
-
-         # ranges = None
-         # if isinstance(tri, List) or len(tri.shape) == 3:
-         #     vum = vertex_ndc.shape[1]
-         #     fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device)
-         #     fstartidx = torch.cumsum(fnum, dim=0) - fnum
-         #     ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu()
-         #     for i in range(tri.shape[0]):
-         #         tri[i] = tri[i] + i*vum
-         #     vertex_ndc = torch.cat(vertex_ndc, dim=0)
-         #     tri = torch.cat(tri, dim=0)
-
-         # for range_mode vertex: [B*N, 4], tri: [B*M, 3]; for instance_mode vertex: [B, N, 4], tri: [M, 3]
-         tri = tri.type(torch.int32).contiguous()
-
-         # rasterize
-         cameras = FoVPerspectiveCameras(
-             device=device,
-             fov=self.fov,
-             znear=self.znear,
-             zfar=self.zfar,
-         )
-
-         raster_settings = RasterizationSettings(
-             image_size=rsize
-         )
-
-         # print(vertex.shape, tri.shape)
-         mesh = Meshes(vertex.contiguous()[..., :3], tri.unsqueeze(0).repeat((vertex.shape[0], 1, 1)))
-
-         fragments = self.rasterizer(mesh, cameras=cameras, raster_settings=raster_settings)
-         rast_out = fragments.pix_to_face.squeeze(-1)
-         depth = fragments.zbuf
-
-         # render depth
-         depth = depth.permute(0, 3, 1, 2)
-         mask = (rast_out > 0).float().unsqueeze(1)
-         depth = mask * depth
-
-         image = None
-         if feat is not None:
-             attributes = feat.reshape(-1, 3)[mesh.faces_packed()]
-             image = pytorch3d.ops.interpolate_face_attributes(fragments.pix_to_face,
-                                                               fragments.bary_coords,
-                                                               attributes)
-             # print(image.shape)
-             image = image.squeeze(-2).permute(0, 3, 1, 2)
-             image = mask * image
-
-         return mask, depth, image
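For reference, a minimal sketch of driving the renderer above. The fov, near/far planes, and mesh sizes are placeholder values (not taken from this repository), a random mesh only exercises the tensor shapes rather than producing a meaningful render, and pytorch3d must be installed.

import torch
# assumes the MeshRenderer class above is in scope

renderer = MeshRenderer(rasterize_fov=12.6, znear=5.0, zfar=15.0, rasterize_size=224)

verts = torch.rand(1, 1000, 3)             # (B, N, 3) camera-space vertices
faces = torch.randint(0, 1000, (2000, 3))  # (M, 3) triangle indices
colors = torch.rand(1, 1000, 3)            # (B, N, 3) per-vertex features; C must be 3,
                                           # since forward() does feat.reshape(-1, 3)

mask, depth, image = renderer(verts, faces, feat=colors)
# mask/depth: (1, 1, 224, 224); image: (1, 3, 224, 224)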
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/How to QA 2f036148193a4fccac2c9e8ae9e6d197.md DELETED
@@ -1,31 +0,0 @@
- # How to QA
-
- Last edited time: March 31, 2023 1:49 PM
- Owner: Anonymous
- Tags: Guides and Processes, Testing
-
- <aside>
- 💡 This template explains our QA process for shipping bug-free software.
-
- </aside>
-
- # QA Philosophy
-
- Write about your approach to QA and why it's critical to success.
-
- # Processes
-
- ### Making Code Changes
-
- - Test PRs rigorously before requesting review.
- - Include test cases you've checked for in your PR description.
-
- ### Reviewing Code
-
- - If the change is substantial and user-facing, pull down the branch.
- - Test for cases (particularly edge cases) that the PR author may have missed.
-
- ### QA
-
- - Look at the list of items going out for release the next day.
- - Go down the list one-by-one and thoroughly test changes.
spaces/AHzizi/WaifuVoiceGen/utils.py DELETED
@@ -1,225 +0,0 @@
1
- import os
2
- import sys
3
- import argparse
4
- import logging
5
- import json
6
- import subprocess
7
- import numpy as np
8
- import librosa
9
- import torch
10
-
11
- MATPLOTLIB_FLAG = False
12
-
13
- logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
14
- logger = logging
15
-
16
-
17
- def load_checkpoint(checkpoint_path, model, optimizer=None):
18
- assert os.path.isfile(checkpoint_path)
19
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
20
- iteration = checkpoint_dict['iteration']
21
- learning_rate = checkpoint_dict['learning_rate']
22
- if optimizer is not None:
23
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
24
- saved_state_dict = checkpoint_dict['model']
25
- if hasattr(model, 'module'):
26
- state_dict = model.module.state_dict()
27
- else:
28
- state_dict = model.state_dict()
29
- new_state_dict= {}
30
- for k, v in state_dict.items():
31
- try:
32
- new_state_dict[k] = saved_state_dict[k]
33
- except:
34
- logger.info("%s is not in the checkpoint" % k)
35
- new_state_dict[k] = v
36
- if hasattr(model, 'module'):
37
- model.module.load_state_dict(new_state_dict)
38
- else:
39
- model.load_state_dict(new_state_dict)
40
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
41
- checkpoint_path, iteration))
42
- return model, optimizer, learning_rate, iteration
43
-
44
-
45
- def plot_spectrogram_to_numpy(spectrogram):
46
- global MATPLOTLIB_FLAG
47
- if not MATPLOTLIB_FLAG:
48
- import matplotlib
49
- matplotlib.use("Agg")
50
- MATPLOTLIB_FLAG = True
51
- mpl_logger = logging.getLogger('matplotlib')
52
- mpl_logger.setLevel(logging.WARNING)
53
- import matplotlib.pylab as plt
54
- import numpy as np
55
-
56
- fig, ax = plt.subplots(figsize=(10,2))
57
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
58
- interpolation='none')
59
- plt.colorbar(im, ax=ax)
60
- plt.xlabel("Frames")
61
- plt.ylabel("Channels")
62
- plt.tight_layout()
63
-
64
- fig.canvas.draw()
65
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
66
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
67
- plt.close()
68
- return data
69
-
70
-
71
- def plot_alignment_to_numpy(alignment, info=None):
72
- global MATPLOTLIB_FLAG
73
- if not MATPLOTLIB_FLAG:
74
- import matplotlib
75
- matplotlib.use("Agg")
76
- MATPLOTLIB_FLAG = True
77
- mpl_logger = logging.getLogger('matplotlib')
78
- mpl_logger.setLevel(logging.WARNING)
79
- import matplotlib.pylab as plt
80
- import numpy as np
81
-
82
- fig, ax = plt.subplots(figsize=(6, 4))
83
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
84
- interpolation='none')
85
-   fig.colorbar(im, ax=ax)
-   xlabel = 'Decoder timestep'
-   if info is not None:
-     xlabel += '\n\n' + info
-   plt.xlabel(xlabel)
-   plt.ylabel('Encoder timestep')
-   plt.tight_layout()
-
-   fig.canvas.draw()
-   data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
-   data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-   plt.close()
-   return data
-
-
- def load_audio_to_torch(full_path, target_sampling_rate):
-   audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
-   return torch.FloatTensor(audio.astype(np.float32))
-
-
- def load_filepaths_and_text(filename, split="|"):
-   with open(filename, encoding='utf-8') as f:
-     filepaths_and_text = [line.strip().split(split) for line in f]
-   return filepaths_and_text
-
-
- def get_hparams(init=True):
-   parser = argparse.ArgumentParser()
-   parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                       help='JSON file for configuration')
-   parser.add_argument('-m', '--model', type=str, required=True,
-                       help='Model name')
-
-   args = parser.parse_args()
-   model_dir = os.path.join("./logs", args.model)
-
-   if not os.path.exists(model_dir):
-     os.makedirs(model_dir)
-
-   config_path = args.config
-   config_save_path = os.path.join(model_dir, "config.json")
-   if init:
-     with open(config_path, "r") as f:
-       data = f.read()
-     with open(config_save_path, "w") as f:
-       f.write(data)
-   else:
-     with open(config_save_path, "r") as f:
-       data = f.read()
-   config = json.loads(data)
-
-   hparams = HParams(**config)
-   hparams.model_dir = model_dir
-   return hparams
-
-
- def get_hparams_from_dir(model_dir):
-   config_save_path = os.path.join(model_dir, "config.json")
-   with open(config_save_path, "r") as f:
-     data = f.read()
-   config = json.loads(data)
-
-   hparams = HParams(**config)
-   hparams.model_dir = model_dir
-   return hparams
-
-
- def get_hparams_from_file(config_path):
-   with open(config_path, "r") as f:
-     data = f.read()
-   config = json.loads(data)
-
-   hparams = HParams(**config)
-   return hparams
-
-
- def check_git_hash(model_dir):
-   source_dir = os.path.dirname(os.path.realpath(__file__))
-   if not os.path.exists(os.path.join(source_dir, ".git")):
-     logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-       source_dir
-     ))
-     return
-
-   cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-   path = os.path.join(model_dir, "githash")
-   if os.path.exists(path):
-     saved_hash = open(path).read()
-     if saved_hash != cur_hash:
-       logger.warning("git hash values are different. {}(saved) != {}(current)".format(
-         saved_hash[:8], cur_hash[:8]))
-   else:
-     open(path, "w").write(cur_hash)
-
-
- def get_logger(model_dir, filename="train.log"):
-   global logger
-   logger = logging.getLogger(os.path.basename(model_dir))
-   logger.setLevel(logging.DEBUG)
-
-   formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-   if not os.path.exists(model_dir):
-     os.makedirs(model_dir)
-   h = logging.FileHandler(os.path.join(model_dir, filename))
-   h.setLevel(logging.DEBUG)
-   h.setFormatter(formatter)
-   logger.addHandler(h)
-   return logger
-
-
- class HParams():
-   def __init__(self, **kwargs):
-     for k, v in kwargs.items():
-       if type(v) == dict:
-         v = HParams(**v)
-       self[k] = v
-
-   def keys(self):
-     return self.__dict__.keys()
-
-   def items(self):
-     return self.__dict__.items()
-
-   def values(self):
-     return self.__dict__.values()
-
-   def __len__(self):
-     return len(self.__dict__)
-
-   def __getitem__(self, key):
-     return getattr(self, key)
-
-   def __setitem__(self, key, value):
-     return setattr(self, key, value)
-
-   def __contains__(self, key):
-     return key in self.__dict__
-
-   def __repr__(self):
-     return self.__dict__.__repr__()
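
For reference, a minimal sketch of how the HParams container above behaves once built from a parsed JSON config (the field names and values here are hypothetical, not taken from any real config):

# Hypothetical usage sketch of the HParams class defined above.
hparams = HParams(**{"train": {"batch_size": 16}, "model": {"hidden_channels": 192}})
print(hparams.train.batch_size)           # nested dicts become nested HParams -> attribute access
print(hparams["model"].hidden_channels)   # __getitem__ delegates to getattr
hparams.model_dir = "./logs/my_model"     # new fields can be attached freely, as get_hparams() does
print("train" in hparams, len(hparams))   # __contains__ and __len__ inspect the underlying __dict__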
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/portaspeech_flow.py DELETED
@@ -1,75 +0,0 @@
- import torch
- import torch.distributions as dist
- from torch import nn
- from modules.commons.normalizing_flow.glow_modules import Glow
- from modules.portaspeech.portaspeech import PortaSpeech
- from utils.hparams import hparams
-
- class PortaSpeechFlow(PortaSpeech):
-     def __init__(self, ph_dict_size, word_dict_size, out_dims=None):
-         super().__init__(ph_dict_size, word_dict_size, out_dims)
-         cond_hs = 80
-         if hparams.get('use_txt_cond', True):
-             cond_hs = cond_hs + hparams['hidden_size']
-         if hparams.get('use_latent_cond', False):
-             cond_hs = cond_hs + hparams['latent_size']
-         if hparams['use_cond_proj']:
-             self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2)
-             cond_hs = 160
-         self.post_flow = Glow(
-             80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1,
-             hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'],
-             n_split=4, n_sqz=2,
-             gin_channels=cond_hs,
-             share_cond_layers=hparams['post_share_cond_layers'],
-             share_wn_layers=hparams['share_wn_layers'],
-             sigmoid_scale=hparams['sigmoid_scale']
-         )
-         self.prior_dist = dist.Normal(0, 1)
-
-     def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
-                 spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
-                 forward_post_glow=True, two_stage=True, global_step=None, **kwargs):
-         is_training = self.training
-         train_fvae = not (forward_post_glow and two_stage)
-         if not train_fvae:
-             self.eval()
-         with torch.set_grad_enabled(mode=train_fvae):
-             ret = super(PortaSpeechFlow, self).forward(
-                 txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph,
-                 spk_embed, spk_id, pitch, infer, tgt_mels, global_step, **kwargs)
-         if (forward_post_glow or not two_stage) and hparams['use_post_flow']:
-             self.run_post_glow(tgt_mels, infer, is_training, ret)
-         return ret
-
-     def run_post_glow(self, tgt_mels, infer, is_training, ret):
-         x_recon = ret['mel_out'].transpose(1, 2)
-         g = x_recon
-         B, _, T = g.shape
-         if hparams.get('use_txt_cond', True):
-             g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1)
-         if hparams.get('use_latent_cond', False):
-             g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T)
-             g = torch.cat([g, g_z], 1)
-         if hparams['use_cond_proj']:
-             g = self.g_proj(g)
-         prior_dist = self.prior_dist
-         if not infer:
-             if is_training:
-                 self.post_flow.train()
-             nonpadding = ret['nonpadding'].transpose(1, 2)
-             y_lengths = nonpadding.sum(-1)
-             if hparams['detach_postflow_input']:
-                 g = g.detach()
-             tgt_mels = tgt_mels.transpose(1, 2)
-             z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g)
-             ldj = ldj / y_lengths / 80
-             ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj
-             ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean()
-             if torch.isnan(ret['postflow']):
-                 ret['postflow'] = None
-         else:
-             nonpadding = torch.ones_like(x_recon[:, :1, :])
-             z_post = torch.randn(x_recon.shape).to(g.device) * hparams['noise_scale']
-             x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True)
-             ret['mel_out'] = x_recon.transpose(1, 2)
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/__init__.py DELETED
File without changes
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/gpt.py DELETED
@@ -1,83 +0,0 @@
- from typing import List
- from torch import nn
- import torch
- from pathlib import Path
- import json
- from .gpt_model import Model, HParams
-
-
- class GPTModel(nn.Module):
-     def __init__(self, path, n_layer=-1, freeze=True, use_lstm=False):
-         super().__init__()
-         root = Path(path)
-
-         params = json.loads((root / "params.json").read_text())
-         hparams = params["hparams"]
-         hparams.setdefault("n_hidden", hparams["n_embed"])
-         self.model = Model(HParams(**hparams))
-         state = torch.load(root / "model.pt", map_location="cpu")
-         state_dict = self.fixed_state_dict(state["state_dict"])
-         self.model.load_state_dict(state_dict)
-         self.activation = {}
-         self.freeze = freeze
-         self.n_layer = n_layer
-         if self.freeze:
-             for param in self.model.parameters():
-                 param.requires_grad = False
-
-         self.activation = {}
-         self.use_lstm = use_lstm
-         self.set_hook(self.n_layer)
-         self.in_fc_layer = 512 if self.use_lstm else 768
-         self.lstm1 = nn.LSTM(
-             768,
-             256,
-             bidirectional=True,
-             batch_first=True,
-         )
-         self.lstm2 = nn.LSTM(
-             512,
-             256,
-             bidirectional=True,
-             batch_first=True,
-         )
-         self.lstm3 = nn.LSTM(
-             512,
-             256,
-             bidirectional=True,
-             batch_first=True,
-         )
-         self.fc = nn.Linear(self.in_fc_layer, 17)
-
-     def get_activation(self, name):
-         def hook(model, input, output):
-             self.activation[name] = output[0].detach()
-
-         return hook
-
-     def set_hook(self, n_layer=0):
-         self.model.blocks[n_layer].register_forward_hook(self.get_activation("feats"))
-
-     def fixed_state_dict(self, state_dict):
-         if all(k.startswith("module.") for k in state_dict):
-             # legacy multi-GPU format
-             state_dict = {k[len("module.") :]: v for k, v in state_dict.items()}
-         return state_dict
-
-     def forward(self, src: torch.Tensor, lengths: torch.Tensor, target=None):
-
-         # logits shape [batch_size, 256, 500]
-         logits = self.model(src)["logits"]
-         logits = self.activation["feats"]
-
-         if self.use_lstm:
-             x, (h, cn) = self.lstm1(logits)
-             x, (h, cn) = self.lstm2(x)
-             x, (h, cn) = self.lstm3(x)
-         else:
-             x = logits
-         predictions = self.fc(x)
-
-         output = {"diacritics": predictions}
-
-         return output
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/methods/listpanel/ConfigurationMethods.js DELETED
@@ -1,129 +0,0 @@
- var methods = {
-     setWrapEnable(enable) {
-         if (enable === undefined) {
-             enable = true;
-         }
-
-         this.listWrapEnable = enable;
-         return this;
-     },
-
-     setCreateButtonCallback(callback) {
-         this.listCreateButtonCallback = callback;
-         return this;
-     },
-
-     setCreateListBackgroundCallback(callback) {
-         this.listCreateBackgroundCallback = callback;
-         return this;
-     },
-
-     setButtonClickCallback(callback) {
-         this.listOnButtonClick = callback;
-         return this;
-     },
-
-     setButtonOverCallback(callback) {
-         this.listOnButtonOver = callback;
-         return this;
-     },
-
-     setButtonOutCallback(callback) {
-         this.listOnButtonOut = callback;
-         return this;
-     },
-
-     setListExpandDirection(direction) {
-         if (typeof (direction) === 'string') {
-             direction = ListExpandDirections[direction];
-         }
-         this.listExpandDirection = direction;
-         return this;
-     },
-
-     setListEaseInDuration(duration) {
-         if (duration === undefined) {
-             duration = 0;
-         }
-         this.listEaseInDuration = duration;
-         return this;
-     },
-
-     setListEaseOutDuration(duration) {
-         if (duration === undefined) {
-             duration = 0;
-         }
-         this.listEaseOutDuration = duration;
-         return this;
-     },
-
-     setListTransitInCallback(callback) {
-         this.listTransitInCallback = callback;
-         // callback = function(gameObject, duration) {}
-         return this;
-     },
-
-     settListTransitOutCallback(callback) {
-         this.listTransitOutCallback = callback;
-         // callback = function(gameObject, duration) {}
-         return this;
-     },
-
-     setListBounds(bounds) {
-         this.listBounds = bounds;
-         return this;
-     },
-
-     setListWidth(width) {
-         this.listWidth = width;
-         return this;
-     },
-
-     setListHeight(height) {
-         this.listHeight = height;
-         return this;
-     },
-
-     setListSize(width, height) {
-         this.setListWidth(width).setListHeight(height);
-         return this;
-     },
-
-     setListAlignmentMode(mode) {
-         this.listAlignMode = mode;
-         return this;
-     },
-
-     setListAlignmentSide(side) {
-         if (side === undefined) {
-             side = '';
-         }
-
-         this.listAlignSide = side;
-         return this;
-     },
-
-     setListSpace(space) {
-         if (space === undefined) {
-             space = {};
-         }
-         this.listSpace = space;
-         return this;
-     },
-
-     setListDraggable(enable) {
-         if (enable === undefined) {
-             enable = true;
-         }
-         this.listDraggable = enable;
-         return this;
-     },
-
- }
-
- const ListExpandDirections = {
-     down: 0,
-     up: 1
- }
-
- export default methods;
 
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_mbf.py DELETED
@@ -1,26 +0,0 @@
- from easydict import EasyDict as edict
-
- # make training faster
- # our RAM is 256G
- # mount -t tmpfs -o size=140G tmpfs /train_tmp
-
- config = edict()
- config.loss = "arcface"
- config.network = "mbf"
- config.resume = False
- config.output = None
- config.embedding_size = 512
- config.sample_rate = 1.0
- config.fp16 = True
- config.momentum = 0.9
- config.weight_decay = 2e-4
- config.batch_size = 128
- config.lr = 0.1  # batch size is 512
-
- config.rec = "/train_tmp/ms1m-retinaface-t1"
- config.num_classes = 93431
- config.num_image = 5179510
- config.num_epoch = 30
- config.warmup_epoch = -1
- config.decay_epoch = [10, 20, 25]
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
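
For context, a short sketch of how an edict config like the one above is typically consumed; the world-size value and the linear LR-scaling rule are assumptions suggested by the "batch size is 512" comment, not taken from this file:

# Hypothetical sketch: easydict gives attribute-style access over a plain dict.
from easydict import EasyDict as edict

config = edict()
config.network = "mbf"
config.batch_size = 128   # per-GPU batch size
config.lr = 0.1           # reference LR quoted for a total batch size of 512

world_size = 4            # assumed GPU count, for illustration only
total_batch = config.batch_size * world_size
scaled_lr = config.lr * total_batch / 512  # linear LR scaling under the stated reference
print(config.network, total_batch, scaled_lr)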
 
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/onnx_helper.py DELETED
@@ -1,250 +0,0 @@
- from __future__ import division
- import datetime
- import os
- import os.path as osp
- import glob
- import numpy as np
- import cv2
- import sys
- import onnxruntime
- import onnx
- import argparse
- from onnx import numpy_helper
- from insightface.data import get_image
-
- class ArcFaceORT:
-     def __init__(self, model_path, cpu=False):
-         self.model_path = model_path
-         # providers = None will use available provider, for onnxruntime-gpu it will be "CUDAExecutionProvider"
-         self.providers = ['CPUExecutionProvider'] if cpu else None
-
-     # input_size is (w,h), return error message, return None if success
-     def check(self, track='cfat', test_img=None):
-         # default is cfat
-         max_model_size_mb = 1024
-         max_feat_dim = 512
-         max_time_cost = 15
-         if track.startswith('ms1m'):
-             max_model_size_mb = 1024
-             max_feat_dim = 512
-             max_time_cost = 10
-         elif track.startswith('glint'):
-             max_model_size_mb = 1024
-             max_feat_dim = 1024
-             max_time_cost = 20
-         elif track.startswith('cfat'):
-             max_model_size_mb = 1024
-             max_feat_dim = 512
-             max_time_cost = 15
-         elif track.startswith('unconstrained'):
-             max_model_size_mb = 1024
-             max_feat_dim = 1024
-             max_time_cost = 30
-         else:
-             return "track not found"
-
-         if not os.path.exists(self.model_path):
-             return "model_path not exists"
-         if not os.path.isdir(self.model_path):
-             return "model_path should be directory"
-         onnx_files = []
-         for _file in os.listdir(self.model_path):
-             if _file.endswith('.onnx'):
-                 onnx_files.append(osp.join(self.model_path, _file))
-         if len(onnx_files) == 0:
-             return "do not have onnx files"
-         self.model_file = sorted(onnx_files)[-1]
-         print('use onnx-model:', self.model_file)
-         try:
-             session = onnxruntime.InferenceSession(self.model_file, providers=self.providers)
-         except:
-             return "load onnx failed"
-         input_cfg = session.get_inputs()[0]
-         input_shape = input_cfg.shape
-         print('input-shape:', input_shape)
-         if len(input_shape) != 4:
-             return "length of input_shape should be 4"
-         if not isinstance(input_shape[0], str):
-             # return "input_shape[0] should be str to support batch-inference"
-             print('reset input-shape[0] to None')
-             model = onnx.load(self.model_file)
-             model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
-             new_model_file = osp.join(self.model_path, 'zzzzrefined.onnx')
-             onnx.save(model, new_model_file)
-             self.model_file = new_model_file
-             print('use new onnx-model:', self.model_file)
-             try:
-                 session = onnxruntime.InferenceSession(self.model_file, providers=self.providers)
-             except:
-                 return "load onnx failed"
-             input_cfg = session.get_inputs()[0]
-             input_shape = input_cfg.shape
-             print('new-input-shape:', input_shape)
-
-         self.image_size = tuple(input_shape[2:4][::-1])
-         # print('image_size:', self.image_size)
-         input_name = input_cfg.name
-         outputs = session.get_outputs()
-         output_names = []
-         for o in outputs:
-             output_names.append(o.name)
-             # print(o.name, o.shape)
-         if len(output_names) != 1:
-             return "number of output nodes should be 1"
-         self.session = session
-         self.input_name = input_name
-         self.output_names = output_names
-         # print(self.output_names)
-         model = onnx.load(self.model_file)
-         graph = model.graph
-         if len(graph.node) < 8:
-             return "too small onnx graph"
-
-         input_size = (112, 112)
-         self.crop = None
-         if track == 'cfat':
-             crop_file = osp.join(self.model_path, 'crop.txt')
-             if osp.exists(crop_file):
-                 lines = open(crop_file, 'r').readlines()
-                 if len(lines) != 6:
-                     return "crop.txt should contain 6 lines"
-                 lines = [int(x) for x in lines]
-                 self.crop = lines[:4]
-                 input_size = tuple(lines[4:6])
-         if input_size != self.image_size:
-             return "input-size is inconsistent with onnx model input, %s vs %s" % (input_size, self.image_size)
-
-         self.model_size_mb = os.path.getsize(self.model_file) / float(1024 * 1024)
-         if self.model_size_mb > max_model_size_mb:
-             return "max model size exceed, given %.3f-MB" % self.model_size_mb
-
-         input_mean = None
-         input_std = None
-         if track == 'cfat':
-             pn_file = osp.join(self.model_path, 'pixel_norm.txt')
-             if osp.exists(pn_file):
-                 lines = open(pn_file, 'r').readlines()
-                 if len(lines) != 2:
-                     return "pixel_norm.txt should contain 2 lines"
-                 input_mean = float(lines[0])
-                 input_std = float(lines[1])
-         if input_mean is not None or input_std is not None:
-             if input_mean is None or input_std is None:
-                 return "please set input_mean and input_std simultaneously"
-         else:
-             find_sub = False
-             find_mul = False
-             for nid, node in enumerate(graph.node[:8]):
-                 print(nid, node.name)
-                 if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                     find_sub = True
-                 if node.name.startswith('Mul') or node.name.startswith('_mul') or node.name.startswith('Div'):
-                     find_mul = True
-             if find_sub and find_mul:
-                 print("find sub and mul")
-                 # mxnet arcface model
-                 input_mean = 0.0
-                 input_std = 1.0
-             else:
-                 input_mean = 127.5
-                 input_std = 127.5
-         self.input_mean = input_mean
-         self.input_std = input_std
-         for initn in graph.initializer:
-             weight_array = numpy_helper.to_array(initn)
-             dt = weight_array.dtype
-             if dt.itemsize < 4:
-                 return 'invalid weight type - (%s:%s)' % (initn.name, dt.name)
-         if test_img is None:
-             test_img = get_image('Tom_Hanks_54745')
-             test_img = cv2.resize(test_img, self.image_size)
-         else:
-             test_img = cv2.resize(test_img, self.image_size)
-         feat, cost = self.benchmark(test_img)
-         batch_result = self.check_batch(test_img)
-         batch_result_sum = float(np.sum(batch_result))
-         if batch_result_sum in [float('inf'), -float('inf')] or batch_result_sum != batch_result_sum:
-             print(batch_result)
-             print(batch_result_sum)
-             return "batch result output contains NaN!"
-
-         if len(feat.shape) < 2:
-             return "the shape of the feature must be two, but get {}".format(str(feat.shape))
-
-         if feat.shape[1] > max_feat_dim:
-             return "max feat dim exceed, given %d" % feat.shape[1]
-         self.feat_dim = feat.shape[1]
-         cost_ms = cost * 1000
-         if cost_ms > max_time_cost:
-             return "max time cost exceed, given %.4f" % cost_ms
-         self.cost_ms = cost_ms
-         print('check stat:, model-size-mb: %.4f, feat-dim: %d, time-cost-ms: %.4f, input-mean: %.3f, input-std: %.3f' % (self.model_size_mb, self.feat_dim, self.cost_ms, self.input_mean, self.input_std))
-         return None
-
-     def check_batch(self, img):
-         if not isinstance(img, list):
-             imgs = [img, ] * 32
-         if self.crop is not None:
-             nimgs = []
-             for img in imgs:
-                 nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :]
-                 if nimg.shape[0] != self.image_size[1] or nimg.shape[1] != self.image_size[0]:
-                     nimg = cv2.resize(nimg, self.image_size)
-                 nimgs.append(nimg)
-             imgs = nimgs
-         blob = cv2.dnn.blobFromImages(
-             images=imgs, scalefactor=1.0 / self.input_std, size=self.image_size,
-             mean=(self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-         return net_out
-
-
-     def meta_info(self):
-         return {'model-size-mb': self.model_size_mb, 'feature-dim': self.feat_dim, 'infer': self.cost_ms}
-
-
-     def forward(self, imgs):
-         if not isinstance(imgs, list):
-             imgs = [imgs]
-         input_size = self.image_size
-         if self.crop is not None:
-             nimgs = []
-             for img in imgs:
-                 nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :]
-                 if nimg.shape[0] != input_size[1] or nimg.shape[1] != input_size[0]:
-                     nimg = cv2.resize(nimg, input_size)
-                 nimgs.append(nimg)
-             imgs = nimgs
-         blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-         return net_out
-
-     def benchmark(self, img):
-         input_size = self.image_size
-         if self.crop is not None:
-             nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :]
-             if nimg.shape[0] != input_size[1] or nimg.shape[1] != input_size[0]:
-                 nimg = cv2.resize(nimg, input_size)
-             img = nimg
-         blob = cv2.dnn.blobFromImage(img, 1.0 / self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         costs = []
-         for _ in range(50):
-             ta = datetime.datetime.now()
-             net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-             tb = datetime.datetime.now()
-             cost = (tb - ta).total_seconds()
-             costs.append(cost)
-         costs = sorted(costs)
-         cost = costs[5]
-         return net_out, cost
-
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser(description='')
-     # general
-     parser.add_argument('workdir', help='submitted work dir', type=str)
-     parser.add_argument('--track', help='track name, for different challenge', type=str, default='cfat')
-     args = parser.parse_args()
-     handler = ArcFaceORT(args.workdir)
-     err = handler.check(args.track)
-     print('err:', err)
 
 
spaces/Amrrs/DragGan-Inversion/gui_utils/gl_utils.py DELETED
@@ -1,455 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- import math
- import os
- import functools
- import contextlib
- import numpy as np
- import OpenGL.GL as gl
- import OpenGL.GL.ARB.texture_float
- import dnnlib
-
- # ----------------------------------------------------------------------------
-
-
- def init_egl():
-     # Must be set before importing OpenGL.
-     assert os.environ['PYOPENGL_PLATFORM'] == 'egl'
-     import OpenGL.EGL as egl
-     import ctypes
-
-     # Initialize EGL.
-     display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
-     assert display != egl.EGL_NO_DISPLAY
-     major = ctypes.c_int32()
-     minor = ctypes.c_int32()
-     ok = egl.eglInitialize(display, major, minor)
-     assert ok
-     assert major.value * 10 + minor.value >= 14
-
-     # Choose config.
-     config_attribs = [
-         egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT,
-         egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT,
-         egl.EGL_NONE
-     ]
-     configs = (ctypes.c_int32 * 1)()
-     num_configs = ctypes.c_int32()
-     ok = egl.eglChooseConfig(display, config_attribs, configs, 1, num_configs)
-     assert ok
-     assert num_configs.value == 1
-     config = configs[0]
-
-     # Create dummy pbuffer surface.
-     surface_attribs = [
-         egl.EGL_WIDTH, 1,
-         egl.EGL_HEIGHT, 1,
-         egl.EGL_NONE
-     ]
-     surface = egl.eglCreatePbufferSurface(display, config, surface_attribs)
-     assert surface != egl.EGL_NO_SURFACE
-
-     # Setup GL context.
-     ok = egl.eglBindAPI(egl.EGL_OPENGL_API)
-     assert ok
-     context = egl.eglCreateContext(display, config, egl.EGL_NO_CONTEXT, None)
-     assert context != egl.EGL_NO_CONTEXT
-     ok = egl.eglMakeCurrent(display, surface, surface, context)
-     assert ok
-
- # ----------------------------------------------------------------------------
-
-
- _texture_formats = {
-     ('uint8', 1): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_LUMINANCE, internalformat=gl.GL_LUMINANCE8),
-     ('uint8', 2): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_LUMINANCE_ALPHA, internalformat=gl.GL_LUMINANCE8_ALPHA8),
-     ('uint8', 3): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_RGB, internalformat=gl.GL_RGB8),
-     ('uint8', 4): dnnlib.EasyDict(type=gl.GL_UNSIGNED_BYTE, format=gl.GL_RGBA, internalformat=gl.GL_RGBA8),
-     ('float32', 1): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_LUMINANCE, internalformat=OpenGL.GL.ARB.texture_float.GL_LUMINANCE32F_ARB),
-     ('float32', 2): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_LUMINANCE_ALPHA, internalformat=OpenGL.GL.ARB.texture_float.GL_LUMINANCE_ALPHA32F_ARB),
-     ('float32', 3): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_RGB, internalformat=gl.GL_RGB32F),
-     ('float32', 4): dnnlib.EasyDict(type=gl.GL_FLOAT, format=gl.GL_RGBA, internalformat=gl.GL_RGBA32F),
- }
-
-
- def get_texture_format(dtype, channels):
-     return _texture_formats[(np.dtype(dtype).name, int(channels))]
-
- # ----------------------------------------------------------------------------
-
-
- def prepare_texture_data(image):
-     image = np.asarray(image)
-     if image.ndim == 2:
-         image = image[:, :, np.newaxis]
-     if image.dtype.name == 'float64':
-         image = image.astype('float32')
-     return image
-
- # ----------------------------------------------------------------------------
-
-
- def draw_pixels(image, *, pos=0, zoom=1, align=0, rint=True):
-     pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
-     zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2])
-     align = np.broadcast_to(np.asarray(align, dtype='float32'), [2])
-     image = prepare_texture_data(image)
-     height, width, channels = image.shape
-     size = zoom * [width, height]
-     pos = pos - size * align
-     if rint:
-         pos = np.rint(pos)
-     fmt = get_texture_format(image.dtype, channels)
-
-     gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_PIXEL_MODE_BIT)
-     gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
-     gl.glRasterPos2f(pos[0], pos[1])
-     gl.glPixelZoom(zoom[0], -zoom[1])
-     gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
-     gl.glDrawPixels(width, height, fmt.format, fmt.type, image)
-     gl.glPopClientAttrib()
-     gl.glPopAttrib()
-
- # ----------------------------------------------------------------------------
-
-
- def read_pixels(width, height, *, pos=0, dtype='uint8', channels=3):
-     pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
-     dtype = np.dtype(dtype)
-     fmt = get_texture_format(dtype, channels)
-     image = np.empty([height, width, channels], dtype=dtype)
-
-     gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
-     gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
-     gl.glReadPixels(int(np.round(pos[0])), int(np.round(pos[1])),
-                     width, height, fmt.format, fmt.type, image)
-     gl.glPopClientAttrib()
-     return np.flipud(image)
-
- # ----------------------------------------------------------------------------
-
-
- class Texture:
-     def __init__(self, *, image=None, width=None, height=None, channels=None, dtype=None, bilinear=True, mipmap=True):
-         self.gl_id = None
-         self.bilinear = bilinear
-         self.mipmap = mipmap
-
-         # Determine size and dtype.
-         if image is not None:
-             image = prepare_texture_data(image)
-             self.height, self.width, self.channels = image.shape
-             self.dtype = image.dtype
-         else:
-             assert width is not None and height is not None
-             self.width = width
-             self.height = height
-             self.channels = channels if channels is not None else 3
-             self.dtype = np.dtype(dtype) if dtype is not None else np.uint8
-
-         # Validate size and dtype.
-         assert isinstance(self.width, int) and self.width >= 0
-         assert isinstance(self.height, int) and self.height >= 0
-         assert isinstance(self.channels, int) and self.channels >= 1
-         assert self.is_compatible(
-             width=width, height=height, channels=channels, dtype=dtype)
-
-         # Create texture object.
-         self.gl_id = gl.glGenTextures(1)
-         with self.bind():
-             gl.glTexParameterf(
-                 gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
-             gl.glTexParameterf(
-                 gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
-             gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER,
-                                gl.GL_LINEAR if self.bilinear else gl.GL_NEAREST)
-             gl.glTexParameterf(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER,
-                                gl.GL_LINEAR_MIPMAP_LINEAR if self.mipmap else gl.GL_NEAREST)
-             self.update(image)
-
-     def delete(self):
-         if self.gl_id is not None:
-             gl.glDeleteTextures([self.gl_id])
-             self.gl_id = None
-
-     def __del__(self):
-         try:
-             self.delete()
-         except:
-             pass
-
-     @contextlib.contextmanager
-     def bind(self):
-         prev_id = gl.glGetInteger(gl.GL_TEXTURE_BINDING_2D)
-         gl.glBindTexture(gl.GL_TEXTURE_2D, self.gl_id)
-         yield
-         gl.glBindTexture(gl.GL_TEXTURE_2D, prev_id)
-
-     def update(self, image):
-         if image is not None:
-             image = prepare_texture_data(image)
-             assert self.is_compatible(image=image)
-         with self.bind():
-             fmt = get_texture_format(self.dtype, self.channels)
-             gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
-             gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
-             gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, fmt.internalformat,
-                             self.width, self.height, 0, fmt.format, fmt.type, image)
-             if self.mipmap:
-                 gl.glGenerateMipmap(gl.GL_TEXTURE_2D)
-             gl.glPopClientAttrib()
-
-     def draw(self, *, pos=0, zoom=1, align=0, rint=False, color=1, alpha=1, rounding=0):
-         zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2])
-         size = zoom * [self.width, self.height]
-         with self.bind():
-             gl.glPushAttrib(gl.GL_ENABLE_BIT)
-             gl.glEnable(gl.GL_TEXTURE_2D)
-             draw_rect(pos=pos, size=size, align=align, rint=rint,
-                       color=color, alpha=alpha, rounding=rounding)
-             gl.glPopAttrib()
-
-     def is_compatible(self, *, image=None, width=None, height=None, channels=None, dtype=None):  # pylint: disable=too-many-return-statements
-         if image is not None:
-             if image.ndim != 3:
-                 return False
-             ih, iw, ic = image.shape
-             if not self.is_compatible(width=iw, height=ih, channels=ic, dtype=image.dtype):
-                 return False
-         if width is not None and self.width != width:
-             return False
-         if height is not None and self.height != height:
-             return False
-         if channels is not None and self.channels != channels:
-             return False
-         if dtype is not None and self.dtype != dtype:
-             return False
-         return True
-
- # ----------------------------------------------------------------------------
-
-
- class Framebuffer:
-     def __init__(self, *, texture=None, width=None, height=None, channels=None, dtype=None, msaa=0):
-         self.texture = texture
-         self.gl_id = None
-         self.gl_color = None
-         self.gl_depth_stencil = None
-         self.msaa = msaa
-
-         # Determine size and dtype.
-         if texture is not None:
-             assert isinstance(self.texture, Texture)
-             self.width = texture.width
-             self.height = texture.height
-             self.channels = texture.channels
-             self.dtype = texture.dtype
-         else:
-             assert width is not None and height is not None
-             self.width = width
-             self.height = height
-             self.channels = channels if channels is not None else 4
-             self.dtype = np.dtype(dtype) if dtype is not None else np.float32
-
-         # Validate size and dtype.
-         assert isinstance(self.width, int) and self.width >= 0
-         assert isinstance(self.height, int) and self.height >= 0
-         assert isinstance(self.channels, int) and self.channels >= 1
-         assert width is None or width == self.width
-         assert height is None or height == self.height
-         assert channels is None or channels == self.channels
-         assert dtype is None or dtype == self.dtype
-
-         # Create framebuffer object.
-         self.gl_id = gl.glGenFramebuffers(1)
-         with self.bind():
-
-             # Setup color buffer.
-             if self.texture is not None:
-                 assert self.msaa == 0
-                 gl.glFramebufferTexture2D(
-                     gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.texture.gl_id, 0)
-             else:
-                 fmt = get_texture_format(self.dtype, self.channels)
-                 self.gl_color = gl.glGenRenderbuffers(1)
-                 gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.gl_color)
-                 gl.glRenderbufferStorageMultisample(
-                     gl.GL_RENDERBUFFER, self.msaa, fmt.internalformat, self.width, self.height)
-                 gl.glFramebufferRenderbuffer(
-                     gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, self.gl_color)
-
-             # Setup depth/stencil buffer.
-             self.gl_depth_stencil = gl.glGenRenderbuffers(1)
-             gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.gl_depth_stencil)
-             gl.glRenderbufferStorageMultisample(
-                 gl.GL_RENDERBUFFER, self.msaa, gl.GL_DEPTH24_STENCIL8, self.width, self.height)
-             gl.glFramebufferRenderbuffer(
-                 gl.GL_FRAMEBUFFER, gl.GL_DEPTH_STENCIL_ATTACHMENT, gl.GL_RENDERBUFFER, self.gl_depth_stencil)
-
-     def delete(self):
-         if self.gl_id is not None:
-             gl.glDeleteFramebuffers(1, [self.gl_id])
-             self.gl_id = None
-         if self.gl_color is not None:
-             gl.glDeleteRenderbuffers(1, [self.gl_color])
-             self.gl_color = None
-         if self.gl_depth_stencil is not None:
-             gl.glDeleteRenderbuffers(1, [self.gl_depth_stencil])
-             self.gl_depth_stencil = None
-
-     def __del__(self):
-         try:
-             self.delete()
-         except:
-             pass
-
-     @contextlib.contextmanager
-     def bind(self):
-         prev_fbo = gl.glGetInteger(gl.GL_FRAMEBUFFER_BINDING)
-         prev_rbo = gl.glGetInteger(gl.GL_RENDERBUFFER_BINDING)
-         gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.gl_id)
-         if self.width is not None and self.height is not None:
-             gl.glViewport(0, 0, self.width, self.height)
-         yield
-         gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, prev_fbo)
-         gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, prev_rbo)
-
-     def blit(self, dst=None):
-         assert dst is None or isinstance(dst, Framebuffer)
-         with self.bind():
-             gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER,
-                                  0 if dst is None else dst.gl_id)
-             gl.glBlitFramebuffer(0, 0, self.width, self.height, 0, 0,
-                                  self.width, self.height, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
-
- # ----------------------------------------------------------------------------
-
-
- def draw_shape(vertices, *, mode=gl.GL_TRIANGLE_FAN, pos=0, size=1, color=1, alpha=1):
-     assert vertices.ndim == 2 and vertices.shape[1] == 2
-     pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
-     size = np.broadcast_to(np.asarray(size, dtype='float32'), [2])
-     color = np.broadcast_to(np.asarray(color, dtype='float32'), [3])
-     alpha = np.clip(np.broadcast_to(
-         np.asarray(alpha, dtype='float32'), []), 0, 1)
-
-     gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)
-     gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_TRANSFORM_BIT)
-     gl.glMatrixMode(gl.GL_MODELVIEW)
-     gl.glPushMatrix()
-
-     gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
-     gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
-     gl.glVertexPointer(2, gl.GL_FLOAT, 0, vertices)
-     gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, vertices)
-     gl.glTranslate(pos[0], pos[1], 0)
-     gl.glScale(size[0], size[1], 1)
-     gl.glColor4f(color[0] * alpha, color[1] * alpha, color[2] * alpha, alpha)
-     gl.glDrawArrays(mode, 0, vertices.shape[0])
-
-     gl.glPopMatrix()
-     gl.glPopAttrib()
-     gl.glPopClientAttrib()
-
- # ----------------------------------------------------------------------------
-
-
- def draw_arrow(x1, y1, x2, y2, l=10, width=1.0):
-     # Compute the length and angle of the arrow
-     dx = x2 - x1
-     dy = y2 - y1
-     length = math.sqrt(dx**2 + dy**2)
-     if length < l:
-         return
-     angle = math.atan2(dy, dx)
-
-     # Save the current modelview matrix
-     gl.glPushMatrix()
-
-     # Translate and rotate the coordinate system
-     gl.glTranslatef(x1, y1, 0.0)
-     gl.glRotatef(angle * 180.0 / math.pi, 0.0, 0.0, 1.0)
-
-     # Set the line width
-     gl.glLineWidth(width)
-     # gl.glColor3f(0.75, 0.75, 0.75)
-
-     # Begin drawing lines
-     gl.glBegin(gl.GL_LINES)
-
-     # Draw the shaft of the arrow
-     gl.glVertex2f(0.0, 0.0)
-     gl.glVertex2f(length, 0.0)
-
-     # Draw the head of the arrow
-     gl.glVertex2f(length, 0.0)
-     gl.glVertex2f(length - 2 * l, l)
-     gl.glVertex2f(length, 0.0)
-     gl.glVertex2f(length - 2 * l, -l)
-
-     # End drawing lines
-     gl.glEnd()
-
-     # Restore the modelview matrix
-     gl.glPopMatrix()
-
- # ----------------------------------------------------------------------------
-
-
- def draw_rect(*, pos=0, pos2=None, size=None, align=0, rint=False, color=1, alpha=1, rounding=0):
-     assert pos2 is None or size is None
-     pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
-     pos2 = np.broadcast_to(np.asarray(pos2, dtype='float32'), [2]) if pos2 is not None else None
-     size = np.broadcast_to(np.asarray(size, dtype='float32'), [2]) if size is not None else None
-     size = size if size is not None else pos2 - pos if pos2 is not None else np.array([1, 1], dtype='float32')
-     pos = pos - size * align
-     if rint:
-         pos = np.rint(pos)
-     rounding = np.broadcast_to(np.asarray(rounding, dtype='float32'), [2])
-     rounding = np.minimum(np.abs(rounding) / np.maximum(np.abs(size), 1e-8), 0.5)
-     if np.min(rounding) == 0:
-         rounding *= 0
-     vertices = _setup_rect(float(rounding[0]), float(rounding[1]))
-     draw_shape(vertices, mode=gl.GL_TRIANGLE_FAN, pos=pos,
-                size=size, color=color, alpha=alpha)
-
-
- @functools.lru_cache(maxsize=10000)
- def _setup_rect(rx, ry):
-     t = np.linspace(0, np.pi / 2, 1 if max(rx, ry) == 0 else 64)
-     s = 1 - np.sin(t)
-     c = 1 - np.cos(t)
-     x = [c * rx, 1 - s * rx, 1 - c * rx, s * rx]
-     y = [s * ry, c * ry, 1 - s * ry, 1 - c * ry]
-     v = np.stack([x, y], axis=-1).reshape(-1, 2)
-     return v.astype('float32')
-
- # ----------------------------------------------------------------------------
-
-
- def draw_circle(*, center=0, radius=100, hole=0, color=1, alpha=1):
-     hole = np.broadcast_to(np.asarray(hole, dtype='float32'), [])
-     vertices = _setup_circle(float(hole))
-     draw_shape(vertices, mode=gl.GL_TRIANGLE_STRIP, pos=center,
-                size=radius, color=color, alpha=alpha)
-
-
- @functools.lru_cache(maxsize=10000)
- def _setup_circle(hole):
-     t = np.linspace(0, np.pi * 2, 128)
-     s = np.sin(t)
-     c = np.cos(t)
-     v = np.stack([c, s, c * hole, s * hole], axis=-1).reshape(-1, 2)
-     return v.astype('float32')
-
- # ----------------------------------------------------------------------------
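
For orientation, a minimal headless-rendering sketch of how these helpers compose; this is a hypothetical usage, not code from the deleted repo, and it assumes an EGL-capable GPU driver plus projection/viewport state configured by the caller:

# Hypothetical sketch. PYOPENGL_PLATFORM must be set before any OpenGL import,
# as init_egl() asserts.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"

from gui_utils import gl_utils  # import path assumes the repo's package layout

gl_utils.init_egl()  # headless GL context backed by a 1x1 pbuffer
fb = gl_utils.Framebuffer(width=256, height=256, channels=4)
with fb.bind():
    # draw_circle emits a triangle strip under the current modelview/projection
    # matrices; mapping to pixel coordinates depends on GL state set elsewhere.
    gl_utils.draw_circle(center=[128, 128], radius=64, color=[1, 0, 0])
    image = gl_utils.read_pixels(256, 256)  # (256, 256, 3) uint8, top row first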
 
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: 3D Zeroshot Neural Style Transfer
- emoji: 🌖
- colorFrom: purple
- colorTo: green
- sdk: streamlit
- sdk_version: 1.26.0
- app_file: app.py
- pinned: false
- license: unlicense
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/dance_diffusion.md DELETED
@@ -1,33 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Dance Diffusion
-
- [Dance Diffusion](https://github.com/Harmonai-org/sample-generator) is by Zach Evans.
-
- Dance Diffusion is the first in a suite of generative audio tools for producers and musicians released by [Harmonai](https://github.com/Harmonai-org).
-
- The original codebase of this implementation can be found at [Harmonai-org](https://github.com/Harmonai-org/sample-generator).
-
- <Tip>
-
- Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
-
- </Tip>
-
- ## DanceDiffusionPipeline
- [[autodoc]] DanceDiffusionPipeline
- 	- all
- 	- __call__
-
- ## AudioPipelineOutput
- [[autodoc]] pipelines.AudioPipelineOutput
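
As context for the deleted page above, a minimal usage sketch of the pipeline it documents; the checkpoint name is one of Harmonai's published models, and the length and precision choices are illustrative:

# Minimal sketch, assuming diffusers is installed and a CUDA device is available.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Generate roughly four seconds of audio; .audios is a numpy array of waveforms
# sampled at the rate the UNet was trained with (pipe.unet.config.sample_rate).
audios = pipe(audio_length_in_s=4.0).audios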
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/unconditional_image_generation/train_unconditional.py DELETED
@@ -1,712 +0,0 @@
- import argparse
- import inspect
- import logging
- import math
- import os
- import shutil
- from datetime import timedelta
- from pathlib import Path
- from typing import Optional
-
- import accelerate
- import datasets
- import torch
- import torch.nn.functional as F
- from accelerate import Accelerator, InitProcessGroupKwargs
- from accelerate.logging import get_logger
- from accelerate.utils import ProjectConfiguration
- from datasets import load_dataset
- from huggingface_hub import HfFolder, Repository, create_repo, whoami
- from packaging import version
- from torchvision import transforms
- from tqdm.auto import tqdm
-
- import diffusers
- from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
- from diffusers.optimization import get_scheduler
- from diffusers.training_utils import EMAModel
- from diffusers.utils import check_min_version, is_accelerate_version, is_tensorboard_available, is_wandb_available
- from diffusers.utils.import_utils import is_xformers_available
-
-
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
- check_min_version("0.19.0")
-
- logger = get_logger(__name__, log_level="INFO")
-
-
- def _extract_into_tensor(arr, timesteps, broadcast_shape):
-     """
-     Extract values from a 1-D numpy array for a batch of indices.
-
-     :param arr: the 1-D numpy array.
-     :param timesteps: a tensor of indices into the array to extract.
-     :param broadcast_shape: a larger shape of K dimensions with the batch
-         dimension equal to the length of timesteps.
-     :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
-     """
-     if not isinstance(arr, torch.Tensor):
-         arr = torch.from_numpy(arr)
-     res = arr[timesteps].float().to(timesteps.device)
-     while len(res.shape) < len(broadcast_shape):
-         res = res[..., None]
-     return res.expand(broadcast_shape)
-
-
- def parse_args():
-     parser = argparse.ArgumentParser(description="Simple example of a training script.")
-     parser.add_argument(
-         "--dataset_name",
-         type=str,
-         default=None,
-         help=(
-             "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
-             " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
-             " or to a folder containing files that HF Datasets can understand."
-         ),
-     )
-     parser.add_argument(
-         "--dataset_config_name",
-         type=str,
-         default=None,
-         help="The config of the Dataset, leave as None if there's only one config.",
-     )
-     parser.add_argument(
-         "--model_config_name_or_path",
-         type=str,
-         default=None,
-         help="The config of the UNet model to train, leave as None to use standard DDPM configuration.",
-     )
-     parser.add_argument(
-         "--train_data_dir",
-         type=str,
-         default=None,
-         help=(
-             "A folder containing the training data. Folder contents must follow the structure described in"
-             " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
-             " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
-         ),
-     )
-     parser.add_argument(
-         "--output_dir",
-         type=str,
-         default="ddpm-model-64",
-         help="The output directory where the model predictions and checkpoints will be written.",
-     )
-     parser.add_argument("--overwrite_output_dir", action="store_true")
-     parser.add_argument(
-         "--cache_dir",
-         type=str,
-         default=None,
-         help="The directory where the downloaded models and datasets will be stored.",
-     )
-     parser.add_argument(
-         "--resolution",
-         type=int,
-         default=64,
-         help=(
-             "The resolution for input images, all the images in the train/validation dataset will be resized to this"
-             " resolution"
-         ),
-     )
-     parser.add_argument(
-         "--center_crop",
-         default=False,
-         action="store_true",
-         help=(
-             "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
-             " cropped. The images will be resized to the resolution first before cropping."
-         ),
-     )
-     parser.add_argument(
-         "--random_flip",
-         default=False,
-         action="store_true",
-         help="whether to randomly flip images horizontally",
-     )
-     parser.add_argument(
-         "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
-     )
-     parser.add_argument(
-         "--eval_batch_size", type=int, default=16, help="The number of images to generate for evaluation."
-     )
-     parser.add_argument(
-         "--dataloader_num_workers",
-         type=int,
-         default=0,
-         help=(
-             "The number of subprocesses to use for data loading. 0 means that the data will be loaded in the main"
-             " process."
-         ),
-     )
-     parser.add_argument("--num_epochs", type=int, default=100)
-     parser.add_argument("--save_images_epochs", type=int, default=10, help="How often to save images during training.")
-     parser.add_argument(
-         "--save_model_epochs", type=int, default=10, help="How often to save the model during training."
-     )
-     parser.add_argument(
-         "--gradient_accumulation_steps",
-         type=int,
-         default=1,
-         help="Number of updates steps to accumulate before performing a backward/update pass.",
-     )
-     parser.add_argument(
-         "--learning_rate",
-         type=float,
-         default=1e-4,
-         help="Initial learning rate (after the potential warmup period) to use.",
-     )
-     parser.add_argument(
-         "--lr_scheduler",
-         type=str,
-         default="cosine",
-         help=(
-             'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
-             ' "constant", "constant_with_warmup"]'
-         ),
-     )
-     parser.add_argument(
-         "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
-     )
-     parser.add_argument("--adam_beta1", type=float, default=0.95, help="The beta1 parameter for the Adam optimizer.")
-     parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-     parser.add_argument(
-         "--adam_weight_decay", type=float, default=1e-6, help="Weight decay magnitude for the Adam optimizer."
-     )
-     parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
-     parser.add_argument(
-         "--use_ema",
-         action="store_true",
-         help="Whether to use Exponential Moving Average for the final model weights.",
-     )
-     parser.add_argument("--ema_inv_gamma", type=float, default=1.0, help="The inverse gamma value for the EMA decay.")
-     parser.add_argument("--ema_power", type=float, default=3 / 4, help="The power value for the EMA decay.")
-     parser.add_argument("--ema_max_decay", type=float, default=0.9999, help="The maximum decay magnitude for EMA.")
-     parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-     parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-     parser.add_argument(
-         "--hub_model_id",
-         type=str,
-         default=None,
-         help="The name of the repository to keep in sync with the local `output_dir`.",
-     )
-     parser.add_argument(
-         "--hub_private_repo", action="store_true", help="Whether or not to create a private repository."
-     )
-     parser.add_argument(
-         "--logger",
-         type=str,
-         default="tensorboard",
-         choices=["tensorboard", "wandb"],
-         help=(
-             "Whether to use [tensorboard](https://www.tensorflow.org/tensorboard) or [wandb](https://www.wandb.ai)"
-             " for experiment tracking and logging of model metrics and model checkpoints"
-         ),
-     )
-     parser.add_argument(
-         "--logging_dir",
-         type=str,
-         default="logs",
-         help=(
-             "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-             " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-         ),
-     )
-     parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-     parser.add_argument(
-         "--mixed_precision",
-         type=str,
-         default="no",
-         choices=["no", "fp16", "bf16"],
-         help=(
-             "Whether to use mixed precision. Choose"
-             "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
-             "and an Nvidia Ampere GPU."
-         ),
-     )
-     parser.add_argument(
-         "--prediction_type",
-         type=str,
-         default="epsilon",
-         choices=["epsilon", "sample"],
-         help="Whether the model should predict the 'epsilon'/noise error or directly the reconstructed image 'x0'.",
-     )
-     parser.add_argument("--ddpm_num_steps", type=int, default=1000)
-     parser.add_argument("--ddpm_num_inference_steps", type=int, default=1000)
-     parser.add_argument("--ddpm_beta_schedule", type=str, default="linear")
-     parser.add_argument(
-         "--checkpointing_steps",
-         type=int,
-         default=500,
-         help=(
-             "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
-             " training using `--resume_from_checkpoint`."
-         ),
-     )
-     parser.add_argument(
-         "--checkpoints_total_limit",
-         type=int,
-         default=None,
-         help=("Max number of checkpoints to store."),
-     )
-     parser.add_argument(
-         "--resume_from_checkpoint",
-         type=str,
-         default=None,
-         help=(
-             "Whether training should be resumed from a previous checkpoint. Use a path saved by"
-             ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
-         ),
-     )
-     parser.add_argument(
-         "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
-     )
-
-     args = parser.parse_args()
-     env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-     if env_local_rank != -1 and env_local_rank != args.local_rank:
-         args.local_rank = env_local_rank
-
-     if args.dataset_name is None and args.train_data_dir is None:
-         raise ValueError("You must specify either a dataset name from the hub or a train data directory.")
-
-     return args
-
-
- def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
-     if token is None:
-         token = HfFolder.get_token()
-     if organization is None:
-         username = whoami(token)["name"]
-         return f"{username}/{model_id}"
-     else:
-         return f"{organization}/{model_id}"
-
-
- def main(args):
-     logging_dir = os.path.join(args.output_dir, args.logging_dir)
-     accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
-
-     kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200))  # a big number for high resolution or big dataset
-     accelerator = Accelerator(
-         gradient_accumulation_steps=args.gradient_accumulation_steps,
-         mixed_precision=args.mixed_precision,
-         log_with=args.logger,
-         project_config=accelerator_project_config,
-         kwargs_handlers=[kwargs],
-     )
-
-     if args.logger == "tensorboard":
-         if not is_tensorboard_available():
-             raise ImportError("Make sure to install tensorboard if you want to use it for logging during training.")
-
-     elif args.logger == "wandb":
-         if not is_wandb_available():
-             raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
-         import wandb
-
-     # `accelerate` 0.16.0 will have better support for customized saving
-     if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
-         # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
-         def save_model_hook(models, weights, output_dir):
-             if args.use_ema:
-                 ema_model.save_pretrained(os.path.join(output_dir, "unet_ema"))
-
-             for i, model in enumerate(models):
-                 model.save_pretrained(os.path.join(output_dir, "unet"))
-
-                 # make sure to pop weight so that corresponding model is not saved again
-                 weights.pop()
-
-         def load_model_hook(models, input_dir):
-             if args.use_ema:
-                 load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DModel)
-                 ema_model.load_state_dict(load_model.state_dict())
-                 ema_model.to(accelerator.device)
-                 del load_model
-
-             for i in range(len(models)):
-                 # pop models so that they are not loaded again
-                 model = models.pop()
-
-                 # load diffusers style into model
-                 load_model = UNet2DModel.from_pretrained(input_dir, subfolder="unet")
-                 model.register_to_config(**load_model.config)
-
-                 model.load_state_dict(load_model.state_dict())
-                 del load_model
-
-         accelerator.register_save_state_pre_hook(save_model_hook)
-         accelerator.register_load_state_pre_hook(load_model_hook)
-
-     # Make one log on every process with the configuration for debugging.
-     logging.basicConfig(
-         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-         datefmt="%m/%d/%Y %H:%M:%S",
-         level=logging.INFO,
-     )
-     logger.info(accelerator.state, main_process_only=False)
-     if accelerator.is_local_main_process:
-         datasets.utils.logging.set_verbosity_warning()
-         diffusers.utils.logging.set_verbosity_info()
-     else:
-         datasets.utils.logging.set_verbosity_error()
-         diffusers.utils.logging.set_verbosity_error()
-
-     # Handle the repository creation
-     if accelerator.is_main_process:
-         if args.push_to_hub:
-             if args.hub_model_id is None:
-                 repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
-             else:
-                 repo_name = args.hub_model_id
-             create_repo(repo_name, exist_ok=True, token=args.hub_token)
-             repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
-
-             with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
-                 if "step_*" not in gitignore:
-                     gitignore.write("step_*\n")
-                 if "epoch_*" not in gitignore:
-                     gitignore.write("epoch_*\n")
-         elif args.output_dir is not None:
-             os.makedirs(args.output_dir, exist_ok=True)
-
-     # Initialize the model
-     if args.model_config_name_or_path is None:
-         model = UNet2DModel(
-             sample_size=args.resolution,
-             in_channels=3,
-             out_channels=3,
-             layers_per_block=2,
-             block_out_channels=(128, 128, 256, 256, 512, 512),
-             down_block_types=(
-                 "DownBlock2D",
-                 "DownBlock2D",
-                 "DownBlock2D",
-                 "DownBlock2D",
-                 "AttnDownBlock2D",
-                 "DownBlock2D",
-             ),
-             up_block_types=(
-                 "UpBlock2D",
-                 "AttnUpBlock2D",
-                 "UpBlock2D",
-                 "UpBlock2D",
-                 "UpBlock2D",
-                 "UpBlock2D",
-             ),
-         )
-     else:
-         config = UNet2DModel.load_config(args.model_config_name_or_path)
-         model = UNet2DModel.from_config(config)
-
-     # Create EMA for the model.
-     if args.use_ema:
-         ema_model = EMAModel(
-             model.parameters(),
-             decay=args.ema_max_decay,
-             use_ema_warmup=True,
-             inv_gamma=args.ema_inv_gamma,
-             power=args.ema_power,
-             model_cls=UNet2DModel,
-             model_config=model.config,
-         )
-
-     if args.enable_xformers_memory_efficient_attention:
-         if is_xformers_available():
-             import xformers
-
-             xformers_version = version.parse(xformers.__version__)
-             if xformers_version == version.parse("0.0.16"):
-                 logger.warning(
-                     "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
-                 )
-             model.enable_xformers_memory_efficient_attention()
-         else:
-             raise ValueError("xformers is not available. Make sure it is installed correctly")
-
-     # Initialize the scheduler
-     accepts_prediction_type = "prediction_type" in set(inspect.signature(DDPMScheduler.__init__).parameters.keys())
-     if accepts_prediction_type:
-         noise_scheduler = DDPMScheduler(
-             num_train_timesteps=args.ddpm_num_steps,
-             beta_schedule=args.ddpm_beta_schedule,
-             prediction_type=args.prediction_type,
-         )
-     else:
-         noise_scheduler = DDPMScheduler(num_train_timesteps=args.ddpm_num_steps, beta_schedule=args.ddpm_beta_schedule)
-
-     # Initialize the optimizer
-     optimizer = torch.optim.AdamW(
-         model.parameters(),
-         lr=args.learning_rate,
-         betas=(args.adam_beta1, args.adam_beta2),
-         weight_decay=args.adam_weight_decay,
-         eps=args.adam_epsilon,
-     )
-
-     # Get the datasets: you can either provide your own training and evaluation files (see below)
-     # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
-
-     # In distributed training, the load_dataset function guarantees that only one local process can concurrently
-     # download the dataset.
-     if args.dataset_name is not None:
-         dataset = load_dataset(
-             args.dataset_name,
-             args.dataset_config_name,
-             cache_dir=args.cache_dir,
-             split="train",
-         )
-     else:
-         dataset = load_dataset("imagefolder", data_dir=args.train_data_dir, cache_dir=args.cache_dir, split="train")
-         # See more about loading custom images at
-         # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
464
-
465
- # Preprocessing the datasets and DataLoaders creation.
466
- augmentations = transforms.Compose(
467
- [
468
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
469
- transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
470
- transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
471
- transforms.ToTensor(),
472
- transforms.Normalize([0.5], [0.5]),
473
- ]
474
- )
475
-
476
- def transform_images(examples):
477
- images = [augmentations(image.convert("RGB")) for image in examples["image"]]
478
- return {"input": images}
479
-
480
- logger.info(f"Dataset size: {len(dataset)}")
481
-
482
- dataset.set_transform(transform_images)
483
- train_dataloader = torch.utils.data.DataLoader(
484
- dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
485
- )
486
-
487
- # Initialize the learning rate scheduler
488
- lr_scheduler = get_scheduler(
489
- args.lr_scheduler,
490
- optimizer=optimizer,
491
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
492
- num_training_steps=(len(train_dataloader) * args.num_epochs),
493
- )
494
-
495
- # Prepare everything with our `accelerator`.
496
- model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
497
- model, optimizer, train_dataloader, lr_scheduler
498
- )
499
-
500
- if args.use_ema:
501
- ema_model.to(accelerator.device)
502
-
503
- # We need to initialize the trackers we use, and also store our configuration.
504
- # The trackers initializes automatically on the main process.
505
- if accelerator.is_main_process:
506
- run = os.path.split(__file__)[-1].split(".")[0]
507
- accelerator.init_trackers(run)
508
-
509
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
510
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
511
- max_train_steps = args.num_epochs * num_update_steps_per_epoch
512
-
513
- logger.info("***** Running training *****")
514
- logger.info(f" Num examples = {len(dataset)}")
515
- logger.info(f" Num Epochs = {args.num_epochs}")
516
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
517
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
518
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
519
- logger.info(f" Total optimization steps = {max_train_steps}")
520
-
521
- global_step = 0
522
- first_epoch = 0
523
-
524
- # Potentially load in the weights and states from a previous save
525
- if args.resume_from_checkpoint:
526
- if args.resume_from_checkpoint != "latest":
527
- path = os.path.basename(args.resume_from_checkpoint)
528
- else:
529
- # Get the most recent checkpoint
530
- dirs = os.listdir(args.output_dir)
531
- dirs = [d for d in dirs if d.startswith("checkpoint")]
532
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
533
- path = dirs[-1] if len(dirs) > 0 else None
534
-
535
- if path is None:
536
- accelerator.print(
537
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
538
- )
539
- args.resume_from_checkpoint = None
540
- else:
541
- accelerator.print(f"Resuming from checkpoint {path}")
542
- accelerator.load_state(os.path.join(args.output_dir, path))
543
- global_step = int(path.split("-")[1])
544
-
545
- resume_global_step = global_step * args.gradient_accumulation_steps
546
- first_epoch = global_step // num_update_steps_per_epoch
547
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
548
-
549
- # Train!
550
- for epoch in range(first_epoch, args.num_epochs):
551
- model.train()
552
- progress_bar = tqdm(total=num_update_steps_per_epoch, disable=not accelerator.is_local_main_process)
553
- progress_bar.set_description(f"Epoch {epoch}")
554
- for step, batch in enumerate(train_dataloader):
555
- # Skip steps until we reach the resumed step
556
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
557
- if step % args.gradient_accumulation_steps == 0:
558
- progress_bar.update(1)
559
- continue
560
-
561
- clean_images = batch["input"]
562
- # Sample noise that we'll add to the images
563
- noise = torch.randn(
564
- clean_images.shape, dtype=(torch.float32 if args.mixed_precision == "no" else torch.float16)
565
- ).to(clean_images.device)
566
- bsz = clean_images.shape[0]
567
- # Sample a random timestep for each image
568
- timesteps = torch.randint(
569
- 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=clean_images.device
570
- ).long()
571
-
572
- # Add noise to the clean images according to the noise magnitude at each timestep
573
- # (this is the forward diffusion process)
574
- noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
575
-
576
- with accelerator.accumulate(model):
577
- # Predict the noise residual
578
- model_output = model(noisy_images, timesteps).sample
579
-
580
- if args.prediction_type == "epsilon":
581
- loss = F.mse_loss(model_output, noise) # this could have different weights!
582
- elif args.prediction_type == "sample":
583
- alpha_t = _extract_into_tensor(
584
- noise_scheduler.alphas_cumprod, timesteps, (clean_images.shape[0], 1, 1, 1)
585
- )
586
- snr_weights = alpha_t / (1 - alpha_t)
587
- loss = snr_weights * F.mse_loss(
588
- model_output, clean_images, reduction="none"
589
- ) # use SNR weighting from distillation paper
590
- loss = loss.mean()
591
- else:
592
- raise ValueError(f"Unsupported prediction type: {args.prediction_type}")
593
-
594
- accelerator.backward(loss)
595
-
596
- if accelerator.sync_gradients:
597
- accelerator.clip_grad_norm_(model.parameters(), 1.0)
598
- optimizer.step()
599
- lr_scheduler.step()
600
- optimizer.zero_grad()
601
-
602
- # Checks if the accelerator has performed an optimization step behind the scenes
603
- if accelerator.sync_gradients:
604
- if args.use_ema:
605
- ema_model.step(model.parameters())
606
- progress_bar.update(1)
607
- global_step += 1
608
-
609
- if global_step % args.checkpointing_steps == 0:
610
- # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
611
- if args.checkpoints_total_limit is not None:
612
- checkpoints = os.listdir(args.output_dir)
613
- checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
614
- checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
615
-
616
- # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
617
- if len(checkpoints) >= args.checkpoints_total_limit:
618
- num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
619
- removing_checkpoints = checkpoints[0:num_to_remove]
620
-
621
- logger.info(
622
- f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
623
- )
624
- logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
625
-
626
- for removing_checkpoint in removing_checkpoints:
627
- removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
628
- shutil.rmtree(removing_checkpoint)
629
-
630
- if accelerator.is_main_process:
631
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
632
- accelerator.save_state(save_path)
633
- logger.info(f"Saved state to {save_path}")
634
-
635
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
636
- if args.use_ema:
637
- logs["ema_decay"] = ema_model.cur_decay_value
638
- progress_bar.set_postfix(**logs)
639
- accelerator.log(logs, step=global_step)
640
- progress_bar.close()
641
-
642
- accelerator.wait_for_everyone()
643
-
644
- # Generate sample images for visual inspection
645
- if accelerator.is_main_process:
646
- if epoch % args.save_images_epochs == 0 or epoch == args.num_epochs - 1:
647
- unet = accelerator.unwrap_model(model)
648
-
649
- if args.use_ema:
650
- ema_model.store(unet.parameters())
651
- ema_model.copy_to(unet.parameters())
652
-
653
- pipeline = DDPMPipeline(
654
- unet=unet,
655
- scheduler=noise_scheduler,
656
- )
657
-
658
- generator = torch.Generator(device=pipeline.device).manual_seed(0)
659
- # run pipeline in inference (sample random noise and denoise)
660
- images = pipeline(
661
- generator=generator,
662
- batch_size=args.eval_batch_size,
663
- num_inference_steps=args.ddpm_num_inference_steps,
664
- output_type="numpy",
665
- ).images
666
-
667
- if args.use_ema:
668
- ema_model.restore(unet.parameters())
669
-
670
- # denormalize the images and save to tensorboard
671
- images_processed = (images * 255).round().astype("uint8")
672
-
673
- if args.logger == "tensorboard":
674
- if is_accelerate_version(">=", "0.17.0.dev0"):
675
- tracker = accelerator.get_tracker("tensorboard", unwrap=True)
676
- else:
677
- tracker = accelerator.get_tracker("tensorboard")
678
- tracker.add_images("test_samples", images_processed.transpose(0, 3, 1, 2), epoch)
679
- elif args.logger == "wandb":
680
- # Upcoming `log_images` helper coming in https://github.com/huggingface/accelerate/pull/962/files
681
- accelerator.get_tracker("wandb").log(
682
- {"test_samples": [wandb.Image(img) for img in images_processed], "epoch": epoch},
683
- step=global_step,
684
- )
685
-
686
- if epoch % args.save_model_epochs == 0 or epoch == args.num_epochs - 1:
687
- # save the model
688
- unet = accelerator.unwrap_model(model)
689
-
690
- if args.use_ema:
691
- ema_model.store(unet.parameters())
692
- ema_model.copy_to(unet.parameters())
693
-
694
- pipeline = DDPMPipeline(
695
- unet=unet,
696
- scheduler=noise_scheduler,
697
- )
698
-
699
- pipeline.save_pretrained(args.output_dir)
700
-
701
- if args.use_ema:
702
- ema_model.restore(unet.parameters())
703
-
704
- if args.push_to_hub:
705
- repo.push_to_hub(commit_message=f"Epoch {epoch}", blocking=False)
706
-
707
- accelerator.end_training()
708
-
709
-
710
- if __name__ == "__main__":
711
- args = parse_args()
712
- main(args)
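
A minimal, self-contained sketch of the training step above: forward diffusion via `add_noise` followed by an epsilon-prediction MSE loss. The toy UNet size and the random tensors standing in for a real batch are illustrative assumptions, not values from this script:

    import torch
    import torch.nn.functional as F
    from diffusers import DDPMScheduler, UNet2DModel

    # toy-sized model; the real script uses a much larger UNet2DModel
    model = UNet2DModel(
        sample_size=32, in_channels=3, out_channels=3, layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    scheduler = DDPMScheduler(num_train_timesteps=1000)

    clean_images = torch.randn(2, 3, 32, 32)  # stand-in for a real batch
    noise = torch.randn_like(clean_images)
    timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (2,)).long()

    # forward diffusion: noise the clean images at the sampled timesteps
    noisy_images = scheduler.add_noise(clean_images, noise, timesteps)

    # the model predicts the added noise (epsilon); train with plain MSE
    loss = F.mse_loss(model(noisy_images, timesteps).sample, noise)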
 
spaces/Andy1621/uniformer_image_detection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py DELETED
@@ -1,53 +0,0 @@
- _base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'
-
- norm_cfg = dict(type='BN', requires_grad=True)
- model = dict(
-     neck=dict(
-         _delete_=True,
-         type='FPG',
-         in_channels=[256, 512, 1024, 2048],
-         out_channels=256,
-         inter_channels=256,
-         num_outs=5,
-         add_extra_convs=True,
-         start_level=1,
-         stack_times=9,
-         paths=['bu'] * 9,
-         same_down_trans=None,
-         same_up_trans=dict(
-             type='conv',
-             kernel_size=3,
-             stride=2,
-             padding=1,
-             norm_cfg=norm_cfg,
-             inplace=False,
-             order=('act', 'conv', 'norm')),
-         across_lateral_trans=dict(
-             type='conv',
-             kernel_size=1,
-             norm_cfg=norm_cfg,
-             inplace=False,
-             order=('act', 'conv', 'norm')),
-         across_down_trans=dict(
-             type='interpolation_conv',
-             mode='nearest',
-             kernel_size=3,
-             norm_cfg=norm_cfg,
-             order=('act', 'conv', 'norm'),
-             inplace=False),
-         across_up_trans=None,
-         across_skip_trans=dict(
-             type='conv',
-             kernel_size=1,
-             norm_cfg=norm_cfg,
-             inplace=False,
-             order=('act', 'conv', 'norm')),
-         output_trans=dict(
-             type='last_conv',
-             kernel_size=3,
-             order=('act', 'conv', 'norm'),
-             inplace=False),
-         norm_cfg=norm_cfg,
-         skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
-
- evaluation = dict(interval=2)
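
A config like this is normally consumed through mmdetection's config machinery; a short sketch, assuming the mmcv 1.x API and a checkout-relative path:

    from mmcv import Config

    cfg = Config.fromfile('configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py')
    print(cfg.model.neck.type)         # 'FPG'
    print(cfg.model.neck.stack_times)  # 9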
 
spaces/Apex-X/GODROOP/roop/globals.py DELETED
@@ -1,17 +0,0 @@
- from typing import List
-
- source_path = None
- target_path = None
- output_path = None
- frame_processors: List[str] = []
- keep_fps = None
- keep_audio = None
- keep_frames = None
- many_faces = None
- video_encoder = None
- video_quality = None
- max_memory = None
- execution_providers: List[str] = []
- execution_threads = None
- headless = None
- log_level = 'error'
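
A hypothetical usage sketch: other roop modules read and write these module-level settings before processing starts. The paths and the processor name below are illustrative assumptions:

    import roop.globals

    roop.globals.source_path = "face.jpg"
    roop.globals.target_path = "input.mp4"
    roop.globals.output_path = "output.mp4"
    roop.globals.frame_processors = ["face_swapper"]  # assumed processor name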
 
spaces/Artrajz/vits-simple-api/utils/sentence.py DELETED
@@ -1,91 +0,0 @@
- import regex as re
-
- from logger import logger
- from utils.data_utils import check_is_none
- from utils.classify_language import classify_language
-
-
- def markup_language_type(text: str, target_languages: list = None) -> str:
-     pattern = r'[\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\>\=\?\@\[\]\{\}\\\\\^\_\`' \
-               r'\!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」' \
-               r'『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘\'\‛\“\”\„\‟…‧﹏.]+'
-     sentences = re.split(pattern, text)
-
-     pre_lang = ""
-     p = 0
-
-     for sentence in sentences:
-
-         if check_is_none(sentence): continue
-
-         lang = classify_language(sentence, target_languages)
-
-         if pre_lang == "":
-             text = text[:p] + text[p:].replace(sentence, f"[{lang.upper()}]{sentence}", 1)
-             p += len(f"[{lang.upper()}]")
-         elif pre_lang != lang:
-             text = text[:p] + text[p:].replace(sentence, f"[{pre_lang.upper()}][{lang.upper()}]{sentence}", 1)
-             p += len(f"[{pre_lang.upper()}][{lang.upper()}]")
-         pre_lang = lang
-         p += text[p:].index(sentence) + len(sentence)
-     text += f"[{pre_lang.upper()}]"
-
-     return text
-
-
- def cut(text: str, max: int) -> list:
-     pattern = r'[!(),—+\-.:;??。,、;:]+'
-     sentences = re.split(pattern, text)
-     discarded_chars = re.findall(pattern, text)
-
-     sentence_list, count, p = [], 0, 0
-
-     # iterate over the delimiters the text was split on
-     for i, discarded_char in enumerate(discarded_chars):
-         count += len(sentences[i]) + len(discarded_char)
-         if count >= max:
-             sentence_list.append(text[p:p + count].strip())
-             p += count
-             count = 0
-
-     # append whatever text remains at the end
-     if p < len(text):
-         sentence_list.append(text[p:])
-
-     return sentence_list
-
-
- def sentence_split_and_markup(text, max=50, lang="auto", speaker_lang=None):
-     # if this speaker only supports a single language
-     if speaker_lang is not None and len(speaker_lang) == 1:
-         if lang.upper() not in ["AUTO", "MIX"] and lang.lower() != speaker_lang[0]:
-             logger.debug(
-                 f"lang \"{lang}\" is not in speaker_lang {speaker_lang}, automatically set lang={speaker_lang[0]}")
-             lang = speaker_lang[0]
-
-     sentence_list = []
-     if lang.upper() != "MIX":
-         if max <= 0:
-             sentence_list.append(
-                 markup_language_type(text,
-                                      speaker_lang) if lang.upper() == "AUTO" else f"[{lang.upper()}]{text}[{lang.upper()}]")
-         else:
-             for i in cut(text, max):
-                 if check_is_none(i): continue
-                 sentence_list.append(
-                     markup_language_type(i,
-                                          speaker_lang) if lang.upper() == "AUTO" else f"[{lang.upper()}]{i}[{lang.upper()}]")
-     else:
-         sentence_list.append(text)
-
-     for i in sentence_list:
-         logger.debug(i)
-
-     return sentence_list
-
-
- if __name__ == '__main__':
-     text = "这几天心里颇不宁静。今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。我悄悄地披了大衫,带上门出去。"
-     print(markup_language_type(text, target_languages=None))
-     print(cut(text, max=50))
-     print(sentence_split_and_markup(text, max=50, lang="auto", speaker_lang=None))
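
A minimal self-contained sketch of the tagging idea in `markup_language_type`, with a toy two-language classifier standing in for the repo's `classify_language` (an assumption, not the real implementation):

    import regex as re

    def toy_classify(s):
        # crude stand-in: any Han character means Chinese, otherwise English
        return "zh" if re.search(r"\p{Han}", s) else "en"

    def toy_markup(text):
        out = []
        for chunk in re.split(r"([,。!?,.!?]+)", text):
            if not chunk or re.fullmatch(r"[,。!?,.!?]+", chunk):
                out.append(chunk)
                continue
            lang = toy_classify(chunk).upper()
            out.append(f"[{lang}]{chunk}[{lang}]")
        return "".join(out)

    print(toy_markup("你好,hello world。"))  # [ZH]你好[ZH],[EN]hello world[EN]。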
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/datetime.py DELETED
@@ -1,11 +0,0 @@
- """For when pip wants to check the date or time.
- """
-
- import datetime
-
-
- def today_is_later_than(year: int, month: int, day: int) -> bool:
-     today = datetime.date.today()
-     given = datetime.date(year, month, day)
-
-     return today > given
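
A quick illustration of the helper above:

    print(today_is_later_than(2000, 1, 1))  # True on any day after 2000-01-01
    print(today_is_later_than(9999, 1, 1))  # False until the year 9999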
 
spaces/BernardoOlisan/vqganclip/taming-transformers/scripts/sample_fast.py DELETED
@@ -1,260 +0,0 @@
- import argparse, os, sys, glob
- import torch
- import time
- import numpy as np
- from omegaconf import OmegaConf
- from PIL import Image
- from tqdm import tqdm, trange
- from einops import repeat
-
- from main import instantiate_from_config
- from taming.modules.transformer.mingpt import sample_with_past
-
-
- rescale = lambda x: (x + 1.) / 2.
-
-
- def chw_to_pillow(x):
-     return Image.fromarray((255*rescale(x.detach().cpu().numpy().transpose(1,2,0))).clip(0,255).astype(np.uint8))
-
-
- @torch.no_grad()
- def sample_classconditional(model, batch_size, class_label, steps=256, temperature=None, top_k=None, callback=None,
-                             dim_z=256, h=16, w=16, verbose_time=False, top_p=None):
-     log = dict()
-     assert type(class_label) == int, f'expecting type int but type is {type(class_label)}'
-     qzshape = [batch_size, dim_z, h, w]
-     assert not model.be_unconditional, 'Expecting a class-conditional Net2NetTransformer.'
-     c_indices = repeat(torch.tensor([class_label]), '1 -> b 1', b=batch_size).to(model.device)  # class token
-     t1 = time.time()
-     index_sample = sample_with_past(c_indices, model.transformer, steps=steps,
-                                     sample_logits=True, top_k=top_k, callback=callback,
-                                     temperature=temperature, top_p=top_p)
-     if verbose_time:
-         sampling_time = time.time() - t1
-         print(f"Full sampling takes about {sampling_time:.2f} seconds.")
-     x_sample = model.decode_to_img(index_sample, qzshape)
-     log["samples"] = x_sample
-     log["class_label"] = c_indices
-     return log
-
-
- @torch.no_grad()
- def sample_unconditional(model, batch_size, steps=256, temperature=None, top_k=None, top_p=None, callback=None,
-                          dim_z=256, h=16, w=16, verbose_time=False):
-     log = dict()
-     qzshape = [batch_size, dim_z, h, w]
-     assert model.be_unconditional, 'Expecting an unconditional model.'
-     c_indices = repeat(torch.tensor([model.sos_token]), '1 -> b 1', b=batch_size).to(model.device)  # sos token
-     t1 = time.time()
-     index_sample = sample_with_past(c_indices, model.transformer, steps=steps,
-                                     sample_logits=True, top_k=top_k, callback=callback,
-                                     temperature=temperature, top_p=top_p)
-     if verbose_time:
-         sampling_time = time.time() - t1
-         print(f"Full sampling takes about {sampling_time:.2f} seconds.")
-     x_sample = model.decode_to_img(index_sample, qzshape)
-     log["samples"] = x_sample
-     return log
-
-
- @torch.no_grad()
- def run(logdir, model, batch_size, temperature, top_k, unconditional=True, num_samples=50000,
-         given_classes=None, top_p=None):
-     batches = [batch_size for _ in range(num_samples//batch_size)] + [num_samples % batch_size]
-     if not unconditional:
-         assert given_classes is not None
-         print("Running in pure class-conditional sampling mode. I will produce "
-               f"{num_samples} samples for each of the {len(given_classes)} classes, "
-               f"i.e. {num_samples*len(given_classes)} in total.")
-         for class_label in tqdm(given_classes, desc="Classes"):
-             for n, bs in tqdm(enumerate(batches), desc="Sampling Class"):
-                 if bs == 0: break
-                 logs = sample_classconditional(model, batch_size=bs, class_label=class_label,
-                                                temperature=temperature, top_k=top_k, top_p=top_p)
-                 save_from_logs(logs, logdir, base_count=n * batch_size, cond_key=logs["class_label"])
-     else:
-         print(f"Running in unconditional sampling mode, producing {num_samples} samples.")
-         for n, bs in tqdm(enumerate(batches), desc="Sampling"):
-             if bs == 0: break
-             logs = sample_unconditional(model, batch_size=bs, temperature=temperature, top_k=top_k, top_p=top_p)
-             save_from_logs(logs, logdir, base_count=n * batch_size)
-
-
- def save_from_logs(logs, logdir, base_count, key="samples", cond_key=None):
-     xx = logs[key]
-     for i, x in enumerate(xx):
-         x = chw_to_pillow(x)
-         count = base_count + i
-         if cond_key is None:
-             x.save(os.path.join(logdir, f"{count:06}.png"))
-         else:
-             condlabel = cond_key[i]
-             if type(condlabel) == torch.Tensor: condlabel = condlabel.item()
-             os.makedirs(os.path.join(logdir, str(condlabel)), exist_ok=True)
-             x.save(os.path.join(logdir, str(condlabel), f"{count:06}.png"))
-
-
- def get_parser():
-     def str2bool(v):
-         if isinstance(v, bool):
-             return v
-         if v.lower() in ("yes", "true", "t", "y", "1"):
-             return True
-         elif v.lower() in ("no", "false", "f", "n", "0"):
-             return False
-         else:
-             raise argparse.ArgumentTypeError("Boolean value expected.")
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "-r",
-         "--resume",
-         type=str,
-         nargs="?",
-         help="load from logdir or checkpoint in logdir",
-     )
-     parser.add_argument(
-         "-o",
-         "--outdir",
-         type=str,
-         nargs="?",
-         help="path where the samples will be logged to.",
-         default=""
-     )
-     parser.add_argument(
-         "-b",
-         "--base",
-         nargs="*",
-         metavar="base_config.yaml",
-         help="paths to base configs. Loaded from left-to-right. "
-         "Parameters can be overwritten or added with command-line options of the form `--key value`.",
-         default=list(),
-     )
-     parser.add_argument(
-         "-n",
-         "--num_samples",
-         type=int,
-         nargs="?",
-         help="num_samples to draw",
-         default=50000
-     )
-     parser.add_argument(
-         "--batch_size",
-         type=int,
-         nargs="?",
-         help="the batch size",
-         default=25
-     )
-     parser.add_argument(
-         "-k",
-         "--top_k",
-         type=int,
-         nargs="?",
-         help="top-k value to sample with",
-         default=250,
-     )
-     parser.add_argument(
-         "-t",
-         "--temperature",
-         type=float,
-         nargs="?",
-         help="temperature value to sample with",
-         default=1.0
-     )
-     parser.add_argument(
-         "-p",
-         "--top_p",
-         type=float,
-         nargs="?",
-         help="top-p value to sample with",
-         default=1.0
-     )
-     parser.add_argument(
-         "--classes",
-         type=str,
-         nargs="?",
-         help="specify comma-separated classes to sample from. Uses 1000 classes per default.",
-         default="imagenet"
-     )
-     return parser
-
-
- def load_model_from_config(config, sd, gpu=True, eval_mode=True):
-     model = instantiate_from_config(config)
-     if sd is not None:
-         model.load_state_dict(sd)
-     if gpu:
-         model.cuda()
-     if eval_mode:
-         model.eval()
-     return {"model": model}
-
-
- def load_model(config, ckpt, gpu, eval_mode):
-     # load the specified checkpoint
-     if ckpt:
-         pl_sd = torch.load(ckpt, map_location="cpu")
-         global_step = pl_sd["global_step"]
-         print(f"loaded model from global step {global_step}.")
-     else:
-         pl_sd = {"state_dict": None}
-         global_step = None
-     model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
-     return model, global_step
-
-
- if __name__ == "__main__":
-     sys.path.append(os.getcwd())
-     parser = get_parser()
-
-     opt, unknown = parser.parse_known_args()
-     assert opt.resume
-
-     ckpt = None
-
-     if not os.path.exists(opt.resume):
-         raise ValueError("Cannot find {}".format(opt.resume))
-     if os.path.isfile(opt.resume):
-         paths = opt.resume.split("/")
-         try:
-             idx = len(paths)-paths[::-1].index("logs")+1
-         except ValueError:
-             idx = -2  # take a guess: path/to/logdir/checkpoints/model.ckpt
-         logdir = "/".join(paths[:idx])
-         ckpt = opt.resume
-     else:
-         assert os.path.isdir(opt.resume), opt.resume
-         logdir = opt.resume.rstrip("/")
-         ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
-
-     base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
-     opt.base = base_configs+opt.base
-
-     configs = [OmegaConf.load(cfg) for cfg in opt.base]
-     cli = OmegaConf.from_dotlist(unknown)
-     config = OmegaConf.merge(*configs, cli)
-
-     model, global_step = load_model(config, ckpt, gpu=True, eval_mode=True)
-
-     if opt.outdir:
-         print(f"Switching logdir from '{logdir}' to '{opt.outdir}'")
-         logdir = opt.outdir
-
-     if opt.classes == "imagenet":
-         given_classes = [i for i in range(1000)]
-     else:
-         cls_str = opt.classes
-         assert not cls_str.endswith(","), 'class string should not end with a ","'
-         given_classes = [int(c) for c in cls_str.split(",")]
-
-     logdir = os.path.join(logdir, "samples", f"top_k_{opt.top_k}_temp_{opt.temperature:.2f}_top_p_{opt.top_p}",
-                           f"{global_step}")
-
-     print(f"Logging to {logdir}")
-     os.makedirs(logdir, exist_ok=True)
-
-     run(logdir, model, opt.batch_size, opt.temperature, opt.top_k, unconditional=model.be_unconditional,
-         given_classes=given_classes, num_samples=opt.num_samples, top_p=opt.top_p)
-
-     print("done.")
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/__init__.py DELETED
@@ -1,54 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- #     http://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- import os
-
- from botocore.docs.service import ServiceDocumenter
-
- DEPRECATED_SERVICE_NAMES = {'sms-voice'}
-
-
- def generate_docs(root_dir, session):
-     """Generates the reference documentation for botocore
-
-     This will go through every available AWS service and output ReSTructured
-     text files documenting each service.
-
-     :param root_dir: The directory to write the reference files to. Each
-         service's reference documentation is located at
-         root_dir/reference/services/service-name.rst
-     """
-     # Create the root directory where all service docs live.
-     services_dir_path = os.path.join(root_dir, 'reference', 'services')
-     if not os.path.exists(services_dir_path):
-         os.makedirs(services_dir_path)
-
-     # Prevents deprecated service names from being generated in docs.
-     available_services = [
-         service
-         for service in session.get_available_services()
-         if service not in DEPRECATED_SERVICE_NAMES
-     ]
-
-     # Generate reference docs and write them out.
-     for service_name in available_services:
-         docs = ServiceDocumenter(
-             service_name, session, services_dir_path
-         ).document_service()
-
-         # Write the main service documentation page.
-         # Path: <root>/reference/services/<service>.rst
-         service_file_path = os.path.join(
-             services_dir_path, f'{service_name}.rst'
-         )
-         with open(service_file_path, 'wb') as f:
-             f.write(docs)
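
A hedged sketch of invoking the generator above; the output directory is an illustrative assumption:

    import botocore.session
    from botocore.docs import generate_docs

    # writes <root>/reference/services/<service>.rst for every non-deprecated service
    generate_docs("build/docs", botocore.session.get_session())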
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/distributions/sdist.py DELETED
@@ -1,150 +0,0 @@
- import logging
- from typing import Iterable, Set, Tuple
-
- from pip._internal.build_env import BuildEnvironment
- from pip._internal.distributions.base import AbstractDistribution
- from pip._internal.exceptions import InstallationError
- from pip._internal.index.package_finder import PackageFinder
- from pip._internal.metadata import BaseDistribution
- from pip._internal.utils.subprocess import runner_with_spinner_message
-
- logger = logging.getLogger(__name__)
-
-
- class SourceDistribution(AbstractDistribution):
-     """Represents a source distribution.
-
-     The preparation step for these needs metadata for the packages to be
-     generated, either using PEP 517 or using the legacy `setup.py egg_info`.
-     """
-
-     def get_metadata_distribution(self) -> BaseDistribution:
-         return self.req.get_dist()
-
-     def prepare_distribution_metadata(
-         self,
-         finder: PackageFinder,
-         build_isolation: bool,
-         check_build_deps: bool,
-     ) -> None:
-         # Load pyproject.toml, to determine whether PEP 517 is to be used
-         self.req.load_pyproject_toml()
-
-         # Set up the build isolation, if this requirement should be isolated
-         should_isolate = self.req.use_pep517 and build_isolation
-         if should_isolate:
-             # Setup an isolated environment and install the build backend static
-             # requirements in it.
-             self._prepare_build_backend(finder)
-             # Check that if the requirement is editable, it either supports PEP 660 or
-             # has a setup.py or a setup.cfg. This cannot be done earlier because we need
-             # to setup the build backend to verify it supports build_editable, nor can
-             # it be done later, because we want to avoid installing build requirements
-             # needlessly. Doing it here also works around setuptools generating
-             # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory
-             # without setup.py nor setup.cfg.
-             self.req.isolated_editable_sanity_check()
-             # Install the dynamic build requirements.
-             self._install_build_reqs(finder)
-         # Check if the current environment provides build dependencies
-         should_check_deps = self.req.use_pep517 and check_build_deps
-         if should_check_deps:
-             pyproject_requires = self.req.pyproject_requires
-             assert pyproject_requires is not None
-             conflicting, missing = self.req.build_env.check_requirements(
-                 pyproject_requires
-             )
-             if conflicting:
-                 self._raise_conflicts("the backend dependencies", conflicting)
-             if missing:
-                 self._raise_missing_reqs(missing)
-         self.req.prepare_metadata()
-
-     def _prepare_build_backend(self, finder: PackageFinder) -> None:
-         # Isolate in a BuildEnvironment and install the build-time
-         # requirements.
-         pyproject_requires = self.req.pyproject_requires
-         assert pyproject_requires is not None
-
-         self.req.build_env = BuildEnvironment()
-         self.req.build_env.install_requirements(
-             finder, pyproject_requires, "overlay", kind="build dependencies"
-         )
-         conflicting, missing = self.req.build_env.check_requirements(
-             self.req.requirements_to_check
-         )
-         if conflicting:
-             self._raise_conflicts("PEP 517/518 supported requirements", conflicting)
-         if missing:
-             logger.warning(
-                 "Missing build requirements in pyproject.toml for %s.",
-                 self.req,
-             )
-             logger.warning(
-                 "The project does not specify a build backend, and "
-                 "pip cannot fall back to setuptools without %s.",
-                 " and ".join(map(repr, sorted(missing))),
-             )
-
-     def _get_build_requires_wheel(self) -> Iterable[str]:
-         with self.req.build_env:
-             runner = runner_with_spinner_message("Getting requirements to build wheel")
-             backend = self.req.pep517_backend
-             assert backend is not None
-             with backend.subprocess_runner(runner):
-                 return backend.get_requires_for_build_wheel()
-
-     def _get_build_requires_editable(self) -> Iterable[str]:
-         with self.req.build_env:
-             runner = runner_with_spinner_message(
-                 "Getting requirements to build editable"
-             )
-             backend = self.req.pep517_backend
-             assert backend is not None
-             with backend.subprocess_runner(runner):
-                 return backend.get_requires_for_build_editable()
-
-     def _install_build_reqs(self, finder: PackageFinder) -> None:
-         # Install any extra build dependencies that the backend requests.
-         # This must be done in a second pass, as the pyproject.toml
-         # dependencies must be installed before we can call the backend.
-         if (
-             self.req.editable
-             and self.req.permit_editable_wheels
-             and self.req.supports_pyproject_editable()
-         ):
-             build_reqs = self._get_build_requires_editable()
-         else:
-             build_reqs = self._get_build_requires_wheel()
-         conflicting, missing = self.req.build_env.check_requirements(build_reqs)
-         if conflicting:
-             self._raise_conflicts("the backend dependencies", conflicting)
-         self.req.build_env.install_requirements(
-             finder, missing, "normal", kind="backend dependencies"
-         )
-
-     def _raise_conflicts(
-         self, conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]]
-     ) -> None:
-         format_string = (
-             "Some build dependencies for {requirement} "
-             "conflict with {conflicting_with}: {description}."
-         )
-         error_message = format_string.format(
-             requirement=self.req,
-             conflicting_with=conflicting_with,
-             description=", ".join(
-                 f"{installed} is incompatible with {wanted}"
-                 for installed, wanted in sorted(conflicting_reqs)
-             ),
-         )
-         raise InstallationError(error_message)
-
-     def _raise_missing_reqs(self, missing: Set[str]) -> None:
-         format_string = (
-             "Some build dependencies for {requirement} are missing: {missing}."
-         )
-         error_message = format_string.format(
-             requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
-         )
-         raise InstallationError(error_message)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/deprecation.py DELETED
@@ -1,120 +0,0 @@
- """
- A module that implements tooling to enable easy warnings about deprecations.
- """
-
- import logging
- import warnings
- from typing import Any, Optional, TextIO, Type, Union
-
- from pip._vendor.packaging.version import parse
-
- from pip import __version__ as current_version  # NOTE: tests patch this name.
-
- DEPRECATION_MSG_PREFIX = "DEPRECATION: "
-
-
- class PipDeprecationWarning(Warning):
-     pass
-
-
- _original_showwarning: Any = None
-
-
- # Warnings <-> Logging Integration
- def _showwarning(
-     message: Union[Warning, str],
-     category: Type[Warning],
-     filename: str,
-     lineno: int,
-     file: Optional[TextIO] = None,
-     line: Optional[str] = None,
- ) -> None:
-     if file is not None:
-         if _original_showwarning is not None:
-             _original_showwarning(message, category, filename, lineno, file, line)
-     elif issubclass(category, PipDeprecationWarning):
-         # We use a specially named logger which will handle all of the
-         # deprecation messages for pip.
-         logger = logging.getLogger("pip._internal.deprecations")
-         logger.warning(message)
-     else:
-         _original_showwarning(message, category, filename, lineno, file, line)
-
-
- def install_warning_logger() -> None:
-     # Enable our Deprecation Warnings
-     warnings.simplefilter("default", PipDeprecationWarning, append=True)
-
-     global _original_showwarning
-
-     if _original_showwarning is None:
-         _original_showwarning = warnings.showwarning
-         warnings.showwarning = _showwarning
-
-
- def deprecated(
-     *,
-     reason: str,
-     replacement: Optional[str],
-     gone_in: Optional[str],
-     feature_flag: Optional[str] = None,
-     issue: Optional[int] = None,
- ) -> None:
-     """Helper to deprecate existing functionality.
-
-     reason:
-         Textual reason shown to the user about why this functionality has
-         been deprecated. Should be a complete sentence.
-     replacement:
-         Textual suggestion shown to the user about what alternative
-         functionality they can use.
-     gone_in:
-         The version of pip does this functionality should get removed in.
-         Raises an error if pip's current version is greater than or equal to
-         this.
-     feature_flag:
-         Command-line flag of the form --use-feature={feature_flag} for testing
-         upcoming functionality.
-     issue:
-         Issue number on the tracker that would serve as a useful place for
-         users to find related discussion and provide feedback.
-     """
-
-     # Determine whether or not the feature is already gone in this version.
-     is_gone = gone_in is not None and parse(current_version) >= parse(gone_in)
-
-     message_parts = [
-         (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"),
-         (
-             gone_in,
-             "pip {} will enforce this behaviour change."
-             if not is_gone
-             else "Since pip {}, this is no longer supported.",
-         ),
-         (
-             replacement,
-             "A possible replacement is {}.",
-         ),
-         (
-             feature_flag,
-             "You can use the flag --use-feature={} to test the upcoming behaviour."
-             if not is_gone
-             else None,
-         ),
-         (
-             issue,
-             "Discussion can be found at https://github.com/pypa/pip/issues/{}",
-         ),
-     ]
-
-     message = " ".join(
-         format_str.format(value)
-         for value, format_str in message_parts
-         if format_str is not None and value is not None
-     )
-
-     # Raise as an error if this behaviour is deprecated.
-     if is_gone:
-         raise PipDeprecationWarning(message)
-
-     warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
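
For example, a call like the following (the values are illustrative, and it assumes the running pip is older than the `gone_in` version, so the helper warns instead of raising):

    deprecated(
        reason="--build-dir is ignored.",
        replacement="the TMPDIR environment variable",
        gone_in="21.3",
        issue=10165,
    )
    # emits: "DEPRECATION: --build-dir is ignored. pip 21.3 will enforce this
    # behaviour change. A possible replacement is the TMPDIR environment
    # variable. Discussion can be found at https://github.com/pypa/pip/issues/10165"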
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py DELETED
@@ -1,130 +0,0 @@
- """
- NTLM authenticating pool, contributed by erikcederstran
-
- Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
- """
- from __future__ import absolute_import
-
- import warnings
- from logging import getLogger
-
- from ntlm import ntlm
-
- from .. import HTTPSConnectionPool
- from ..packages.six.moves.http_client import HTTPSConnection
-
- warnings.warn(
-     "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
-     "in urllib3 v2.0 release, urllib3 is not able to support it properly due "
-     "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
-     "If you are a user of this module please comment in the mentioned issue.",
-     DeprecationWarning,
- )
-
- log = getLogger(__name__)
-
-
- class NTLMConnectionPool(HTTPSConnectionPool):
-     """
-     Implements an NTLM authentication version of an urllib3 connection pool
-     """
-
-     scheme = "https"
-
-     def __init__(self, user, pw, authurl, *args, **kwargs):
-         """
-         authurl is a random URL on the server that is protected by NTLM.
-         user is the Windows user, probably in the DOMAIN\\username format.
-         pw is the password for the user.
-         """
-         super(NTLMConnectionPool, self).__init__(*args, **kwargs)
-         self.authurl = authurl
-         self.rawuser = user
-         user_parts = user.split("\\", 1)
-         self.domain = user_parts[0].upper()
-         self.user = user_parts[1]
-         self.pw = pw
-
-     def _new_conn(self):
-         # Performs the NTLM handshake that secures the connection. The socket
-         # must be kept open while requests are performed.
-         self.num_connections += 1
-         log.debug(
-             "Starting NTLM HTTPS connection no. %d: https://%s%s",
-             self.num_connections,
-             self.host,
-             self.authurl,
-         )
-
-         headers = {"Connection": "Keep-Alive"}
-         req_header = "Authorization"
-         resp_header = "www-authenticate"
-
-         conn = HTTPSConnection(host=self.host, port=self.port)
-
-         # Send negotiation message
-         headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
-             self.rawuser
-         )
-         log.debug("Request headers: %s", headers)
-         conn.request("GET", self.authurl, None, headers)
-         res = conn.getresponse()
-         reshdr = dict(res.headers)
-         log.debug("Response status: %s %s", res.status, res.reason)
-         log.debug("Response headers: %s", reshdr)
-         log.debug("Response data: %s [...]", res.read(100))
-
-         # Remove the reference to the socket, so that it can not be closed by
-         # the response object (we want to keep the socket open)
-         res.fp = None
-
-         # Server should respond with a challenge message
-         auth_header_values = reshdr[resp_header].split(", ")
-         auth_header_value = None
-         for s in auth_header_values:
-             if s[:5] == "NTLM ":
-                 auth_header_value = s[5:]
-         if auth_header_value is None:
-             raise Exception(
-                 "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
-             )
-
-         # Send authentication message
-         ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
-             auth_header_value
-         )
-         auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
-             ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
-         )
-         headers[req_header] = "NTLM %s" % auth_msg
-         log.debug("Request headers: %s", headers)
-         conn.request("GET", self.authurl, None, headers)
-         res = conn.getresponse()
-         log.debug("Response status: %s %s", res.status, res.reason)
-         log.debug("Response headers: %s", dict(res.headers))
-         log.debug("Response data: %s [...]", res.read()[:100])
-         if res.status != 200:
-             if res.status == 401:
-                 raise Exception("Server rejected request: wrong username or password")
-             raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
-
-         res.fp = None
-         log.debug("Connection established")
-         return conn
-
-     def urlopen(
-         self,
-         method,
-         url,
-         body=None,
-         headers=None,
-         retries=3,
-         redirect=True,
-         assert_same_host=True,
-     ):
-         if headers is None:
-             headers = {}
-         headers["Connection"] = "Keep-Alive"
-         return super(NTLMConnectionPool, self).urlopen(
-             method, url, body, headers, retries, redirect, assert_same_host
-         )
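
A hypothetical usage sketch (the host, URL, and credentials are illustrative, and the deprecated `ntlm` dependency must be installed):

    pool = NTLMConnectionPool("DOMAIN\\user", "secret", authurl="/protected",
                              host="intranet.example.com", port=443)
    response = pool.urlopen("GET", "/protected")
    print(response.status)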
 
spaces/Big-Web/MMSD/env/Lib/site-packages/six.py DELETED
@@ -1,998 +0,0 @@
- # Copyright (c) 2010-2020 Benjamin Peterson
- #
- # Permission is hereby granted, free of charge, to any person obtaining a copy
- # of this software and associated documentation files (the "Software"), to deal
- # in the Software without restriction, including without limitation the rights
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- # copies of the Software, and to permit persons to whom the Software is
- # furnished to do so, subject to the following conditions:
- #
- # The above copyright notice and this permission notice shall be included in all
- # copies or substantial portions of the Software.
- #
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- # SOFTWARE.
-
- """Utilities for writing code that runs on Python 2 and 3"""
-
- from __future__ import absolute_import
-
- import functools
- import itertools
- import operator
- import sys
- import types
-
- __author__ = "Benjamin Peterson <[email protected]>"
- __version__ = "1.16.0"
-
-
- # Useful for very coarse version differentiation.
- PY2 = sys.version_info[0] == 2
- PY3 = sys.version_info[0] == 3
- PY34 = sys.version_info[0:2] >= (3, 4)
-
- if PY3:
-     string_types = str,
-     integer_types = int,
-     class_types = type,
-     text_type = str
-     binary_type = bytes
-
-     MAXSIZE = sys.maxsize
- else:
-     string_types = basestring,
-     integer_types = (int, long)
-     class_types = (type, types.ClassType)
-     text_type = unicode
-     binary_type = str
-
-     if sys.platform.startswith("java"):
-         # Jython always uses 32 bits.
-         MAXSIZE = int((1 << 31) - 1)
-     else:
-         # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
-         class X(object):
-
-             def __len__(self):
-                 return 1 << 31
-         try:
-             len(X())
-         except OverflowError:
-             # 32-bit
-             MAXSIZE = int((1 << 31) - 1)
-         else:
-             # 64-bit
-             MAXSIZE = int((1 << 63) - 1)
-         del X
-
- if PY34:
-     from importlib.util import spec_from_loader
- else:
-     spec_from_loader = None
-
-
- def _add_doc(func, doc):
-     """Add documentation to a function."""
-     func.__doc__ = doc
-
-
- def _import_module(name):
-     """Import module, returning the module after the last dot."""
-     __import__(name)
-     return sys.modules[name]
-
-
- class _LazyDescr(object):
-
-     def __init__(self, name):
-         self.name = name
-
-     def __get__(self, obj, tp):
-         result = self._resolve()
-         setattr(obj, self.name, result)  # Invokes __set__.
-         try:
-             # This is a bit ugly, but it avoids running this again by
-             # removing this descriptor.
-             delattr(obj.__class__, self.name)
-         except AttributeError:
-             pass
-         return result
-
-
- class MovedModule(_LazyDescr):
-
-     def __init__(self, name, old, new=None):
-         super(MovedModule, self).__init__(name)
-         if PY3:
-             if new is None:
-                 new = name
-             self.mod = new
-         else:
-             self.mod = old
-
-     def _resolve(self):
-         return _import_module(self.mod)
-
-     def __getattr__(self, attr):
-         _module = self._resolve()
-         value = getattr(_module, attr)
-         setattr(self, attr, value)
-         return value
-
-
- class _LazyModule(types.ModuleType):
-
-     def __init__(self, name):
-         super(_LazyModule, self).__init__(name)
-         self.__doc__ = self.__class__.__doc__
-
-     def __dir__(self):
-         attrs = ["__doc__", "__name__"]
-         attrs += [attr.name for attr in self._moved_attributes]
-         return attrs
-
-     # Subclasses should override this
-     _moved_attributes = []
-
-
- class MovedAttribute(_LazyDescr):
-
-     def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
-         super(MovedAttribute, self).__init__(name)
-         if PY3:
-             if new_mod is None:
-                 new_mod = name
-             self.mod = new_mod
-             if new_attr is None:
-                 if old_attr is None:
-                     new_attr = name
-                 else:
-                     new_attr = old_attr
-             self.attr = new_attr
-         else:
-             self.mod = old_mod
-             if old_attr is None:
-                 old_attr = name
-             self.attr = old_attr
-
-     def _resolve(self):
-         module = _import_module(self.mod)
-         return getattr(module, self.attr)
-
-
- class _SixMetaPathImporter(object):
-
-     """
-     A meta path importer to import six.moves and its submodules.
-
-     This class implements a PEP302 finder and loader. It should be compatible
-     with Python 2.5 and all existing versions of Python3
-     """
-
-     def __init__(self, six_module_name):
-         self.name = six_module_name
-         self.known_modules = {}
-
-     def _add_module(self, mod, *fullnames):
-         for fullname in fullnames:
-             self.known_modules[self.name + "." + fullname] = mod
-
-     def _get_module(self, fullname):
-         return self.known_modules[self.name + "." + fullname]
-
-     def find_module(self, fullname, path=None):
-         if fullname in self.known_modules:
-             return self
-         return None
-
-     def find_spec(self, fullname, path, target=None):
-         if fullname in self.known_modules:
-             return spec_from_loader(fullname, self)
-         return None
-
-     def __get_module(self, fullname):
-         try:
-             return self.known_modules[fullname]
-         except KeyError:
-             raise ImportError("This loader does not know module " + fullname)
-
-     def load_module(self, fullname):
-         try:
-             # in case of a reload
-             return sys.modules[fullname]
-         except KeyError:
-             pass
-         mod = self.__get_module(fullname)
-         if isinstance(mod, MovedModule):
-             mod = mod._resolve()
-         else:
-             mod.__loader__ = self
-         sys.modules[fullname] = mod
-         return mod
-
-     def is_package(self, fullname):
-         """
-         Return true, if the named module is a package.
-
-         We need this method to get correct spec objects with
-         Python 3.4 (see PEP451)
-         """
-         return hasattr(self.__get_module(fullname), "__path__")
-
-     def get_code(self, fullname):
-         """Return None
-
-         Required, if is_package is implemented"""
-         self.__get_module(fullname)  # eventually raises ImportError
-         return None
-     get_source = get_code  # same as get_code
-
-     def create_module(self, spec):
-         return self.load_module(spec.name)
-
-     def exec_module(self, module):
-         pass
- _importer = _SixMetaPathImporter(__name__)
-
-
- class _MovedItems(_LazyModule):
-
-     """Lazy loading of moved objects"""
-     __path__ = []  # mark as package
-
-
- _moved_attributes = [
-     MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
-     MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
-     MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
-     MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
-     MovedAttribute("intern", "__builtin__", "sys"),
-     MovedAttribute("map", "itertools", "builtins", "imap", "map"),
-     MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
-     MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
-     MovedAttribute("getoutput", "commands", "subprocess"),
-     MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
-     MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
-     MovedAttribute("reduce", "__builtin__", "functools"),
-     MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
-     MovedAttribute("StringIO", "StringIO", "io"),
-     MovedAttribute("UserDict", "UserDict", "collections"),
-     MovedAttribute("UserList", "UserList", "collections"),
-     MovedAttribute("UserString", "UserString", "collections"),
-     MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
-     MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
-     MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
-     MovedModule("builtins", "__builtin__"),
-     MovedModule("configparser", "ConfigParser"),
-     MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
-     MovedModule("copyreg", "copy_reg"),
-     MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
-     MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
-     MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
-     MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
-     MovedModule("http_cookies", "Cookie", "http.cookies"),
-     MovedModule("html_entities", "htmlentitydefs", "html.entities"),
-     MovedModule("html_parser", "HTMLParser", "html.parser"),
-     MovedModule("http_client", "httplib", "http.client"),
-     MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
-     MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
-     MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
-     MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
-     MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
-     MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
-     MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
-     MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
-     MovedModule("cPickle", "cPickle", "pickle"),
-     MovedModule("queue", "Queue"),
-     MovedModule("reprlib", "repr"),
-     MovedModule("socketserver", "SocketServer"),
-     MovedModule("_thread", "thread", "_thread"),
-     MovedModule("tkinter", "Tkinter"),
-     MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
-     MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
-     MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
-     MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
-     MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
-     MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
-     MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
-     MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
-     MovedModule("tkinter_colorchooser", "tkColorChooser",
-                 "tkinter.colorchooser"),
-     MovedModule("tkinter_commondialog", "tkCommonDialog",
-                 "tkinter.commondialog"),
-     MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
-     MovedModule("tkinter_font", "tkFont", "tkinter.font"),
-     MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
-     MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
-                 "tkinter.simpledialog"),
-     MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
-     MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
-     MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
-     MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
-     MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
-     MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
- ]
- # Add windows specific modules.
- if sys.platform == "win32":
-     _moved_attributes += [
-         MovedModule("winreg", "_winreg"),
-     ]
-
- for attr in _moved_attributes:
-     setattr(_MovedItems, attr.name, attr)
-     if isinstance(attr, MovedModule):
-         _importer._add_module(attr, "moves." + attr.name)
- del attr
-
- _MovedItems._moved_attributes = _moved_attributes
-
- moves = _MovedItems(__name__ + ".moves")
- _importer._add_module(moves, "moves")
-
-
- class Module_six_moves_urllib_parse(_LazyModule):
341
-
342
- """Lazy loading of moved objects in six.moves.urllib_parse"""
343
-
344
-
345
- _urllib_parse_moved_attributes = [
346
- MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
347
- MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
348
- MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
349
- MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
350
- MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
351
- MovedAttribute("urljoin", "urlparse", "urllib.parse"),
352
- MovedAttribute("urlparse", "urlparse", "urllib.parse"),
353
- MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
354
- MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
355
- MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
356
- MovedAttribute("quote", "urllib", "urllib.parse"),
357
- MovedAttribute("quote_plus", "urllib", "urllib.parse"),
358
- MovedAttribute("unquote", "urllib", "urllib.parse"),
359
- MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
360
- MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
361
- MovedAttribute("urlencode", "urllib", "urllib.parse"),
362
- MovedAttribute("splitquery", "urllib", "urllib.parse"),
363
- MovedAttribute("splittag", "urllib", "urllib.parse"),
364
- MovedAttribute("splituser", "urllib", "urllib.parse"),
365
- MovedAttribute("splitvalue", "urllib", "urllib.parse"),
366
- MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
367
- MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
368
- MovedAttribute("uses_params", "urlparse", "urllib.parse"),
369
- MovedAttribute("uses_query", "urlparse", "urllib.parse"),
370
- MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
371
- ]
372
- for attr in _urllib_parse_moved_attributes:
373
- setattr(Module_six_moves_urllib_parse, attr.name, attr)
374
- del attr
375
-
376
- Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
377
-
378
- _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
379
- "moves.urllib_parse", "moves.urllib.parse")
380
-
381
-
382
- class Module_six_moves_urllib_error(_LazyModule):
383
-
384
- """Lazy loading of moved objects in six.moves.urllib_error"""
385
-
386
-
387
- _urllib_error_moved_attributes = [
388
- MovedAttribute("URLError", "urllib2", "urllib.error"),
389
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
390
- MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
391
- ]
392
- for attr in _urllib_error_moved_attributes:
393
- setattr(Module_six_moves_urllib_error, attr.name, attr)
394
- del attr
395
-
396
- Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
397
-
398
- _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
399
- "moves.urllib_error", "moves.urllib.error")
400
-
401
-
402
- class Module_six_moves_urllib_request(_LazyModule):
403
-
404
- """Lazy loading of moved objects in six.moves.urllib_request"""
405
-
406
-
407
- _urllib_request_moved_attributes = [
408
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
409
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
410
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
411
- MovedAttribute("pathname2url", "urllib", "urllib.request"),
412
- MovedAttribute("url2pathname", "urllib", "urllib.request"),
413
- MovedAttribute("getproxies", "urllib", "urllib.request"),
414
- MovedAttribute("Request", "urllib2", "urllib.request"),
415
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
416
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
417
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
418
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
419
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
420
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
421
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
422
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
423
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
424
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
425
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
426
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
427
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
428
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
429
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
430
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
431
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
432
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
433
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
434
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
435
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
436
- MovedAttribute("urlretrieve", "urllib", "urllib.request"),
437
- MovedAttribute("urlcleanup", "urllib", "urllib.request"),
438
- MovedAttribute("URLopener", "urllib", "urllib.request"),
439
- MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
440
- MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
441
- MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
442
- MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
443
- ]
444
- for attr in _urllib_request_moved_attributes:
445
- setattr(Module_six_moves_urllib_request, attr.name, attr)
446
- del attr
447
-
448
- Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
449
-
450
- _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
451
- "moves.urllib_request", "moves.urllib.request")
452
-
453
-
454
- class Module_six_moves_urllib_response(_LazyModule):
455
-
456
- """Lazy loading of moved objects in six.moves.urllib_response"""
457
-
458
-
459
- _urllib_response_moved_attributes = [
460
- MovedAttribute("addbase", "urllib", "urllib.response"),
461
- MovedAttribute("addclosehook", "urllib", "urllib.response"),
462
- MovedAttribute("addinfo", "urllib", "urllib.response"),
463
- MovedAttribute("addinfourl", "urllib", "urllib.response"),
464
- ]
465
- for attr in _urllib_response_moved_attributes:
466
- setattr(Module_six_moves_urllib_response, attr.name, attr)
467
- del attr
468
-
469
- Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
470
-
471
- _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
472
- "moves.urllib_response", "moves.urllib.response")
473
-
474
-
475
- class Module_six_moves_urllib_robotparser(_LazyModule):
476
-
477
- """Lazy loading of moved objects in six.moves.urllib_robotparser"""
478
-
479
-
480
- _urllib_robotparser_moved_attributes = [
481
- MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
482
- ]
483
- for attr in _urllib_robotparser_moved_attributes:
484
- setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
485
- del attr
486
-
487
- Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
488
-
489
- _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
490
- "moves.urllib_robotparser", "moves.urllib.robotparser")
491
-
492
-
493
- class Module_six_moves_urllib(types.ModuleType):
494
-
495
- """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
496
- __path__ = [] # mark as package
497
- parse = _importer._get_module("moves.urllib_parse")
498
- error = _importer._get_module("moves.urllib_error")
499
- request = _importer._get_module("moves.urllib_request")
500
- response = _importer._get_module("moves.urllib_response")
501
- robotparser = _importer._get_module("moves.urllib_robotparser")
502
-
503
- def __dir__(self):
504
- return ['parse', 'error', 'request', 'response', 'robotparser']
505
-
506
- _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
507
- "moves.urllib")
508
-
509
-
510
- def add_move(move):
511
- """Add an item to six.moves."""
512
- setattr(_MovedItems, move.name, move)
513
-
514
-
515
- def remove_move(name):
516
- """Remove item from six.moves."""
517
- try:
518
- delattr(_MovedItems, name)
519
- except AttributeError:
520
- try:
521
- del moves.__dict__[name]
522
- except KeyError:
523
- raise AttributeError("no such move, %r" % (name,))
524
-
525
-
526
- if PY3:
527
- _meth_func = "__func__"
528
- _meth_self = "__self__"
529
-
530
- _func_closure = "__closure__"
531
- _func_code = "__code__"
532
- _func_defaults = "__defaults__"
533
- _func_globals = "__globals__"
534
- else:
535
- _meth_func = "im_func"
536
- _meth_self = "im_self"
537
-
538
- _func_closure = "func_closure"
539
- _func_code = "func_code"
540
- _func_defaults = "func_defaults"
541
- _func_globals = "func_globals"
542
-
543
-
544
- try:
545
- advance_iterator = next
546
- except NameError:
547
- def advance_iterator(it):
548
- return it.next()
549
- next = advance_iterator
550
-
551
-
552
- try:
553
- callable = callable
554
- except NameError:
555
- def callable(obj):
556
- return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
557
-
558
-
559
- if PY3:
560
- def get_unbound_function(unbound):
561
- return unbound
562
-
563
- create_bound_method = types.MethodType
564
-
565
- def create_unbound_method(func, cls):
566
- return func
567
-
568
- Iterator = object
569
- else:
570
- def get_unbound_function(unbound):
571
- return unbound.im_func
572
-
573
- def create_bound_method(func, obj):
574
- return types.MethodType(func, obj, obj.__class__)
575
-
576
- def create_unbound_method(func, cls):
577
- return types.MethodType(func, None, cls)
578
-
579
- class Iterator(object):
580
-
581
- def next(self):
582
- return type(self).__next__(self)
583
-
584
- callable = callable
585
- _add_doc(get_unbound_function,
586
- """Get the function out of a possibly unbound function""")
587
-
588
-
589
- get_method_function = operator.attrgetter(_meth_func)
590
- get_method_self = operator.attrgetter(_meth_self)
591
- get_function_closure = operator.attrgetter(_func_closure)
592
- get_function_code = operator.attrgetter(_func_code)
593
- get_function_defaults = operator.attrgetter(_func_defaults)
594
- get_function_globals = operator.attrgetter(_func_globals)
595
-
596
-
597
- if PY3:
598
- def iterkeys(d, **kw):
599
- return iter(d.keys(**kw))
600
-
601
- def itervalues(d, **kw):
602
- return iter(d.values(**kw))
603
-
604
- def iteritems(d, **kw):
605
- return iter(d.items(**kw))
606
-
607
- def iterlists(d, **kw):
608
- return iter(d.lists(**kw))
609
-
610
- viewkeys = operator.methodcaller("keys")
611
-
612
- viewvalues = operator.methodcaller("values")
613
-
614
- viewitems = operator.methodcaller("items")
615
- else:
616
- def iterkeys(d, **kw):
617
- return d.iterkeys(**kw)
618
-
619
- def itervalues(d, **kw):
620
- return d.itervalues(**kw)
621
-
622
- def iteritems(d, **kw):
623
- return d.iteritems(**kw)
624
-
625
- def iterlists(d, **kw):
626
- return d.iterlists(**kw)
627
-
628
- viewkeys = operator.methodcaller("viewkeys")
629
-
630
- viewvalues = operator.methodcaller("viewvalues")
631
-
632
- viewitems = operator.methodcaller("viewitems")
633
-
634
- _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
635
- _add_doc(itervalues, "Return an iterator over the values of a dictionary.")
636
- _add_doc(iteritems,
637
- "Return an iterator over the (key, value) pairs of a dictionary.")
638
- _add_doc(iterlists,
639
- "Return an iterator over the (key, [values]) pairs of a dictionary.")
640
-
641
-
642
- if PY3:
643
- def b(s):
644
- return s.encode("latin-1")
645
-
646
- def u(s):
647
- return s
648
- unichr = chr
649
- import struct
650
- int2byte = struct.Struct(">B").pack
651
- del struct
652
- byte2int = operator.itemgetter(0)
653
- indexbytes = operator.getitem
654
- iterbytes = iter
655
- import io
656
- StringIO = io.StringIO
657
- BytesIO = io.BytesIO
658
- del io
659
- _assertCountEqual = "assertCountEqual"
660
- if sys.version_info[1] <= 1:
661
- _assertRaisesRegex = "assertRaisesRegexp"
662
- _assertRegex = "assertRegexpMatches"
663
- _assertNotRegex = "assertNotRegexpMatches"
664
- else:
665
- _assertRaisesRegex = "assertRaisesRegex"
666
- _assertRegex = "assertRegex"
667
- _assertNotRegex = "assertNotRegex"
668
- else:
669
- def b(s):
670
- return s
671
- # Workaround for standalone backslash
672
-
673
- def u(s):
674
- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
675
- unichr = unichr
676
- int2byte = chr
677
-
678
- def byte2int(bs):
679
- return ord(bs[0])
680
-
681
- def indexbytes(buf, i):
682
- return ord(buf[i])
683
- iterbytes = functools.partial(itertools.imap, ord)
684
- import StringIO
685
- StringIO = BytesIO = StringIO.StringIO
686
- _assertCountEqual = "assertItemsEqual"
687
- _assertRaisesRegex = "assertRaisesRegexp"
688
- _assertRegex = "assertRegexpMatches"
689
- _assertNotRegex = "assertNotRegexpMatches"
690
- _add_doc(b, """Byte literal""")
691
- _add_doc(u, """Text literal""")
692
-
693
-
694
- def assertCountEqual(self, *args, **kwargs):
695
- return getattr(self, _assertCountEqual)(*args, **kwargs)
696
-
697
-
698
- def assertRaisesRegex(self, *args, **kwargs):
699
- return getattr(self, _assertRaisesRegex)(*args, **kwargs)
700
-
701
-
702
- def assertRegex(self, *args, **kwargs):
703
- return getattr(self, _assertRegex)(*args, **kwargs)
704
-
705
-
706
- def assertNotRegex(self, *args, **kwargs):
707
- return getattr(self, _assertNotRegex)(*args, **kwargs)
708
-
709
-
710
- if PY3:
711
- exec_ = getattr(moves.builtins, "exec")
712
-
713
- def reraise(tp, value, tb=None):
714
- try:
715
- if value is None:
716
- value = tp()
717
- if value.__traceback__ is not tb:
718
- raise value.with_traceback(tb)
719
- raise value
720
- finally:
721
- value = None
722
- tb = None
723
-
724
- else:
725
- def exec_(_code_, _globs_=None, _locs_=None):
726
- """Execute code in a namespace."""
727
- if _globs_ is None:
728
- frame = sys._getframe(1)
729
- _globs_ = frame.f_globals
730
- if _locs_ is None:
731
- _locs_ = frame.f_locals
732
- del frame
733
- elif _locs_ is None:
734
- _locs_ = _globs_
735
- exec("""exec _code_ in _globs_, _locs_""")
736
-
737
- exec_("""def reraise(tp, value, tb=None):
738
- try:
739
- raise tp, value, tb
740
- finally:
741
- tb = None
742
- """)
743
-
744
-
745
- if sys.version_info[:2] > (3,):
746
- exec_("""def raise_from(value, from_value):
747
- try:
748
- raise value from from_value
749
- finally:
750
- value = None
751
- """)
752
- else:
753
- def raise_from(value, from_value):
754
- raise value
755
-
756
-
757
- print_ = getattr(moves.builtins, "print", None)
758
- if print_ is None:
759
- def print_(*args, **kwargs):
760
- """The new-style print function for Python 2.4 and 2.5."""
761
- fp = kwargs.pop("file", sys.stdout)
762
- if fp is None:
763
- return
764
-
765
- def write(data):
766
- if not isinstance(data, basestring):
767
- data = str(data)
768
- # If the file has an encoding, encode unicode with it.
769
- if (isinstance(fp, file) and
770
- isinstance(data, unicode) and
771
- fp.encoding is not None):
772
- errors = getattr(fp, "errors", None)
773
- if errors is None:
774
- errors = "strict"
775
- data = data.encode(fp.encoding, errors)
776
- fp.write(data)
777
- want_unicode = False
778
- sep = kwargs.pop("sep", None)
779
- if sep is not None:
780
- if isinstance(sep, unicode):
781
- want_unicode = True
782
- elif not isinstance(sep, str):
783
- raise TypeError("sep must be None or a string")
784
- end = kwargs.pop("end", None)
785
- if end is not None:
786
- if isinstance(end, unicode):
787
- want_unicode = True
788
- elif not isinstance(end, str):
789
- raise TypeError("end must be None or a string")
790
- if kwargs:
791
- raise TypeError("invalid keyword arguments to print()")
792
- if not want_unicode:
793
- for arg in args:
794
- if isinstance(arg, unicode):
795
- want_unicode = True
796
- break
797
- if want_unicode:
798
- newline = unicode("\n")
799
- space = unicode(" ")
800
- else:
801
- newline = "\n"
802
- space = " "
803
- if sep is None:
804
- sep = space
805
- if end is None:
806
- end = newline
807
- for i, arg in enumerate(args):
808
- if i:
809
- write(sep)
810
- write(arg)
811
- write(end)
812
- if sys.version_info[:2] < (3, 3):
813
- _print = print_
814
-
815
- def print_(*args, **kwargs):
816
- fp = kwargs.get("file", sys.stdout)
817
- flush = kwargs.pop("flush", False)
818
- _print(*args, **kwargs)
819
- if flush and fp is not None:
820
- fp.flush()
821
-
822
- _add_doc(reraise, """Reraise an exception.""")
823
-
824
- if sys.version_info[0:2] < (3, 4):
825
- # This does exactly the same what the :func:`py3:functools.update_wrapper`
826
- # function does on Python versions after 3.2. It sets the ``__wrapped__``
827
- # attribute on ``wrapper`` object and it doesn't raise an error if any of
828
- # the attributes mentioned in ``assigned`` and ``updated`` are missing on
829
- # ``wrapped`` object.
830
- def _update_wrapper(wrapper, wrapped,
831
- assigned=functools.WRAPPER_ASSIGNMENTS,
832
- updated=functools.WRAPPER_UPDATES):
833
- for attr in assigned:
834
- try:
835
- value = getattr(wrapped, attr)
836
- except AttributeError:
837
- continue
838
- else:
839
- setattr(wrapper, attr, value)
840
- for attr in updated:
841
- getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
842
- wrapper.__wrapped__ = wrapped
843
- return wrapper
844
- _update_wrapper.__doc__ = functools.update_wrapper.__doc__
845
-
846
- def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
847
- updated=functools.WRAPPER_UPDATES):
848
- return functools.partial(_update_wrapper, wrapped=wrapped,
849
- assigned=assigned, updated=updated)
850
- wraps.__doc__ = functools.wraps.__doc__
851
-
852
- else:
853
- wraps = functools.wraps
854
-
855
-
856
- def with_metaclass(meta, *bases):
857
- """Create a base class with a metaclass."""
858
- # This requires a bit of explanation: the basic idea is to make a dummy
859
- # metaclass for one level of class instantiation that replaces itself with
860
- # the actual metaclass.
861
- class metaclass(type):
862
-
863
- def __new__(cls, name, this_bases, d):
864
- if sys.version_info[:2] >= (3, 7):
865
- # This version introduced PEP 560 that requires a bit
866
- # of extra care (we mimic what is done by __build_class__).
867
- resolved_bases = types.resolve_bases(bases)
868
- if resolved_bases is not bases:
869
- d['__orig_bases__'] = bases
870
- else:
871
- resolved_bases = bases
872
- return meta(name, resolved_bases, d)
873
-
874
- @classmethod
875
- def __prepare__(cls, name, this_bases):
876
- return meta.__prepare__(name, bases)
877
- return type.__new__(metaclass, 'temporary_class', (), {})
878
-
879
-
880
- def add_metaclass(metaclass):
881
- """Class decorator for creating a class with a metaclass."""
882
- def wrapper(cls):
883
- orig_vars = cls.__dict__.copy()
884
- slots = orig_vars.get('__slots__')
885
- if slots is not None:
886
- if isinstance(slots, str):
887
- slots = [slots]
888
- for slots_var in slots:
889
- orig_vars.pop(slots_var)
890
- orig_vars.pop('__dict__', None)
891
- orig_vars.pop('__weakref__', None)
892
- if hasattr(cls, '__qualname__'):
893
- orig_vars['__qualname__'] = cls.__qualname__
894
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
895
- return wrapper
896
-
897
-
898
- def ensure_binary(s, encoding='utf-8', errors='strict'):
899
- """Coerce **s** to six.binary_type.
900
-
901
- For Python 2:
902
- - `unicode` -> encoded to `str`
903
- - `str` -> `str`
904
-
905
- For Python 3:
906
- - `str` -> encoded to `bytes`
907
- - `bytes` -> `bytes`
908
- """
909
- if isinstance(s, binary_type):
910
- return s
911
- if isinstance(s, text_type):
912
- return s.encode(encoding, errors)
913
- raise TypeError("not expecting type '%s'" % type(s))
914
-
915
-
916
- def ensure_str(s, encoding='utf-8', errors='strict'):
917
- """Coerce *s* to `str`.
918
-
919
- For Python 2:
920
- - `unicode` -> encoded to `str`
921
- - `str` -> `str`
922
-
923
- For Python 3:
924
- - `str` -> `str`
925
- - `bytes` -> decoded to `str`
926
- """
927
- # Optimization: Fast return for the common case.
928
- if type(s) is str:
929
- return s
930
- if PY2 and isinstance(s, text_type):
931
- return s.encode(encoding, errors)
932
- elif PY3 and isinstance(s, binary_type):
933
- return s.decode(encoding, errors)
934
- elif not isinstance(s, (text_type, binary_type)):
935
- raise TypeError("not expecting type '%s'" % type(s))
936
- return s
937
-
938
-
939
- def ensure_text(s, encoding='utf-8', errors='strict'):
940
- """Coerce *s* to six.text_type.
941
-
942
- For Python 2:
943
- - `unicode` -> `unicode`
944
- - `str` -> `unicode`
945
-
946
- For Python 3:
947
- - `str` -> `str`
948
- - `bytes` -> decoded to `str`
949
- """
950
- if isinstance(s, binary_type):
951
- return s.decode(encoding, errors)
952
- elif isinstance(s, text_type):
953
- return s
954
- else:
955
- raise TypeError("not expecting type '%s'" % type(s))
956
-
957
-
958
- def python_2_unicode_compatible(klass):
959
- """
960
- A class decorator that defines __unicode__ and __str__ methods under Python 2.
961
- Under Python 3 it does nothing.
962
-
963
- To support Python 2 and 3 with a single code base, define a __str__ method
964
- returning text and apply this decorator to the class.
965
- """
966
- if PY2:
967
- if '__str__' not in klass.__dict__:
968
- raise ValueError("@python_2_unicode_compatible cannot be applied "
969
- "to %s because it doesn't define __str__()." %
970
- klass.__name__)
971
- klass.__unicode__ = klass.__str__
972
- klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
973
- return klass
974
-
975
-
976
- # Complete the moves implementation.
977
- # This code is at the end of this module to speed up module loading.
978
- # Turn this module into a package.
979
- __path__ = [] # required for PEP 302 and PEP 451
980
- __package__ = __name__ # see PEP 366 @ReservedAssignment
981
- if globals().get("__spec__") is not None:
982
- __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
983
- # Remove other six meta path importers, since they cause problems. This can
984
- # happen if six is removed from sys.modules and then reloaded. (Setuptools does
985
- # this for some reason.)
986
- if sys.meta_path:
987
- for i, importer in enumerate(sys.meta_path):
988
- # Here's some real nastiness: Another "instance" of the six module might
989
- # be floating around. Therefore, we can't use isinstance() to check for
990
- # the six meta path importer, since the other six instance will have
991
- # inserted an importer with different class.
992
- if (type(importer).__name__ == "_SixMetaPathImporter" and
993
- importer.name == __name__):
994
- del sys.meta_path[i]
995
- break
996
- del i, importer
997
- # Finally, add the importer to the meta path import hook.
998
- sys.meta_path.append(_importer)
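
For context, a minimal sketch of how code that depended on this vendored shim typically consumed it. It assumes a standalone `six` install (pip install six); the vendored copy deleted above registers the same `six.moves` aliases through the `_SixMetaPathImporter` shown in the diff:

# A minimal sketch, assuming `pip install six`; all names used here are
# part of six's public API.
import six
from six.moves import urllib

# moves: one import path that resolves correctly on both Python 2 and 3
parts = urllib.parse.urlparse("https://example.com/a?b=1")
print(parts.netloc, parts.query)

# ensure_* helpers coerce between bytes and text on either major version
print(six.ensure_str(b"bytes in, str out"))
print(six.ensure_binary("str in, bytes out"))
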
spaces/BilalSardar/Remove_Text_for_Image/app.py DELETED
@@ -1,73 +0,0 @@
-
-import keras_ocr
-import cv2
-import math
-import numpy as np
-import gradio as gr
-
-def midpoint(x1, y1, x2, y2):
-    x_mid = int((x1 + x2) / 2)
-    y_mid = int((y1 + y2) / 2)
-    return (x_mid, y_mid)
-
-def segment_img(img):
-    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
-    # mask of green hues
-    mask = cv2.inRange(hsv, (40, 25, 25), (70, 255, 255))
-
-    imask = mask > 0
-    threshold = np.zeros_like(img, np.uint8)
-    threshold[imask] = img[imask]
-
-    return threshold
-# Main function that detects text and inpaints.
-# Inputs are the image path and the keras_ocr pipeline
-def inpaint_text(img_path, pipeline):
-    # read the image
-    img = keras_ocr.tools.read(img_path)
-
-    # img = segment_img(img)
-
-    # Recognize text (and corresponding regions).
-    # Each list of predictions in prediction_groups is a list of
-    # (word, box) tuples.
-    prediction_groups = pipeline.recognize([img])
-
-    # Define the mask for inpainting
-    mask = np.zeros(img.shape[:2], dtype="uint8")
-    for box in prediction_groups[0]:
-        x0, y0 = box[1][0]
-        x1, y1 = box[1][1]
-        x2, y2 = box[1][2]
-        x3, y3 = box[1][3]
-
-        x_mid0, y_mid0 = midpoint(x1, y1, x2, y2)
-        x_mid1, y_mid1 = midpoint(x0, y0, x3, y3)
-
-        # For the line thickness, we calculate the length of the edge between
-        # the top-right corner and the bottom-right corner (the box height).
-        thickness = int(math.sqrt((x2 - x1)**2 + (y2 - y1)**2))
-
-        # Draw the line onto the mask
-        cv2.line(mask, (x_mid0, y_mid0), (x_mid1, y_mid1), 255,
-                 thickness)
-    # Inpaint once, after all detected words have been added to the mask
-    inpainted_img = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)
-
-    return inpainted_img
-
-# keras-ocr will automatically download pretrained
-# weights for the detector and recognizer.
-pipeline = keras_ocr.pipeline.Pipeline()
-def RemoveText(image):
-    img_text_removed = inpaint_text(image, pipeline)
-    return cv2.cvtColor(img_text_removed, cv2.COLOR_BGR2RGB)
-
-iface = gr.Interface(fn=RemoveText,
-                     inputs=gr.inputs.Image(label="Image to Remove Text From", type="numpy"),
-                     outputs="image",
-                     examples=[["1.jpg"]],
-                     title="Remove Text for Image")
-iface.launch(debug=True)
-
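
A quick, self-contained check of the mask geometry used above: the inpainting line runs between the midpoints of a word box's two short edges, with thickness equal to the box height, so it covers the whole word. The box coordinates below are hypothetical, chosen only to illustrate the geometry:

# Self-contained sketch of the masking step; box coordinates are made up.
import math
import numpy as np
import cv2

box = [(10, 20), (110, 20), (110, 50), (10, 50)]  # TL, TR, BR, BL corners
(x0, y0), (x1, y1), (x2, y2), (x3, y3) = box

mid_right = ((x1 + x2) // 2, (y1 + y2) // 2)  # midpoint of the right edge
mid_left = ((x0 + x3) // 2, (y0 + y3) // 2)   # midpoint of the left edge
thickness = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))  # box height

mask = np.zeros((100, 200), dtype="uint8")
cv2.line(mask, mid_right, mid_left, 255, thickness)
print(mask[35, 60])  # 255: the interior of the word box is masked
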
spaces/CVPR/LIVE/pybind11/tools/FindCatch.cmake DELETED
@@ -1,70 +0,0 @@
-# - Find the Catch test framework or download it (single header)
-#
-# This is a quick module for internal use. It assumes that a minimum
-# Catch version is provided (not EXACT) and that Catch is not passed to
-# find_package as REQUIRED, since the module handles the fallback itself:
-# if a suitable version isn't found locally, the single header file
-# will be downloaded and placed in the build dir: PROJECT_BINARY_DIR.
-#
-# This code sets the following variables:
-#  CATCH_INCLUDE_DIR  - path to catch.hpp
-#  CATCH_VERSION      - version number
-
-if(NOT Catch_FIND_VERSION)
-  message(FATAL_ERROR "A version number must be specified.")
-elseif(Catch_FIND_REQUIRED)
-  message(FATAL_ERROR "This module assumes Catch is not required.")
-elseif(Catch_FIND_VERSION_EXACT)
-  message(FATAL_ERROR "Exact version numbers are not supported, only minimum.")
-endif()
-
-# Extract the version number from catch.hpp
-function(_get_catch_version)
-  file(
-    STRINGS "${CATCH_INCLUDE_DIR}/catch.hpp" version_line
-    REGEX "Catch v.*"
-    LIMIT_COUNT 1)
-  if(version_line MATCHES "Catch v([0-9]+)\\.([0-9]+)\\.([0-9]+)")
-    set(CATCH_VERSION
-        "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}"
-        PARENT_SCOPE)
-  endif()
-endfunction()
-
-# Download the single-header version of Catch
-function(_download_catch version destination_dir)
-  message(STATUS "Downloading catch v${version}...")
-  set(url https://github.com/philsquared/Catch/releases/download/v${version}/catch.hpp)
-  file(DOWNLOAD ${url} "${destination_dir}/catch.hpp" STATUS status)
-  list(GET status 0 error)
-  if(error)
-    message(FATAL_ERROR "Could not download ${url}")
-  endif()
-  set(CATCH_INCLUDE_DIR
-      "${destination_dir}"
-      CACHE INTERNAL "")
-endfunction()
-
-# Look for catch locally
-find_path(
-  CATCH_INCLUDE_DIR
-  NAMES catch.hpp
-  PATH_SUFFIXES catch2)
-if(CATCH_INCLUDE_DIR)
-  _get_catch_version()
-endif()
-
-# Download the header if it wasn't found or if it's outdated
-if(NOT CATCH_VERSION OR CATCH_VERSION VERSION_LESS ${Catch_FIND_VERSION})
-  if(DOWNLOAD_CATCH)
-    _download_catch(${Catch_FIND_VERSION} "${PROJECT_BINARY_DIR}/catch/")
-    _get_catch_version()
-  else()
-    set(CATCH_FOUND FALSE)
-    return()
-  endif()
-endif()
-
-add_library(Catch2::Catch2 IMPORTED INTERFACE)
-set_property(TARGET Catch2::Catch2 PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${CATCH_INCLUDE_DIR}")
-
-set(CATCH_FOUND TRUE)
spaces/CVPR/LIVE/thrust/dependencies/cub/CODE_OF_CONDUCT.md DELETED
@@ -1,59 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Overview
-
-Define the code of conduct followed and enforced for CUB
-
-### Intended audience
-
-COMMUNITY | DEVELOPERS | PROJECT LEADS
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-- Using welcoming and inclusive language
-- Being respectful of differing viewpoints and experiences
-- Gracefully accepting constructive criticism
-- Focusing on what is best for the community
-- Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-- The use of sexualized language or imagery and unwelcome sexual attention or advances
-- Trolling, insulting/derogatory comments, and personal or political attacks
-- Public or private harassment
-- Publishing others’ private information, such as a physical or electronic address, without explicit permission
-- Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [[email protected]](mailto:[email protected]). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership.
-
-## Attribution
-
-This Code of Conduct was taken from the [NVIDIA RAPIDS](https://docs.rapids.ai/resources/conduct/) project, which was adapted from the [Contributor Covenant](https://www.contributor-covenant.org/), version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq
-
-## Contact
-
-If you need to contact the CUB team, please reach out to one of the following email addresses:
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/iter_swap.h DELETED
@@ -1,23 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system inherits iter_swap
-#include <thrust/system/detail/sequential/iter_swap.h>
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/extrema.h DELETED
@@ -1,44 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// the purpose of this header is to #include the extrema.h header
-// of the sequential, host, and device systems. It should be #included in any
-// code which uses adl to dispatch extrema
-
-#include <thrust/system/detail/sequential/extrema.h>
-
-// SCons can't see through the #defines below to figure out what this header
-// includes, so we fake it out by specifying all possible files we might end up
-// including inside an #if 0.
-#if 0
-#include <thrust/system/cpp/detail/extrema.h>
-#include <thrust/system/cuda/detail/extrema.h>
-#include <thrust/system/omp/detail/extrema.h>
-#include <thrust/system/tbb/detail/extrema.h>
-#endif
-
-#define __THRUST_HOST_SYSTEM_EXTREMA_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/extrema.h>
-#include __THRUST_HOST_SYSTEM_EXTREMA_HEADER
-#undef __THRUST_HOST_SYSTEM_EXTREMA_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_EXTREMA_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/extrema.h>
-#include __THRUST_DEVICE_SYSTEM_EXTREMA_HEADER
-#undef __THRUST_DEVICE_SYSTEM_EXTREMA_HEADER
-
spaces/CVPR/regionclip-demo/detectron2/engine/defaults.py DELETED
@@ -1,705 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-"""
-This file contains components with some default boilerplate logic user may need
-in training / testing. They will not work for everyone, but many users may find them useful.
-
-The behavior of functions/classes in this file is subject to change,
-since they are meant to represent the "common default behavior" people need in their projects.
-"""
-
-import argparse
-import logging
-import os
-import sys
-import weakref
-from collections import OrderedDict
-from typing import Optional
-import torch
-from fvcore.nn.precise_bn import get_bn_modules
-from omegaconf import OmegaConf
-from torch.nn.parallel import DistributedDataParallel
-
-import detectron2.data.transforms as T
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import CfgNode, LazyConfig
-from detectron2.data import (
-    MetadataCatalog,
-    build_detection_test_loader,
-    build_detection_train_loader,
-)
-from detectron2.evaluation import (
-    DatasetEvaluator,
-    inference_on_dataset,
-    print_csv_format,
-    verify_results,
-)
-from detectron2.modeling import build_model
-from detectron2.solver import build_lr_scheduler, build_optimizer
-from detectron2.utils import comm
-from detectron2.utils.collect_env import collect_env_info
-from detectron2.utils.env import seed_all_rng
-from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import setup_logger
-
-from . import hooks
-from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
-
-__all__ = [
-    "create_ddp_model",
-    "default_argument_parser",
-    "default_setup",
-    "default_writers",
-    "DefaultPredictor",
-    "DefaultTrainer",
-]
-
-
-def create_ddp_model(model, *, fp16_compression=False, **kwargs):
-    """
-    Create a DistributedDataParallel model if there are >1 processes.
-
-    Args:
-        model: a torch.nn.Module
-        fp16_compression: add fp16 compression hooks to the ddp object.
-            See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
-        kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
-    """  # noqa
-    if comm.get_world_size() == 1:
-        return model
-    if "device_ids" not in kwargs:
-        kwargs["device_ids"] = [comm.get_local_rank()]
-    ddp = DistributedDataParallel(model, **kwargs)
-    if fp16_compression:
-        from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
-
-        ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
-    return ddp
-
-
-def default_argument_parser(epilog=None):
-    """
-    Create a parser with some common arguments used by detectron2 users.
-
-    Args:
-        epilog (str): epilog passed to ArgumentParser describing the usage.
-
-    Returns:
-        argparse.ArgumentParser:
-    """
-    parser = argparse.ArgumentParser(
-        epilog=epilog
-        or f"""
-Examples:
-
-Run on single machine:
-    $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
-
-Change some config options:
-    $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
-
-Run on multiple machines:
-    (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
-    (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
-""",
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-    )
-    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
-    parser.add_argument(
-        "--resume",
-        action="store_true",
-        help="Whether to attempt to resume from the checkpoint directory. "
-        "See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
-    )
-    parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
-    parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
-    parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
-    parser.add_argument(
-        "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
-    )
-
-    # PyTorch still may leave orphan processes in multi-gpu training.
-    # Therefore we use a deterministic way to obtain port,
-    # so that users are aware of orphan processes by seeing the port occupied.
-    port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
-    parser.add_argument(
-        "--dist-url",
-        default="tcp://127.0.0.1:{}".format(port),
-        help="initialization URL for pytorch distributed backend. See "
-        "https://pytorch.org/docs/stable/distributed.html for details.",
-    )
-    parser.add_argument(
-        "opts",
-        help="Modify config options by adding 'KEY VALUE' pairs at the end of the command. "
-        "See config references at "
-        "https://detectron2.readthedocs.io/modules/config.html#config-references",
-        default=None,
-        nargs=argparse.REMAINDER,
-    )
-    return parser
-
-
-def _try_get_key(cfg, *keys, default=None):
-    """
-    Try select keys from cfg until the first key that exists. Otherwise return default.
-    """
-    if isinstance(cfg, CfgNode):
-        cfg = OmegaConf.create(cfg.dump())
-    for k in keys:
-        parts = k.split(".")
-        # https://github.com/omry/omegaconf/issues/674
-        for p in parts:
-            if p not in cfg:
-                break
-            cfg = OmegaConf.select(cfg, p)
-        else:
-            return cfg
-    return default
-
-
-def _highlight(code, filename):
-    try:
-        import pygments
-    except ImportError:
-        return code
-
-    from pygments.lexers import Python3Lexer, YamlLexer
-    from pygments.formatters import Terminal256Formatter
-
-    lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
-    code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
-    return code
-
-
-def default_setup(cfg, args):
-    """
-    Perform some basic common setups at the beginning of a job, including:
-
-    1. Set up the detectron2 logger
-    2. Log basic information about environment, cmdline arguments, and config
-    3. Backup the config to the output directory
-
-    Args:
-        cfg (CfgNode or omegaconf.DictConfig): the full config to be used
-        args (argparse.NameSpace): the command line arguments to be logged
-    """
-    output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
-    if comm.is_main_process() and output_dir:
-        PathManager.mkdirs(output_dir)
-
-    rank = comm.get_rank()
-    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
-    logger = setup_logger(output_dir, distributed_rank=rank)
-
-    logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
-    logger.info("Environment info:\n" + collect_env_info())
-
-    logger.info("Command line arguments: " + str(args))
-    if hasattr(args, "config_file") and args.config_file != "":
-        logger.info(
-            "Contents of args.config_file={}:\n{}".format(
-                args.config_file,
-                _highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
-            )
-        )
-
-    if comm.is_main_process() and output_dir:
-        # Note: some of our scripts may expect the existence of
-        # config.yaml in output directory
-        path = os.path.join(output_dir, "config.yaml")
-        if isinstance(cfg, CfgNode):
-            logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
-            with PathManager.open(path, "w") as f:
-                f.write(cfg.dump())
-        else:
-            LazyConfig.save(cfg, path)
-        logger.info("Full config saved to {}".format(path))
-
-    # make sure each worker has a different, yet deterministic seed if specified
-    seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
-    seed_all_rng(None if seed < 0 else seed + rank)
-
-    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
-    # typical validation set.
-    if not (hasattr(args, "eval_only") and args.eval_only):
-        torch.backends.cudnn.benchmark = _try_get_key(
-            cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
-        )
-
-
-def default_writers(output_dir: str, max_iter: Optional[int] = None):
-    """
-    Build a list of :class:`EventWriter` to be used.
-    It now consists of a :class:`CommonMetricPrinter`,
-    :class:`TensorboardXWriter` and :class:`JSONWriter`.
-
-    Args:
-        output_dir: directory to store JSON metrics and tensorboard events
-        max_iter: the total number of iterations
-
-    Returns:
-        list[EventWriter]: a list of :class:`EventWriter` objects.
-    """
-    return [
-        # It may not always print what you want to see, since it prints "common" metrics only.
-        CommonMetricPrinter(max_iter),
-        JSONWriter(os.path.join(output_dir, "metrics.json")),
-        TensorboardXWriter(output_dir),
-    ]
-
-
-class DefaultPredictor:
-    """
-    Create a simple end-to-end predictor with the given config that runs on
-    single device for a single input image.
-
-    Compared to using the model directly, this class does the following additions:
-
-    1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
-    2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
-    3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
-    4. Take one input image and produce a single output, instead of a batch.
-
-    This is meant for simple demo purposes, so it does the above steps automatically.
-    This is not meant for benchmarks or running complicated inference logic.
-    If you'd like to do anything more fancy, please refer to its source code as examples
-    to build and use the model manually.
-
-    Attributes:
-        metadata (Metadata): the metadata of the underlying dataset, obtained from
-            cfg.DATASETS.TEST.
-
-    Examples:
-    ::
-        pred = DefaultPredictor(cfg)
-        inputs = cv2.imread("input.jpg")
-        outputs = pred(inputs)
-    """
-
-    def __init__(self, cfg):
-        self.cfg = cfg.clone()  # cfg can be modified by model
-        self.model = build_model(self.cfg)
-        self.model.eval()
-        if len(cfg.DATASETS.TEST):
-            self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
-
-        checkpointer = DetectionCheckpointer(self.model)
-        checkpointer.load(cfg.MODEL.WEIGHTS)
-
-        self.aug = T.ResizeShortestEdge(
-            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
-        )
-
-        self.input_format = cfg.INPUT.FORMAT
-        assert self.input_format in ["RGB", "BGR"], self.input_format
-
-    def __call__(self, original_image):
-        """
-        Args:
-            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
-
-        Returns:
-            predictions (dict):
-                the output of the model for one image only.
-                See :doc:`/tutorials/models` for details about the format.
-        """
-        with torch.no_grad():  # https://github.com/sphinx-doc/sphinx/issues/4258
-            # Apply pre-processing to image.
-            if self.input_format == "RGB":
-                # whether the model expects BGR inputs or RGB
-                original_image = original_image[:, :, ::-1]
-            height, width = original_image.shape[:2]
-            image = self.aug.get_transform(original_image).apply_image(original_image)
-            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
-
-            inputs = {"image": image, "height": height, "width": width}
-            predictions = self.model([inputs])[0]
-            return predictions
-
-
-class DefaultTrainer(TrainerBase):
-    """
-    A trainer with default training logic. It does the following:
-
-    1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
-       defined by the given config. Create a LR scheduler defined by the config.
-    2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
-       `resume_or_load` is called.
-    3. Register a few common hooks defined by the config.
-
-    It is created to simplify the **standard model training workflow** and reduce code boilerplate
-    for users who only need the standard training workflow, with standard features.
-    It means this class makes *many assumptions* about your training logic that
-    may easily become invalid in a new research. In fact, any assumptions beyond those made in the
-    :class:`SimpleTrainer` are too much for research.
-
-    The code of this class has been annotated about restrictive assumptions it makes.
-    When they do not work for you, you're encouraged to:
-
-    1. Overwrite methods of this class, OR:
-    2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
-       nothing else. You can then add your own hooks if needed. OR:
-    3. Write your own training loop similar to `tools/plain_train_net.py`.
-
-    See the :doc:`/tutorials/training` tutorials for more details.
-
-    Note that the behavior of this class, like other functions/classes in
-    this file, is not stable, since it is meant to represent the "common default behavior".
-    It is only guaranteed to work well with the standard models and training workflow in detectron2.
-    To obtain more stable behavior, write your own training logic with other public APIs.
-
-    Examples:
-    ::
-        trainer = DefaultTrainer(cfg)
-        trainer.resume_or_load()  # load last checkpoint or MODEL.WEIGHTS
-        trainer.train()
-
-    Attributes:
-        scheduler:
-        checkpointer (DetectionCheckpointer):
-        cfg (CfgNode):
-    """
-
-    def __init__(self, cfg):
-        """
-        Args:
-            cfg (CfgNode):
-        """
-        super().__init__()
-        logger = logging.getLogger("detectron2")
-        if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
-            setup_logger()
-        cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
-
-        # Assume these objects must be constructed in this order.
-        model = self.build_model(cfg)
-        optimizer = self.build_optimizer(cfg, model)
-        data_loader = self.build_train_loader(cfg)
-
-        model = create_ddp_model(model, broadcast_buffers=False)
-        self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
-            model, data_loader, optimizer
-        )
-
-        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
-        self.checkpointer = DetectionCheckpointer(
-            # Assume you want to save checkpoints together with logs/statistics
-            model,
-            cfg.OUTPUT_DIR,
-            trainer=weakref.proxy(self),
-        )
-        # load 2nd pretrained model
-        if cfg.MODEL.META_ARCHITECTURE in ['CLIPRCNN', 'CLIPFastRCNN', 'PretrainFastRCNN'] \
-            and cfg.MODEL.CLIP.BB_RPN_WEIGHTS is not None\
-            and cfg.MODEL.CLIP.CROP_REGION_TYPE == 'RPN':  # load 2nd pretrained model
-            self.second_checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR, bb_rpn_weights=True)
-        else:
-            self.second_checkpointer = None
-        self.start_iter = 0
-        self.max_iter = cfg.SOLVER.MAX_ITER
-        self.cfg = cfg
-
-        self.register_hooks(self.build_hooks())
-
-    def resume_or_load(self, resume=True):
-        """
-        If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
-        a `last_checkpoint` file), resume from the file. Resuming means loading all
-        available states (eg. optimizer and scheduler) and update iteration counter
-        from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
-
-        Otherwise, this is considered as an independent training. The method will load model
-        weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
-        from iteration 0.
-
-        Args:
-            resume (bool): whether to do resume or not
-        """
-        self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
-        if self.second_checkpointer:
-            self.second_checkpointer.resume_or_load(
-                self.cfg.MODEL.CLIP.BB_RPN_WEIGHTS, resume=False
-            )
-        if resume and self.checkpointer.has_checkpoint():
-            # The checkpoint stores the training iteration that just finished, thus we start
-            # at the next iteration
-            self.start_iter = self.iter + 1
-
-    def build_hooks(self):
-        """
-        Build a list of default hooks, including timing, evaluation,
-        checkpointing, lr scheduling, precise BN, writing events.
-
-        Returns:
-            list[HookBase]:
-        """
-        cfg = self.cfg.clone()
-        cfg.defrost()
-        cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN
-
-        ret = [
-            hooks.IterationTimer(),
-            hooks.LRScheduler(),
-            hooks.PreciseBN(
-                # Run at the same freq as (but before) evaluation.
-                cfg.TEST.EVAL_PERIOD,
-                self.model,
-                # Build a new data loader to not affect training
-                self.build_train_loader(cfg),
-                cfg.TEST.PRECISE_BN.NUM_ITER,
-            )
-            if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
-            else None,
-        ]
-
-        # Do PreciseBN before checkpointer, because it updates the model and need to
-        # be saved by checkpointer.
-        # This is not always the best: if checkpointing has a different frequency,
-        # some checkpoints may have more precise statistics than others.
-        if comm.is_main_process():
-            ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
-
-        def test_and_save_results():
-            self._last_eval_results = self.test(self.cfg, self.model)
-            return self._last_eval_results
-
-        # Do evaluation after checkpointer, because then if it fails,
-        # we can use the saved checkpoint to debug.
-        ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
-
-        if comm.is_main_process():
-            # Here the default print/log frequency of each writer is used.
-            # run writers in the end, so that evaluation metrics are written
-            ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
-        return ret
-
-    def build_writers(self):
-        """
-        Build a list of writers to be used using :func:`default_writers()`.
-        If you'd like a different list of writers, you can overwrite it in
-        your trainer.
-
-        Returns:
-            list[EventWriter]: a list of :class:`EventWriter` objects.
-        """
-        return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
-
-    def train(self):
-        """
-        Run training.
-
-        Returns:
-            OrderedDict of results, if evaluation is enabled. Otherwise None.
-        """
-        super().train(self.start_iter, self.max_iter)
497
- if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
498
- assert hasattr(
499
- self, "_last_eval_results"
500
- ), "No evaluation results obtained during training!"
501
- verify_results(self.cfg, self._last_eval_results)
502
- return self._last_eval_results
503
-
504
- def run_step(self):
505
- self._trainer.iter = self.iter
506
- self._trainer.run_step()
507
-
508
- @classmethod
509
- def build_model(cls, cfg):
510
- """
511
- Returns:
512
- torch.nn.Module:
513
-
514
- It now calls :func:`detectron2.modeling.build_model`.
515
- Overwrite it if you'd like a different model.
516
- """
517
- model = build_model(cfg)
518
- logger = logging.getLogger(__name__)
519
- logger.info("Model:\n{}".format(model))
520
- return model
521
-
522
- @classmethod
523
- def build_optimizer(cls, cfg, model):
524
- """
525
- Returns:
526
- torch.optim.Optimizer:
527
-
528
- It now calls :func:`detectron2.solver.build_optimizer`.
529
- Overwrite it if you'd like a different optimizer.
530
- """
531
- return build_optimizer(cfg, model)
532
-
533
- @classmethod
534
- def build_lr_scheduler(cls, cfg, optimizer):
535
- """
536
- It now calls :func:`detectron2.solver.build_lr_scheduler`.
537
- Overwrite it if you'd like a different scheduler.
538
- """
539
- return build_lr_scheduler(cfg, optimizer)
540
-
541
- @classmethod
542
- def build_train_loader(cls, cfg):
543
- """
544
- Returns:
545
- iterable
546
-
547
- It now calls :func:`detectron2.data.build_detection_train_loader`.
548
- Overwrite it if you'd like a different data loader.
549
- """
550
- return build_detection_train_loader(cfg)
551
-
552
- @classmethod
553
- def build_test_loader(cls, cfg, dataset_name):
554
- """
555
- Returns:
556
- iterable
557
-
558
- It now calls :func:`detectron2.data.build_detection_test_loader`.
559
- Overwrite it if you'd like a different data loader.
560
- """
561
- return build_detection_test_loader(cfg, dataset_name)
562
-
563
- @classmethod
564
- def build_evaluator(cls, cfg, dataset_name):
565
- """
566
- Returns:
567
- DatasetEvaluator or None
568
-
569
- It is not implemented by default.
570
- """
571
- raise NotImplementedError(
572
- """
573
- If you want DefaultTrainer to automatically run evaluation,
574
- please implement `build_evaluator()` in subclasses (see train_net.py for example).
575
- Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
576
- """
577
- )
578
-
579
- @classmethod
580
- def test(cls, cfg, model, queries, evaluators=None):
581
- """
582
- Args:
583
- cfg (CfgNode):
584
- model (nn.Module):
585
- evaluators (list[DatasetEvaluator] or None): if None, will call
586
- :meth:`build_evaluator`. Otherwise, must have the same length as
587
- ``cfg.DATASETS.TEST``.
588
-
589
- Returns:
590
- dict: a dict of result metrics
591
- """
592
- logger = logging.getLogger(__name__)
593
- if isinstance(evaluators, DatasetEvaluator):
594
- evaluators = [evaluators]
595
- if evaluators is not None:
596
- assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
597
- len(cfg.DATASETS.TEST), len(evaluators)
598
- )
599
-
600
- results = OrderedDict()
601
- for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
602
- data_loader = cls.build_test_loader(cfg, dataset_name)
603
- # When evaluators are passed in as arguments,
604
- # implicitly assume that evaluators can be created before data_loader.
605
- if evaluators is not None:
606
- evaluator = evaluators[idx]
607
- results_i = inference_on_dataset(model, data_loader, queries, None)
608
- results[dataset_name] = results_i
609
- if comm.is_main_process():
610
- assert isinstance(
611
- results_i, dict
612
- ), "Evaluator must return a dict on the main process. Got {} instead.".format(
613
- results_i
614
- )
615
- logger.info("Evaluation results for {} in csv format:".format(dataset_name))
616
- print_csv_format(results_i)
617
-
618
- if len(results) == 1:
619
- results = list(results.values())[0]
620
- return results
621
-
622
- @staticmethod
623
- def auto_scale_workers(cfg, num_workers: int):
624
- """
625
- When the config is defined for certain number of workers (according to
626
- ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
627
- workers currently in use, returns a new cfg where the total batch size
628
- is scaled so that the per-GPU batch size stays the same as the
629
- original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
630
-
631
- Other config options are also scaled accordingly:
632
- * training steps and warmup steps are scaled inverse proportionally.
633
- * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.
634
-
635
- For example, with the original config like the following:
636
-
637
- .. code-block:: yaml
638
-
639
- IMS_PER_BATCH: 16
640
- BASE_LR: 0.1
641
- REFERENCE_WORLD_SIZE: 8
642
- MAX_ITER: 5000
643
- STEPS: (4000,)
644
- CHECKPOINT_PERIOD: 1000
645
-
646
- When this config is used on 16 GPUs instead of the reference number 8,
647
- calling this method will return a new config with:
648
-
649
- .. code-block:: yaml
650
-
651
- IMS_PER_BATCH: 32
652
- BASE_LR: 0.2
653
- REFERENCE_WORLD_SIZE: 16
654
- MAX_ITER: 2500
655
- STEPS: (2000,)
656
- CHECKPOINT_PERIOD: 500
657
-
658
- Note that both the original config and this new config can be trained on 16 GPUs.
659
- It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
660
-
661
- Returns:
662
- CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
663
- """
664
- old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
665
- if old_world_size == 0 or old_world_size == num_workers:
666
- return cfg
667
- cfg = cfg.clone()
668
- frozen = cfg.is_frozen()
669
- cfg.defrost()
670
-
671
- assert (
672
- cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
673
- ), "Invalid REFERENCE_WORLD_SIZE in config!"
674
- scale = num_workers / old_world_size
675
- bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
676
- lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
677
- max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
678
- warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
679
- cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
680
- cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
681
- cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
682
- cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
683
- logger = logging.getLogger(__name__)
684
- logger.info(
685
- f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
686
- f"max_iter={max_iter}, warmup={warmup_iter}."
687
- )
688
-
689
- if frozen:
690
- cfg.freeze()
691
- return cfg
692
-
693
-
694
- # Access basic attributes from the underlying trainer
695
- for _attr in ["model", "data_loader", "optimizer"]:
696
- setattr(
697
- DefaultTrainer,
698
- _attr,
699
- property(
700
- # getter
701
- lambda self, x=_attr: getattr(self._trainer, x),
702
- # setter
703
- lambda self, value, x=_attr: setattr(self._trainer, x, value),
704
- ),
705
- )
 
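Note on the `DefaultTrainer` diff above: `build_evaluator` deliberately raises `NotImplementedError`, so automatic evaluation needs a subclass, as its error message says. A minimal sketch of such a subclass, assuming stock detectron2 (`get_cfg`, `COCOEvaluator` from the public API) rather than this fork's query-based `test()`; the config values are placeholders:

import os
from detectron2.config import get_cfg
from detectron2.evaluation import COCOEvaluator

class MyTrainer(DefaultTrainer):  # DefaultTrainer as defined in the diff above
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # Keep evaluation artifacts next to the checkpoints and logs.
        return COCOEvaluator(dataset_name, output_dir=os.path.join(cfg.OUTPUT_DIR, "inference"))

cfg = get_cfg()               # placeholder config; merge your own yaml and weights here
cfg.OUTPUT_DIR = "./output"   # hypothetical output directory
trainer = MyTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()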
spaces/CikeyQI/meme-api/meme_generator/memes/kaleidoscope/__init__.py DELETED
@@ -1,56 +0,0 @@
- import math
- from typing import List
- 
- from pil_utils import BuildImage
- from pydantic import Field
- 
- from meme_generator import MemeArgsModel, MemeArgsParser, MemeArgsType, add_meme
- from meme_generator.utils import make_jpg_or_gif
- 
- help = "是否将图片变为圆形"  # "whether to crop the image into a circle"
- 
- parser = MemeArgsParser(prefix_chars="-/")
- parser.add_argument("--circle", "/圆", action="store_true", help=help)
- 
- 
- class Model(MemeArgsModel):
-     circle: bool = Field(False, description=help)
- 
- 
- def kaleidoscope(images: List[BuildImage], texts, args: Model):
-     def make(img: BuildImage) -> BuildImage:
-         circle_num = 10
-         img_per_circle = 4
-         init_angle = 0
-         angle_step = 360 / img_per_circle
-         radius = lambda n: n * 50 + 100
-         cx = cy = radius(circle_num)
- 
-         img = img.convert("RGBA")
-         frame = BuildImage.new("RGBA", (cx * 2, cy * 2), "white")
-         for i in range(circle_num):
-             r = radius(i)
-             img_w = i * 35 + 100
-             im = img.resize_width(img_w)
-             if args.circle:
-                 im = im.circle()
-             for j in range(img_per_circle):
-                 angle = init_angle + angle_step * j
-                 im_rot = im.rotate(angle - 90, expand=True)
-                 x = round(cx + r * math.cos(math.radians(angle)) - im_rot.width / 2)
-                 y = round(cy - r * math.sin(math.radians(angle)) - im_rot.height / 2)
-                 frame.paste(im_rot, (x, y), alpha=True)
-             init_angle += angle_step / 2
-         return frame
- 
-     return make_jpg_or_gif(images[0], make)
- 
- 
- add_meme(
-     "kaleidoscope",
-     kaleidoscope,
-     min_images=1,
-     max_images=1,
-     args_type=MemeArgsType(parser, Model, [Model(circle=False), Model(circle=True)]),
-     keywords=["万花筒", "万花镜"],
- )
 
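For reference, the placement logic in `kaleidoscope` above is plain polar-to-Cartesian geometry: ring i has radius i * 50 + 100, each ring holds four copies, and the start angle shifts by half a step per ring. A standalone sketch of just that geometry, with no imaging library involved:

import math

def ring_positions(circle_num=10, img_per_circle=4):
    """Yield (ring, angle in degrees, x, y) for every pasted copy."""
    radius = lambda n: n * 50 + 100
    cx = cy = radius(circle_num)           # canvas center
    angle_step = 360 / img_per_circle
    init_angle = 0
    for i in range(circle_num):
        r = radius(i)
        for j in range(img_per_circle):
            angle = init_angle + angle_step * j
            x = cx + r * math.cos(math.radians(angle))
            y = cy - r * math.sin(math.radians(angle))  # screen y grows downward
            yield i, angle, round(x), round(y)
        init_angle += angle_step / 2       # stagger successive rings

for ring, angle, x, y in ring_positions(circle_num=2):
    print(ring, angle, x, y)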
spaces/Cyril666/my_abi/modules/backbone.py DELETED
@@ -1,36 +0,0 @@
- import torch
- import torch.nn as nn
- from fastai.vision import *
- 
- from modules.model import _default_tfmer_cfg
- from modules.resnet import resnet45
- from modules.transformer import (PositionalEncoding,
-                                  TransformerEncoder,
-                                  TransformerEncoderLayer)
- 
- 
- class ResTranformer(nn.Module):
-     def __init__(self, config):
-         super().__init__()
-         self.resnet = resnet45()
- 
-         self.d_model = ifnone(config.model_vision_d_model, _default_tfmer_cfg['d_model'])
-         nhead = ifnone(config.model_vision_nhead, _default_tfmer_cfg['nhead'])
-         d_inner = ifnone(config.model_vision_d_inner, _default_tfmer_cfg['d_inner'])
-         dropout = ifnone(config.model_vision_dropout, _default_tfmer_cfg['dropout'])
-         activation = ifnone(config.model_vision_activation, _default_tfmer_cfg['activation'])
-         num_layers = ifnone(config.model_vision_backbone_ln, 2)
- 
-         self.pos_encoder = PositionalEncoding(self.d_model, max_len=8 * 32)
-         encoder_layer = TransformerEncoderLayer(d_model=self.d_model, nhead=nhead,
-                                                 dim_feedforward=d_inner, dropout=dropout, activation=activation)
-         self.transformer = TransformerEncoder(encoder_layer, num_layers)
- 
-     def forward(self, images):
-         feature = self.resnet(images)
-         n, c, h, w = feature.shape
-         feature = feature.view(n, c, -1).permute(2, 0, 1)
-         feature = self.pos_encoder(feature)
-         feature = self.transformer(feature)
-         feature = feature.permute(1, 2, 0).view(n, c, h, w)
-         return feature
 
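The `forward` of `ResTranformer` flattens the CNN feature map into a sequence for the transformer and then restores the spatial layout. A minimal shape check of that round-trip, in pure PyTorch with illustrative toy sizes:

import torch

n, c, h, w = 2, 512, 8, 32                       # illustrative feature-map shape
feature = torch.randn(n, c, h, w)
seq = feature.view(n, c, -1).permute(2, 0, 1)    # -> (h*w, n, c): sequence-first layout
assert seq.shape == (h * w, n, c)
restored = seq.permute(1, 2, 0).view(n, c, h, w)  # the two permutes compose to identity
assert torch.equal(restored, feature)             # lossless round-trip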
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/log.py DELETED
@@ -1,8 +0,0 @@
- import logging
- 
- access_logger = logging.getLogger("aiohttp.access")
- client_logger = logging.getLogger("aiohttp.client")
- internal_logger = logging.getLogger("aiohttp.internal")
- server_logger = logging.getLogger("aiohttp.server")
- web_logger = logging.getLogger("aiohttp.web")
- ws_logger = logging.getLogger("aiohttp.websocket")
 
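These module-level loggers are ordinary `logging` objects keyed by dotted names, so an application configures them like any other logger. A small sketch (the log message is illustrative):

import logging

# Route aiohttp's access log to the root handler at INFO level; the names
# match the getLogger() calls in the module above.
logging.basicConfig(level=logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.INFO)

logging.getLogger("aiohttp.access").info("GET /health 200")  # example record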
spaces/DReAMy-lib/dream_II/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: DReAM v. II
- emoji: 😴🗣🤖📊
- colorFrom: indigo
- colorTo: pink
- sdk: gradio
- sdk_version: 3.19.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Datasculptor/StyleGAN-NADA/e4e/scripts/train.py DELETED
@@ -1,88 +0,0 @@
- """
- This file runs the main training/val loop
- """
- import os
- import json
- import math
- import sys
- import pprint
- import torch
- from argparse import Namespace
- 
- sys.path.append(".")
- sys.path.append("..")
- 
- from options.train_options import TrainOptions
- from training.coach import Coach
- 
- 
- def main():
-     opts = TrainOptions().parse()
-     previous_train_ckpt = None
-     if opts.resume_training_from_ckpt:
-         opts, previous_train_ckpt = load_train_checkpoint(opts)
-     else:
-         setup_progressive_steps(opts)
-         create_initial_experiment_dir(opts)
- 
-     coach = Coach(opts, previous_train_ckpt)
-     coach.train()
- 
- 
- def load_train_checkpoint(opts):
-     train_ckpt_path = opts.resume_training_from_ckpt
-     previous_train_ckpt = torch.load(opts.resume_training_from_ckpt, map_location='cpu')
-     new_opts_dict = vars(opts)
-     opts = previous_train_ckpt['opts']
-     opts['resume_training_from_ckpt'] = train_ckpt_path
-     update_new_configs(opts, new_opts_dict)
-     pprint.pprint(opts)
-     opts = Namespace(**opts)
-     if opts.sub_exp_dir is not None:
-         sub_exp_dir = opts.sub_exp_dir
-         opts.exp_dir = os.path.join(opts.exp_dir, sub_exp_dir)
-     create_initial_experiment_dir(opts)
-     return opts, previous_train_ckpt
- 
- 
- def setup_progressive_steps(opts):
-     log_size = int(math.log(opts.stylegan_size, 2))
-     num_style_layers = 2 * log_size - 2
-     num_deltas = num_style_layers - 1
-     if opts.progressive_start is not None:  # If progressive delta training
-         opts.progressive_steps = [0]
-         next_progressive_step = opts.progressive_start
-         for i in range(num_deltas):
-             opts.progressive_steps.append(next_progressive_step)
-             next_progressive_step += opts.progressive_step_every
- 
-     assert opts.progressive_steps is None or is_valid_progressive_steps(opts, num_style_layers), \
-         "Invalid progressive training input"
- 
- 
- def is_valid_progressive_steps(opts, num_style_layers):
-     return len(opts.progressive_steps) == num_style_layers and opts.progressive_steps[0] == 0
- 
- 
- def create_initial_experiment_dir(opts):
-     if os.path.exists(opts.exp_dir):
-         raise Exception('Oops... {} already exists'.format(opts.exp_dir))
-     os.makedirs(opts.exp_dir)
- 
-     opts_dict = vars(opts)
-     pprint.pprint(opts_dict)
-     with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f:
-         json.dump(opts_dict, f, indent=4, sort_keys=True)
- 
- 
- def update_new_configs(ckpt_opts, new_opts):
-     for k, v in new_opts.items():
-         if k not in ckpt_opts:
-             ckpt_opts[k] = v
-     if new_opts['update_param_list']:
-         for param in new_opts['update_param_list']:
-             ckpt_opts[param] = new_opts[param]
- 
- 
- if __name__ == '__main__':
-     main()
 
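`setup_progressive_steps` above derives everything from the StyleGAN resolution: for `stylegan_size = 1024`, `log_size = 10`, so there are 2*10 - 2 = 18 style layers and 17 deltas, giving an 18-entry schedule. A self-contained sketch of the same arithmetic, with placeholder values for `progressive_start` and `progressive_step_every`:

import math

def progressive_steps(stylegan_size, progressive_start, progressive_step_every):
    log_size = int(math.log(stylegan_size, 2))
    num_style_layers = 2 * log_size - 2
    steps = [0]                              # delta 0 trains from the start
    step = progressive_start
    for _ in range(num_style_layers - 1):    # one entry per remaining delta
        steps.append(step)
        step += progressive_step_every
    return steps

# e.g. a 1024px model, starting at iteration 20000, opening a new delta every 2000 iters
print(progressive_steps(1024, 20000, 2000))  # 18 entries: [0, 20000, 22000, ...]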
spaces/Demosthene-OR/avr23-cds-translation/tabs/modelisation_seq2seq_tab.py DELETED
@@ -1,479 +0,0 @@
- import streamlit as st
- import pandas as pd
- import numpy as np
- import os
- from sacrebleu import corpus_bleu
- from transformers import pipeline
- from translate import Translator
- from audio_recorder_streamlit import audio_recorder
- import speech_recognition as sr
- import whisper
- import io
- # import wave
- import wavio
- from filesplit.merge import Merge
- import tensorflow as tf
- import string
- import re
- from tensorflow import keras
- from tensorflow.keras import layers
- from keras_nlp.layers import TransformerEncoder
- from tensorflow.keras.utils import plot_model
- from PIL import Image
- from gtts import gTTS
- from extra_streamlit_components import tab_bar, TabBarItemData
- 
- 
- title = "Traduction Sequence à Sequence"
- sidebar_name = "Traduction Seq2Seq"
- 
- # !pip install transformers
- # !pip install sentencepiece
- 
- @st.cache_data
- def load_corpus(path):
-     input_file = os.path.join(path)
-     with open(input_file, "r", encoding="utf-8") as f:
-         data = f.read()
-     data = data.split('\n')
-     data = data[:-1]
-     return pd.DataFrame(data)
- 
- # ===== Keras ====
- strip_chars = string.punctuation + "¿"
- strip_chars = strip_chars.replace("[", "")
- strip_chars = strip_chars.replace("]", "")
- 
- def custom_standardization(input_string):
-     lowercase = tf.strings.lower(input_string)
-     lowercase = tf.strings.regex_replace(lowercase, "[à]", "a")
-     return tf.strings.regex_replace(
-         lowercase, f"[{re.escape(strip_chars)}]", "")
- 
- def load_vocab(file_path):
-     with open(file_path, "r", encoding="utf-8") as file:
-         return file.read().split('\n')[:-1]
- 
- 
- def decode_sequence_rnn(input_sentence, src, tgt):
-     global translation_model
- 
-     vocab_size = 15000
-     sequence_length = 50
- 
-     source_vectorization = layers.TextVectorization(
-         max_tokens=vocab_size,
-         output_mode="int",
-         output_sequence_length=sequence_length,
-         standardize=custom_standardization,
-         vocabulary=load_vocab("data/vocab_" + src + ".txt"),
-     )
- 
-     target_vectorization = layers.TextVectorization(
-         max_tokens=vocab_size,
-         output_mode="int",
-         output_sequence_length=sequence_length + 1,
-         standardize=custom_standardization,
-         vocabulary=load_vocab("data/vocab_" + tgt + ".txt"),
-     )
- 
-     tgt_vocab = target_vectorization.get_vocabulary()
-     tgt_index_lookup = dict(zip(range(len(tgt_vocab)), tgt_vocab))
-     max_decoded_sentence_length = 50
-     tokenized_input_sentence = source_vectorization([input_sentence])
-     decoded_sentence = "[start]"
-     for i in range(max_decoded_sentence_length):
-         tokenized_target_sentence = target_vectorization([decoded_sentence])
-         next_token_predictions = translation_model.predict(
-             [tokenized_input_sentence, tokenized_target_sentence], verbose=0)
-         sampled_token_index = np.argmax(next_token_predictions[0, i, :])
-         sampled_token = tgt_index_lookup[sampled_token_index]
-         decoded_sentence += " " + sampled_token
-         if sampled_token == "[end]":
-             break
-     return decoded_sentence[8:-6]
- 
- # ===== End of Keras ====
- 
- # ===== Transformer section ====
- 
- class TransformerDecoder(layers.Layer):
-     def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
-         super().__init__(**kwargs)
-         self.embed_dim = embed_dim
-         self.dense_dim = dense_dim
-         self.num_heads = num_heads
-         self.attention_1 = layers.MultiHeadAttention(
-             num_heads=num_heads, key_dim=embed_dim)
-         self.attention_2 = layers.MultiHeadAttention(
-             num_heads=num_heads, key_dim=embed_dim)
-         self.dense_proj = keras.Sequential(
-             [layers.Dense(dense_dim, activation="relu"),
-              layers.Dense(embed_dim)]
-         )
-         self.layernorm_1 = layers.LayerNormalization()
-         self.layernorm_2 = layers.LayerNormalization()
-         self.layernorm_3 = layers.LayerNormalization()
-         self.supports_masking = True
- 
-     def get_config(self):
-         config = super().get_config()
-         config.update({
-             "embed_dim": self.embed_dim,
-             "num_heads": self.num_heads,
-             "dense_dim": self.dense_dim,
-         })
-         return config
- 
-     def get_causal_attention_mask(self, inputs):
-         input_shape = tf.shape(inputs)
-         batch_size, sequence_length = input_shape[0], input_shape[1]
-         i = tf.range(sequence_length)[:, tf.newaxis]
-         j = tf.range(sequence_length)
-         mask = tf.cast(i >= j, dtype="int32")
-         mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
-         mult = tf.concat(
-             [tf.expand_dims(batch_size, -1),
-              tf.constant([1, 1], dtype=tf.int32)], axis=0)
-         return tf.tile(mask, mult)
- 
-     def call(self, inputs, encoder_outputs, mask=None):
-         causal_mask = self.get_causal_attention_mask(inputs)
-         if mask is not None:
-             padding_mask = tf.cast(
-                 mask[:, tf.newaxis, :], dtype="int32")
-             padding_mask = tf.minimum(padding_mask, causal_mask)
-         else:
-             padding_mask = mask
-         attention_output_1 = self.attention_1(
-             query=inputs,
-             value=inputs,
-             key=inputs,
-             attention_mask=causal_mask)
-         attention_output_1 = self.layernorm_1(inputs + attention_output_1)
-         attention_output_2 = self.attention_2(
-             query=attention_output_1,
-             value=encoder_outputs,
-             key=encoder_outputs,
-             attention_mask=padding_mask,
-         )
-         attention_output_2 = self.layernorm_2(
-             attention_output_1 + attention_output_2)
-         proj_output = self.dense_proj(attention_output_2)
-         return self.layernorm_3(attention_output_2 + proj_output)
- 
- class PositionalEmbedding(layers.Layer):
-     def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
-         super().__init__(**kwargs)
-         self.token_embeddings = layers.Embedding(
-             input_dim=input_dim, output_dim=output_dim)
-         self.position_embeddings = layers.Embedding(
-             input_dim=sequence_length, output_dim=output_dim)
-         self.sequence_length = sequence_length
-         self.input_dim = input_dim
-         self.output_dim = output_dim
- 
-     def call(self, inputs):
-         length = tf.shape(inputs)[-1]
-         positions = tf.range(start=0, limit=length, delta=1)
-         embedded_tokens = self.token_embeddings(inputs)
-         embedded_positions = self.position_embeddings(positions)
-         return embedded_tokens + embedded_positions
- 
-     def compute_mask(self, inputs, mask=None):
-         return tf.math.not_equal(inputs, 0)
- 
-     def get_config(self):
-         config = super(PositionalEmbedding, self).get_config()
-         config.update({
-             "output_dim": self.output_dim,
-             "sequence_length": self.sequence_length,
-             "input_dim": self.input_dim,
-         })
-         return config
- 
- def decode_sequence_tranf(input_sentence, src, tgt):
-     global translation_model
- 
-     vocab_size = 15000
-     sequence_length = 30
- 
-     source_vectorization = layers.TextVectorization(
-         max_tokens=vocab_size,
-         output_mode="int",
-         output_sequence_length=sequence_length,
-         standardize=custom_standardization,
-         vocabulary=load_vocab("data/vocab_" + src + ".txt"),
-     )
- 
-     target_vectorization = layers.TextVectorization(
-         max_tokens=vocab_size,
-         output_mode="int",
-         output_sequence_length=sequence_length + 1,
-         standardize=custom_standardization,
-         vocabulary=load_vocab("data/vocab_" + tgt + ".txt"),
-     )
- 
-     tgt_vocab = target_vectorization.get_vocabulary()
-     tgt_index_lookup = dict(zip(range(len(tgt_vocab)), tgt_vocab))
-     max_decoded_sentence_length = 50
-     tokenized_input_sentence = source_vectorization([input_sentence])
-     decoded_sentence = "[start]"
-     for i in range(max_decoded_sentence_length):
-         tokenized_target_sentence = target_vectorization(
-             [decoded_sentence])[:, :-1]
-         predictions = translation_model(
-             [tokenized_input_sentence, tokenized_target_sentence])
-         sampled_token_index = np.argmax(predictions[0, i, :])
-         sampled_token = tgt_index_lookup[sampled_token_index]
-         decoded_sentence += " " + sampled_token
-         if sampled_token == "[end]":
-             break
-     return decoded_sentence[8:-6]
- 
- # ==== End Transformer section ====
- 
- @st.cache_resource
- def load_all_data():
-     df_data_en = load_corpus('data/preprocess_txt_en')
-     df_data_fr = load_corpus('data/preprocess_txt_fr')
-     lang_classifier = pipeline('text-classification', model="papluca/xlm-roberta-base-language-detection")
-     translation_en_fr = pipeline('translation_en_to_fr', model="t5-base")
-     translation_fr_en = pipeline('translation_fr_to_en', model="Helsinki-NLP/opus-mt-fr-en")
-     model_speech = whisper.load_model("base")
- 
-     merge = Merge("data/rnn_en-fr_split", "data", "seq2seq_rnn-model-en-fr.h5").merge(cleanup=False)
-     merge = Merge("data/rnn_fr-en_split", "data", "seq2seq_rnn-model-fr-en.h5").merge(cleanup=False)
-     rnn_en_fr = keras.models.load_model("data/seq2seq_rnn-model-en-fr.h5", compile=False)
-     rnn_fr_en = keras.models.load_model("data/seq2seq_rnn-model-fr-en.h5", compile=False)
-     rnn_en_fr.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
-     rnn_fr_en.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
- 
-     custom_objects = {"TransformerDecoder": TransformerDecoder, "PositionalEmbedding": PositionalEmbedding}
-     with keras.saving.custom_object_scope(custom_objects):
-         transformer_en_fr = keras.models.load_model("data/transformer-model-en-fr.h5")
-         transformer_fr_en = keras.models.load_model("data/transformer-model-fr-en.h5")
-     transformer_en_fr.load_weights("data/transformer-model-en-fr.weights.h5")
-     transformer_fr_en.load_weights("data/transformer-model-fr-en.weights.h5")
-     transformer_en_fr.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
-     transformer_fr_en.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
- 
-     return df_data_en, df_data_fr, translation_en_fr, translation_fr_en, lang_classifier, model_speech, rnn_en_fr, rnn_fr_en, transformer_en_fr, transformer_fr_en
- 
- n1 = 0
- df_data_en, df_data_fr, translation_en_fr, translation_fr_en, lang_classifier, model_speech, rnn_en_fr, rnn_fr_en, transformer_en_fr, transformer_fr_en = load_all_data()
- 
- 
- def display_translation(n1, Lang, model_type):
-     global df_data_src, df_data_tgt, placeholder
- 
-     placeholder = st.empty()
-     with st.status(":sunglasses:", expanded=True):
-         s = df_data_src.iloc[n1:n1 + 5][0].tolist()
-         s_trad = []
-         s_trad_ref = df_data_tgt.iloc[n1:n1 + 5][0].tolist()
-         source = Lang[:2]
-         target = Lang[-2:]
-         for i in range(5):
-             if model_type == 1:
-                 s_trad.append(decode_sequence_rnn(s[i], source, target))
-             else:
-                 s_trad.append(decode_sequence_tranf(s[i], source, target))
-             st.write("**" + source + " :** :blue[" + s[i] + "]")
-             st.write("**" + target + " :** " + s_trad[-1])
-             st.write("**ref. :** " + s_trad_ref[i])
-             st.write("")
-         with placeholder:
-             st.write("<p style='text-align:center;background-color:red; color:white'>Score Bleu = " + str(int(round(corpus_bleu(s_trad, [s_trad_ref]).score, 0))) + "%</p>",
-                      unsafe_allow_html=True)
- 
- @st.cache_data
- def find_lang_label(lang_sel):
-     global lang_tgt, label_lang
-     return label_lang[lang_tgt.index(lang_sel)]
- 
- def run():
- 
-     global n1, df_data_src, df_data_tgt, translation_model, placeholder, model_speech
-     global df_data_en, df_data_fr, lang_classifier, translation_en_fr, translation_fr_en
-     global lang_tgt, label_lang
- 
-     st.title(title)
-     st.write("## **Explications :**\n")
- 
-     st.markdown(
-         """
-         Enfin, nous avons réalisé une traduction :red[**Seq2Seq**] ("Sequence-to-Sequence") avec des :red[**réseaux neuronaux**].
-         La traduction Seq2Seq est une méthode d'apprentissage automatique qui permet de traduire des séquences de texte d'une langue à une autre en utilisant
-         un :red[**encodeur**] pour capturer le sens du texte source, un :red[**décodeur**] pour générer la traduction, et un :red[**vecteur de contexte**] pour relier les deux parties du modèle.
-         """
-     )
- 
-     lang_tgt = ['en','fr','ab','aa','af','ak','sq','de','am','en','ar','an','hy','as','av','ae','ay','az','ba','bm','eu','bn','bi','be','bh','my','bs','br','bg','ks','ca','ch','ny','zh','si','ko','kw','co','ht','cr','hr','da','dz','gd','es','eo','et','ee','fo','fj','fi','fr','fy','gl','cy','lg','ka','el','kl','gn','gu','ha','he','hz','hi','ho','hu','io','ig','id','ia','iu','ik','ga','is','it','ja','jv','kn','kr','kk','km','kg','ki','rw','ky','rn','kv','kj','ku','lo','la','lv','li','ln','lt','lu','lb','mk','ms','ml','dv','mg','mt','gv','mi','mr','mh','mo','mn','na','nv','ng','nl','ne','no','nb','nn','nr','ie','oc','oj','or','om','os','ug','ur','uz','ps','pi','pa','fa','ff','pl','pt','qu','rm','ro','ru','se','sm','sg','sa','sc','sr','sh','sn','nd','sd','sk','sl','so','st','su','sv','sw','ss','tg','tl','ty','ta','tt','cs','ce','cv','te','th','bo','ti','to','ts','tn','tr','tk','tw','uk','ve','vi','cu','vo','wa','wo','xh','ii','yi','yo','za','zu']
-     label_lang = ['Anglais','Français','Abkhaze','Afar','Afrikaans','Akan','Albanais','Allemand','Amharique','Anglais','Arabe','Aragonais','Arménien','Assamais','Avar','Avestique','Aymara','Azéri','Bachkir','Bambara','Basque','Bengali','Bichelamar','Biélorusse','Bihari','Birman','Bosnien','Breton','Bulgare','Cachemiri','Catalan','Chamorro','Chichewa','Chinois','Cingalais','Coréen','Cornique','Corse','Créolehaïtien','Cri','Croate','Danois','Dzongkha','Écossais','Espagnol','Espéranto','Estonien','Ewe','Féroïen','Fidjien','Finnois','Français','Frisonoccidental','Galicien','Gallois','Ganda','Géorgien','Grecmoderne','Groenlandais','Guarani','Gujarati','Haoussa','Hébreu','Héréro','Hindi','Hirimotu','Hongrois','Ido','Igbo','Indonésien','Interlingua','Inuktitut','Inupiak','Irlandais','Islandais','Italien','Japonais','Javanais','Kannada','Kanouri','Kazakh','Khmer','Kikongo','Kikuyu','Kinyarwanda','Kirghiz','Kirundi','Komi','Kuanyama','Kurde','Lao','Latin','Letton','Limbourgeois','Lingala','Lituanien','Luba','Luxembourgeois','Macédonien','Malais','Malayalam','Maldivien','Malgache','Maltais','Mannois','MaorideNouvelle-Zélande','Marathi','Marshallais','Moldave','Mongol','Nauruan','Navajo','Ndonga','Néerlandais','Népalais','Norvégien','Norvégienbokmål','Norvégiennynorsk','Nrebele','Occidental','Occitan','Ojibwé','Oriya','Oromo','Ossète','Ouïghour','Ourdou','Ouzbek','Pachto','Pali','Pendjabi','Persan','Peul','Polonais','Portugais','Quechua','Romanche','Roumain','Russe','SameduNord','Samoan','Sango','Sanskrit','Sarde','Serbe','Serbo-croate','Shona','Sindebele','Sindhi','Slovaque','Slovène','Somali','SothoduSud','Soundanais','Suédois','Swahili','Swati','Tadjik','Tagalog','Tahitien','Tamoul','Tatar','Tchèque','Tchétchène','Tchouvache','Télougou','Thaï','Tibétain','Tigrigna','Tongien','Tsonga','Tswana','Turc','Turkmène','Twi','Ukrainien','Venda','Vietnamien','Vieux-slave','Volapük','Wallon','Wolof','Xhosa','Yi','Yiddish','Yoruba','Zhuang','Zoulou']
-     lang_src = {'ar': 'arabic', 'bg': 'bulgarian', 'de': 'german', 'el': 'modern greek', 'en': 'english', 'es': 'spanish', 'fr': 'french',
-                 'hi': 'hindi', 'it': 'italian', 'ja': 'japanese', 'nl': 'dutch', 'pl': 'polish', 'pt': 'portuguese', 'ru': 'russian', 'sw': 'swahili',
-                 'th': 'thai', 'tr': 'turkish', 'ur': 'urdu', 'vi': 'vietnamese', 'zh': 'chinese'}
-     st.write("## **Paramètres :**\n")
- 
-     st.write("#### Choisissez le type de traduction:")
-     # tab1, tab2, tab3 = st.tabs(["small vocab avec Keras et un GRU","Phrases à saisir", "Phrases à dicter"])
-     chosen_id = tab_bar(data=[
-         TabBarItemData(id="tab1", title="small vocab", description="avec Keras et un GRU"),
-         TabBarItemData(id="tab2", title="small vocab", description="avec Keras et un Transformer"),
-         TabBarItemData(id="tab3", title="Phrase personnelle", description="à saisir"),
-         TabBarItemData(id="tab4", title="Phrase personnelle", description="à dicter")],
-         default="tab1")
- 
-     if (chosen_id == "tab1") or (chosen_id == "tab2"):
-         TabContainerHolder = st.container()
-         Sens = TabContainerHolder.radio('Sens de la traduction:', ('Anglais -> Français', 'Français -> Anglais'), horizontal=True)
-         Lang = ('en_fr' if Sens == 'Anglais -> Français' else 'fr_en')
- 
-         if (Lang == 'en_fr'):
-             df_data_src = df_data_en
-             df_data_tgt = df_data_fr
-             if (chosen_id == "tab1"):
-                 translation_model = rnn_en_fr
-             else:
-                 translation_model = transformer_en_fr
-         else:
-             df_data_src = df_data_fr
-             df_data_tgt = df_data_en
-             if (chosen_id == "tab1"):
-                 translation_model = rnn_fr_en
-             else:
-                 translation_model = transformer_fr_en
- 
-         st.write("<center><h5>Architecture du modèle utilisé:</h5>", unsafe_allow_html=True)
-         plot_model(translation_model, show_shapes=True, show_layer_names=True, show_layer_activations=True, rankdir='TB', to_file='images/model_plot.png')
-         st.image('images/model_plot.png', use_column_width=True)
-         st.write("</center>", unsafe_allow_html=True)
- 
-         sentence1 = st.selectbox("Selectionnez la 1ere des 5 phrases à traduire avec le dictionnaire sélectionné", df_data_src.iloc[:-4], index=int(n1))
-         n1 = df_data_src[df_data_src[0] == sentence1].index.values[0]
-         if (chosen_id == "tab1"):
-             display_translation(n1, Lang, 1)
-         else:
-             display_translation(n1, Lang, 2)
- 
-     elif chosen_id == "tab3":
- 
-         custom_sentence = st.text_area(label="Saisir le texte à traduire")
-         l_tgt = st.selectbox("Choisir la langue cible pour Google Translate (uniquement):", lang_tgt, format_func=find_lang_label)
-         st.button(label="Valider", type="primary")
-         if custom_sentence != "":
-             Lang_detected = lang_classifier(custom_sentence)[0]['label']
-             st.write('Langue détectée : **' + lang_src.get(Lang_detected) + '**')
-             audio_stream_bytesio_src = io.BytesIO()
-             tts = gTTS(custom_sentence, lang=Lang_detected)
-             tts.write_to_fp(audio_stream_bytesio_src)
-             st.audio(audio_stream_bytesio_src)
-             st.write("")
-         else:
-             Lang_detected = ""
-         col1, col2 = st.columns(2, gap="small")
-         with col1:
-             st.write(":red[**Trad. t5-base & Helsinki**] *(Anglais/Français)*")
-             audio_stream_bytesio_tgt = io.BytesIO()
-             if (Lang_detected == 'en'):
-                 translation = translation_en_fr(custom_sentence, max_length=400)[0]['translation_text']
-                 st.write("**fr :** " + translation)
-                 st.write("")
-                 tts = gTTS(translation, lang='fr')
-                 tts.write_to_fp(audio_stream_bytesio_tgt)
-                 st.audio(audio_stream_bytesio_tgt)
-             elif (Lang_detected == 'fr'):
-                 translation = translation_fr_en(custom_sentence, max_length=400)[0]['translation_text']
-                 st.write("**en :** " + translation)
-                 st.write("")
-                 tts = gTTS(translation, lang='en')
-                 tts.write_to_fp(audio_stream_bytesio_tgt)
-                 st.audio(audio_stream_bytesio_tgt)
-         with col2:
-             st.write(":red[**Trad. Google Translate**]")
-             try:
-                 translator = Translator(to_lang=l_tgt, from_lang=Lang_detected)
-                 if custom_sentence != "":
-                     translation = translator.translate(custom_sentence)
-                     st.write("**" + l_tgt + " :** " + translation)
-                     st.write("")
-                     audio_stream_bytesio_tgt = io.BytesIO()
-                     tts = gTTS(translation, lang=l_tgt)
-                     tts.write_to_fp(audio_stream_bytesio_tgt)
-                     st.audio(audio_stream_bytesio_tgt)
-             except Exception:
-                 st.write("Problème, essayer de nouveau..")
- 
-     elif chosen_id == "tab4":
-         detection = st.toggle("Détection de langue ?")
-         if not detection:
-             l_src = st.selectbox("Choisissez la langue parlée :", lang_tgt, format_func=find_lang_label, index=1)
-         l_tgt = st.selectbox("Choisissez la langue cible :", lang_tgt, format_func=find_lang_label)
-         audio_bytes = audio_recorder(pause_threshold=1.0, sample_rate=16000, text="Cliquez pour parler, puis attendre 2s..",
-                                      recording_color="#e8b62c", neutral_color="#1ec3bc", icon_size="6x")
- 
-         if audio_bytes:
-             st.audio(audio_bytes, format="audio/wav")
-             try:
-                 if detection:
-                     # Create a BytesIO object from the audio stream
-                     audio_stream_bytesio = io.BytesIO(audio_bytes)
- 
-                     # Read the WAV stream using wavio
-                     wav = wavio.read(audio_stream_bytesio)
- 
-                     # Extract the audio data from the wavio.Wav object
-                     audio_data = wav.data
- 
-                     # Convert the audio data to a NumPy array
-                     audio_input = np.array(audio_data, dtype=np.float32)
-                     audio_input = np.mean(audio_input, axis=1) / 32768
- 
-                     result = model_speech.transcribe(audio_input)
-                     st.write("Langue détectée : " + result["language"])
-                     Lang_detected = result["language"]
-                     # Whisper transcription (uses the `result` computed above)
-                     custom_sentence = result["text"]
-                 else:
-                     Lang_detected = l_src
-                     # Google transcription
-                     audio_stream = sr.AudioData(audio_bytes, 32000, 2)
-                     r = sr.Recognizer()
-                     custom_sentence = r.recognize_google(audio_stream, language=Lang_detected)
- 
-                 if custom_sentence != "":
-                     # Lang_detected = lang_classifier(custom_sentence)[0]['label']
-                     # st.write('Langue détectée : **'+Lang_detected+'**')
-                     st.write("")
-                     st.write("**" + Lang_detected + " :** :blue[" + custom_sentence + "]")
-                     st.write("")
-                     translator = Translator(to_lang=l_tgt, from_lang=Lang_detected)
-                     translation = translator.translate(custom_sentence)
-                     st.write("**" + l_tgt + " :** " + translation)
-                     st.write("")
-                     audio_stream_bytesio_tgt = io.BytesIO()
-                     tts = gTTS(translation, lang=l_tgt)
-                     tts.write_to_fp(audio_stream_bytesio_tgt)
-                     st.audio(audio_stream_bytesio_tgt)
-                     st.write("Prêt pour la phase suivante..")
-                     audio_bytes = False
-             except KeyboardInterrupt:
-                 st.write("Arrêt de la reconnaissance vocale.")
-             except Exception:
-                 st.write("Problème, essayer de nouveau..")
 
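The `get_causal_attention_mask` method above builds a lower-triangular matrix so position i can only attend to positions j <= i. The same mask in a few lines of NumPy, for a toy length of 5:

import numpy as np

seq_len = 5
i = np.arange(seq_len)[:, None]
j = np.arange(seq_len)
causal_mask = (i >= j).astype(np.int32)   # 1 = may attend, 0 = blocked
print(causal_mask)
# [[1 0 0 0 0]
#  [1 1 0 0 0]
#  [1 1 1 0 0]
#  [1 1 1 1 0]
#  [1 1 1 1 1]]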
spaces/Detomo/ai-comic-generation/src/components/ui/avatar.tsx DELETED
@@ -1,50 +0,0 @@
- "use client"
- 
- import * as React from "react"
- import * as AvatarPrimitive from "@radix-ui/react-avatar"
- 
- import { cn } from "@/lib/utils"
- 
- const Avatar = React.forwardRef<
-   React.ElementRef<typeof AvatarPrimitive.Root>,
-   React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root>
- >(({ className, ...props }, ref) => (
-   <AvatarPrimitive.Root
-     ref={ref}
-     className={cn(
-       "relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full",
-       className
-     )}
-     {...props}
-   />
- ))
- Avatar.displayName = AvatarPrimitive.Root.displayName
- 
- const AvatarImage = React.forwardRef<
-   React.ElementRef<typeof AvatarPrimitive.Image>,
-   React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Image>
- >(({ className, ...props }, ref) => (
-   <AvatarPrimitive.Image
-     ref={ref}
-     className={cn("aspect-square h-full w-full", className)}
-     {...props}
-   />
- ))
- AvatarImage.displayName = AvatarPrimitive.Image.displayName
- 
- const AvatarFallback = React.forwardRef<
-   React.ElementRef<typeof AvatarPrimitive.Fallback>,
-   React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Fallback>
- >(({ className, ...props }, ref) => (
-   <AvatarPrimitive.Fallback
-     ref={ref}
-     className={cn(
-       "flex h-full w-full items-center justify-center rounded-full bg-stone-100 dark:bg-stone-800",
-       className
-     )}
-     {...props}
-   />
- ))
- AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName
- 
- export { Avatar, AvatarImage, AvatarFallback }
 
spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/coach.py DELETED
@@ -1,242 +0,0 @@
- import os
- 
- import clip
- import torch
- import torchvision
- from torch import nn
- from torch.utils.data import DataLoader
- from torch.utils.tensorboard import SummaryWriter
- 
- import criteria.clip_loss as clip_loss
- from criteria import id_loss
- from mapper.datasets.latents_dataset import LatentsDataset
- from mapper.styleclip_mapper import StyleCLIPMapper
- from mapper.training.ranger import Ranger
- from mapper.training import train_utils
- 
- 
- class Coach:
-     def __init__(self, opts):
-         self.opts = opts
- 
-         self.global_step = 0
- 
-         self.device = 'cuda:0'
-         self.opts.device = self.device
- 
-         # Initialize network
-         self.net = StyleCLIPMapper(self.opts).to(self.device)
- 
-         # Initialize loss
-         if self.opts.id_lambda > 0:
-             self.id_loss = id_loss.IDLoss(self.opts).to(self.device).eval()
-         if self.opts.clip_lambda > 0:
-             self.clip_loss = clip_loss.CLIPLoss(opts)
-         if self.opts.latent_l2_lambda > 0:
-             self.latent_l2_loss = nn.MSELoss().to(self.device).eval()
- 
-         # Initialize optimizer
-         self.optimizer = self.configure_optimizers()
- 
-         # Initialize dataset
-         self.train_dataset, self.test_dataset = self.configure_datasets()
-         self.train_dataloader = DataLoader(self.train_dataset,
-                                            batch_size=self.opts.batch_size,
-                                            shuffle=True,
-                                            num_workers=int(self.opts.workers),
-                                            drop_last=True)
-         self.test_dataloader = DataLoader(self.test_dataset,
-                                           batch_size=self.opts.test_batch_size,
-                                           shuffle=False,
-                                           num_workers=int(self.opts.test_workers),
-                                           drop_last=True)
- 
-         self.text_inputs = torch.cat([clip.tokenize(self.opts.description)]).cuda()
- 
-         # Initialize logger
-         log_dir = os.path.join(opts.exp_dir, 'logs')
-         os.makedirs(log_dir, exist_ok=True)
-         self.log_dir = log_dir
-         self.logger = SummaryWriter(log_dir=log_dir)
- 
-         # Initialize checkpoint dir
-         self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
-         os.makedirs(self.checkpoint_dir, exist_ok=True)
-         self.best_val_loss = None
-         if self.opts.save_interval is None:
-             self.opts.save_interval = self.opts.max_steps
- 
-     def train(self):
-         self.net.train()
-         while self.global_step < self.opts.max_steps:
-             for batch_idx, batch in enumerate(self.train_dataloader):
-                 self.optimizer.zero_grad()
-                 w = batch
-                 w = w.to(self.device)
-                 with torch.no_grad():
-                     x, _ = self.net.decoder([w], input_is_latent=True, randomize_noise=False, truncation=1)
-                 w_hat = w + 0.1 * self.net.mapper(w)
-                 x_hat, w_hat = self.net.decoder([w_hat], input_is_latent=True, return_latents=True, randomize_noise=False, truncation=1)
-                 loss, loss_dict = self.calc_loss(w, x, w_hat, x_hat)
-                 loss.backward()
-                 self.optimizer.step()
- 
-                 # Logging related
-                 if self.global_step % self.opts.image_interval == 0 or (
-                         self.global_step < 1000 and self.global_step % 1000 == 0):
-                     self.parse_and_log_images(x, x_hat, title='images_train')
-                 if self.global_step % self.opts.board_interval == 0:
-                     self.print_metrics(loss_dict, prefix='train')
-                     self.log_metrics(loss_dict, prefix='train')
- 
-                 # Validation related
-                 val_loss_dict = None
-                 if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
-                     val_loss_dict = self.validate()
-                     if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
-                         self.best_val_loss = val_loss_dict['loss']
-                         self.checkpoint_me(val_loss_dict, is_best=True)
- 
-                 if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
-                     if val_loss_dict is not None:
-                         self.checkpoint_me(val_loss_dict, is_best=False)
-                     else:
-                         self.checkpoint_me(loss_dict, is_best=False)
- 
-                 if self.global_step == self.opts.max_steps:
-                     print('OMG, finished training!')
-                     break
- 
-                 self.global_step += 1
- 
-     def validate(self):
-         self.net.eval()
-         agg_loss_dict = []
-         for batch_idx, batch in enumerate(self.test_dataloader):
-             if batch_idx > 200:
-                 break
- 
-             w = batch
- 
-             with torch.no_grad():
-                 w = w.to(self.device).float()
-                 x, _ = self.net.decoder([w], input_is_latent=True, randomize_noise=True, truncation=1)
-                 w_hat = w + 0.1 * self.net.mapper(w)
-                 x_hat, _ = self.net.decoder([w_hat], input_is_latent=True, randomize_noise=True, truncation=1)
-                 loss, cur_loss_dict = self.calc_loss(w, x, w_hat, x_hat)
-             agg_loss_dict.append(cur_loss_dict)
- 
-             # Logging related
-             self.parse_and_log_images(x, x_hat, title='images_val', index=batch_idx)
- 
-             # For first step just do sanity test on small amount of data
-             if self.global_step == 0 and batch_idx >= 4:
-                 self.net.train()
-                 return None  # Do not log, inaccurate in first batch
- 
-         loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
-         self.log_metrics(loss_dict, prefix='test')
-         self.print_metrics(loss_dict, prefix='test')
- 
-         self.net.train()
-         return loss_dict
- 
-     def checkpoint_me(self, loss_dict, is_best):
-         save_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(self.global_step)
-         save_dict = self.__get_save_dict()
-         checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
-         torch.save(save_dict, checkpoint_path)
-         with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
-             if is_best:
-                 f.write('**Best**: Step - {}, Loss - {:.3f} \n{}\n'.format(self.global_step, self.best_val_loss, loss_dict))
-             else:
-                 f.write('Step - {}, \n{}\n'.format(self.global_step, loss_dict))
- 
-     def configure_optimizers(self):
-         params = list(self.net.mapper.parameters())
-         if self.opts.optim_name == 'adam':
-             optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
-         else:
-             optimizer = Ranger(params, lr=self.opts.learning_rate)
-         return optimizer
- 
-     def configure_datasets(self):
-         if self.opts.latents_train_path:
-             train_latents = torch.load(self.opts.latents_train_path)
-         else:
-             train_latents_z = torch.randn(self.opts.train_dataset_size, 512).cuda()
-             train_latents = []
-             for b in range(self.opts.train_dataset_size // self.opts.batch_size):
-                 with torch.no_grad():
-                     _, train_latents_b = self.net.decoder([train_latents_z[b: b + self.opts.batch_size]],
-                                                           truncation=0.7, truncation_latent=self.net.latent_avg, return_latents=True)
-                     train_latents.append(train_latents_b)
-             train_latents = torch.cat(train_latents)
- 
-         if self.opts.latents_test_path:
-             test_latents = torch.load(self.opts.latents_test_path)
-         else:
-             # Use the test-set size here (the original sampled train_dataset_size latents).
-             test_latents_z = torch.randn(self.opts.test_dataset_size, 512).cuda()
-             test_latents = []
-             for b in range(self.opts.test_dataset_size // self.opts.test_batch_size):
-                 with torch.no_grad():
-                     _, test_latents_b = self.net.decoder([test_latents_z[b: b + self.opts.test_batch_size]],
-                                                          truncation=0.7, truncation_latent=self.net.latent_avg, return_latents=True)
-                     test_latents.append(test_latents_b)
-             test_latents = torch.cat(test_latents)
- 
-         train_dataset_celeba = LatentsDataset(latents=train_latents.cpu(),
-                                               opts=self.opts)
-         test_dataset_celeba = LatentsDataset(latents=test_latents.cpu(),
-                                              opts=self.opts)
-         train_dataset = train_dataset_celeba
-         test_dataset = test_dataset_celeba
-         print("Number of training samples: {}".format(len(train_dataset)))
-         print("Number of test samples: {}".format(len(test_dataset)))
-         return train_dataset, test_dataset
- 
-     def calc_loss(self, w, x, w_hat, x_hat):
-         loss_dict = {}
-         loss = 0.0
-         if self.opts.id_lambda > 0:
-             loss_id, sim_improvement = self.id_loss(x_hat, x)
-             loss_dict['loss_id'] = float(loss_id)
-             loss_dict['id_improve'] = float(sim_improvement)
-             loss = loss_id * self.opts.id_lambda
-         if self.opts.clip_lambda > 0:
-             loss_clip = self.clip_loss(x_hat, self.text_inputs).mean()
-             loss_dict['loss_clip'] = float(loss_clip)
-             loss += loss_clip * self.opts.clip_lambda
-         if self.opts.latent_l2_lambda > 0:
-             loss_l2_latent = self.latent_l2_loss(w_hat, w)
-             loss_dict['loss_l2_latent'] = float(loss_l2_latent)
-             loss += loss_l2_latent * self.opts.latent_l2_lambda
-         loss_dict['loss'] = float(loss)
-         return loss, loss_dict
- 
-     def log_metrics(self, metrics_dict, prefix):
-         for key, value in metrics_dict.items():
-             print(f"step: {self.global_step} \t metric: {prefix}/{key} \t value: {value}")
-             self.logger.add_scalar('{}/{}'.format(prefix, key), value, self.global_step)
- 
-     def print_metrics(self, metrics_dict, prefix):
-         print('Metrics for {}, step {}'.format(prefix, self.global_step))
-         for key, value in metrics_dict.items():
-             print('\t{} = '.format(key), value)
- 
-     def parse_and_log_images(self, x, x_hat, title, index=None):
-         if index is None:
-             path = os.path.join(self.log_dir, title, f'{str(self.global_step).zfill(5)}.jpg')
-         else:
-             path = os.path.join(self.log_dir, title, f'{str(self.global_step).zfill(5)}_{str(index).zfill(5)}.jpg')
-         os.makedirs(os.path.dirname(path), exist_ok=True)
-         torchvision.utils.save_image(torch.cat([x.detach().cpu(), x_hat.detach().cpu()]), path,
-                                      normalize=True, scale_each=True, range=(-1, 1), nrow=self.opts.batch_size)
- 
-     def __get_save_dict(self):
-         save_dict = {
-             'state_dict': self.net.state_dict(),
-             'opts': vars(self.opts)
-         }
-         return save_dict
 
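`calc_loss` above is a weighted sum whose terms are switched on by their lambdas. The same pattern reduced to a framework-free sketch, with illustrative names and numbers:

def weighted_loss(terms, weights):
    """terms/weights: dicts keyed by loss name; zero-weight terms are skipped."""
    total = 0.0
    log = {}
    for name, value in terms.items():
        w = weights.get(name, 0.0)
        if w > 0:
            log[f"loss_{name}"] = value
            total += value * w
    log["loss"] = total
    return total, log

total, log = weighted_loss({"id": 0.35, "clip": 0.8, "latent_l2": 0.01},
                           {"id": 0.1, "clip": 1.0, "latent_l2": 0.8})
print(total, log)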
spaces/DragGan/DragGan-Inversion/torch_utils/ops/grid_sample_gradfix.py DELETED
@@ -1,84 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
- 
- """Custom replacement for `torch.nn.functional.grid_sample` that
- supports arbitrarily high order gradients between the input and output.
- Only works on 2D images and assumes
- `mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
- 
- import torch
- 
- # pylint: disable=redefined-builtin
- # pylint: disable=arguments-differ
- # pylint: disable=protected-access
- 
- # ----------------------------------------------------------------------------
- 
- enabled = False  # Enable the custom op by setting this to true.
- 
- # ----------------------------------------------------------------------------
- 
- 
- def grid_sample(input, grid):
-     if _should_use_custom_op():
-         return _GridSample2dForward.apply(input, grid)
-     return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
- 
- # ----------------------------------------------------------------------------
- 
- 
- def _should_use_custom_op():
-     return enabled
- 
- # ----------------------------------------------------------------------------
- 
- 
- class _GridSample2dForward(torch.autograd.Function):
-     @staticmethod
-     def forward(ctx, input, grid):
-         assert input.ndim == 4
-         assert grid.ndim == 4
-         output = torch.nn.functional.grid_sample(
-             input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
-         ctx.save_for_backward(input, grid)
-         return output
- 
-     @staticmethod
-     def backward(ctx, grad_output):
-         input, grid = ctx.saved_tensors
-         grad_input, grad_grid = _GridSample2dBackward.apply(
-             grad_output, input, grid)
-         return grad_input, grad_grid
- 
- # ----------------------------------------------------------------------------
- 
- 
- class _GridSample2dBackward(torch.autograd.Function):
-     @staticmethod
-     def forward(ctx, grad_output, input, grid):
-         op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
-         grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
-         ctx.save_for_backward(grid)
-         return grad_input, grad_grid
- 
-     @staticmethod
-     def backward(ctx, grad2_grad_input, grad2_grad_grid):
-         _ = grad2_grad_grid  # unused
-         grid, = ctx.saved_tensors
-         grad2_grad_output = None
-         grad2_input = None
-         grad2_grid = None
- 
-         if ctx.needs_input_grad[0]:
-             grad2_grad_output = _GridSample2dForward.apply(
-                 grad2_grad_input, grid)
- 
-         assert not ctx.needs_input_grad[2]
-         return grad2_grad_output, grad2_input, grad2_grid
- 
- # ----------------------------------------------------------------------------
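
A usage sketch for the op above, in the R1-penalty style the custom backward supports (second-order gradients flow through the upstream gradient, so a parameter is included for them to reach; the import path follows the file's location in the repo, and shapes are arbitrary):

import torch
from torch_utils.ops import grid_sample_gradfix

grid_sample_gradfix.enabled = True  # opt in to the custom autograd path

# A tiny "network" parameter so the second-order pass has somewhere to flow.
w = torch.randn((), dtype=torch.float64, requires_grad=True)
input = torch.randn(1, 3, 8, 8, dtype=torch.float64, requires_grad=True)
grid = torch.rand(1, 4, 4, 2, dtype=torch.float64) * 2 - 1  # fixed sampling grid

out = grid_sample_gradfix.grid_sample(input, grid)
loss = (w * out).sum()

# First-order gradient w.r.t. the input, keeping the graph alive...
(g_input,) = torch.autograd.grad(loss, input, create_graph=True)
penalty = g_input.square().sum()

# ...so the gradient penalty can be differentiated w.r.t. the parameter.
(g_w,) = torch.autograd.grad(penalty, w)
print(g_w)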