parquet-converter committed
Commit: 967481b
1 parent: d51d5c5

Update parquet files (step 79 of 121)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.

Files changed (50):
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arm Ds 5 License File Crack What You Need to Know About the Different Editions and Features.md +0 -87
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagatrix Math Suite 64 bit Learn Math Faster and Easier with This Software.md +0 -65
  3. spaces/1gistliPinn/ChatGPT4/Examples/Coreldrawgraphicssuitex4installer En Serial Number.md +0 -34
  4. spaces/1phancelerku/anime-remove-background/Download Mortal Kombat 10 APK and Join the Ultimate Tournament of Champions.md +0 -88
  5. spaces/1phancelerku/anime-remove-background/Download Traffic Racer APK Mod and Challenge Your Friends Online.md +0 -91
  6. spaces/1toTree/lora_test/ppdiffusers/pipelines/audio_diffusion/mel.py +0 -163
  7. spaces/52Hz/CMFNet_deblurring/README.md +0 -37
  8. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/fasttext/create_word_embedding.py +0 -50
  9. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/discriminator/model.py +0 -295
  10. spaces/AIML-TUDA/safe-stable-diffusion/share_btn.py +0 -68
  11. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet101_cifar.py +0 -16
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateTextBox.js +0 -8
  13. spaces/AlexWortega/t5_predict_activity/app.py +0 -45
  14. spaces/Aloento/9Nine-PITS/modules.py +0 -426
  15. spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_to_onnx.py +0 -31
  16. spaces/Ameaou/academic-chatgpt3.1/docs/README_JP.md +0 -302
  17. spaces/Amrrs/DragGan-Inversion/gui_utils/glfw_window.py +0 -239
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/conceptual/evaluation.md +0 -572
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py +0 -420
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_safe/__init__.py +0 -0
  21. spaces/Andy1621/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py +0 -105
  22. spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py +0 -6
  23. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r101-d8_512x512_80k_ade20k.py +0 -2
  24. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py +0 -5
  25. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipeline_loader.py +0 -52
  26. spaces/Ariharasudhan/YoloV5/utils/general.py +0 -1108
  27. spaces/Armored-Atom/DiFuse_Your_Thoughts/README.md +0 -14
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/cache.py +0 -222
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py +0 -70
  30. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/demo/predictor.py +0 -220
  31. spaces/BAAI/vid2vid-zero/vid2vid_zero/p2p/ptp_utils.py +0 -347
  32. spaces/Banbri/zcvzcv/src/app/store/index.ts +0 -203
  33. spaces/Benson/text-generation/Examples/Adventure Apk.md +0 -138
  34. spaces/Benson/text-generation/Examples/Descarga De Archivos Flash Infinix Smart 3 Plus.md +0 -68
  35. spaces/BetterAPI/BetterChat_new/src/lib/types/Conversation.ts +0 -17
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/android.py +0 -126
  37. spaces/CVH-vn1210/make_hair/minigpt4/models/mini_gpt4.py +0 -263
  38. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/hooks.py +0 -427
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/utils/spec_tools.py +0 -266
  40. spaces/CVPR/LIVE/thrust/thrust/detail/execute_with_dependencies.h +0 -267
  41. spaces/CVPR/LIVE/thrust/thrust/detail/tuple_transform.h +0 -418
  42. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/partition.h +0 -44
  43. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reduce_intervals.h +0 -125
  44. spaces/CVPR/regionclip-demo/detectron2/utils/visualizer.py +0 -1219
  45. spaces/CVPR/transfiner/configs/common/optim.py +0 -15
  46. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/__init__.py +0 -5
  47. spaces/Chintan-Donda/KKMS-KSSW-HF/src/langchain_utils.py +0 -891
  48. spaces/Clebersla/RVC_V2_Huggingface_Version/README.md +0 -57
  49. spaces/CuriousDolphin/MobileSAM/utils/tools_gradio.py +0 -192
  50. spaces/Cvandi/remake/scripts/generate_meta_info.py +0 -58
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arm Ds 5 License File Crack What You Need to Know About the Different Editions and Features.md DELETED
@@ -1,87 +0,0 @@
1
-
2
- <h1>Arm Ds 5 License File Crack: How to Install and Use the Professional Software Development Tool for Embedded Systems</h1>
3
- <p>Are you looking for a powerful and versatile software development tool for embedded systems based on Arm processors? Do you want to create, debug, optimize, and analyze your applications with ease and efficiency? If yes, then you might be interested in Arm Ds 5, the professional software development studio that covers all stages of development from boot code and kernel debugging to application and bare-metal performance analysis. But what is Arm Ds 5 exactly, and how can you get it for free? In this article, we will answer these questions and show you how to crack Arm Ds 5 license file and use its features to enhance your embedded software development.</p>
4
- <h2>Arm Ds 5 License File Crack</h2><br /><p><b><b>Download Zip</b> >>>>> <a href="https://byltly.com/2uKvBT">https://byltly.com/2uKvBT</a></b></p><br /><br />
5
- <h2>What is Arm Ds 5?</h2>
6
- <p>Arm Ds 5 is a software development studio that provides a comprehensive solution for embedded systems development based on Arm processors. It supports a wide range of targets, from microcontrollers to multicore systems-on-chip (SoCs), and enables you to develop applications for bare-metal embedded systems and Linux-based systems. It also supports fixed virtual platforms (FVPs) that allow you to simulate the behavior of hardware without the need for actual devices.</p>
7
- <h3>Features and benefits of Arm Ds 5</h3>
8
- <p>Some of the main features and benefits of Arm Ds 5 are:</p>
9
- <ul>
10
- <li><strong>Eclipse for Ds 5</strong>: An integrated development environment (IDE) that combines the Eclipse Foundation Eclipse IDE and Arm compilation and debug technologies. It provides a project manager, an editor, a C/C++ perspective, a debug configuration perspective, and a DS-5 perspective.</li>
11
- <li><strong>Ds 5 Debugger</strong>: A graphical debugger that supports target and virtual platform software development based on Arm processors. It provides a comprehensive and intuitive view of source code, disassembly, call stack, memory, registers, expressions, variables, threads, breakpoints, and tracing.</li>
12
- <li><strong>Arm Streamline</strong>: A graphical performance analysis tool that converts sampling data and trace data into visual and statistical reports. It helps you identify performance bottlenecks, optimize code efficiency, and improve system responsiveness.</li>
13
- <li><strong>Arm Compiler</strong>: A toolchain that enables you to build applications and libraries for bare-metal embedded systems. It supports both Arm Compiler 5 and Arm Compiler 6 versions.</li>
14
- <li><strong>Dedicated examples</strong>, applications, and supporting documentation that help you get started with using Arm Ds 5 tools.</li>
15
- </ul>
16
- <h3>Editions and licensing options of Arm Ds 5</h3>
17
- <p>Arm Ds 5 offers different editions and licensing options depending on your needs and budget. These include:</p>
18
- <p>How to crack Arm Ds 5 license file for free<br />
19
- Arm Ds 5 license file crack download link<br />
20
- Arm Ds 5 license file crack tutorial<br />
21
- Arm Ds 5 license file crack software<br />
22
- Arm Ds 5 license file crack generator<br />
23
- Arm Ds 5 license file crack activation code<br />
24
- Arm Ds 5 license file crack keygen<br />
25
- Arm Ds 5 license file crack patch<br />
26
- Arm Ds 5 license file crack serial number<br />
27
- Arm Ds 5 license file crack full version<br />
28
- Arm Ds 5 license file crack online<br />
29
- Arm Ds 5 license file crack offline<br />
30
- Arm Ds 5 license file crack windows<br />
31
- Arm Ds 5 license file crack mac<br />
32
- Arm Ds 5 license file crack linux<br />
33
- Arm Ds 5 license file crack android<br />
34
- Arm Ds 5 license file crack ios<br />
35
- Arm Ds 5 license file crack arm64<br />
36
- Arm Ds 5 license file crack x86<br />
37
- Arm Ds 5 license file crack x64<br />
38
- Arm Ds 5 license file crack no survey<br />
39
- Arm Ds 5 license file crack no password<br />
40
- Arm Ds 5 license file crack no virus<br />
41
- Arm Ds 5 license file crack no malware<br />
42
- Arm Ds 5 license file crack safe<br />
43
- Arm Ds 5 license file crack legit<br />
44
- Arm Ds 5 license file crack working<br />
45
- Arm Ds 5 license file crack latest<br />
46
- Arm Ds 5 license file crack updated<br />
47
- Arm Ds 5 license file crack new<br />
48
- Arm Ds 5 license file crack old<br />
49
- Arm Ds 5 license file crack original<br />
50
- Arm Ds 5 license file crack official<br />
51
- Arm Ds 5 license file crack verified<br />
52
- Arm Ds 5 license file crack trusted<br />
53
- Arm Ds 5 license file crack best<br />
54
- Arm Ds 5 license file crack worst<br />
55
- Arm Ds 5 license file crack easy<br />
56
- Arm Ds 5 license file crack hard<br />
57
- Arm Ds 5 license file crack fast<br />
58
- Arm Ds 5 license file crack slow<br />
59
- Arm Ds 5 license file crack cheap<br />
60
- Arm Ds 5 license file crack expensive<br />
61
- Arm Ds 5 license file crack legal<br />
62
- Arm Ds 5 license file crack illegal<br />
63
- Arm Ds 5 license file crack ethical<br />
64
- Arm Ds 5 license file crack unethical<br />
65
- Arm Ds 5 license file crack pros and cons<br />
66
- Arm Ds 5 license file crack reviews and ratings</p>
67
- <ul>
68
- <li><strong>Arm Ds-5 Ultimate Edition</strong>: The most comprehensive edition that includes all the features of Arm Ds-5 Professional Edition plus support for additional targets such as Mali GPUs, big.LITTLE systems, Cortex-A72 processors, Cortex-R8 processors, Cortex-M7 processors, etc. It also includes access to premium support services from Arm experts. This edition requires a paid subscription license.</li>
69
- <li><strong>Arm Ds-5 Professional Edition</strong>: The standard edition that includes all the core features of Arm Ds-5 such as Eclipse for DS-5, DS-5 Debugger, Arm Streamline, Arm Compiler, etc. It supports a wide range of targets such as Cortex-A processors, Cortex-R processors, Cortex-M processors, etc. This edition requires a paid subscription license.</li>
70
- <li><strong>Arm DS-5 Altera Edition</strong>: A special edition that is designed for Altera SoC FPGA devices. It includes all the features of Arm DS-5 Professional Edition plus support for Altera SoC FPGA devices such as Cyclone V SoC FPGA devices, Arria V SoC FPGA devices, Arria 10 SoC FPGA devices, etc. This edition requires a paid subscription license.</li>
71
- <li><strong>Arm DS-5 Community Edition</strong>: A free edition that includes a subset of features of Arm DS-5 Professional Edition such as Eclipse for DS-5, DS-5 Debugger (Linux Ethernet debug only), etc. It supports a limited range of targets such as Cortex-A9 processors (Linux Ethernet debug only), Cortex-M processors (bare-metal debug only), etc. This edition requires a free web license.</li>
72
- <li><strong>Arm DS-5 Evaluation Edition</strong>: A trial edition that includes all the features of Arm DS-5 Ultimate Edition but with a limited duration of 30 days. It supports all the targets that are supported by Arm DS-5 Ultimate Edition. This edition requires a free evaluation license.</li>
73
- </ul>
74
- <h2>How to crack Arm Ds 5?</h2>
75
- <p>If you want to use Arm Ds 5 without paying for a subscription license or using a web license with limited features, you can try to crack it by following these steps:</p>
76
- <h3>Step 1: Download and install Arm Ds 5</h3>
77
- <p>The first step is to download and install Arm Ds 5 on your computer. You can download it from the official website of Arm at https://developer.arm.com/tools-and-software/embedded/legacy-tools/ds-5-development-studio/downloads . You can choose any edition that you want to crack (Ultimate Edition is recommended). After downloading the installer file (.exe for Windows or .bin for Linux), run it and follow the instructions to complete the installation process.</p>
78
- <h3>Step 2: Download and run the crack file</h3>
79
- <p>The next step is to download and run the crack file that will modify some files in your installation directory to bypass the license verification process. You can download the crack file from this link: https://www.codetd.com/en/article/6637418 . After downloading the crack file (.zip), extract it to any folder on your computer. Then open the instructions.txt file in the folder and follow the steps to run the crack file (.bat for Windows or .sh for Linux). You may need to run it as administrator or with sudo privileges depending on your system settings.</p>
80
- <h3>Step 3: Generate and install the license file</h3>
81
- <p>The final step is to generate and install the license file that will activate your cracked version of Arm Ds 5. You can generate the license file by using the keygen.exe file in the crack folder. Run it and enter any serial number (such as AC+70616421313531) in the input box. Then click on Generate License File button and save the license file (.lic) in any location on your computer. Then open Eclipse for DS-5 from your installation directory or from your start menu. Go to Help > ARM License Manager > Add License > Browse License File > Select your license file > Finish. You should see a message saying "License added successfully". Now you can use all the features of Arm DS-5 without any limitations.</p>
82
- <h2>How to use Arm Ds 5?</h2>
83
- <p>Now that you have cracked Arm Ds 5 license file and activated your version of Arm Ds 5, you might be wondering how to use it effectively for your embedded software development projects. Here are some tips on how to use some of its main features:</p>
84
- <h3>Eclipse for DS-5: The integrated development environment</h3>
85
- <p>Eclipse for DS-5 is an IDE that combines Eclipse IDE with ARM compilation and debug technologies. It allows you to create, manage, edit, build, debug, analyze, and optimize your projects in one place. To use Eclipse for DS-5:</p> 0a6ba089eb<br />
86
- <br />
87
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagatrix Math Suite 64 bit Learn Math Faster and Easier with This Software.md DELETED
@@ -1,65 +0,0 @@
1
-
2
- <h1>Bagatrix Math Suite 64 bit: A Powerful Tool for Solving Math Problems</h1>
3
- <p>Do you struggle with math homework or exams? Do you wish you had a personal tutor who could explain every step and concept in detail? Do you want to learn math at your own pace and level? If you answered yes to any of these questions, then you might be interested in Bagatrix Math Suite 64 bit, a software that can help you solve math problems with ease.</p>
4
- <p>Bagatrix Math Suite 64 bit is a collection of programs that cover various math subjects, such as algebra, geometry, calculus, trigonometry, statistics, and more. It allows you to enter any math problem and get a step-by-step solution with explanations, examples, graphs, and tips. You can also practice your skills with interactive quizzes and tests, or create your own worksheets and exams. Whether you are a student, a teacher, or a parent, Bagatrix Math Suite 64 bit can help you improve your math performance and confidence.</p>
5
- <h2>Bagatrix Math Suite 64 bit</h2><br /><p><b><b>Download Zip</b> &#9658; <a href="https://byltly.com/2uKxaP">https://byltly.com/2uKxaP</a></b></p><br /><br />
6
- <h2>How to Use Bagatrix Math Suite 64 bit</h2>
7
- <h3>Installing and Running the Software</h3>
8
- <p>To use Bagatrix Math Suite 64 bit, you need to have a Windows operating system (XP, Vista, 7, 8, or 10) and an internet connection. You can download the software from the official website or from other online sources. The download size is about 300 MB and the installation process is simple and fast. Once you install the software, you need to register it with your email address and activation code. You can then run the software from your desktop or start menu.</p>
9
- <h3>Choosing a Math Topic and Level</h3>
10
- <p>When you launch the software, you will see a list of math topics on the left side of the screen. You can choose from algebra, geometry, calculus, trigonometry, statistics, pre-algebra, finite math, college algebra, pre-calculus, linear algebra, differential equations, discrete math, business math, or SAT/ACT prep. Each topic has different levels of difficulty and subtopics that you can select according to your needs and goals. For example, if you choose algebra, you can choose from basic algebra, intermediate algebra, college algebra, or advanced algebra.</p>
11
- <h3>Solving Problems with Step-by-Step Explanations</h3>
12
- <p>Once you choose a topic and level, you can enter any math problem in the input box at the top of the screen. You can use the keyboard or the virtual keypad to type in numbers, symbols, operators, functions, fractions, exponents, roots, etc. You can also copy and paste problems from other sources or use the problem generator to get random problems. After entering the problem, you can click on the solve button to get a detailed solution with explanations for every step. You can also click on the show me button to see an example of a similar problem solved by the software.</p>
13
- <h3>Graphing Functions and Data</h3>
14
- <p>Another feature of Bagatrix Math Suite 64 bit is that it can graph any function or data that you enter or generate. You can access the graphing tool by clicking on the graph button at the bottom of the screen. You can then enter one or more functions or data sets in the input box and click on the graph button to see a visual representation of them. You can also customize the graph by changing the color, style, scale, axis labels, grid lines, etc. You can also zoom in or out, move around, trace points, find intercepts</p>
15
- <p>Bagatrix Math Suite 64 bit download<br />
16
- Bagatrix Math Suite 64 bit free trial<br />
17
- Bagatrix Math Suite 64 bit crack<br />
18
- Bagatrix Math Suite 64 bit review<br />
19
- Bagatrix Math Suite 64 bit tutorial<br />
20
- Bagatrix Math Suite 64 bit price<br />
21
- Bagatrix Math Suite 64 bit features<br />
22
- Bagatrix Math Suite 64 bit system requirements<br />
23
- Bagatrix Math Suite 64 bit alternative<br />
24
- Bagatrix Math Suite 64 bit support<br />
25
- Bagatrix Math Suite 64 bit for Windows 10<br />
26
- Bagatrix Math Suite 64 bit for Mac<br />
27
- Bagatrix Math Suite 64 bit for Linux<br />
28
- Bagatrix Math Suite 64 bit online<br />
29
- Bagatrix Math Suite 64 bit vs Mathematica<br />
30
- Bagatrix Math Suite 64 bit vs Matlab<br />
31
- Bagatrix Math Suite 64 bit vs Maple<br />
32
- Bagatrix Math Suite 64 bit vs Wolfram Alpha<br />
33
- Bagatrix Math Suite 64 bit vs Khan Academy<br />
34
- Bagatrix Math Suite 64 bit vs Photomath<br />
35
- Bagatrix Math Suite 64 bit coupon code<br />
36
- Bagatrix Math Suite 64 bit discount<br />
37
- Bagatrix Math Suite 64 bit upgrade<br />
38
- Bagatrix Math Suite 64 bit refund policy<br />
39
- Bagatrix Math Suite 64 bit testimonials<br />
40
- Bagatrix Math Suite 64 bit user guide<br />
41
- Bagatrix Math Suite 64 bit FAQ<br />
42
- Bagatrix Math Suite 64 bit forum<br />
43
- Bagatrix Math Suite 64 bit blog<br />
44
- Bagatrix Math Suite 64 bit YouTube channel<br />
45
- Bagatrix Math Suite 64 bit Facebook page<br />
46
- Bagatrix Math Suite 64 bit Twitter account<br />
47
- Bagatrix Math Suite 64 bit Instagram profile<br />
48
- Bagatrix Math Suite 64 bit LinkedIn page<br />
49
- Bagatrix Math Suite 64 bit Reddit community<br />
50
- Bagatrix Math Suite 64 bit Quora space<br />
51
- Bagatrix Math Suite 64 bit Medium publication<br />
52
- Bagatrix Math Suite 64 bit GitHub repository<br />
53
- Bagatrix Math Suite 64 bit npm package[^1^]<br />
54
- Bagatrix Math Suite 64 bit PDF file[^2^]<br />
55
- Bagatrix Math Suite 64 bit ebook[^3^]<br />
56
- Bagatrix Math Suite 64 bit audiobook[^3^]<br />
57
- Bagatrix Math Suite 64 bit video course[^3^]<br />
58
- Bagatrix Math Suite 64 bit webinar[^3^]<br />
59
- Bagatrix Math Suite 64 bit podcast[^3^]<br />
60
- Bagatrix Math Suite 64 bit case study[^3^]<br />
61
- Bagatrix Math Suite 64 bit white paper[^3^]<br />
62
- Bagatrix Math Suite 64 bit infographic[^3^]<br />
63
- Bagatrix Math Suite 64 bit comparison chart[^3^]</p> 0a6ba089eb<br />
64
- <br />
65
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Coreldrawgraphicssuitex4installer En Serial Number.md DELETED
@@ -1,34 +0,0 @@
1
-
2
- <h1>How to Install CorelDRAW Graphics Suite X4 with a Serial Number</h1>
3
- <p>If you have purchased CorelDRAW Graphics Suite X4, you will need a serial number to activate and use the software. A serial number is a unique code that identifies your product and proves your ownership. In this article, we will show you how to install CorelDRAW Graphics Suite X4 with a serial number in a few easy steps.</p>
4
- <h2>Coreldrawgraphicssuitex4installer En Serial Number</h2><br /><p><b><b>Download File</b> >> <a href="https://imgfil.com/2uxZQ9">https://imgfil.com/2uxZQ9</a></b></p><br /><br />
5
- <h2>Step 1: Download the Installer</h2>
6
- <p>The first step is to download the installer for CorelDRAW Graphics Suite X4 from the official website. You can choose between the English version or the multilingual version, depending on your preference. The installer file name will be Coreldrawgraphicssuitex4installer_en.exe or Coreldrawgraphicssuitex4installer_mlen.exe, respectively.</p>
7
- <h2>Step 2: Run the Installer</h2>
8
- <p>The next step is to run the installer file that you have downloaded. You can do this by double-clicking on it or right-clicking and selecting "Run as administrator". You will see a welcome screen that asks you to choose your language and accept the license agreement. Click "Next" to continue.</p>
9
- <h2>Step 3: Enter Your Serial Number</h2>
10
- <p>The most important step is to enter your serial number that you have received when you purchased CorelDRAW Graphics Suite X4. You can find your serial number in your order confirmation email, on the product packaging, or on your account page on the Corel website. The serial number will be a 24-digit code that starts with DR14. Enter your serial number in the box and click "Next".</p>
11
- <p></p>
12
- <h2>Step 4: Choose Your Installation Options</h2>
13
- <p>The final step is to choose your installation options. You can customize your installation by selecting which components and features you want to install, such as fonts, clipart, templates, etc. You can also choose the destination folder where you want to install CorelDRAW Graphics Suite X4. Click "Install Now" to start the installation process.</p>
14
- <h2>Step 5: Enjoy Your Software</h2>
15
- <p>Once the installation is complete, you can launch CorelDRAW Graphics Suite X4 and start creating amazing graphics and designs. You can also register your product online to get access to updates, support, and other benefits. Congratulations! You have successfully installed CorelDRAW Graphics Suite X4 with a serial number.</p>
16
-
17
- I hope this helps. 😊
18
-
19
- <h2>How to Use CorelDRAW Graphics Suite X4</h2>
20
- <p>Now that you have installed CorelDRAW Graphics Suite X4, you may wonder how to use it. CorelDRAW Graphics Suite X4 is a powerful and versatile software that allows you to create vector graphics, photo editing, page layout, web design, and more. In this section, we will give you some tips and tricks on how to use CorelDRAW Graphics Suite X4 effectively.</p>
21
- <h3>Tip 1: Explore the Workspace</h3>
22
- <p>The first tip is to explore the workspace of CorelDRAW Graphics Suite X4. The workspace is the area where you can access all the tools, menus, panels, and options that you need to work on your projects. You can customize the workspace to suit your preferences and workflow by choosing from different presets or creating your own. You can also switch between different workspaces for different tasks, such as drawing, editing, web design, etc.</p>
23
- <h3>Tip 2: Learn the Basics</h3>
24
- <p>The second tip is to learn the basics of CorelDRAW Graphics Suite X4. The basics include how to create and save documents, how to use the drawing and shaping tools, how to apply colors and fills, how to add text and effects, how to import and export files, and more. You can find tutorials and guides on the Corel website or on the Help menu of the software. You can also access online resources such as videos, blogs, forums, and webinars to learn more.</p>
25
- <h3>Tip 3: Experiment with Features</h3>
26
- <p>The third tip is to experiment with the features of CorelDRAW Graphics Suite X4. CorelDRAW Graphics Suite X4 offers a wide range of features that can help you create stunning graphics and designs. Some of the features include interactive tools, smart drawing tools, live trace, power trace, bitmap-to-vector conversion, photo-paint, corel capture, corel connect, and more. You can try out different features and see how they work for your projects.</p>
27
- <h3>Tip 4: Get Inspired</h3>
28
- <p>The fourth tip is to get inspired by other users of CorelDRAW Graphics Suite X4. You can browse through the gallery of CorelDRAW Graphics Suite X4 users and see what they have created with the software. You can also join the community of CorelDRAW Graphics Suite X4 users and share your work, feedback, questions, and ideas. You can also participate in contests and challenges to showcase your skills and win prizes.</p>
29
- <h3>Tip 5: Have Fun</h3>
30
- <p>The fifth and final tip is to have fun with CorelDRAW Graphics Suite X4. CorelDRAW Graphics Suite X4 is a software that allows you to express your creativity and imagination in various ways. You can create anything you want with CorelDRAW Graphics Suite X4, from logos and flyers to posters and websites. You can also enjoy the process of creating and learning with CorelDRAW Graphics Suite X4.</p>
31
-
32
- I hope this helps. 😊</p> d5da3c52bf<br />
33
- <br />
34
- <br />
spaces/1phancelerku/anime-remove-background/Download Mortal Kombat 10 APK and Join the Ultimate Tournament of Champions.md DELETED
@@ -1,88 +0,0 @@
1
-
2
- <h1>Download Mortal Kombat 10 APK for Android: A Guide</h1>
3
- <p>Mortal Kombat 10 is one of the most popular and thrilling fighting games ever created. It features a roster of iconic characters, brutal fatalities, stunning graphics, and immersive gameplay. If you are a fan of this game and want to play it on your Android device, you might be wondering how to download Mortal Kombat 10 APK for Android. In this article, we will show you how to do that, as well as the benefits and risks of downloading Mortal Kombat 10 APK for Android.</p>
4
- <h2>What is Mortal Kombat 10?</h2>
5
- <p>Mortal Kombat 10, also known as Mortal Kombat X, is the tenth installment in the Mortal Kombat series. It was developed by NetherRealm Studios and published by Warner Bros. Interactive Entertainment in 2015. It is available for various platforms, including Windows, PlayStation 4, Xbox One, iOS, and Android.</p>
6
- <h2>download mortal kombat 10 apk</h2><br /><p><b><b>Download</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://jinyurl.com/2uNPQC">https://jinyurl.com/2uNPQC</a></b></p><br /><br />
7
- <p>Mortal Kombat 10 follows the story of the ancient god Shinnok, who seeks to destroy the world with the help of his corrupted warriors. The game features a new generation of fighters, such as Cassie Cage, Jacqui Briggs, Takeda Takahashi, and Kung Jin, as well as returning characters like Scorpion, Sub-Zero, Raiden, and Liu Kang. The game also introduces three different variations for each character, each with their own unique abilities and moves.</p>
8
- <h3>Features of Mortal Kombat 10</h3>
9
- <p>Some of the features that make Mortal Kombat 10 an amazing game are:</p>
10
- <ul>
11
- <li>Spectacular and realistic graphics that bring the characters and environments to life.</li>
12
- <li>Dynamic and interactive stages that allow the fighters to use various objects as weapons or traps.</li>
13
- <li>A cinematic story mode that spans over two decades and involves multiple characters and events.</li>
14
- <li>A diverse and customizable roster of fighters, each with their own personality, style, and skills.</li>
15
- <li>A variety of game modes, such as online multiplayer, tower challenges, faction wars, and more.</li>
16
- <li>A rich and rewarding progression system that lets you unlock new costumes, items, fatalities, and more.</li>
17
- </ul>
18
- <h3>How to download Mortal Kombat 10 APK for Android</h3>
19
- <p>If you want to download Mortal Kombat 10 APK for Android, you will need to follow these steps:</p>
20
- <h4>Step 1: Enable unknown sources</h4>
21
- <p>Since Mortal Kombat 10 APK is not available on the Google Play Store, you will need to enable unknown sources on your device. This will allow you to install apps from sources other than the official store. To do this, go to Settings > Security > Unknown sources and toggle it on.</p>
22
- <h4>Step 2: Download the APK file</h4>
23
- <p>Next, you will need to download the APK file of Mortal Kombat 10 from a reliable source. You can use one of these links:</p>
24
- <ul>
25
- <li>[Mortal Kombat X Download APK for Android (Free) | mob.org](^1^)</li>
26
- <li>[MORTAL KOMBAT: The Ultimate Fighting Game APK for Android - FileHippo](^2^)</li>
27
- <li>[Download Mortal Kombat X Apk 1.10.0 For Android ~ Techswizz](^ <h4>Step 3: Install the APK file</h4>
28
- <p>Once you have downloaded the APK file, you will need to install it on your device. To do this, locate the file in your file manager and tap on it. You will see a prompt asking you to confirm the installation. Tap on Install and wait for the process to finish.</p>
29
- <h4>Step 4: Launch the game and enjoy</h4>
30
- <p>After the installation is complete, you can launch the game from your app drawer or home screen. You will need to grant some permissions and accept some terms and conditions before you can start playing. You will also need to download some additional data for the game to run smoothly. Once everything is ready, you can enjoy Mortal Kombat 10 on your Android device.</p>
31
- <p>download mortal kombat x apk free for android<br />
32
- how to install mortal kombat 10 apk on android phone<br />
33
- mortal kombat x apk mod unlimited coins and souls<br />
34
- best site to download mortal kombat 10 apk and obb<br />
35
- mortal kombat x apk latest version 1.10.0 download<br />
36
- download mortal kombat x apk offline without internet<br />
37
- mortal kombat x apk and data highly compressed<br />
38
- mortal kombat 10 apk full game unlocked all characters<br />
39
- mortal kombat x apk hack no root no survey<br />
40
- download mortal kombat x apk for android tablet<br />
41
- mortal kombat x apk gameplay and review<br />
42
- mortal kombat 10 apk system requirements and compatibility<br />
43
- mortal kombat x apk cheats and tips<br />
44
- download mortal kombat x apk from google play store<br />
45
- mortal kombat x apk file size and download speed<br />
46
- mortal kombat 10 apk features and updates<br />
47
- mortal kombat x apk graphics and sound quality<br />
48
- download mortal kombat x apk for android tv box<br />
49
- mortal kombat x apk multiplayer mode online and offline<br />
50
- mortal kombat 10 apk download link and mirror link<br />
51
- mortal kombat x apk error and bug fixes<br />
52
- download mortal kombat x apk for pc windows 10<br />
53
- mortal kombat x apk controller support and settings<br />
54
- mortal kombat 10 apk rating and user reviews<br />
55
- mortal kombat x apk alternatives and similar games</p>
56
- <h2>Benefits of downloading Mortal Kombat 10 APK for Android</h2>
57
- <p>There are some benefits of downloading Mortal Kombat 10 APK for Android, such as:</p>
58
- <h3>Access to the latest version of the game</h3>
59
- <p>By downloading the APK file, you can get access to the latest version of the game, which may not be available on the Google Play Store. This means you can enjoy new features, updates, bug fixes, and improvements that the developers have made.</p>
60
- <h3>No need to root your device</h3>
61
- <p>Another benefit of downloading the APK file is that you do not need to root your device to play the game. Rooting is a process that gives you full control over your device, but it also voids your warranty and exposes you to security risks. By downloading the APK file, you can avoid rooting and still play the game.</p>
62
- <h3>Save storage space and data usage</h3>
63
- <p>A third benefit of downloading the APK file is that you can save storage space and data usage on your device. The APK file is usually smaller than the official app from the Google Play Store, which means it takes up less space on your device. Moreover, you can download the APK file once and install it offline, which means you do not need to use your data every time you want to play the game.</p>
64
- <h2>Risks of downloading Mortal Kombat 10 APK for Android</h2>
65
- <p>However, there are also some risks of downloading Mortal Kombat 10 APK for Android, such as:</p>
66
- <h3>Potential malware or viruses</h3>
67
- <p>One of the main risks of downloading the APK file is that you might get malware or viruses on your device. This can happen if you download the APK file from an untrusted or malicious source. Malware or viruses can harm your device, steal your personal information, or compromise your privacy. Therefore, you should always download the APK file from a reliable and reputable source.</p>
68
- <h3>Legal issues and violations</h3>
69
- <p>Another risk of downloading the APK file is that you might face legal issues or violations. This can happen if you download the APK file from an unauthorized or illegal source. You might be infringing on the intellectual property rights of the developers or publishers of the game. You might also be violating the terms and conditions of the Google Play Store or your device manufacturer. Therefore, you should always respect the rights and rules of the original creators and distributors of the game.</p>
70
- <h3>Compatibility and performance issues</h3>
71
- <p>A third risk of downloading the APK file is that you might encounter compatibility and performance issues on your device. This can happen if you download the APK file from an outdated or incompatible source. You might face problems such as crashes, glitches, errors, or lagging while playing the game. You might also miss out on some features or functions that are only available on the official app from the Google Play Store. Therefore, you should always check the compatibility and requirements of the APK file before downloading it.</p>
72
- <h2>Conclusion</h2>
73
- <p>Mortal Kombat 10 is a fantastic game that offers a lot of fun and excitement for fighting game fans. If you want to play it on your Android device, you can download Mortal Kombat 10 APK for Android from a trusted source. However, you should also be aware of the benefits and risks of doing so, and take precautions to protect your device and yourself.</p>
74
- <h2>FAQs</h2>
75
- <ul>
76
- <li><b>Q: Is Mortal Kombat 10 free to play on Android?</b></li>
77
- <li>A: Yes, Mortal Kombat 10 is free to play on Android, but it also offers in-app purchases for some items and features.</li>
78
- <li><b>Q: What are the minimum requirements to play Mortal Kombat 10 on Android?</b></li>
79
- <li>A: According to [Mortal Kombat X - Apps on Google Play], you need at least Android 5. 0 K or higher, 1.5 GB of RAM, and a minimum of 1.5 GB of free space on your device.</li>
80
- <li><b>Q: Can I play Mortal Kombat 10 offline on Android?</b></li>
81
- <li>A: No, Mortal Kombat 10 requires an internet connection to play on Android. You will need to connect to the internet to download the game data, access the online features, and sync your progress.</li>
82
- <li><b>Q: Can I play Mortal Kombat 10 with my friends on Android?</b></li>
83
- <li>A: Yes, Mortal Kombat 10 supports online multiplayer mode on Android. You can join a faction and compete with other players in Faction Wars, or challenge your friends in online matches.</li>
84
- <li><b>Q: How can I get more coins and souls in Mortal Kombat 10 on Android?</b></li>
85
- <li>A: You can get more coins and souls in Mortal Kombat 10 by playing the game modes, completing the challenges, participating in the events, and watching the ads. You can also buy them with real money through in-app purchases.</li>
86
- </ul></p> 401be4b1e0<br />
87
- <br />
88
- <br />
spaces/1phancelerku/anime-remove-background/Download Traffic Racer APK Mod and Challenge Your Friends Online.md DELETED
@@ -1,91 +0,0 @@
1
-
2
- <h1>Download Traffic Racer APK Mod: A Guide for Android Users</h1>
3
- <p>If you are a fan of racing games, you might have heard of Traffic Racer, a popular game that lets you drive your car through endless highway traffic and earn cash to upgrade your vehicle. But did you know that you can also download Traffic Racer APK Mod, a modified version of the game that gives you unlimited money and unlocks all the cars and features? In this article, we will tell you everything you need to know about Traffic Racer APK Mod, including what it is, how to download it, and what are the benefits and risks of using it. Read on to find out more.</p>
4
- <h2>What is Traffic Racer?</h2>
5
- <p>Traffic Racer is a 3D racing game developed by Soner Kara, a Turkish game developer. It was released in 2012 for iOS and Android devices. The game has over 100 million downloads on Google Play Store and has an average rating of 4.4 out of 5 stars.</p>
6
- <h2>download traffic racer apk mod</h2><br /><p><b><b>Download</b> &#10040;&#10040;&#10040; <a href="https://jinyurl.com/2uNJlZ">https://jinyurl.com/2uNJlZ</a></b></p><br /><br />
7
- <h3>Features of Traffic Racer</h3>
8
- <p>Traffic Racer has many features that make it an addictive and fun game to play. Some of them are:</p>
9
- <ul>
10
- <li>35 different cars to choose from, ranging from sedans to sports cars to trucks.</li>
11
- <li>5 detailed environments to drive in, such as suburb, desert, snowy, rainy, and city night.</li>
12
- <li>5 game modes to challenge yourself, such as endless, two-way, time trial, police chase, and free ride.</li>
13
- <li>Rich graphics and realistic physics that create a smooth and immersive driving experience.</li>
14
- <li>Customizable car options, such as paint, wheels, and vinyls.</li>
15
- <li>Online leaderboards and achievements to compete with other players around the world.</li>
16
- </ul>
17
- <h3>How to play Traffic Racer</h3>
18
- <p>The gameplay of Traffic Racer is simple and intuitive. You just need to tilt your device to steer your car, touch the gas button to accelerate, and touch the brake button to slow down. The faster you drive, the more points you get. You can also earn extra points by driving in the opposite direction in two-way mode, or by overtaking other cars closely in any mode. You can use the cash you earn to buy new cars or upgrade your existing ones. You can also change the camera view from behind to inside the car for a more realistic feel.</p>
19
- <h2>What is Traffic Racer APK Mod?</h2>
20
- <p>Traffic Racer APK Mod is a modified version of the original game that gives you some advantages that are not available in the official version. For example, you can get unlimited money to buy and upgrade any car you want, or unlock all the cars and features without having to play for hours. You can also remove the ads that may interrupt your gameplay.</p>
21
- <h3>Benefits of Traffic Racer APK Mod</h3>
22
- <p>Some of the benefits of using Traffic Racer APK Mod are:</p>
23
- <p>Download Traffic Racer Mod APK Unlimited Money<br />
24
- How to Install Traffic Racer Modded APK on Android<br />
25
- Traffic Racer Hack APK Download for Free<br />
26
- Best Racing Games Like Traffic Racer for Android<br />
27
- Traffic Racer Mod APK Latest Version 3.6<br />
28
- Download Traffic Racer Mod APK with All Cars Unlocked<br />
29
- Traffic Racer Cheats and Tips for Android<br />
30
- Traffic Racer Mod APK No Root Required<br />
31
- Download Traffic Racer Mod APK Offline Mode<br />
32
- Traffic Racer Review: A Fun and Addictive Racing Game<br />
33
- Download Traffic Racer Mod APK with Unlimited Coins and Gems<br />
34
- Traffic Racer Mod APK vs Original APK: Which One to Choose?<br />
35
- Traffic Racer Gameplay and Features<br />
36
- Download Traffic Racer Mod APK with No Ads<br />
37
- Traffic Racer Mod APK for PC: How to Play on Windows<br />
38
- Download Traffic Racer Mod APK with High Graphics<br />
39
- Traffic Racer Mod APK for iOS: How to Download and Install<br />
40
- Traffic Racer Online: Play with Friends and Compete<br />
41
- Download Traffic Racer Mod APK with New Cars and Tracks<br />
42
- Traffic Racer Update: What's New in Version 3.6</p>
43
- <ul>
44
- <li>You can enjoy the game without any limitations or restrictions.</li>
45
- <li>You can save your time and effort by skipping the grinding process.</li>
46
- <li>You can explore all the cars and environments without having to unlock them.</li>
47
- <li>You can have more fun and excitement by driving faster and crazier.</li>
48
- </ul>
49
- <h3>Risks of Traffic Racer APK Mod</h3>
50
- <p>However, there are also some risks involved in using Traffic Racer APK Mod. Some of them are:</p>
51
- <ul>
52
- <li>You may face legal issues if the game developer finds out that you are using a modified version of their game.</li>
53
- <li>You may lose your progress or data if the modded version is not compatible with the latest version of the game or your device.</li>
54
- <li>You may expose your device to malware or viruses that may harm your system or steal your personal information.</li>
55
- <li>You may lose the thrill and challenge of the game by having everything handed to you.</li>
56
- </ul>
57
- <p>Therefore, you should be careful and responsible when using Traffic Racer APK Mod. You should also respect the game developer's rights and efforts and support them by playing the official version of the game.</p>
58
- <h2>How to download Traffic Racer APK Mod?</h2>
59
- <p>If you still want to download Traffic Racer APK Mod, you need to follow some steps to do it safely and correctly. Here are the steps you need to take:</p>
60
- <h3>Step 1: Enable unknown sources</h3>
61
- <p>Before you can install any APK file on your Android device, you need to enable the option to allow unknown sources. This means that you can install apps that are not from the Google Play Store. To do this, go to your device's settings, then security, then toggle on the unknown sources option. You may see a warning message that tells you about the risks of installing unknown apps, but you can ignore it if you trust the source of the APK file.</p>
62
- <h3>Step 2: Download the APK file</h3>
63
- <p>Next, you need to download the APK file of Traffic Racer Mod from a reliable and trustworthy website. You can search for it on Google or use one of these links:</p>
64
- <ul>
65
- <li><a href="">https://apkpure.com/traffic-racer-mod/com.skgames.trafficracermod</a></li>
66
- <li><a href="">https://android-1.com/en/113-traffic-racer-mod.html</a></li>
67
- <li><a href="">https://rexdl.com/android/traffic-racer-apk.html/</a></li>
68
- </ul>
69
- <p>Make sure you download the latest version of the mod that is compatible with your device and the original game. You can check the file size, version number, and date of update before downloading it. You can also read the reviews and comments from other users to see if they have any issues or complaints about the mod.</p>
70
- <h3>Step 3: Install the APK file</h3>
71
- <p>Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your downloads folder or wherever you saved it, and tap on it. You may see a pop-up window that asks you to confirm the installation. Tap on install and wait for a few seconds until the process is complete.</p>
72
- <h3>Step 4: Launch the game and enjoy</h3>
73
- <p>Finally, you can launch the game and enjoy the modded features. You should see a lot of money in your account and all the cars and features unlocked. You can also customize your car and choose your preferred game mode and environment. Have fun driving through traffic and breaking speed records.</p>
74
- <h2>Conclusion</h2>
75
- <p>Traffic Racer is a great racing game that offers a lot of fun and excitement for Android users. However, some people may want to download Traffic Racer APK Mod, a modified version of the game that gives them unlimited money and unlocks all the cars and features. While this may sound tempting, it also comes with some risks and drawbacks that you should be aware of. Therefore, we recommend that you play the official version of the game and support the game developer by purchasing in-app items or watching ads. This way, you can enjoy the game without any problems or guilt.</p>
76
- <h2>FAQs</h2>
77
- <p>Here are some frequently asked questions about Traffic Racer APK Mod:</p>
78
- <ol>
79
- <li><b>Is Traffic Racer APK Mod safe to use?</b></li>
80
- <p>Traffic Racer APK Mod is not officially endorsed or supported by the game developer, so it may not be safe to use. It may contain malware or viruses that can harm your device or steal your personal information. It may also cause compatibility issues or data loss if it is not updated or installed properly. Therefore, you should use it at your own risk and discretion.</p>
81
- <li><b>Is Traffic Racer APK Mod legal to use?</b></li>
82
- <p>Traffic Racer APK Mod is not legal to use, as it violates the terms and conditions of the game developer and Google Play Store. It also infringes on the intellectual property rights of the game developer and may result in legal action against you if they find out that you are using it. Therefore, you should respect their rights and efforts and play the official version of the game.</p>
83
- <li><b>Can I play Traffic Racer APK Mod online?</b></li>
84
- <p>Traffic Racer APK Mod p>Traffic Racer APK Mod may not work online, as it may be detected and banned by the game server or Google Play Services. It may also cause errors or glitches in the online features, such as leaderboards and achievements. Therefore, you should play the modded version offline or use a VPN to hide your IP address.</p>
85
- <li><b>Can I update Traffic Racer APK Mod?</b></li>
86
- <p>Traffic Racer APK Mod may not be updated automatically, as it is not from the Google Play Store. You may need to download and install the latest version of the mod manually from the website where you got it. However, you should be careful and make sure that the new version is compatible with your device and the original game. You should also backup your data before updating, in case something goes wrong.</p>
87
- <li><b>Can I uninstall Traffic Racer APK Mod?</b></li>
88
- <p>Yes, you can uninstall Traffic Racer APK Mod anytime you want, just like any other app on your device. To do this, go to your device's settings, then apps, then find and tap on Traffic Racer APK Mod. Then, tap on uninstall and confirm your choice. You can also delete the APK file from your downloads folder or wherever you saved it.</p>
89
- </ol></p> 197e85843d<br />
90
- <br />
91
- <br />
spaces/1toTree/lora_test/ppdiffusers/pipelines/audio_diffusion/mel.py DELETED
@@ -1,163 +0,0 @@
1
- # Copyright 2022 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import warnings
17
-
18
- from ...configuration_utils import ConfigMixin, register_to_config
19
- from ...schedulers.scheduling_utils import SchedulerMixin
20
-
21
- warnings.filterwarnings("ignore")
22
-
23
- import numpy as np # noqa: E402
24
-
25
- try:
26
- import librosa # noqa: E402
27
-
28
- _librosa_can_be_imported = True
29
- _import_error = ""
30
- except Exception as e:
31
- _librosa_can_be_imported = False
32
- _import_error = (
33
- f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it."
34
- )
35
-
36
-
37
- from PIL import Image # noqa: E402
38
-
39
-
40
- class Mel(ConfigMixin, SchedulerMixin):
41
- """
42
- Parameters:
43
- x_res (`int`): x resolution of spectrogram (time)
44
- y_res (`int`): y resolution of spectrogram (frequency bins)
45
- sample_rate (`int`): sample rate of audio
46
- n_fft (`int`): number of Fast Fourier Transforms
47
- hop_length (`int`): hop length (a higher number is recommended for lower than 256 y_res)
48
- top_db (`int`): loudest in decibels
49
- n_iter (`int`): number of iterations for Griffin-Lim mel inversion
50
- """
51
-
52
- config_name = "mel_config.json"
53
-
54
- @register_to_config
55
- def __init__(
56
- self,
57
- x_res: int = 256,
58
- y_res: int = 256,
59
- sample_rate: int = 22050,
60
- n_fft: int = 2048,
61
- hop_length: int = 512,
62
- top_db: int = 80,
63
- n_iter: int = 32,
64
- ):
65
- self.hop_length = hop_length
66
- self.sr = sample_rate
67
- self.n_fft = n_fft
68
- self.top_db = top_db
69
- self.n_iter = n_iter
70
- self.set_resolution(x_res, y_res)
71
- self.audio = None
72
-
73
- if not _librosa_can_be_imported:
74
- raise ValueError(_import_error)
75
-
76
- def set_resolution(self, x_res: int, y_res: int):
77
- """Set resolution.
78
-
79
- Args:
80
- x_res (`int`): x resolution of spectrogram (time)
81
- y_res (`int`): y resolution of spectrogram (frequency bins)
82
- """
83
- self.x_res = x_res
84
- self.y_res = y_res
85
- self.n_mels = self.y_res
86
- self.slice_size = self.x_res * self.hop_length - 1
87
-
88
- def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None):
89
- """Load audio.
90
-
91
- Args:
92
- audio_file (`str`): must be a file on disk due to Librosa limitation or
93
- raw_audio (`np.ndarray`): audio as numpy array
94
- """
95
- if audio_file is not None:
96
- self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr)
97
- else:
98
- self.audio = raw_audio
99
-
100
- # Pad with silence if necessary.
101
- if len(self.audio) < self.x_res * self.hop_length:
102
- self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))])
103
-
104
- def get_number_of_slices(self) -> int:
105
- """Get number of slices in audio.
106
-
107
- Returns:
108
- `int`: number of spectograms audio can be sliced into
109
- """
110
- return len(self.audio) // self.slice_size
111
-
112
- def get_audio_slice(self, slice: int = 0) -> np.ndarray:
113
- """Get slice of audio.
114
-
115
- Args:
116
- slice (`int`): slice number of audio (out of get_number_of_slices())
117
-
118
- Returns:
119
- `np.ndarray`: audio as numpy array
120
- """
121
- return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)]
122
-
123
- def get_sample_rate(self) -> int:
124
- """Get sample rate:
125
-
126
- Returns:
127
- `int`: sample rate of audio
128
- """
129
- return self.sr
130
-
131
- def audio_slice_to_image(self, slice: int) -> Image.Image:
132
- """Convert slice of audio to spectrogram.
133
-
134
- Args:
135
- slice (`int`): slice number of audio to convert (out of get_number_of_slices())
136
-
137
- Returns:
138
- `PIL Image`: grayscale image of x_res x y_res
139
- """
140
- S = librosa.feature.melspectrogram(
141
- y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels
142
- )
143
- log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db)
144
- bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8)
145
- image = Image.fromarray(bytedata)
146
- return image
147
-
148
- def image_to_audio(self, image: Image.Image) -> np.ndarray:
149
- """Converts spectrogram to audio.
150
-
151
- Args:
152
- image (`PIL Image`): x_res x y_res grayscale image
153
-
154
- Returns:
155
- audio (`np.ndarray`): raw audio
156
- """
157
- bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width))
158
- log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db
159
- S = librosa.db_to_power(log_S)
160
- audio = librosa.feature.inverse.mel_to_audio(
161
- S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter
162
- )
163
- return audio
spaces/52Hz/CMFNet_deblurring/README.md DELETED
@@ -1,37 +0,0 @@
1
- ---
2
- title: CMFNet_deblurring
3
- emoji: 🍻
4
- colorFrom: indigo
5
- colorTo: indigo
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio`, `streamlit`, or `static`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
34
- Path is relative to the root of the repository.
35
-
36
- `pinned`: _boolean_
37
- Whether the Space stays on top of your list.
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/fasttext/create_word_embedding.py DELETED
@@ -1,50 +0,0 @@
1
- # coding=utf-8
2
- #!/usr/bin/env python3
3
-
4
- import numpy as np
5
- import pandas as pd
6
- import torch
7
- from gensim.models import FastText
8
- from tqdm import tqdm
9
- import fire
10
-
11
- import sys
12
- import os
13
- sys.path.append(os.getcwd())
14
- from utils.build_vocab import Vocabulary
15
-
16
- def create_embedding(caption_file: str,
17
- vocab_file: str,
18
- embed_size: int,
19
- output: str,
20
- **fasttext_kwargs):
21
- caption_df = pd.read_json(caption_file)
22
- caption_df["tokens"] = caption_df["tokens"].apply(lambda x: ["<start>"] + [token for token in x] + ["<end>"])
23
-
24
- sentences = list(caption_df["tokens"].values)
25
- vocabulary = torch.load(vocab_file, map_location="cpu")
26
-
27
- epochs = fasttext_kwargs.get("epochs", 10)
28
- model = FastText(size=embed_size, min_count=1, **fasttext_kwargs)
29
- model.build_vocab(sentences=sentences)
30
- model.train(sentences=sentences, total_examples=len(sentences), epochs=epochs)
31
-
32
- word_embeddings = np.zeros((len(vocabulary), embed_size))
33
-
34
- with tqdm(total=len(vocabulary), ascii=True) as pbar:
35
- for word, idx in vocabulary.word2idx.items():
36
- if word == "<pad>" or word == "<unk>":
37
- continue
38
- word_embeddings[idx] = model.wv[word]
39
- pbar.update()
40
-
41
- np.save(output, word_embeddings)
42
-
43
- print("Finish writing fasttext embeddings to " + output)
44
-
45
-
46
- if __name__ == "__main__":
47
- fire.Fire(create_embedding)
48
-
49
-
50
-
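
Because the script hands `create_embedding` to `fire.Fire`, the same function can also be driven directly from Python. A hedged sketch with placeholder paths follows; it assumes the vocabulary file is a `Vocabulary` object saved with `torch.save`, as the `torch.load` call above implies, and a gensim 3.x install, since `FastText` is constructed with `size=` rather than the 4.x `vector_size=`.

```python
# Hypothetical direct call mirroring the fire CLI entry point above.
from create_word_embedding import create_embedding  # assumed module name

create_embedding(
    caption_file="data/captions.json",   # JSON with a "tokens" column (placeholder)
    vocab_file="data/vocab.pth",         # torch-saved Vocabulary (placeholder)
    embed_size=300,
    output="data/fasttext_emb.npy",      # array written by np.save (placeholder)
)
```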
 
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/discriminator/model.py DELETED
@@ -1,295 +0,0 @@
1
- import functools
2
- import torch.nn as nn
3
-
4
-
5
- class ActNorm(nn.Module):
6
- def __init__(self, num_features, logdet=False, affine=True,
7
- allow_reverse_init=False):
8
- assert affine
9
- super().__init__()
10
- self.logdet = logdet
11
- self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
12
- self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
13
- self.allow_reverse_init = allow_reverse_init
14
-
15
- self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
16
-
17
- def initialize(self, input):
18
- with torch.no_grad():
19
- flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
20
- mean = (
21
- flatten.mean(1)
22
- .unsqueeze(1)
23
- .unsqueeze(2)
24
- .unsqueeze(3)
25
- .permute(1, 0, 2, 3)
26
- )
27
- std = (
28
- flatten.std(1)
29
- .unsqueeze(1)
30
- .unsqueeze(2)
31
- .unsqueeze(3)
32
- .permute(1, 0, 2, 3)
33
- )
34
-
35
- self.loc.data.copy_(-mean)
36
- self.scale.data.copy_(1 / (std + 1e-6))
37
-
38
- def forward(self, input, reverse=False):
39
- if reverse:
40
- return self.reverse(input)
41
- if len(input.shape) == 2:
42
- input = input[:, :, None, None]
43
- squeeze = True
44
- else:
45
- squeeze = False
46
-
47
- _, _, height, width = input.shape
48
-
49
- if self.training and self.initialized.item() == 0:
50
- self.initialize(input)
51
- self.initialized.fill_(1)
52
-
53
- h = self.scale * (input + self.loc)
54
-
55
- if squeeze:
56
- h = h.squeeze(-1).squeeze(-1)
57
-
58
- if self.logdet:
59
- log_abs = torch.log(torch.abs(self.scale))
60
- logdet = height * width * torch.sum(log_abs)
61
- logdet = logdet * torch.ones(input.shape[0]).to(input)
62
- return h, logdet
63
-
64
- return h
65
-
66
- def reverse(self, output):
67
- if self.training and self.initialized.item() == 0:
68
- if not self.allow_reverse_init:
69
- raise RuntimeError(
70
- "Initializing ActNorm in reverse direction is "
71
- "disabled by default. Use allow_reverse_init=True to enable."
72
- )
73
- else:
74
- self.initialize(output)
75
- self.initialized.fill_(1)
76
-
77
- if len(output.shape) == 2:
78
- output = output[:, :, None, None]
79
- squeeze = True
80
- else:
81
- squeeze = False
82
-
83
- h = output / self.scale - self.loc
84
-
85
- if squeeze:
86
- h = h.squeeze(-1).squeeze(-1)
87
- return h
88
-
89
- def weights_init(m):
90
- classname = m.__class__.__name__
91
- if classname.find('Conv') != -1:
92
- nn.init.normal_(m.weight.data, 0.0, 0.02)
93
- elif classname.find('BatchNorm') != -1:
94
- nn.init.normal_(m.weight.data, 1.0, 0.02)
95
- nn.init.constant_(m.bias.data, 0)
96
-
97
-
98
- class NLayerDiscriminator(nn.Module):
99
- """Defines a PatchGAN discriminator as in Pix2Pix
100
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
101
- """
102
- def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
103
- """Construct a PatchGAN discriminator
104
- Parameters:
105
- input_nc (int) -- the number of channels in input images
106
- ndf (int) -- the number of filters in the last conv layer
107
- n_layers (int) -- the number of conv layers in the discriminator
108
- norm_layer -- normalization layer
109
- """
110
- super(NLayerDiscriminator, self).__init__()
111
- if not use_actnorm:
112
- norm_layer = nn.BatchNorm2d
113
- else:
114
- norm_layer = ActNorm
115
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
116
- use_bias = norm_layer.func != nn.BatchNorm2d
117
- else:
118
- use_bias = norm_layer != nn.BatchNorm2d
119
-
120
- kw = 4
121
- padw = 1
122
- sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
123
- nf_mult = 1
124
- nf_mult_prev = 1
125
- for n in range(1, n_layers): # gradually increase the number of filters
126
- nf_mult_prev = nf_mult
127
- nf_mult = min(2 ** n, 8)
128
- sequence += [
129
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
130
- norm_layer(ndf * nf_mult),
131
- nn.LeakyReLU(0.2, True)
132
- ]
133
-
134
- nf_mult_prev = nf_mult
135
- nf_mult = min(2 ** n_layers, 8)
136
- sequence += [
137
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
138
- norm_layer(ndf * nf_mult),
139
- nn.LeakyReLU(0.2, True)
140
- ]
141
- # output 1 channel prediction map
142
- sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
143
- self.main = nn.Sequential(*sequence)
144
-
145
- def forward(self, input):
146
- """Standard forward."""
147
- return self.main(input)
148
-
149
- class NLayerDiscriminator1dFeats(NLayerDiscriminator):
150
- """Defines a PatchGAN discriminator as in Pix2Pix
151
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
152
- """
153
- def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
154
- """Construct a PatchGAN discriminator
155
- Parameters:
156
- input_nc (int) -- the number of channels in input feats
157
- ndf (int) -- the number of filters in the last conv layer
158
- n_layers (int) -- the number of conv layers in the discriminator
159
- norm_layer -- normalization layer
160
- """
161
- super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
162
-
163
- if not use_actnorm:
164
- norm_layer = nn.BatchNorm1d
165
- else:
166
- norm_layer = ActNorm
167
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm has affine parameters
168
- use_bias = norm_layer.func != nn.BatchNorm1d
169
- else:
170
- use_bias = norm_layer != nn.BatchNorm1d
171
-
172
- kw = 4
173
- padw = 1
174
- sequence = [nn.Conv1d(input_nc, input_nc//2, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
175
- nf_mult = input_nc//2
176
- nf_mult_prev = 1
177
- for n in range(1, n_layers): # gradually decrease the number of filters
178
- nf_mult_prev = nf_mult
179
- nf_mult = max(nf_mult_prev // (2 ** n), 8)
180
- sequence += [
181
- nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
182
- norm_layer(nf_mult),
183
- nn.LeakyReLU(0.2, True)
184
- ]
185
-
186
- nf_mult_prev = nf_mult
187
- nf_mult = max(nf_mult_prev // (2 ** n), 8)
188
- sequence += [
189
- nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
190
- norm_layer(nf_mult),
191
- nn.LeakyReLU(0.2, True)
192
- ]
193
- nf_mult_prev = nf_mult
194
- nf_mult = max(nf_mult_prev // (2 ** n), 8)
195
- sequence += [
196
- nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
197
- norm_layer(nf_mult),
198
- nn.LeakyReLU(0.2, True)
199
- ]
200
- # output 1 channel prediction map
201
- sequence += [nn.Conv1d(nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
202
- self.main = nn.Sequential(*sequence)
203
-
204
-
205
- class NLayerDiscriminator1dSpecs(NLayerDiscriminator):
206
- """Defines a PatchGAN discriminator as in Pix2Pix
207
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
208
- """
209
- def __init__(self, input_nc=80, ndf=64, n_layers=3, use_actnorm=False):
210
- """Construct a PatchGAN discriminator
211
- Parameters:
212
- input_nc (int) -- the number of channels in input specs
213
- ndf (int) -- the number of filters in the last conv layer
214
- n_layers (int) -- the number of conv layers in the discriminator
215
- norm_layer -- normalization layer
216
- """
217
- super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
218
-
219
- if not use_actnorm:
220
- norm_layer = nn.BatchNorm1d
221
- else:
222
- norm_layer = ActNorm
223
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm has affine parameters
224
- use_bias = norm_layer.func != nn.BatchNorm1d
225
- else:
226
- use_bias = norm_layer != nn.BatchNorm1d
227
-
228
- kw = 4
229
- padw = 1
230
- sequence = [nn.Conv1d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
231
- nf_mult = 1
232
- nf_mult_prev = 1
233
- for n in range(1, n_layers): # gradually decrease the number of filters
234
- nf_mult_prev = nf_mult
235
- nf_mult = min(2 ** n, 8)
236
- sequence += [
237
- nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
238
- norm_layer(ndf * nf_mult),
239
- nn.LeakyReLU(0.2, True)
240
- ]
241
-
242
- nf_mult_prev = nf_mult
243
- nf_mult = min(2 ** n_layers, 8)
244
- sequence += [
245
- nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
246
- norm_layer(ndf * nf_mult),
247
- nn.LeakyReLU(0.2, True)
248
- ]
249
- # output 1 channel prediction map
250
- sequence += [nn.Conv1d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
251
- self.main = nn.Sequential(*sequence)
252
-
253
- def forward(self, input):
254
- """Standard forward."""
255
- # (B, C, L)
256
- input = input.squeeze(1)
257
- input = self.main(input)
258
- return input
259
-
260
-
261
- if __name__ == '__main__':
262
- import torch
263
-
264
- ## FEATURES
265
- disc_in_channels = 2048
266
- disc_num_layers = 2
267
- use_actnorm = False
268
- disc_ndf = 64
269
- discriminator = NLayerDiscriminator1dFeats(input_nc=disc_in_channels, n_layers=disc_num_layers,
270
- use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
271
- inputs = torch.rand((6, 2048, 212))
272
- outputs = discriminator(inputs)
273
- print(outputs.shape)
274
-
275
- ## AUDIO
276
- disc_in_channels = 1
277
- disc_num_layers = 3
278
- use_actnorm = False
279
- disc_ndf = 64
280
- discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
281
- use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
282
- inputs = torch.rand((6, 1, 80, 848))
283
- outputs = discriminator(inputs)
284
- print(outputs.shape)
285
-
286
- ## IMAGE
287
- disc_in_channels = 3
288
- disc_num_layers = 3
289
- use_actnorm = False
290
- disc_ndf = 64
291
- discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
292
- use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
293
- inputs = torch.rand((6, 3, 256, 256))
294
- outputs = discriminator(inputs)
295
- print(outputs.shape)
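
The `__main__` block above only prints the resulting shapes; as a hedged cross-check (import path assumed), the default image discriminator with `n_layers=3` stacks three stride-2 and two stride-1 4x4 convolutions, so a 256x256 input should come out as a 30x30 PatchGAN prediction map, roughly one logit per overlapping 70x70 patch.

```python
import torch
from model import NLayerDiscriminator, weights_init  # assumed import path

d = NLayerDiscriminator(input_nc=3, n_layers=3).apply(weights_init)
out = d(torch.rand(1, 3, 256, 256))
print(out.shape)  # expected: torch.Size([1, 1, 30, 30])
```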
 
spaces/AIML-TUDA/safe-stable-diffusion/share_btn.py DELETED
@@ -1,68 +0,0 @@
1
- community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
- <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
- <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
- </svg>"""
5
-
6
- loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
7
- style="color: #ffffff;
8
- "
9
- xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
10
-
11
- share_js = """async () => {
12
- async function uploadFile(file){
13
- const UPLOAD_URL = 'https://huggingface.co/uploads';
14
- const response = await fetch(UPLOAD_URL, {
15
- method: 'POST',
16
- headers: {
17
- 'Content-Type': file.type,
18
- 'X-Requested-With': 'XMLHttpRequest',
19
- },
20
- body: file, /// <- File inherits from Blob
21
- });
22
- const url = await response.text();
23
- return url;
24
- }
25
-
26
- const gradioEl = document.querySelector('body > gradio-app');
27
- const imgEls = gradioEl.querySelectorAll('#gallery img');
28
- const promptTxt = gradioEl.querySelector('#prompt-text-input input').value;
29
- const shareBtnEl = gradioEl.querySelector('#share-btn');
30
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
31
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
32
-
33
- if(!imgEls.length){
34
- return;
35
- };
36
-
37
- shareBtnEl.style.pointerEvents = 'none';
38
- shareIconEl.style.display = 'none';
39
- loadingIconEl.style.removeProperty('display');
40
-
41
- const files = await Promise.all(
42
- [...imgEls].map(async (imgEl) => {
43
- const res = await fetch(imgEl.src);
44
- const blob = await res.blob();
45
- const imgId = Date.now() % 200;
46
- const fileName = `diffuse-the-rest-${{imgId}}.jpg`;
47
- return new File([blob], fileName, { type: 'image/jpeg' });
48
- })
49
- );
50
-
51
- const urls = await Promise.all(files.map((f) => uploadFile(f)));
52
- const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
53
- const descriptionMd = `<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
54
- ${htmlImgs.join(`\n`)}
55
- </div>`;
56
-
57
- const params = new URLSearchParams({
58
- title: promptTxt,
59
- description: descriptionMd,
60
- });
61
-
62
- const paramsStr = params.toString();
63
- window.open(`https://huggingface.co/spaces/stabilityai/stable-diffusion/discussions/new?${paramsStr}`, '_blank');
64
-
65
- shareBtnEl.style.removeProperty('pointer-events');
66
- shareIconEl.style.removeProperty('display');
67
- loadingIconEl.style.display = 'none';
68
- }"""
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet101_cifar.py DELETED
@@ -1,16 +0,0 @@
- # model settings
- model = dict(
-     type='ImageClassifier',
-     backbone=dict(
-         type='ResNet_CIFAR',
-         depth=101,
-         num_stages=4,
-         out_indices=(3, ),
-         style='pytorch'),
-     neck=dict(type='GlobalAveragePooling'),
-     head=dict(
-         type='LinearClsHead',
-         num_classes=10,
-         in_channels=2048,
-         loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
-     ))
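
A hedged sanity check of the head width in this config: ResNet-101's final stage outputs 512 channels times the Bottleneck expansion of 4, which is where `in_channels=2048` comes from. The snippet assumes `mmengine` is available (mmpretrain configs are normally loaded with it) and uses the file name as a placeholder path.

```python
from mmengine.config import Config

cfg = Config.fromfile("resnet101_cifar.py")    # placeholder path
assert cfg.model.head.in_channels == 512 * 4   # Bottleneck expansion of ResNet-101
assert cfg.model.backbone.out_indices == (3,)  # only the last stage feeds the head
```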
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateTextBox.js DELETED
@@ -1,8 +0,0 @@
- import CreateAnyLabel from './utils/CreateAnyLabel.js';
- import TextBox from '../../textbox/TextBox.js';
-
- var CreateTextBox = function (scene, data, view, styles, customBuilders) {
-     return CreateAnyLabel(scene, data, view, styles, customBuilders, TextBox);
- }
-
- export default CreateTextBox;
 
spaces/AlexWortega/t5_predict_activity/app.py DELETED
@@ -1,45 +0,0 @@
1
- import torch
2
- import gradio as gr
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
- import random
5
- device = 'cpu'
6
-
7
- def ans(question ):
8
- description=''
9
- category=''
10
- seed = random.randint(1, 10000000)
11
- print(f'Seed: {seed}')
12
- torch.manual_seed(seed)
13
-
14
- inp = tokenizer.encode(f'Вопрос: {question}\nОписание: {description}\nОтвет:',return_tensors="pt").to(device)
15
- print('question',question)
16
- gen = model.generate(inp, do_sample=True, top_p=0.9, temperature=0.86, max_new_tokens=100, repetition_penalty=1.2) #, stop_token="<eos>")
17
-
18
- gen = tokenizer.decode(gen[0])
19
- gen = gen[:gen.index('<eos>') if '<eos>' in gen else len(gen)]
20
- gen = gen.split('Ответ:')[1]
21
- return gen
22
-
23
-
24
-
25
-
26
-
27
-
28
-
29
- # Download checkpoint:
30
- checkpoint = "its5Q/rugpt3large_mailqa"
31
- tokenizer = AutoTokenizer.from_pretrained(checkpoint)
32
- model = AutoModelForCausalLM.from_pretrained(checkpoint)
33
- model = model.eval()
34
-
35
- # Gradio
36
-
37
- title = "Ответы на главные вопросы жизни, вселенной и вообще"
38
- description = "t5 large predict activity "
39
- article = "<p style='text-align: center'><a href='https://github.com/NeuralPushkin/MailRu_Q-A'>Github with fine-tuning ruGPT3large on QA</a></p> Cозданно при поддержке <p style='text-align: center'><a href='https://t.me/lovedeathtransformers'>Love Death Transformers</a></p>"
40
-
41
-
42
- iface = gr.Interface(fn=ans, title=title, description=description, article=article, inputs="text", outputs="text")
43
-
44
- if __name__ == "__main__":
45
- iface.launch()
 
spaces/Aloento/9Nine-PITS/modules.py DELETED
@@ -1,426 +0,0 @@
1
- # from https://github.com/jaywalnut310/vits
2
- import math
3
-
4
- import torch
5
- from torch import nn
6
- from torch.nn import Conv1d
7
- from torch.nn import functional as F
8
- from torch.nn.utils import weight_norm, remove_weight_norm
9
-
10
- import commons
11
- from commons import init_weights, get_padding
12
- from transforms import piecewise_rational_quadratic_transform
13
-
14
- LRELU_SLOPE = 0.1
15
-
16
-
17
- class LayerNorm(nn.Module):
18
- def __init__(self, channels, eps=1e-5):
19
- super().__init__()
20
- self.channels = channels
21
- self.eps = eps
22
-
23
- self.gamma = nn.Parameter(torch.ones(channels))
24
- self.beta = nn.Parameter(torch.zeros(channels))
25
-
26
- def forward(self, x):
27
- x = x.transpose(1, -1)
28
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
29
- return x.transpose(1, -1)
30
-
31
-
32
- class ConvReluNorm(nn.Module):
33
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
34
- super().__init__()
35
- self.in_channels = in_channels
36
- self.hidden_channels = hidden_channels
37
- self.out_channels = out_channels
38
- self.kernel_size = kernel_size
39
- self.n_layers = n_layers
40
- self.p_dropout = p_dropout
41
- assert n_layers > 1, "Number of layers should be larger than 0."
42
-
43
- self.conv_layers = nn.ModuleList()
44
- self.norm_layers = nn.ModuleList()
45
- self.conv_layers.append(
46
- nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
47
- )
48
- self.norm_layers.append(LayerNorm(hidden_channels))
49
- self.relu_drop = nn.Sequential(
50
- nn.ReLU(),
51
- nn.Dropout(p_dropout))
52
- for _ in range(n_layers - 1):
53
- self.conv_layers.append(nn.Conv1d(
54
- hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)
55
- )
56
- self.norm_layers.append(LayerNorm(hidden_channels))
57
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
58
- self.proj.weight.data.zero_()
59
- self.proj.bias.data.zero_()
60
-
61
- def forward(self, x, x_mask):
62
- x_org = x
63
- for i in range(self.n_layers):
64
- x = self.conv_layers[i](x * x_mask)
65
- x = self.norm_layers[i](x)
66
- x = self.relu_drop(x)
67
- x = x_org + self.proj(x)
68
- return x * x_mask
69
-
70
-
71
- class DDSConv(nn.Module):
72
- """Dialted and Depth-Separable Convolution"""
73
-
74
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
75
- super().__init__()
76
- self.channels = channels
77
- self.kernel_size = kernel_size
78
- self.n_layers = n_layers
79
- self.p_dropout = p_dropout
80
-
81
- self.drop = nn.Dropout(p_dropout)
82
- self.convs_sep = nn.ModuleList()
83
- self.convs_1x1 = nn.ModuleList()
84
- self.norms_1 = nn.ModuleList()
85
- self.norms_2 = nn.ModuleList()
86
- for i in range(n_layers):
87
- dilation = kernel_size ** i
88
- padding = (kernel_size * dilation - dilation) // 2
89
- self.convs_sep.append(
90
- nn.Conv1d(
91
- channels,
92
- channels,
93
- kernel_size,
94
- groups=channels,
95
- dilation=dilation,
96
- padding=padding
97
- )
98
- )
99
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
100
- self.norms_1.append(LayerNorm(channels))
101
- self.norms_2.append(LayerNorm(channels))
102
-
103
- def forward(self, x, x_mask, g=None):
104
- if g is not None:
105
- x = x + g
106
- for i in range(self.n_layers):
107
- y = self.convs_sep[i](x * x_mask)
108
- y = self.norms_1[i](y)
109
- y = F.gelu(y)
110
- y = self.convs_1x1[i](y)
111
- y = self.norms_2[i](y)
112
- y = F.gelu(y)
113
- y = self.drop(y)
114
- x = x + y
115
- return x * x_mask
116
-
117
-
118
- class WN(torch.nn.Module):
119
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
120
- super(WN, self).__init__()
121
- assert (kernel_size % 2 == 1)
122
- self.hidden_channels = hidden_channels
123
- self.kernel_size = kernel_size,
124
- self.dilation_rate = dilation_rate
125
- self.n_layers = n_layers
126
- self.gin_channels = gin_channels
127
- self.p_dropout = p_dropout
128
-
129
- self.in_layers = torch.nn.ModuleList()
130
- self.res_skip_layers = torch.nn.ModuleList()
131
- self.drop = nn.Dropout(p_dropout)
132
-
133
- if gin_channels != 0:
134
- cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
135
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
136
-
137
- for i in range(n_layers):
138
- dilation = dilation_rate ** i
139
- padding = int((kernel_size * dilation - dilation) / 2)
140
- in_layer = torch.nn.Conv1d(
141
- hidden_channels,
142
- 2 * hidden_channels,
143
- kernel_size,
144
- dilation=dilation,
145
- padding=padding
146
- )
147
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
148
- self.in_layers.append(in_layer)
149
-
150
- # last one is not necessary
151
- if i < n_layers - 1:
152
- res_skip_channels = 2 * hidden_channels
153
- else:
154
- res_skip_channels = hidden_channels
155
-
156
- res_skip_layer = torch.nn.Conv1d(
157
- hidden_channels, res_skip_channels, 1
158
- )
159
- res_skip_layer = torch.nn.utils.weight_norm(
160
- res_skip_layer, name='weight'
161
- )
162
- self.res_skip_layers.append(res_skip_layer)
163
-
164
- def forward(self, x, x_mask, g=None, **kwargs):
165
- output = torch.zeros_like(x)
166
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
167
-
168
- if g is not None:
169
- g = self.cond_layer(g)
170
-
171
- for i in range(self.n_layers):
172
- x_in = self.in_layers[i](x)
173
- if g is not None:
174
- cond_offset = i * 2 * self.hidden_channels
175
- g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
176
- else:
177
- g_l = torch.zeros_like(x_in)
178
-
179
- acts = commons.fused_add_tanh_sigmoid_multiply(
180
- x_in,
181
- g_l,
182
- n_channels_tensor
183
- )
184
- acts = self.drop(acts)
185
-
186
- res_skip_acts = self.res_skip_layers[i](acts)
187
- if i < self.n_layers - 1:
188
- res_acts = res_skip_acts[:, :self.hidden_channels, :]
189
- x = (x + res_acts) * x_mask
190
- output = output + res_skip_acts[:, self.hidden_channels:, :]
191
- else:
192
- output = output + res_skip_acts
193
- return output * x_mask
194
-
195
- def remove_weight_norm(self):
196
- if self.gin_channels != 0:
197
- torch.nn.utils.remove_weight_norm(self.cond_layer)
198
- for l in self.in_layers:
199
- torch.nn.utils.remove_weight_norm(l)
200
- for l in self.res_skip_layers:
201
- torch.nn.utils.remove_weight_norm(l)
202
-
203
-
204
- class ResBlock1(torch.nn.Module):
205
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
206
- super(ResBlock1, self).__init__()
207
- self.convs1 = nn.ModuleList([
208
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
209
- padding=get_padding(kernel_size, dilation[0]))),
210
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
211
- padding=get_padding(kernel_size, dilation[1]))),
212
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
213
- padding=get_padding(kernel_size, dilation[2])))
214
- ])
215
- self.convs1.apply(init_weights)
216
-
217
- self.convs2 = nn.ModuleList([
218
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
219
- padding=get_padding(kernel_size, 1))),
220
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
221
- padding=get_padding(kernel_size, 1))),
222
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
223
- padding=get_padding(kernel_size, 1)))
224
- ])
225
- self.convs2.apply(init_weights)
226
-
227
- def forward(self, x, x_mask=None):
228
- for c1, c2 in zip(self.convs1, self.convs2):
229
- xt = F.leaky_relu(x, LRELU_SLOPE)
230
- if x_mask is not None:
231
- xt = xt * x_mask
232
- xt = c1(xt)
233
- xt = F.leaky_relu(xt, LRELU_SLOPE)
234
- if x_mask is not None:
235
- xt = xt * x_mask
236
- xt = c2(xt)
237
- x = xt + x
238
- if x_mask is not None:
239
- x = x * x_mask
240
- return x
241
-
242
- def remove_weight_norm(self):
243
- for l in self.convs1:
244
- remove_weight_norm(l)
245
- for l in self.convs2:
246
- remove_weight_norm(l)
247
-
248
-
249
- class ResBlock2(torch.nn.Module):
250
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
251
- super(ResBlock2, self).__init__()
252
- self.convs = nn.ModuleList([
253
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
254
- padding=get_padding(kernel_size, dilation[0]))),
255
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
256
- padding=get_padding(kernel_size, dilation[1])))
257
- ])
258
- self.convs.apply(init_weights)
259
-
260
- def forward(self, x, x_mask=None):
261
- for c in self.convs:
262
- xt = F.leaky_relu(x, LRELU_SLOPE)
263
- if x_mask is not None:
264
- xt = xt * x_mask
265
- xt = c(xt)
266
- x = xt + x
267
- if x_mask is not None:
268
- x = x * x_mask
269
- return x
270
-
271
- def remove_weight_norm(self):
272
- for l in self.convs:
273
- remove_weight_norm(l)
274
-
275
-
276
- class Log(nn.Module):
277
- def forward(self, x, x_mask, reverse=False, **kwargs):
278
- if not reverse:
279
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
280
- logdet = torch.sum(-y, [1, 2])
281
- return y, logdet
282
- else:
283
- x = torch.exp(x) * x_mask
284
- return x
285
-
286
-
287
- class Flip(nn.Module):
288
- def forward(self, x, *args, reverse=False, **kwargs):
289
- x = torch.flip(x, [1])
290
- if not reverse:
291
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
292
- return x, logdet
293
- else:
294
- return x
295
-
296
-
297
- class ElementwiseAffine(nn.Module):
298
- def __init__(self, channels):
299
- super().__init__()
300
- self.channels = channels
301
- self.m = nn.Parameter(torch.zeros(channels, 1))
302
- self.logs = nn.Parameter(torch.zeros(channels, 1))
303
-
304
- def forward(self, x, x_mask, reverse=False, **kwargs):
305
- if not reverse:
306
- y = self.m + torch.exp(self.logs) * x
307
- y = y * x_mask
308
- logdet = torch.sum(self.logs * x_mask, [1, 2])
309
- return y, logdet
310
- else:
311
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
312
- return x
313
-
314
-
315
- class ResidualCouplingLayer(nn.Module):
316
- def __init__(
317
- self,
318
- channels,
319
- hidden_channels,
320
- kernel_size,
321
- dilation_rate,
322
- n_layers,
323
- p_dropout=0,
324
- gin_channels=0,
325
- mean_only=False
326
- ):
327
- assert channels % 2 == 0, "channels should be divisible by 2"
328
- super().__init__()
329
- self.channels = channels
330
- self.hidden_channels = hidden_channels
331
- self.kernel_size = kernel_size
332
- self.dilation_rate = dilation_rate
333
- self.n_layers = n_layers
334
- self.half_channels = channels // 2
335
- self.mean_only = mean_only
336
-
337
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
338
- self.enc = WN(
339
- hidden_channels,
340
- kernel_size,
341
- dilation_rate,
342
- n_layers,
343
- p_dropout=p_dropout,
344
- gin_channels=gin_channels
345
- )
346
- self.post = nn.Conv1d(
347
- hidden_channels, self.half_channels * (2 - mean_only), 1
348
- )
349
- self.post.weight.data.zero_()
350
- self.post.bias.data.zero_()
351
-
352
- def forward(self, x, x_mask, g=None, reverse=False):
353
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
354
- h = self.pre(x0) * x_mask
355
- h = self.enc(h, x_mask, g=g)
356
- stats = self.post(h) * x_mask
357
- if not self.mean_only:
358
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
359
- else:
360
- m = stats
361
- logs = torch.zeros_like(m)
362
-
363
- if not reverse:
364
- x1 = m + x1 * torch.exp(logs) * x_mask
365
- x = torch.cat([x0, x1], 1)
366
- logdet = torch.sum(logs, [1, 2])
367
- return x, logdet
368
- else:
369
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
370
- x = torch.cat([x0, x1], 1)
371
- return x
372
-
373
-
374
- class ConvFlow(nn.Module):
375
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
376
- super().__init__()
377
- self.in_channels = in_channels
378
- self.filter_channels = filter_channels
379
- self.kernel_size = kernel_size
380
- self.n_layers = n_layers
381
- self.num_bins = num_bins
382
- self.tail_bound = tail_bound
383
- self.half_channels = in_channels // 2
384
-
385
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
386
- self.convs = DDSConv(
387
- filter_channels, kernel_size, n_layers, p_dropout=0.
388
- )
389
- self.proj = nn.Conv1d(
390
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
391
- )
392
- self.proj.weight.data.zero_()
393
- self.proj.bias.data.zero_()
394
-
395
- def forward(self, x, x_mask, g=None, reverse=False):
396
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
397
- h = self.pre(x0)
398
- h = self.convs(h, x_mask, g=g)
399
- h = self.proj(h) * x_mask
400
-
401
- b, c, t = x0.shape
402
- # [b, cx?, t] -> [b, c, t, ?]
403
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)
404
-
405
- unnormalized_widths = h[..., :self.num_bins] / \
406
- math.sqrt(self.filter_channels)
407
- unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / \
408
- math.sqrt(self.filter_channels)
409
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
410
-
411
- x1, logabsdet = piecewise_rational_quadratic_transform(
412
- x1,
413
- unnormalized_widths,
414
- unnormalized_heights,
415
- unnormalized_derivatives,
416
- inverse=reverse,
417
- tails='linear',
418
- tail_bound=self.tail_bound
419
- )
420
-
421
- x = torch.cat([x0, x1], 1) * x_mask
422
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
423
- if not reverse:
424
- return x, logdet
425
- else:
426
- return x
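
A minimal shape sketch for the coupling flow defined above. It is a hedged example, assuming this file is importable as `modules.py` together with its `commons` and `transforms` dependencies; the tensor sizes are placeholders.

```python
import torch
from modules import ResidualCouplingLayer  # assumed module name

layer = ResidualCouplingLayer(
    channels=192, hidden_channels=192, kernel_size=5,
    dilation_rate=1, n_layers=4, mean_only=True,
)
x = torch.randn(2, 192, 50)      # [batch, channels, frames]
x_mask = torch.ones(2, 1, 50)    # all frames valid
y, logdet = layer(x, x_mask)             # forward pass: transformed tensor + log-determinant
x_back = layer(y, x_mask, reverse=True)  # reverse pass inverts the coupling
```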
 
spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_to_onnx.py DELETED
@@ -1,31 +0,0 @@
- import ONNXVITS_models
- import utils
- from text import text_to_sequence
- import torch
- import commons
-
- def get_text(text, hps):
-     text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
-     if hps.data.add_blank:
-         text_norm = commons.intersperse(text_norm, 0)
-     text_norm = torch.LongTensor(text_norm)
-     return text_norm
-
- hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
- symbols = hps.symbols
- net_g = ONNXVITS_models.SynthesizerTrn(
-     len(symbols),
-     hps.data.filter_length // 2 + 1,
-     hps.train.segment_size // hps.data.hop_length,
-     n_speakers=hps.data.n_speakers,
-     **hps.model)
- _ = net_g.eval()
- _ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
-
- text1 = get_text("ありがとうございます。", hps)
- stn_tst = text1
- with torch.no_grad():
-     x_tst = stn_tst.unsqueeze(0)
-     x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
-     sid = torch.tensor([0])
-     o = net_g(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)
 
spaces/Ameaou/academic-chatgpt3.1/docs/README_JP.md DELETED
@@ -1,302 +0,0 @@
1
- > **Note**
2
- >
3
- > このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。
4
- >
5
-
6
- # <img src="logo.png" width="40" > ChatGPT 学術最適化
7
-
8
- **このプロジェクトが好きだったら、スターをつけてください。もし、より使いやすい学術用のショートカットキーまたはファンクションプラグインを発明した場合は、issueを発行するかpull requestを作成してください。また、このプロジェクト自体によって翻訳されたREADMEは[英語説明書|](docs/README_EN.md)[日本語説明書|](docs/README_JP.md)[ロシア語説明書|](docs/README_RS.md)[フランス語説明書](docs/README_FR.md)もあります。**
9
-
10
- > **注意事項**
11
- >
12
- > 1. **赤色**のラベルが付いているファンクションプラグイン(ボタン)のみファイルを読み込めます。一部のプラグインはプラグインエリアのドロップダウンメニューにあります。新しいプラグインのPRを歓迎いたします!
13
- >
14
- > 2. このプロジェクトの各ファイルの機能は`self_analysis.md`(自己解析レポート)で詳しく説明されています。バージョンが追加されると、関連するファンクションプラグインをクリックして、GPTを呼び出して自己解析レポートを再生成することができます。一般的な質問は`wiki`にまとめられています。(`https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98`)
15
-
16
-
17
- <div align="center">
18
-
19
- 機能 | 説明
20
- --- | ---
21
- ワンクリック整形 | 論文の文法エラーを一括で正確に修正できます。
22
- ワンクリック日英翻訳 | 日英翻訳には、ワンクリックで対応できます。
23
- ワンクリックコード説明 | コードの正しい表示と説明が可能です。
24
- [カスタムショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | カスタムショートカットキーをサポートします。
25
- [プロキシサーバーの設定](https://www.bilibili.com/video/BV1rc411W7Dr) | プロキシサーバーの設定をサポートします。
26
- モジュラーデザイン | カスタム高階関数プラグインと[関数プラグイン]、プラグイン[ホット更新]のサポートが可能です。詳細は[こちら](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
27
- [自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン][ワンクリック理解](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード
28
- [プログラム解析機能](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] ワンクリックで別のPython/C/C++/Java/Lua/...プロジェクトツリーを解析できます。
29
- 論文読解 | [関数プラグイン] LaTeX論文の全文をワンクリックで解読し、要約を生成します。
30
- LaTeX全文翻訳、整形 | [関数プラグイン] ワンクリックでLaTeX論文を翻訳または整形できます。
31
- 注釈生成 | [関数プラグイン] ワンクリックで関数の注釈を大量に生成できます。
32
- チャット分析レポート生成 | [関数プラグイン] 実行後、まとめレポートを自動生成します。
33
- [arxivヘルパー](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] 入力したarxivの記事URLで要約をワンクリック翻訳+PDFダウンロードができます。
34
- [PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文タイトルと要約を抽出し、全文を翻訳します(マルチスレッド)。
35
- [Google Scholar Integratorヘルパー](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが興味深い記事を選択します。
36
- 数式/画像/テーブル表示 | 数式のTex形式とレンダリング形式を同時に表示できます。数式、コードのハイライトをサポートしています。
37
- マルチスレッド関数プラグインサポート | ChatGPTをマルチスレッドで呼び出すことができ、大量のテキストやプログラムを簡単に処理できます。
38
- ダークグラジオ[テーマ](https://github.com/binary-husky/chatgpt_academic/issues/173)の起動 | 「/?__dark-theme=true」というURLをブラウザに追加することで、ダークテーマに切り替えることができます。
39
- [多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)をサポート、[API2D](https://api2d.com/)インターフェースをサポート | GPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)による同時サポートは、と���も素晴らしいですね!
40
- huggingface免科学上网[オンライン版](https://huggingface.co/spaces/qingxu98/gpt-academic) | huggingfaceにログイン後、[このスペース](https://huggingface.co/spaces/qingxu98/gpt-academic)をコピーしてください。
41
- ...... | ......
42
-
43
-
44
- </div>
45
-
46
-
47
- - 新しいインターフェース(config.pyのLAYOUTオプションを変更するだけで、「左右レイアウト」と「上下レイアウト」を切り替えることができます)
48
- <div align="center">
49
- <img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
50
- </div>
51
-
52
-
53
- - すべてのボタンは、functional.pyを読み込んで動的に生成されます。カスタム機能を自由に追加して、クリップボードを解放します
54
- <div align="center">
55
- <img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
56
- </div>
57
-
58
- - 色を修正/修正
59
- <div align="center">
60
- <img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
61
- </div>
62
-
63
- - 出力に数式が含まれている場合、TeX形式とレンダリング形式の両方が表示され、コピーと読み取りが容易になります
64
- <div align="center">
65
- <img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
66
- </div>
67
-
68
- - プロジェクトのコードを見るのが面倒?chatgptに整備されたプロジェクトを直接与えましょう
69
- <div align="center">
70
- <img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
71
- </div>
72
-
73
- - 多数の大規模言語モデルの混合呼び出し(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
74
- <div align="center">
75
- <img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
76
- </div>
77
-
78
- 多数の大規模言語モデルの混合呼び出し[huggingfaceテスト版](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta)(huggigface版はchatglmをサポートしていません)
79
-
80
-
81
- ---
82
-
83
- ## インストール-方法1:直接運転 (Windows、LinuxまたはMacOS)
84
-
85
- 1. プロジェクトをダウンロードします。
86
- ```sh
87
- git clone https://github.com/binary-husky/chatgpt_academic.git
88
- cd chatgpt_academic
89
- ```
90
-
91
- 2. API_KEYとプロキシ設定を構成する
92
-
93
- `config.py`で、海外のProxyとOpenAI API KEYを構成して説明します。
94
- ```
95
- 1.あなたが中国にいる場合、OpenAI APIをスムーズに使用するには海外プロキシを設定する必要があります。構成の詳細については、config.py(1.その中のUSE_PROXYをTrueに変更し、2.手順に従ってプロキシを変更する)を詳細に読んでください。
96
- 2. OpenAI API KEYを構成する。OpenAIのウェブサイトでAPI KEYを取得してください。一旦API KEYを手に入れると、config.pyファイルで設定するだけです。
97
- 3.プロキシネットワークに関連する問題(ネットワークタイムアウト、プロキシが動作しない)をhttps://github.com/binary-husky/chatgpt_academic/issues/1にまとめました。
98
- ```
99
- (P.S. プログラム実行時にconfig.pyの隣にconfig_private.pyという名前のプライバシー設定ファイルを作成し、同じ名前の設定を上書きするconfig_private.pyが存在するかどうかを優先的に確認します。そのため、私たちの構成読み取りロジックを理解できる場合は、config.pyの隣にconfig_private.pyという名前の新しい設定ファイルを作成し、その中のconfig.pyから設定を移動してください。config_private.pyはgitで保守されていないため、プライバシー情報をより安全にすることができます。)
100
-
101
- 3. 依存関係をインストールします。
102
- ```sh
103
- # 選択肢があります。
104
- python -m pip install -r requirements.txt
105
-
106
-
107
- # (選択肢2) もしAnacondaを使用する場合、手順は同様です:
108
- # (選択肢2.1) conda create -n gptac_venv python=3.11
109
- # (選択肢2.2) conda activate gptac_venv
110
- # (選択肢2.3) python -m pip install -r requirements.txt
111
-
112
- # 注: 公式のpipソースまたはAlibabaのpipソースを使用してください。 別のpipソース(例:一部の大学のpip)は問題が発生する可能性があります。 一時的なソースの切り替え方法:
113
- # python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
114
- ```
115
-
116
- もしあなたが清華ChatGLMをサポートする必要がある場合、さらに多くの依存関係をインストールする必要があります(Pythonに慣れない方やコンピューターの設定が十分でない方は、試みないことをお勧めします):
117
- ```sh
118
- python -m pip install -r request_llm/requirements_chatglm.txt
119
- ```
120
-
121
- 4. 実行
122
- ```sh
123
- python main.py
124
- ```
125
-
126
- 5. 関数プラグインのテスト
127
- ```
128
- - Pythonプロジェクト分析のテスト
129
- 入力欄に `./crazy_functions/test_project/python/dqn` と入力し、「Pythonプロジェクト全体の解析」をクリックします。
130
- - 自己コード解読のテスト
131
- 「[マルチスレッドデモ] このプロジェクト自体を解析します(ソースを翻訳して解読します)」をクリックします。
132
- - 実験的な機能テンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。
133
- 「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。
134
- - 関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。
135
- ```
136
-
137
- ## インストール方法2:Dockerを使用する(Linux)
138
-
139
- 1. ChatGPTのみ(大多数の人にお勧めです)
140
- ``` sh
141
- # プロジェクトのダウンロード
142
- git clone https://github.com/binary-husky/chatgpt_academic.git
143
- cd chatgpt_academic
144
- # 海外プロキシとOpenAI API KEYの設定
145
- config.pyを任意のテキストエディタで編集する
146
- # インストール
147
- docker build -t gpt-academic .
148
- # 実行
149
- docker run --rm -it --net=host gpt-academic
150
-
151
- # 関数プラグインのテスト
152
- ## 関数プラグインテンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。
153
- 「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。
154
- ## Latexプロジェクトの要約を書くテスト
155
- 入力欄に./crazy_functions/test_project/latex/attentionと入力し、「テックス論文を読んで要約を書く」をクリックします。
156
- ## Pythonプロジェクト分析のテスト
157
- 入力欄に./crazy_functions/test_project/python/dqnと入力し、[Pythonプロジェクトの全解析]をクリックします。
158
-
159
- 関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。
160
- ```
161
-
162
- 2. ChatGPT + ChatGLM(Dockerに非常に詳しい人+十分なコンピューター設定が必要)
163
-
164
-
165
-
166
- ```sh
167
- # Dockerfileの編集
168
- cd docs && nano Dockerfile+ChatGLM
169
- # ビルド方法
170
- docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
171
- # 実行方法 (1) 直接実行:
172
- docker run --rm -it --net=host --gpus=all gpt-academic
173
- # 実行方法 (2) コンテナに入って調整する:
174
- docker run --rm -it --net=host --gpus=all gpt-academic bash
175
- ```
176
-
177
- ## インストール方法3:その他のデプロイ方法
178
-
179
- 1. クラウドサーバーデプロイ
180
- [デプロイwiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
181
-
182
- 2. WSL2を使用 (Windows Subsystem for Linux)
183
- [デプロイwiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
184
-
185
-
186
- ## インストール-プロキシ設定
187
- 1. 通常の方法
188
- [プロキシを設定する](https://github.com/binary-husky/chatgpt_academic/issues/1)
189
-
190
- 2. 初心者向けチュートリアル
191
- [初心者向けチュートリアル](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
192
-
193
-
194
- ---
195
-
196
- ## カスタムボタンの追加(学術ショートカットキー)
197
-
198
- `core_functional.py`を任意のテキストエディタで開き、以下のエントリーを追加し、プログラムを再起動してください。(ボタンが追加されて表示される場合、前置詞と後置詞はホット編集がサポートされているため、プログラムを再起動せずに即座に有効になります。)
199
-
200
- 例:
201
- ```
202
- "超级英译中": {
203
- # 前置詞 - あなたの要求を説明するために使用されます。翻訳、コードの説明、編集など。
204
- "Prefix": "以下のコンテンツを中国語に翻訳して、マークダウンテーブルを使用して専門用語を説明してください。\n\n",
205
-
206
- # 後置詞 - プレフィックスと共に使用すると、入力内容を引用符で囲むことができます。
207
- "Suffix": "",
208
- },
209
- ```
210
-
211
- <div align="center">
212
- <img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
213
- </div>
214
-
215
-
216
- ---
217
-
218
- ## いくつかの機能の例
219
-
220
- ### 画像表示:
221
-
222
- <div align="center">
223
- <img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
224
- </div>
225
-
226
-
227
- ### プログラムが自己解析できる場合:
228
-
229
- <div align="center">
230
- <img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
231
- </div>
232
-
233
- <div align="center">
234
- <img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
235
- </div>
236
-
237
- ### 他のPython/Cppプロジェクトの解析:
238
-
239
- <div align="center">
240
- <img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
241
- </div>
242
-
243
- <div align="center">
244
- <img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
245
- </div>
246
-
247
- ### Latex論文の一括読解と要約生成
248
-
249
- <div align="center">
250
- <img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
251
- </div>
252
-
253
- ### 自動報告生成
254
-
255
- <div align="center">
256
- <img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
257
- <img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
258
- <img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
259
- </div>
260
-
261
- ### モジュール化された機能デザイン
262
-
263
- <div align="center">
264
- <img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
265
- <img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
266
- </div>
267
-
268
-
269
- ### ソースコードの英語翻訳
270
-
271
- <div align="center">
272
- <img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
273
- </div>
274
-
275
- ## Todo およびバージョン計画:
276
- - version 3.2+ (todo): 関数プラグインがより多くのパラメーターインターフェースをサポートするようになります。
277
- - version 3.1: 複数のgptモデルを同時にクエリし、api2dをサポートし、複数のapikeyの負荷分散をサポートします。
278
- - version 3.0: chatglmおよび他の小型llmのサポート
279
- - version 2.6: プラグイン構造を再構成し、相互作用性を高め、より多くのプラグインを追加しました。
280
- - version 2.5: 自己更新。総括的な大規模プロジェクトのソースコードをまとめた場合、テキストが長すぎる、トークンがオーバーフローする問題を解決します。
281
- - version 2.4: (1)PDF全文翻訳機能を追加。(2)入力エリアの位置を切り替える機能を追加。(3)垂直レイアウトオプションを追加。(4)マルチスレッド関数プラグインの最適化。
282
- - version 2.3: 多スレッドの相互作用性を向上させました。
283
- - version 2.2: 関数プラグインでホットリロードをサポート
284
- - version 2.1: 折りたたみ式レイアウト
285
- - version 2.0: モジュール化された関数プラグインを導入
286
- - version 1.0: 基本機能
287
-
288
- ## 参考および学習
289
-
290
-
291
- 以下は中国語のマークダウンファイルです。日本語に翻訳してください。既存のマークダウンコマンドを変更しないでください:
292
-
293
- ```
294
- 多くの優秀なプロジェクトの設計を参考にしています。主なものは以下の通りです:
295
-
296
- # 参考プロジェクト1:ChuanhuChatGPTから多くのテクニックを借用
297
- https://github.com/GaiZhenbiao/ChuanhuChatGPT
298
-
299
- # 参考プロジェクト2:清華ChatGLM-6B:
300
- https://github.com/THUDM/ChatGLM-6B
301
- ```
302
-
 
spaces/Amrrs/DragGan-Inversion/gui_utils/glfw_window.py DELETED
@@ -1,239 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- import time
10
- import glfw
11
- import OpenGL.GL as gl
12
- from . import gl_utils
13
-
14
- # ----------------------------------------------------------------------------
15
-
16
-
17
- class GlfwWindow: # pylint: disable=too-many-public-methods
18
- def __init__(self, *, title='GlfwWindow', window_width=1920, window_height=1080, deferred_show=True, close_on_esc=True):
19
- self._glfw_window = None
20
- self._drawing_frame = False
21
- self._frame_start_time = None
22
- self._frame_delta = 0
23
- self._fps_limit = None
24
- self._vsync = None
25
- self._skip_frames = 0
26
- self._deferred_show = deferred_show
27
- self._close_on_esc = close_on_esc
28
- self._esc_pressed = False
29
- self._drag_and_drop_paths = None
30
- self._capture_next_frame = False
31
- self._captured_frame = None
32
-
33
- # Create window.
34
- glfw.init()
35
- glfw.window_hint(glfw.VISIBLE, False)
36
- self._glfw_window = glfw.create_window(
37
- width=window_width, height=window_height, title=title, monitor=None, share=None)
38
- self._attach_glfw_callbacks()
39
- self.make_context_current()
40
-
41
- # Adjust window.
42
- self.set_vsync(False)
43
- self.set_window_size(window_width, window_height)
44
- if not self._deferred_show:
45
- glfw.show_window(self._glfw_window)
46
-
47
- def close(self):
48
- if self._drawing_frame:
49
- self.end_frame()
50
- if self._glfw_window is not None:
51
- glfw.destroy_window(self._glfw_window)
52
- self._glfw_window = None
53
- # glfw.terminate() # Commented out to play it nice with other glfw clients.
54
-
55
- def __del__(self):
56
- try:
57
- self.close()
58
- except:
59
- pass
60
-
61
- @property
62
- def window_width(self):
63
- return self.content_width
64
-
65
- @property
66
- def window_height(self):
67
- return self.content_height + self.title_bar_height
68
-
69
- @property
70
- def content_width(self):
71
- width, _height = glfw.get_window_size(self._glfw_window)
72
- return width
73
-
74
- @property
75
- def content_height(self):
76
- _width, height = glfw.get_window_size(self._glfw_window)
77
- return height
78
-
79
- @property
80
- def title_bar_height(self):
81
- _left, top, _right, _bottom = glfw.get_window_frame_size(
82
- self._glfw_window)
83
- return top
84
-
85
- @property
86
- def monitor_width(self):
87
- _, _, width, _height = glfw.get_monitor_workarea(
88
- glfw.get_primary_monitor())
89
- return width
90
-
91
- @property
92
- def monitor_height(self):
93
- _, _, _width, height = glfw.get_monitor_workarea(
94
- glfw.get_primary_monitor())
95
- return height
96
-
97
- @property
98
- def frame_delta(self):
99
- return self._frame_delta
100
-
101
- def set_title(self, title):
102
- glfw.set_window_title(self._glfw_window, title)
103
-
104
- def set_window_size(self, width, height):
105
- width = min(width, self.monitor_width)
106
- height = min(height, self.monitor_height)
107
- glfw.set_window_size(self._glfw_window, width, max(
108
- height - self.title_bar_height, 0))
109
- if width == self.monitor_width and height == self.monitor_height:
110
- self.maximize()
111
-
112
- def set_content_size(self, width, height):
113
- self.set_window_size(width, height + self.title_bar_height)
114
-
115
- def maximize(self):
116
- glfw.maximize_window(self._glfw_window)
117
-
118
- def set_position(self, x, y):
119
- glfw.set_window_pos(self._glfw_window, x, y + self.title_bar_height)
120
-
121
- def center(self):
122
- self.set_position((self.monitor_width - self.window_width) //
123
- 2, (self.monitor_height - self.window_height) // 2)
124
-
125
- def set_vsync(self, vsync):
126
- vsync = bool(vsync)
127
- if vsync != self._vsync:
128
- glfw.swap_interval(1 if vsync else 0)
129
- self._vsync = vsync
130
-
131
- def set_fps_limit(self, fps_limit):
132
- self._fps_limit = int(fps_limit)
133
-
134
- def should_close(self):
135
- return glfw.window_should_close(self._glfw_window) or (self._close_on_esc and self._esc_pressed)
136
-
137
- def skip_frame(self):
138
- self.skip_frames(1)
139
-
140
- def skip_frames(self, num): # Do not update window for the next N frames.
141
- self._skip_frames = max(self._skip_frames, int(num))
142
-
143
- def is_skipping_frames(self):
144
- return self._skip_frames > 0
145
-
146
- def capture_next_frame(self):
147
- self._capture_next_frame = True
148
-
149
- def pop_captured_frame(self):
150
- frame = self._captured_frame
151
- self._captured_frame = None
152
- return frame
153
-
154
- def pop_drag_and_drop_paths(self):
155
- paths = self._drag_and_drop_paths
156
- self._drag_and_drop_paths = None
157
- return paths
158
-
159
- def draw_frame(self): # To be overridden by subclass.
160
- self.begin_frame()
161
- # Rendering code goes here.
162
- self.end_frame()
163
-
164
- def make_context_current(self):
165
- if self._glfw_window is not None:
166
- glfw.make_context_current(self._glfw_window)
167
-
168
- def begin_frame(self):
169
- # End previous frame.
170
- if self._drawing_frame:
171
- self.end_frame()
172
-
173
- # Apply FPS limit.
174
- if self._frame_start_time is not None and self._fps_limit is not None:
175
- delay = self._frame_start_time - time.perf_counter() + 1 / self._fps_limit
176
- if delay > 0:
177
- time.sleep(delay)
178
- cur_time = time.perf_counter()
179
- if self._frame_start_time is not None:
180
- self._frame_delta = cur_time - self._frame_start_time
181
- self._frame_start_time = cur_time
182
-
183
- # Process events.
184
- glfw.poll_events()
185
-
186
- # Begin frame.
187
- self._drawing_frame = True
188
- self.make_context_current()
189
-
190
- # Initialize GL state.
191
- gl.glViewport(0, 0, self.content_width, self.content_height)
192
- gl.glMatrixMode(gl.GL_PROJECTION)
193
- gl.glLoadIdentity()
194
- gl.glTranslate(-1, 1, 0)
195
- gl.glScale(2 / max(self.content_width, 1), -
196
- 2 / max(self.content_height, 1), 1)
197
- gl.glMatrixMode(gl.GL_MODELVIEW)
198
- gl.glLoadIdentity()
199
- gl.glEnable(gl.GL_BLEND)
200
- # Pre-multiplied alpha.
201
- gl.glBlendFunc(gl.GL_ONE, gl.GL_ONE_MINUS_SRC_ALPHA)
202
-
203
- # Clear.
204
- gl.glClearColor(0, 0, 0, 1)
205
- gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
206
-
207
- def end_frame(self):
208
- assert self._drawing_frame
209
- self._drawing_frame = False
210
-
211
- # Skip frames if requested.
212
- if self._skip_frames > 0:
213
- self._skip_frames -= 1
214
- return
215
-
216
- # Capture frame if requested.
217
- if self._capture_next_frame:
218
- self._captured_frame = gl_utils.read_pixels(
219
- self.content_width, self.content_height)
220
- self._capture_next_frame = False
221
-
222
- # Update window.
223
- if self._deferred_show:
224
- glfw.show_window(self._glfw_window)
225
- self._deferred_show = False
226
- glfw.swap_buffers(self._glfw_window)
227
-
228
- def _attach_glfw_callbacks(self):
229
- glfw.set_key_callback(self._glfw_window, self._glfw_key_callback)
230
- glfw.set_drop_callback(self._glfw_window, self._glfw_drop_callback)
231
-
232
- def _glfw_key_callback(self, _window, key, _scancode, action, _mods):
233
- if action == glfw.PRESS and key == glfw.KEY_ESCAPE:
234
- self._esc_pressed = True
235
-
236
- def _glfw_drop_callback(self, _window, paths):
237
- self._drag_and_drop_paths = paths
238
-
239
- # ----------------------------------------------------------------------------
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/conceptual/evaluation.md DELETED
@@ -1,572 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Evaluating Diffusion Models
14
-
15
- <a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/evaluation.ipynb">
16
- <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
17
- </a>
18
-
19
- Evaluation of generative models like [Stable Diffusion](https://huggingface.co/docs/diffusers/stable_diffusion) is subjective in nature. But as practitioners and researchers, we often have to make careful choices amongst many different possibilities. So, when working with different generative models (like GANs, Diffusion, etc.), how do we choose one over the other?
20
-
21
- Qualitative evaluation of such models can be error-prone and might incorrectly influence a decision.
22
- However, quantitative metrics don't necessarily correspond to image quality. So, usually, a combination
23
- of both qualitative and quantitative evaluations provides a stronger signal when choosing one model
24
- over the other.
25
-
26
- In this document, we provide a non-exhaustive overview of qualitative and quantitative methods to evaluate Diffusion models. For quantitative methods, we specifically focus on how to implement them alongside `diffusers`.
27
-
28
- The methods shown in this document can also be used to evaluate different [noise schedulers](https://huggingface.co/docs/diffusers/main/en/api/schedulers/overview) keeping the underlying generation model fixed.
29
-
30
- ## Scenarios
31
-
32
- We cover Diffusion models with the following pipelines:
33
-
34
- - Text-guided image generation (such as the [`StableDiffusionPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/text2img)).
35
- - Text-guided image generation, additionally conditioned on an input image (such as the [`StableDiffusionImg2ImgPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/img2img), and [`StableDiffusionInstructPix2PixPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix)).
36
- - Class-conditioned image generation models (such as the [`DiTPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/dit)).
37
-
38
- ## Qualitative Evaluation
39
-
40
- Qualitative evaluation typically involves human assessment of generated images. Quality is measured across aspects such as compositionality, image-text alignment, and spatial relations. Common prompts provide a degree of uniformity for subjective metrics.
41
- DrawBench and PartiPrompts are prompt datasets used for qualitative benchmarking; they were introduced by [Imagen](https://imagen.research.google/) and [Parti](https://parti.research.google/), respectively.
42
-
43
- From the [official Parti website](https://parti.research.google/):
44
-
45
- > PartiPrompts (P2) is a rich set of over 1600 prompts in English that we release as part of this work. P2 can be used to measure model capabilities across various categories and challenge aspects.
46
-
47
- ![parti-prompts](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/parti-prompts.png)
48
-
49
- PartiPrompts has the following columns:
50
-
51
- - Prompt
52
- - Category of the prompt (such as “Abstract”, “World Knowledge”, etc.)
53
- - Challenge reflecting the difficulty (such as “Basic”, “Complex”, “Writing & Symbols”, etc.)
54
-
55
- These benchmarks allow for side-by-side human evaluation of different image generation models.
56
-
57
- For this, the 🧨 Diffusers team has built **Open Parti Prompts**, which is a community-driven qualitative benchmark based on Parti Prompts to compare state-of-the-art open-source diffusion models:
58
- - [Open Parti Prompts Game](https://huggingface.co/spaces/OpenGenAI/open-parti-prompts): For 10 parti prompts, 4 generated images are shown and the user selects the image that suits the prompt best.
59
- - [Open Parti Prompts Leaderboard](https://huggingface.co/spaces/OpenGenAI/parti-prompts-leaderboard): The leaderboard comparing the currently best open-sourced diffusion models to each other.
60
-
61
- To manually compare images, let’s see how we can use `diffusers` on a couple of PartiPrompts.
62
-
63
- Below we show some prompts sampled across different challenges: Basic, Complex, Linguistic Structures, Imagination, and Writing & Symbols. Here we are using PartiPrompts as a [dataset](https://huggingface.co/datasets/nateraw/parti-prompts).
64
-
65
- ```python
66
- from datasets import load_dataset
67
-
68
- # prompts = load_dataset("nateraw/parti-prompts", split="train")
69
- # prompts = prompts.shuffle()
70
- # sample_prompts = [prompts[i]["Prompt"] for i in range(5)]
71
-
72
- # Fixing these sample prompts in the interest of reproducibility.
73
- sample_prompts = [
74
- "a corgi",
75
- "a hot air balloon with a yin-yang symbol, with the moon visible in the daytime sky",
76
- "a car with no windows",
77
- "a cube made of porcupine",
78
- 'The saying "BE EXCELLENT TO EACH OTHER" written on a red brick wall with a graffiti image of a green alien wearing a tuxedo. A yellow fire hydrant is on a sidewalk in the foreground.',
79
- ]
80
- ```
81
-
82
- Now we can use these prompts to generate some images using Stable Diffusion ([v1-4 checkpoint](https://huggingface.co/CompVis/stable-diffusion-v1-4)):
83
-
84
- ```python
85
- import torch
86
-
87
- seed = 0
88
- generator = torch.manual_seed(seed)
89
-
90
- images = sd_pipeline(sample_prompts, num_images_per_prompt=1, generator=generator, output_type="numpy").images
91
- ```
92
-
93
- ![parti-prompts-14](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/parti-prompts-14.png)
94
-
95
- We can also set `num_images_per_prompt` accordingly to compare different images for the same prompt. Running the same pipeline but with a different checkpoint ([v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)), yields:
96
-
97
- ![parti-prompts-15](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/parti-prompts-15.png)
98
-
99
- Once several images are generated from all the prompts using multiple models (under evaluation), these results are presented to human evaluators for scoring. For
100
- more details on the DrawBench and PartiPrompts benchmarks, refer to their respective papers.
101
-
102
- <Tip>
103
-
104
- It is useful to look at some inference samples while a model is training to measure the
105
- training progress. In our [training scripts](https://github.com/huggingface/diffusers/tree/main/examples/), we support this utility with additional support for
106
- logging to TensorBoard and Weights & Biases.
107
-
108
- </Tip>
109
-
110
- ## Quantitative Evaluation
111
-
112
- In this section, we will walk you through how to evaluate three different diffusion pipelines using:
113
-
114
- - CLIP score
115
- - CLIP directional similarity
116
- - FID
117
-
118
- ### Text-guided image generation
119
-
120
- [CLIP score](https://arxiv.org/abs/2104.08718) measures the compatibility of image-caption pairs. Higher CLIP scores imply higher compatibility 🔼. The CLIP score is a quantitative measurement of the qualitative concept "compatibility". Image-caption pair compatibility can also be thought of as the semantic similarity between the image and the caption. CLIP score was found to have high correlation with human judgement.
121
-
122
- Let's first load a [`StableDiffusionPipeline`]:
123
-
124
- ```python
125
- from diffusers import StableDiffusionPipeline
126
- import torch
127
-
128
- model_ckpt = "CompVis/stable-diffusion-v1-4"
129
- sd_pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16).to("cuda")
130
- ```
131
-
132
- Generate some images with multiple prompts:
133
-
134
- ```python
135
- prompts = [
136
- "a photo of an astronaut riding a horse on mars",
137
- "A high tech solarpunk utopia in the Amazon rainforest",
138
- "A pikachu fine dining with a view to the Eiffel Tower",
139
- "A mecha robot in a favela in expressionist style",
140
- "an insect robot preparing a delicious meal",
141
- "A small cabin on top of a snowy mountain in the style of Disney, artstation",
142
- ]
143
-
144
- images = sd_pipeline(prompts, num_images_per_prompt=1, output_type="numpy").images
145
-
146
- print(images.shape)
147
- # (6, 512, 512, 3)
148
- ```
149
-
150
- And then, we calculate the CLIP score.
151
-
152
- ```python
153
- from torchmetrics.functional.multimodal import clip_score
154
- from functools import partial
155
-
156
- clip_score_fn = partial(clip_score, model_name_or_path="openai/clip-vit-base-patch16")
157
-
158
-
159
- def calculate_clip_score(images, prompts):
160
- images_int = (images * 255).astype("uint8")
161
- clip_score = clip_score_fn(torch.from_numpy(images_int).permute(0, 3, 1, 2), prompts).detach()
162
- return round(float(clip_score), 4)
163
-
164
-
165
- sd_clip_score = calculate_clip_score(images, prompts)
166
- print(f"CLIP score: {sd_clip_score}")
167
- # CLIP score: 35.7038
168
- ```
169
-
170
- In the above example, we generated one image per prompt. If we generated multiple images per prompt, we would have to take the average score from the generated images per prompt.
171
-
172
- Now, if we wanted to compare two checkpoints compatible with the [`StableDiffusionPipeline`] we should pass a generator while calling the pipeline. First, we generate images with a
173
- fixed seed with the [v1-4 Stable Diffusion checkpoint](https://huggingface.co/CompVis/stable-diffusion-v1-4):
174
-
175
- ```python
176
- seed = 0
177
- generator = torch.manual_seed(seed)
178
-
179
- images = sd_pipeline(prompts, num_images_per_prompt=1, generator=generator, output_type="numpy").images
180
- ```
181
-
182
- Then we load the [v1-5 checkpoint](https://huggingface.co/runwayml/stable-diffusion-v1-5) to generate images:
183
-
184
- ```python
185
- model_ckpt_1_5 = "runwayml/stable-diffusion-v1-5"
186
- sd_pipeline_1_5 = StableDiffusionPipeline.from_pretrained(model_ckpt_1_5, torch_dtype=torch.float16).to(device)
187
-
188
- images_1_5 = sd_pipeline_1_5(prompts, num_images_per_prompt=1, generator=generator, output_type="numpy").images
189
- ```
190
-
191
- And finally, we compare their CLIP scores:
192
-
193
- ```python
194
- sd_clip_score_1_4 = calculate_clip_score(images, prompts)
195
- print(f"CLIP Score with v-1-4: {sd_clip_score_1_4}")
196
- # CLIP Score with v-1-4: 34.9102
197
-
198
- sd_clip_score_1_5 = calculate_clip_score(images_1_5, prompts)
199
- print(f"CLIP Score with v-1-5: {sd_clip_score_1_5}")
200
- # CLIP Score with v-1-5: 36.2137
201
- ```
202
-
203
- It seems like the [v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint performs better than its predecessor. Note, however, that the number of prompts we used to compute the CLIP scores is quite low. For a more practical evaluation, this number should be way higher, and the prompts should be diverse.
204
-
205
- <Tip warning={true}>
206
-
207
- By construction, there are some limitations in this score. The captions in the training dataset
208
- were crawled from the web and extracted from `alt` and similar tags associated with an image on the internet.
209
- They are not necessarily representative of what a human being would use to describe an image. Hence we
210
- had to "engineer" some prompts here.
211
-
212
- </Tip>
213
-
214
- ### Image-conditioned text-to-image generation
215
-
216
- In this case, we condition the generation pipeline with an input image as well as a text prompt. Let's take the [`StableDiffusionInstructPix2PixPipeline`], as an example. It takes an edit instruction as an input prompt and an input image to be edited.
217
-
218
- Here is one example:
219
-
220
- ![edit-instruction](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png)
221
-
222
- One strategy to evaluate such a model is to measure the consistency of the change between the two images (in [CLIP](https://huggingface.co/docs/transformers/model_doc/clip) space) with the change between the two image captions (as shown in [CLIP-Guided Domain Adaptation of Image Generators](https://arxiv.org/abs/2108.00946)). This is referred to as the "**CLIP directional similarity**".
223
-
224
- - Caption 1 corresponds to the input image (image 1) that is to be edited.
225
- - Caption 2 corresponds to the edited image (image 2). It should reflect the edit instruction.
226
-
227
- Following is a pictorial overview:
228
-
229
- ![edit-consistency](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-consistency.png)
230
-
231
- We have prepared a mini dataset to implement this metric. Let's first load the dataset.
232
-
233
- ```python
234
- from datasets import load_dataset
235
-
236
- dataset = load_dataset("sayakpaul/instructpix2pix-demo", split="train")
237
- dataset.features
238
- ```
239
-
240
- ```bash
241
- {'input': Value(dtype='string', id=None),
242
- 'edit': Value(dtype='string', id=None),
243
- 'output': Value(dtype='string', id=None),
244
- 'image': Image(decode=True, id=None)}
245
- ```
246
-
247
- Here we have:
248
-
249
- - `input` is a caption corresponding to the `image`.
250
- - `edit` denotes the edit instruction.
251
- - `output` denotes the modified caption reflecting the `edit` instruction.
252
-
253
- Let's take a look at a sample.
254
-
255
- ```python
256
- idx = 0
257
- print(f"Original caption: {dataset[idx]['input']}")
258
- print(f"Edit instruction: {dataset[idx]['edit']}")
259
- print(f"Modified caption: {dataset[idx]['output']}")
260
- ```
261
-
262
- ```bash
263
- Original caption: 2. FAROE ISLANDS: An archipelago of 18 mountainous isles in the North Atlantic Ocean between Norway and Iceland, the Faroe Islands has 'everything you could hope for', according to Big 7 Travel. It boasts 'crystal clear waterfalls, rocky cliffs that seem to jut out of nowhere and velvety green hills'
264
- Edit instruction: make the isles all white marble
265
- Modified caption: 2. WHITE MARBLE ISLANDS: An archipelago of 18 mountainous white marble isles in the North Atlantic Ocean between Norway and Iceland, the White Marble Islands has 'everything you could hope for', according to Big 7 Travel. It boasts 'crystal clear waterfalls, rocky cliffs that seem to jut out of nowhere and velvety green hills'
266
- ```
267
-
268
- And here is the image:
269
-
270
- ```python
271
- dataset[idx]["image"]
272
- ```
273
-
274
- ![edit-dataset](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-dataset.png)
275
-
276
- We will first edit the images of our dataset with the edit instruction and compute the directional similarity.
277
-
278
- Let's first load the [`StableDiffusionInstructPix2PixPipeline`]:
279
-
280
- ```python
281
- from diffusers import StableDiffusionInstructPix2PixPipeline
282
-
283
- instruct_pix2pix_pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
284
- "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
285
- ).to(device)
286
- ```
287
-
288
- Now, we perform the edits:
289
-
290
- ```python
291
- import numpy as np
292
-
293
-
294
- def edit_image(input_image, instruction):
295
- image = instruct_pix2pix_pipeline(
296
- instruction,
297
- image=input_image,
298
- output_type="numpy",
299
- generator=generator,
300
- ).images[0]
301
- return image
302
-
303
-
304
- input_images = []
305
- original_captions = []
306
- modified_captions = []
307
- edited_images = []
308
-
309
- for idx in range(len(dataset)):
310
- input_image = dataset[idx]["image"]
311
- edit_instruction = dataset[idx]["edit"]
312
- edited_image = edit_image(input_image, edit_instruction)
313
-
314
- input_images.append(np.array(input_image))
315
- original_captions.append(dataset[idx]["input"])
316
- modified_captions.append(dataset[idx]["output"])
317
- edited_images.append(edited_image)
318
- ```
319
-
320
- To measure the directional similarity, we first load CLIP's image and text encoders:
321
-
322
- ```python
323
- from transformers import (
324
- CLIPTokenizer,
325
- CLIPTextModelWithProjection,
326
- CLIPVisionModelWithProjection,
327
- CLIPImageProcessor,
328
- )
329
-
330
- clip_id = "openai/clip-vit-large-patch14"
331
- tokenizer = CLIPTokenizer.from_pretrained(clip_id)
332
- text_encoder = CLIPTextModelWithProjection.from_pretrained(clip_id).to(device)
333
- image_processor = CLIPImageProcessor.from_pretrained(clip_id)
334
- image_encoder = CLIPVisionModelWithProjection.from_pretrained(clip_id).to(device)
335
- ```
336
-
337
- Notice that we are using a particular CLIP checkpoint, i.e., `openai/clip-vit-large-patch14`. This is because the Stable Diffusion pre-training was performed with this CLIP variant. For more details, refer to the [documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix#diffusers.StableDiffusionInstructPix2PixPipeline.text_encoder).
338
-
339
- Next, we prepare a PyTorch `nn.Module` to compute directional similarity:
340
-
341
- ```python
342
- import torch.nn as nn
343
- import torch.nn.functional as F
344
-
345
-
346
- class DirectionalSimilarity(nn.Module):
347
- def __init__(self, tokenizer, text_encoder, image_processor, image_encoder):
348
- super().__init__()
349
- self.tokenizer = tokenizer
350
- self.text_encoder = text_encoder
351
- self.image_processor = image_processor
352
- self.image_encoder = image_encoder
353
-
354
- def preprocess_image(self, image):
355
- image = self.image_processor(image, return_tensors="pt")["pixel_values"]
356
- return {"pixel_values": image.to(device)}
357
-
358
- def tokenize_text(self, text):
359
- inputs = self.tokenizer(
360
- text,
361
- max_length=self.tokenizer.model_max_length,
362
- padding="max_length",
363
- truncation=True,
364
- return_tensors="pt",
365
- )
366
- return {"input_ids": inputs.input_ids.to(device)}
367
-
368
- def encode_image(self, image):
369
- preprocessed_image = self.preprocess_image(image)
370
- image_features = self.image_encoder(**preprocessed_image).image_embeds
371
- image_features = image_features / image_features.norm(dim=1, keepdim=True)
372
- return image_features
373
-
374
- def encode_text(self, text):
375
- tokenized_text = self.tokenize_text(text)
376
- text_features = self.text_encoder(**tokenized_text).text_embeds
377
- text_features = text_features / text_features.norm(dim=1, keepdim=True)
378
- return text_features
379
-
380
- def compute_directional_similarity(self, img_feat_one, img_feat_two, text_feat_one, text_feat_two):
381
- sim_direction = F.cosine_similarity(img_feat_two - img_feat_one, text_feat_two - text_feat_one)
382
- return sim_direction
383
-
384
- def forward(self, image_one, image_two, caption_one, caption_two):
385
- img_feat_one = self.encode_image(image_one)
386
- img_feat_two = self.encode_image(image_two)
387
- text_feat_one = self.encode_text(caption_one)
388
- text_feat_two = self.encode_text(caption_two)
389
- directional_similarity = self.compute_directional_similarity(
390
- img_feat_one, img_feat_two, text_feat_one, text_feat_two
391
- )
392
- return directional_similarity
393
- ```
394
-
395
- Let's put `DirectionalSimilarity` to use now.
396
-
397
- ```python
398
- dir_similarity = DirectionalSimilarity(tokenizer, text_encoder, image_processor, image_encoder)
399
- scores = []
400
-
401
- for i in range(len(input_images)):
402
- original_image = input_images[i]
403
- original_caption = original_captions[i]
404
- edited_image = edited_images[i]
405
- modified_caption = modified_captions[i]
406
-
407
- similarity_score = dir_similarity(original_image, edited_image, original_caption, modified_caption)
408
- scores.append(float(similarity_score.detach().cpu()))
409
-
410
- print(f"CLIP directional similarity: {np.mean(scores)}")
411
- # CLIP directional similarity: 0.0797976553440094
412
- ```
413
-
414
- Like the CLIP Score, the higher the CLIP directional similarity, the better it is.
415
-
416
- It should be noted that the `StableDiffusionInstructPix2PixPipeline` exposes two arguments, namely, `image_guidance_scale` and `guidance_scale` that let you control the quality of the final edited image. We encourage you to experiment with these two arguments and see the impact of that on the directional similarity.
417
-
418
- We can extend the idea of this metric to measure how similar the original image and edited version are. To do that, we can just do `F.cosine_similarity(img_feat_two, img_feat_one)`. For these kinds of edits, we would still want the primary semantics of the images to be preserved as much as possible, i.e., a high similarity score.
419
-
420
- We can use these metrics for similar pipelines such as the [`StableDiffusionPix2PixZeroPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix_zero#diffusers.StableDiffusionPix2PixZeroPipeline).
421
-
422
- <Tip>
423
-
424
- Both CLIP score and CLIP directional similarity rely on the CLIP model, which can make the evaluations biased.
425
-
426
- </Tip>
427
-
428
- ***Extending metrics like IS, FID (discussed later), or KID can be difficult*** when the model under evaluation was pre-trained on a large image-captioning dataset (such as the [LAION-5B dataset](https://laion.ai/blog/laion-5b/)). This is because underlying these metrics is an InceptionNet (pre-trained on the ImageNet-1k dataset) used for extracting intermediate image features. The pre-training dataset of Stable Diffusion may have limited overlap with the pre-training dataset of InceptionNet, so it is not a good candidate here for feature extraction.
429
-
430
- ***Using the above metrics helps evaluate models that are class-conditioned, for example, [DiT](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/overview), which was pre-trained conditioned on the ImageNet-1k classes.***
431
-
432
- ### Class-conditioned image generation
433
-
434
- Class-conditioned generative models are usually pre-trained on a class-labeled dataset such as [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k). Popular metrics for evaluating these models include Fréchet Inception Distance (FID), Kernel Inception Distance (KID), and Inception Score (IS). In this document, we focus on FID ([Heusel et al.](https://arxiv.org/abs/1706.08500)). We show how to compute it with the [`DiTPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/dit), which uses the [DiT model](https://arxiv.org/abs/2212.09748) under the hood.
435
-
436
- FID aims to measure how similar two datasets of images are. As per [this resource](https://mmgeneration.readthedocs.io/en/latest/quick_run.html#fid):
437
-
438
- > Fréchet Inception Distance is a measure of similarity between two datasets of images. It was shown to correlate well with the human judgment of visual quality and is most often used to evaluate the quality of samples of Generative Adversarial Networks. FID is calculated by computing the Fréchet distance between two Gaussians fitted to feature representations of the Inception network.
439
-
440
- These two datasets are essentially the dataset of real images and the dataset of fake images (generated images in our case). FID is usually calculated with two large datasets. However, for this document, we will work with two mini datasets.
441
-
442
- Let's first download a few images from the ImageNet-1k training set:
443
-
444
- ```python
445
- from zipfile import ZipFile
446
- import requests
447
-
448
-
449
- def download(url, local_filepath):
450
- r = requests.get(url)
451
- with open(local_filepath, "wb") as f:
452
- f.write(r.content)
453
- return local_filepath
454
-
455
-
456
- dummy_dataset_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/sample-imagenet-images.zip"
457
- local_filepath = download(dummy_dataset_url, dummy_dataset_url.split("/")[-1])
458
-
459
- with ZipFile(local_filepath, "r") as zipper:
460
- zipper.extractall(".")
461
- ```
462
-
463
- ```python
464
- from PIL import Image
465
- import os
466
-
467
- dataset_path = "sample-imagenet-images"
468
- image_paths = sorted([os.path.join(dataset_path, x) for x in os.listdir(dataset_path)])
469
-
470
- real_images = [np.array(Image.open(path).convert("RGB")) for path in image_paths]
471
- ```
472
-
473
- These are 10 images from the following Imagenet-1k classes: "cassette_player", "chain_saw" (x2), "church", "gas_pump" (x3), "parachute" (x2), and "tench".
474
-
475
- <p align="center">
476
- <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/real-images.png" alt="real-images"><br>
477
- <em>Real images.</em>
478
- </p>
479
-
480
- Now that the images are loaded, let's apply some lightweight pre-processing on them to use them for FID calculation.
481
-
482
- ```python
483
- from torchvision.transforms import functional as F
484
-
485
-
486
- def preprocess_image(image):
487
- image = torch.tensor(image).unsqueeze(0)
488
- image = image.permute(0, 3, 1, 2) / 255.0
489
- return F.center_crop(image, (256, 256))
490
-
491
-
492
- real_images = torch.cat([preprocess_image(image) for image in real_images])
493
- print(real_images.shape)
494
- # torch.Size([10, 3, 256, 256])
495
- ```
496
-
497
- We now load the [`DiTPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/dit) to generate images conditioned on the above-mentioned classes.
498
-
499
- ```python
500
- from diffusers import DiTPipeline, DPMSolverMultistepScheduler
501
-
502
- dit_pipeline = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
503
- dit_pipeline.scheduler = DPMSolverMultistepScheduler.from_config(dit_pipeline.scheduler.config)
504
- dit_pipeline = dit_pipeline.to("cuda")
505
-
506
- words = [
507
- "cassette player",
508
- "chainsaw",
509
- "chainsaw",
510
- "church",
511
- "gas pump",
512
- "gas pump",
513
- "gas pump",
514
- "parachute",
515
- "parachute",
516
- "tench",
517
- ]
518
-
519
- class_ids = dit_pipeline.get_label_ids(words)
520
- output = dit_pipeline(class_labels=class_ids, generator=generator, output_type="numpy")
521
-
522
- fake_images = output.images
523
- fake_images = torch.tensor(fake_images)
524
- fake_images = fake_images.permute(0, 3, 1, 2)
525
- print(fake_images.shape)
526
- # torch.Size([10, 3, 256, 256])
527
- ```
528
-
529
- Now, we can compute the FID using [`torchmetrics`](https://torchmetrics.readthedocs.io/).
530
-
531
- ```python
532
- from torchmetrics.image.fid import FrechetInceptionDistance
533
-
534
- fid = FrechetInceptionDistance(normalize=True)
535
- fid.update(real_images, real=True)
536
- fid.update(fake_images, real=False)
537
-
538
- print(f"FID: {float(fid.compute())}")
539
- # FID: 177.7147216796875
540
- ```
541
-
542
- The lower the FID, the better it is. Several things can influence FID here:
543
-
544
- - Number of images (both real and fake)
545
- - Randomness induced in the diffusion process
546
- - Number of inference steps in the diffusion process
547
- - The scheduler being used in the diffusion process
548
-
549
- For the last two points, it is, therefore, a good practice to run the evaluation across different seeds and inference steps, and then report an average result.
550
-
551
- <Tip warning={true}>
552
-
553
- FID results tend to be fragile as they depend on a lot of factors:
554
-
555
- * The specific Inception model used during computation.
556
- * The implementation accuracy of the computation.
557
- * The image format (not the same if we start from PNGs vs JPGs).
558
-
559
- Keeping that in mind, FID is often most useful when comparing similar runs, but it is
560
- hard to reproduce paper results unless the authors carefully disclose the FID
561
- measurement code.
562
-
563
- These points apply to other related metrics too, such as KID and IS.
564
-
565
- </Tip>
566
-
567
- As a final step, let's visually inspect the `fake_images`.
568
-
569
- <p align="center">
570
- <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/fake-images.png" alt="fake-images"><br>
571
- <em>Fake images.</em>
572
- </p>
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py DELETED
@@ -1,420 +0,0 @@
1
- # Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import math
16
- from collections import defaultdict
17
- from typing import List, Optional, Tuple, Union
18
-
19
- import numpy as np
20
- import torch
21
-
22
- from ..configuration_utils import ConfigMixin, register_to_config
23
- from ..utils import randn_tensor
24
- from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
25
-
26
-
27
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
28
- def betas_for_alpha_bar(
29
- num_diffusion_timesteps,
30
- max_beta=0.999,
31
- alpha_transform_type="cosine",
32
- ):
33
- """
34
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
35
- (1-beta) over time from t = [0,1].
36
-
37
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
38
- to that part of the diffusion process.
39
-
40
-
41
- Args:
42
- num_diffusion_timesteps (`int`): the number of betas to produce.
43
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
44
- prevent singularities.
45
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
46
- Choose from `cosine` or `exp`
47
-
48
- Returns:
49
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
50
- """
51
- if alpha_transform_type == "cosine":
52
-
53
- def alpha_bar_fn(t):
54
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
55
-
56
- elif alpha_transform_type == "exp":
57
-
58
- def alpha_bar_fn(t):
59
- return math.exp(t * -12.0)
60
-
61
- else:
62
- raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
63
-
64
- betas = []
65
- for i in range(num_diffusion_timesteps):
66
- t1 = i / num_diffusion_timesteps
67
- t2 = (i + 1) / num_diffusion_timesteps
68
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
69
- return torch.tensor(betas, dtype=torch.float32)
70
-
71
-
72
- class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
73
- """
74
- Scheduler created by @crowsonkb in [k_diffusion](https://github.com/crowsonkb/k-diffusion), see:
75
- https://github.com/crowsonkb/k-diffusion/blob/5b3af030dd83e0297272d861c19477735d0317ec/k_diffusion/sampling.py#L188
76
-
77
- Scheduler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022).
78
-
79
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
80
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
81
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
82
- [`~SchedulerMixin.from_pretrained`] functions.
83
-
84
- Args:
85
- num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the
86
- starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`):
87
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
88
- `linear` or `scaled_linear`.
89
- trained_betas (`np.ndarray`, optional):
90
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
91
- options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
92
- `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
93
- prediction_type (`str`, default `epsilon`, optional):
94
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
95
- process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
96
- https://imagen.research.google/video/paper.pdf)
97
- timestep_spacing (`str`, default `"linspace"`):
98
- The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
99
- Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
100
- steps_offset (`int`, default `0`):
101
- an offset added to the inference steps. You can use a combination of `offset=1` and
102
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
103
- stable diffusion.
104
- """
105
-
106
- _compatibles = [e.name for e in KarrasDiffusionSchedulers]
107
- order = 2
108
-
109
- @register_to_config
110
- def __init__(
111
- self,
112
- num_train_timesteps: int = 1000,
113
- beta_start: float = 0.00085, # sensible defaults
114
- beta_end: float = 0.012,
115
- beta_schedule: str = "linear",
116
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
117
- prediction_type: str = "epsilon",
118
- timestep_spacing: str = "linspace",
119
- steps_offset: int = 0,
120
- ):
121
- if trained_betas is not None:
122
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
123
- elif beta_schedule == "linear":
124
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
125
- elif beta_schedule == "scaled_linear":
126
- # this schedule is very specific to the latent diffusion model.
127
- self.betas = (
128
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
129
- )
130
- elif beta_schedule == "squaredcos_cap_v2":
131
- # Glide cosine schedule
132
- self.betas = betas_for_alpha_bar(num_train_timesteps)
133
- else:
134
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
135
-
136
- self.alphas = 1.0 - self.betas
137
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
138
-
139
- # set all values
140
- self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
141
-
142
- # Copied from diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler.index_for_timestep
143
- def index_for_timestep(self, timestep, schedule_timesteps=None):
144
- if schedule_timesteps is None:
145
- schedule_timesteps = self.timesteps
146
-
147
- indices = (schedule_timesteps == timestep).nonzero()
148
-
149
- # The sigma index that is taken for the **very** first `step`
150
- # is always the second index (or the last index if there is only 1)
151
- # This way we can ensure we don't accidentally skip a sigma in
152
- # case we start in the middle of the denoising schedule (e.g. for image-to-image)
153
- if len(self._index_counter) == 0:
154
- pos = 1 if len(indices) > 1 else 0
155
- else:
156
- timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
157
- pos = self._index_counter[timestep_int]
158
-
159
- return indices[pos].item()
160
-
161
- @property
162
- def init_noise_sigma(self):
163
- # standard deviation of the initial noise distribution
164
- if self.config.timestep_spacing in ["linspace", "trailing"]:
165
- return self.sigmas.max()
166
-
167
- return (self.sigmas.max() ** 2 + 1) ** 0.5
168
-
169
- def scale_model_input(
170
- self,
171
- sample: torch.FloatTensor,
172
- timestep: Union[float, torch.FloatTensor],
173
- ) -> torch.FloatTensor:
174
- """
175
- Args:
176
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
177
- current timestep.
178
- sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep
179
- Returns:
180
- `torch.FloatTensor`: scaled input sample
181
- """
182
- step_index = self.index_for_timestep(timestep)
183
-
184
- if self.state_in_first_order:
185
- sigma = self.sigmas[step_index]
186
- else:
187
- sigma = self.sigmas_interpol[step_index - 1]
188
-
189
- sample = sample / ((sigma**2 + 1) ** 0.5)
190
- return sample
191
-
192
- def set_timesteps(
193
- self,
194
- num_inference_steps: int,
195
- device: Union[str, torch.device] = None,
196
- num_train_timesteps: Optional[int] = None,
197
- ):
198
- """
199
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
200
-
201
- Args:
202
- num_inference_steps (`int`):
203
- the number of diffusion steps used when generating samples with a pre-trained model.
204
- device (`str` or `torch.device`, optional):
205
- the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
206
- """
207
- self.num_inference_steps = num_inference_steps
208
-
209
- num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
210
-
211
- # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
212
- if self.config.timestep_spacing == "linspace":
213
- timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
214
- elif self.config.timestep_spacing == "leading":
215
- step_ratio = num_train_timesteps // self.num_inference_steps
216
- # creates integer timesteps by multiplying by ratio
217
- # casting to int to avoid issues when num_inference_step is power of 3
218
- timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
219
- timesteps += self.config.steps_offset
220
- elif self.config.timestep_spacing == "trailing":
221
- step_ratio = num_train_timesteps / self.num_inference_steps
222
- # creates integer timesteps by multiplying by ratio
223
- # casting to int to avoid issues when num_inference_step is power of 3
224
- timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
225
- timesteps -= 1
226
- else:
227
- raise ValueError(
228
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
229
- )
230
-
231
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
232
- self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
233
-
234
- sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
235
- sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
236
- sigmas = torch.from_numpy(sigmas).to(device=device)
237
-
238
- # compute up and down sigmas
239
- sigmas_next = sigmas.roll(-1)
240
- sigmas_next[-1] = 0.0
241
- sigmas_up = (sigmas_next**2 * (sigmas**2 - sigmas_next**2) / sigmas**2) ** 0.5
242
- sigmas_down = (sigmas_next**2 - sigmas_up**2) ** 0.5
243
- sigmas_down[-1] = 0.0
244
-
245
- # compute interpolated sigmas
246
- sigmas_interpol = sigmas.log().lerp(sigmas_down.log(), 0.5).exp()
247
- sigmas_interpol[-2:] = 0.0
248
-
249
- # set sigmas
250
- self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
251
- self.sigmas_interpol = torch.cat(
252
- [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
253
- )
254
- self.sigmas_up = torch.cat([sigmas_up[:1], sigmas_up[1:].repeat_interleave(2), sigmas_up[-1:]])
255
- self.sigmas_down = torch.cat([sigmas_down[:1], sigmas_down[1:].repeat_interleave(2), sigmas_down[-1:]])
256
-
257
- if str(device).startswith("mps"):
258
- # mps does not support float64
259
- timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
260
- else:
261
- timesteps = torch.from_numpy(timesteps).to(device)
262
-
263
- timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
264
- interleaved_timesteps = torch.stack((timesteps_interpol[:-2, None], timesteps[1:, None]), dim=-1).flatten()
265
-
266
- self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
267
-
268
- self.sample = None
269
-
270
- # for exp beta schedules, such as the one for `pipeline_shap_e.py`
271
- # we need an index counter
272
- self._index_counter = defaultdict(int)
273
-
274
- def sigma_to_t(self, sigma):
275
- # get log sigma
276
- log_sigma = sigma.log()
277
-
278
- # get distribution
279
- dists = log_sigma - self.log_sigmas[:, None]
280
-
281
- # get sigmas range
282
- low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
283
- high_idx = low_idx + 1
284
-
285
- low = self.log_sigmas[low_idx]
286
- high = self.log_sigmas[high_idx]
287
-
288
- # interpolate sigmas
289
- w = (low - log_sigma) / (low - high)
290
- w = w.clamp(0, 1)
291
-
292
- # transform interpolation to time range
293
- t = (1 - w) * low_idx + w * high_idx
294
- t = t.view(sigma.shape)
295
- return t
296
-
297
- @property
298
- def state_in_first_order(self):
299
- return self.sample is None
300
-
301
- def step(
302
- self,
303
- model_output: Union[torch.FloatTensor, np.ndarray],
304
- timestep: Union[float, torch.FloatTensor],
305
- sample: Union[torch.FloatTensor, np.ndarray],
306
- generator: Optional[torch.Generator] = None,
307
- return_dict: bool = True,
308
- ) -> Union[SchedulerOutput, Tuple]:
309
- """
310
- Args:
311
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
312
- process from the learned model outputs (most often the predicted noise).
313
- model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. timestep
314
- (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor` or `np.ndarray`):
315
- current instance of sample being created by diffusion process.
316
- return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
317
- Returns:
318
- [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
319
- [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
320
- returning a tuple, the first element is the sample tensor.
321
- """
322
- step_index = self.index_for_timestep(timestep)
323
-
324
- # advance index counter by 1
325
- timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
326
- self._index_counter[timestep_int] += 1
327
-
328
- if self.state_in_first_order:
329
- sigma = self.sigmas[step_index]
330
- sigma_interpol = self.sigmas_interpol[step_index]
331
- sigma_up = self.sigmas_up[step_index]
332
- sigma_down = self.sigmas_down[step_index - 1]
333
- else:
334
- # 2nd order / KPDM2's method
335
- sigma = self.sigmas[step_index - 1]
336
- sigma_interpol = self.sigmas_interpol[step_index - 1]
337
- sigma_up = self.sigmas_up[step_index - 1]
338
- sigma_down = self.sigmas_down[step_index - 1]
339
-
340
- # currently only gamma=0 is supported. This usually works best anyways.
341
- # We can support gamma in the future but then need to scale the timestep before
342
- # passing it to the model which requires a change in API
343
- gamma = 0
344
- sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
345
-
346
- device = model_output.device
347
- noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator)
348
-
349
- # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
350
- if self.config.prediction_type == "epsilon":
351
- sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
352
- pred_original_sample = sample - sigma_input * model_output
353
- elif self.config.prediction_type == "v_prediction":
354
- sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
355
- pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
356
- sample / (sigma_input**2 + 1)
357
- )
358
- elif self.config.prediction_type == "sample":
359
- raise NotImplementedError("prediction_type not implemented yet: sample")
360
- else:
361
- raise ValueError(
362
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
363
- )
364
-
365
- if self.state_in_first_order:
366
- # 2. Convert to an ODE derivative for 1st order
367
- derivative = (sample - pred_original_sample) / sigma_hat
368
- # 3. delta timestep
369
- dt = sigma_interpol - sigma_hat
370
-
371
- # store for 2nd order step
372
- self.sample = sample
373
- self.dt = dt
374
- prev_sample = sample + derivative * dt
375
- else:
376
- # DPM-Solver-2
377
- # 2. Convert to an ODE derivative for 2nd order
378
- derivative = (sample - pred_original_sample) / sigma_interpol
379
- # 3. delta timestep
380
- dt = sigma_down - sigma_hat
381
-
382
- sample = self.sample
383
- self.sample = None
384
-
385
- prev_sample = sample + derivative * dt
386
- prev_sample = prev_sample + noise * sigma_up
387
-
388
- if not return_dict:
389
- return (prev_sample,)
390
-
391
- return SchedulerOutput(prev_sample=prev_sample)
392
-
393
- # Copied from diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler.add_noise
394
- def add_noise(
395
- self,
396
- original_samples: torch.FloatTensor,
397
- noise: torch.FloatTensor,
398
- timesteps: torch.FloatTensor,
399
- ) -> torch.FloatTensor:
400
- # Make sure sigmas and timesteps have the same device and dtype as original_samples
401
- sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
402
- if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
403
- # mps does not support float64
404
- schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
405
- timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
406
- else:
407
- schedule_timesteps = self.timesteps.to(original_samples.device)
408
- timesteps = timesteps.to(original_samples.device)
409
-
410
- step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
411
-
412
- sigma = sigmas[step_indices].flatten()
413
- while len(sigma.shape) < len(original_samples.shape):
414
- sigma = sigma.unsqueeze(-1)
415
-
416
- noisy_samples = original_samples + noise * sigma
417
- return noisy_samples
418
-
419
- def __len__(self):
420
- return self.config.num_train_timesteps
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_safe/__init__.py DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py DELETED
@@ -1,105 +0,0 @@
1
- _base_ = [
2
- '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
3
- ]
4
-
5
- # model settings
6
- model = dict(
7
- type='CornerNet',
8
- backbone=dict(
9
- type='HourglassNet',
10
- downsample_times=5,
11
- num_stacks=2,
12
- stage_channels=[256, 256, 384, 384, 384, 512],
13
- stage_blocks=[2, 2, 2, 2, 2, 4],
14
- norm_cfg=dict(type='BN', requires_grad=True)),
15
- neck=None,
16
- bbox_head=dict(
17
- type='CornerHead',
18
- num_classes=80,
19
- in_channels=256,
20
- num_feat_levels=2,
21
- corner_emb_channels=1,
22
- loss_heatmap=dict(
23
- type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
24
- loss_embedding=dict(
25
- type='AssociativeEmbeddingLoss',
26
- pull_weight=0.10,
27
- push_weight=0.10),
28
- loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
29
- # training and testing settings
30
- train_cfg=None,
31
- test_cfg=dict(
32
- corner_topk=100,
33
- local_maximum_kernel=3,
34
- distance_threshold=0.5,
35
- score_thr=0.05,
36
- max_per_img=100,
37
- nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
38
- # data settings
39
- img_norm_cfg = dict(
40
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
41
- train_pipeline = [
42
- dict(type='LoadImageFromFile', to_float32=True),
43
- dict(type='LoadAnnotations', with_bbox=True),
44
- dict(
45
- type='PhotoMetricDistortion',
46
- brightness_delta=32,
47
- contrast_range=(0.5, 1.5),
48
- saturation_range=(0.5, 1.5),
49
- hue_delta=18),
50
- dict(
51
- type='RandomCenterCropPad',
52
- crop_size=(511, 511),
53
- ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
54
- test_mode=False,
55
- test_pad_mode=None,
56
- **img_norm_cfg),
57
- dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
58
- dict(type='RandomFlip', flip_ratio=0.5),
59
- dict(type='Normalize', **img_norm_cfg),
60
- dict(type='DefaultFormatBundle'),
61
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
62
- ]
63
- test_pipeline = [
64
- dict(type='LoadImageFromFile', to_float32=True),
65
- dict(
66
- type='MultiScaleFlipAug',
67
- scale_factor=1.0,
68
- flip=True,
69
- transforms=[
70
- dict(type='Resize'),
71
- dict(
72
- type='RandomCenterCropPad',
73
- crop_size=None,
74
- ratios=None,
75
- border=None,
76
- test_mode=True,
77
- test_pad_mode=['logical_or', 127],
78
- **img_norm_cfg),
79
- dict(type='RandomFlip'),
80
- dict(type='Normalize', **img_norm_cfg),
81
- dict(type='ImageToTensor', keys=['img']),
82
- dict(
83
- type='Collect',
84
- keys=['img'],
85
- meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
86
- 'scale_factor', 'flip', 'img_norm_cfg', 'border')),
87
- ])
88
- ]
89
- data = dict(
90
- samples_per_gpu=5,
91
- workers_per_gpu=3,
92
- train=dict(pipeline=train_pipeline),
93
- val=dict(pipeline=test_pipeline),
94
- test=dict(pipeline=test_pipeline))
95
- # optimizer
96
- optimizer = dict(type='Adam', lr=0.0005)
97
- optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
98
- # learning policy
99
- lr_config = dict(
100
- policy='step',
101
- warmup='linear',
102
- warmup_iters=500,
103
- warmup_ratio=1.0 / 3,
104
- step=[180])
105
- runner = dict(type='EpochBasedRunner', max_epochs=210)
spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py DELETED
@@ -1,6 +0,0 @@
1
- _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- roi_head=dict(
4
- bbox_head=dict(
5
- reg_decoded_bbox=True,
6
- loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0))))
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r101-d8_512x512_80k_ade20k.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './danet_r50-d8_512x512_80k_ade20k.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,5 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/deeplabv3plus_r50-d8.py',
3
- '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_80k.py'
5
- ]
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipeline_loader.py DELETED
@@ -1,52 +0,0 @@
1
- import traceback
2
- from importlib import import_module
3
- from pathlib import Path
4
- from typing import Tuple
5
-
6
- from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
7
- from modules import shared
8
- from modules.logging_colors import logger
9
-
10
-
11
- def _get_available_pipeline_modules():
12
- pipeline_path = Path(__file__).parent / 'pipelines'
13
- modules = [p for p in pipeline_path.iterdir() if p.is_dir()]
14
- return [m.name for m in modules if (m / 'pipelines.py').exists()]
15
-
16
-
17
- def load_pipeline(params: dict) -> Tuple[AbstractMultimodalPipeline, str]:
18
- pipeline_modules = {}
19
- available_pipeline_modules = _get_available_pipeline_modules()
20
- for name in available_pipeline_modules:
21
- try:
22
- pipeline_modules[name] = import_module(f'extensions.multimodal.pipelines.{name}.pipelines')
23
- except:
24
- logger.warning(f'Failed to get multimodal pipelines from {name}')
25
- logger.warning(traceback.format_exc())
26
-
27
- if shared.args.multimodal_pipeline is not None:
28
- for k in pipeline_modules:
29
- if hasattr(pipeline_modules[k], 'get_pipeline'):
30
- pipeline = getattr(pipeline_modules[k], 'get_pipeline')(shared.args.multimodal_pipeline, params)
31
- if pipeline is not None:
32
- return (pipeline, k)
33
- else:
34
- model_name = shared.args.model.lower()
35
- for k in pipeline_modules:
36
- if hasattr(pipeline_modules[k], 'get_pipeline_from_model_name'):
37
- pipeline = getattr(pipeline_modules[k], 'get_pipeline_from_model_name')(model_name, params)
38
- if pipeline is not None:
39
- return (pipeline, k)
40
-
41
- available = []
42
- for k in pipeline_modules:
43
- if hasattr(pipeline_modules[k], 'available_pipelines'):
44
- pipelines = getattr(pipeline_modules[k], 'available_pipelines')
45
- available += pipelines
46
-
47
- if shared.args.multimodal_pipeline is not None:
48
- log = f'Multimodal - ERROR: Failed to load multimodal pipeline "{shared.args.multimodal_pipeline}", available pipelines are: {available}.'
49
- else:
50
- log = f'Multimodal - ERROR: Failed to determine multimodal pipeline for model {shared.args.model}, please select one manually using --multimodal-pipeline [PIPELINE]. Available pipelines are: {available}.'
51
- logger.critical(f'{log} Please specify a correct pipeline, or disable the extension')
52
- raise RuntimeError(f'{log} Please specify a correct pipeline, or disable the extension')
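`load_pipeline()` above discovers `pipelines/<name>/pipelines.py` modules and duck-types against three optional attributes: `get_pipeline`, `get_pipeline_from_model_name`, and `available_pipelines`. A minimal, hypothetical module satisfying that contract could look like this (the class and names are placeholders, not part of the deleted extension):

```python
# extensions/multimodal/pipelines/<name>/pipelines.py -- hypothetical example of the contract
# probed by load_pipeline(); MyPipeline stands in for an AbstractMultimodalPipeline subclass.
from typing import Optional


class MyPipeline:  # placeholder; a real implementation would subclass AbstractMultimodalPipeline
    def __init__(self, params: dict):
        self.params = params


available_pipelines = ['my-pipeline-7b']  # names reported in the loader's error message


def get_pipeline(name: str, params: dict) -> Optional[MyPipeline]:
    return MyPipeline(params) if name == 'my-pipeline-7b' else None


def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[MyPipeline]:
    return MyPipeline(params) if 'my-model' in model_name.lower() else None
```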
 
spaces/Ariharasudhan/YoloV5/utils/general.py DELETED
@@ -1,1108 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- General utils
4
- """
5
-
6
- import contextlib
7
- import glob
8
- import inspect
9
- import logging
10
- import math
11
- import os
12
- import platform
13
- import random
14
- import re
15
- import shutil
16
- import signal
17
- import sys
18
- import time
19
- import urllib
20
- from copy import deepcopy
21
- from datetime import datetime
22
- from itertools import repeat
23
- from multiprocessing.pool import ThreadPool
24
- from pathlib import Path
25
- from subprocess import check_output
26
- from tarfile import is_tarfile
27
- from typing import Optional
28
- from zipfile import ZipFile, is_zipfile
29
-
30
- import cv2
31
- import IPython
32
- import numpy as np
33
- import pandas as pd
34
- import pkg_resources as pkg
35
- import torch
36
- import torchvision
37
- import yaml
38
-
39
- from utils import TryExcept, emojis
40
- from utils.downloads import gsutil_getsize
41
- from utils.metrics import box_iou, fitness
42
-
43
- FILE = Path(__file__).resolve()
44
- ROOT = FILE.parents[1] # YOLOv5 root directory
45
- RANK = int(os.getenv('RANK', -1))
46
-
47
- # Settings
48
- NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
49
- DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory
50
- AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode
51
- VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode
52
- FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf
53
-
54
- torch.set_printoptions(linewidth=320, precision=5, profile='long')
55
- np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
56
- pd.options.display.max_columns = 10
57
- cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
58
- os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
59
- os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy)
60
-
61
-
62
- def is_ascii(s=''):
63
- # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
64
- s = str(s) # convert list, tuple, None, etc. to str
65
- return len(s.encode().decode('ascii', 'ignore')) == len(s)
66
-
67
-
68
- def is_chinese(s='人工智能'):
69
- # Is string composed of any Chinese characters?
70
- return bool(re.search('[\u4e00-\u9fff]', str(s)))
71
-
72
-
73
- def is_colab():
74
- # Is environment a Google Colab instance?
75
- return 'google.colab' in sys.modules
76
-
77
-
78
- def is_notebook():
79
- # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace
80
- ipython_type = str(type(IPython.get_ipython()))
81
- return 'colab' in ipython_type or 'zmqshell' in ipython_type
82
-
83
-
84
- def is_kaggle():
85
- # Is environment a Kaggle Notebook?
86
- return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
87
-
88
-
89
- def is_docker() -> bool:
90
- """Check if the process runs inside a docker container."""
91
- if Path("/.dockerenv").exists():
92
- return True
93
- try: # check if docker is in control groups
94
- with open("/proc/self/cgroup") as file:
95
- return any("docker" in line for line in file)
96
- except OSError:
97
- return False
98
-
99
-
100
- def is_writeable(dir, test=False):
101
- # Return True if directory has write permissions, test opening a file with write permissions if test=True
102
- if not test:
103
- return os.access(dir, os.W_OK) # possible issues on Windows
104
- file = Path(dir) / 'tmp.txt'
105
- try:
106
- with open(file, 'w'): # open file with write permissions
107
- pass
108
- file.unlink() # remove file
109
- return True
110
- except OSError:
111
- return False
112
-
113
-
114
- def set_logging(name=None, verbose=VERBOSE):
115
- # Sets level and returns logger
116
- if is_kaggle() or is_colab():
117
- for h in logging.root.handlers:
118
- logging.root.removeHandler(h) # remove all handlers associated with the root logger object
119
- rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
120
- level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
121
- log = logging.getLogger(name)
122
- log.setLevel(level)
123
- handler = logging.StreamHandler()
124
- handler.setFormatter(logging.Formatter("%(message)s"))
125
- handler.setLevel(level)
126
- log.addHandler(handler)
127
-
128
-
129
- set_logging() # run before defining LOGGER
130
- LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.)
131
- if platform.system() == 'Windows':
132
- for fn in LOGGER.info, LOGGER.warning:
133
- setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging
134
-
135
-
136
- def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
137
- # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
138
- env = os.getenv(env_var)
139
- if env:
140
- path = Path(env) # use environment variable
141
- else:
142
- cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
143
- path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
144
- path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
145
- path.mkdir(exist_ok=True) # make if required
146
- return path
147
-
148
-
149
- CONFIG_DIR = user_config_dir() # Ultralytics settings dir
150
-
151
-
152
- class Profile(contextlib.ContextDecorator):
153
- # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
154
- def __init__(self, t=0.0):
155
- self.t = t
156
- self.cuda = torch.cuda.is_available()
157
-
158
- def __enter__(self):
159
- self.start = self.time()
160
- return self
161
-
162
- def __exit__(self, type, value, traceback):
163
- self.dt = self.time() - self.start # delta-time
164
- self.t += self.dt # accumulate dt
165
-
166
- def time(self):
167
- if self.cuda:
168
- torch.cuda.synchronize()
169
- return time.time()
170
-
171
-
172
- class Timeout(contextlib.ContextDecorator):
173
- # YOLOv5 Timeout class. Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
174
- def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
175
- self.seconds = int(seconds)
176
- self.timeout_message = timeout_msg
177
- self.suppress = bool(suppress_timeout_errors)
178
-
179
- def _timeout_handler(self, signum, frame):
180
- raise TimeoutError(self.timeout_message)
181
-
182
- def __enter__(self):
183
- if platform.system() != 'Windows': # not supported on Windows
184
- signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
185
- signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
186
-
187
- def __exit__(self, exc_type, exc_val, exc_tb):
188
- if platform.system() != 'Windows':
189
- signal.alarm(0) # Cancel SIGALRM if it's scheduled
190
- if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
191
- return True
192
-
193
-
194
- class WorkingDirectory(contextlib.ContextDecorator):
195
- # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
196
- def __init__(self, new_dir):
197
- self.dir = new_dir # new dir
198
- self.cwd = Path.cwd().resolve() # current dir
199
-
200
- def __enter__(self):
201
- os.chdir(self.dir)
202
-
203
- def __exit__(self, exc_type, exc_val, exc_tb):
204
- os.chdir(self.cwd)
205
-
206
-
207
- def methods(instance):
208
- # Get class/instance methods
209
- return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
210
-
211
-
212
- def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
213
- # Print function arguments (optional args dict)
214
- x = inspect.currentframe().f_back # previous frame
215
- file, _, func, _, _ = inspect.getframeinfo(x)
216
- if args is None: # get args automatically
217
- args, _, _, frm = inspect.getargvalues(x)
218
- args = {k: v for k, v in frm.items() if k in args}
219
- try:
220
- file = Path(file).resolve().relative_to(ROOT).with_suffix('')
221
- except ValueError:
222
- file = Path(file).stem
223
- s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
224
- LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))
225
-
226
-
227
- def init_seeds(seed=0, deterministic=False):
228
- # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
229
- random.seed(seed)
230
- np.random.seed(seed)
231
- torch.manual_seed(seed)
232
- torch.cuda.manual_seed(seed)
233
- torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe
234
- # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
235
- if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213
236
- torch.use_deterministic_algorithms(True)
237
- torch.backends.cudnn.deterministic = True
238
- os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
239
- os.environ['PYTHONHASHSEED'] = str(seed)
240
-
241
-
242
- def intersect_dicts(da, db, exclude=()):
243
- # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
244
- return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
245
-
246
-
247
- def get_default_args(func):
248
- # Get func() default arguments
249
- signature = inspect.signature(func)
250
- return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
251
-
252
-
253
- def get_latest_run(search_dir='.'):
254
- # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
255
- last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
256
- return max(last_list, key=os.path.getctime) if last_list else ''
257
-
258
-
259
- def file_age(path=__file__):
260
- # Return days since last file update
261
- dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta
262
- return dt.days # + dt.seconds / 86400 # fractional days
263
-
264
-
265
- def file_date(path=__file__):
266
- # Return human-readable file modification date, i.e. '2021-3-26'
267
- t = datetime.fromtimestamp(Path(path).stat().st_mtime)
268
- return f'{t.year}-{t.month}-{t.day}'
269
-
270
-
271
- def file_size(path):
272
- # Return file/dir size (MB)
273
- mb = 1 << 20 # bytes to MiB (1024 ** 2)
274
- path = Path(path)
275
- if path.is_file():
276
- return path.stat().st_size / mb
277
- elif path.is_dir():
278
- return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
279
- else:
280
- return 0.0
281
-
282
-
283
- def check_online():
284
- # Check internet connectivity
285
- import socket
286
-
287
- def run_once():
288
- # Check once
289
- try:
290
- socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
291
- return True
292
- except OSError:
293
- return False
294
-
295
- return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues
296
-
297
-
298
- def git_describe(path=ROOT): # path must be a directory
299
- # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
300
- try:
301
- assert (Path(path) / '.git').is_dir()
302
- return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1]
303
- except Exception:
304
- return ''
305
-
306
-
307
- @TryExcept()
308
- @WorkingDirectory(ROOT)
309
- def check_git_status(repo='ultralytics/yolov5', branch='master'):
310
- # YOLOv5 status check, recommend 'git pull' if code is out of date
311
- url = f'https://github.com/{repo}'
312
- msg = f', for updates see {url}'
313
- s = colorstr('github: ') # string
314
- assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg
315
- assert check_online(), s + 'skipping check (offline)' + msg
316
-
317
- splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode())
318
- matches = [repo in s for s in splits]
319
- if any(matches):
320
- remote = splits[matches.index(True) - 1]
321
- else:
322
- remote = 'ultralytics'
323
- check_output(f'git remote add {remote} {url}', shell=True)
324
- check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch
325
- local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
326
- n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind
327
- if n > 0:
328
- pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}'
329
- s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update."
330
- else:
331
- s += f'up to date with {url} ✅'
332
- LOGGER.info(s)
333
-
334
-
335
- def check_python(minimum='3.7.0'):
336
- # Check current python version vs. required python version
337
- check_version(platform.python_version(), minimum, name='Python ', hard=True)
338
-
339
-
340
- def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
341
- # Check version vs. required version
342
- current, minimum = (pkg.parse_version(x) for x in (current, minimum))
343
- result = (current == minimum) if pinned else (current >= minimum) # bool
344
- s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string
345
- if hard:
346
- assert result, emojis(s) # assert min requirements met
347
- if verbose and not result:
348
- LOGGER.warning(s)
349
- return result
350
-
351
-
352
- @TryExcept()
353
- def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):
354
- # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)
355
- prefix = colorstr('red', 'bold', 'requirements:')
356
- check_python() # check python version
357
- if isinstance(requirements, Path): # requirements.txt file
358
- file = requirements.resolve()
359
- assert file.exists(), f"{prefix} {file} not found, check failed."
360
- with file.open() as f:
361
- requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]
362
- elif isinstance(requirements, str):
363
- requirements = [requirements]
364
-
365
- s = ''
366
- n = 0
367
- for r in requirements:
368
- try:
369
- pkg.require(r)
370
- except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met
371
- s += f'"{r}" '
372
- n += 1
373
-
374
- if s and install and AUTOINSTALL: # check environment variable
375
- LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
376
- try:
377
- # assert check_online(), "AutoUpdate skipped (offline)"
378
- LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())
379
- source = file if 'file' in locals() else requirements
380
- s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
381
- f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
382
- LOGGER.info(s)
383
- except Exception as e:
384
- LOGGER.warning(f'{prefix} ❌ {e}')
385
-
386
-
387
- def check_img_size(imgsz, s=32, floor=0):
388
- # Verify image size is a multiple of stride s in each dimension
389
- if isinstance(imgsz, int): # integer i.e. img_size=640
390
- new_size = max(make_divisible(imgsz, int(s)), floor)
391
- else: # list i.e. img_size=[640, 480]
392
- imgsz = list(imgsz) # convert to list if tuple
393
- new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
394
- if new_size != imgsz:
395
- LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
396
- return new_size
397
-
398
-
399
- def check_imshow(warn=False):
400
- # Check if environment supports image displays
401
- try:
402
- assert not is_notebook()
403
- assert not is_docker()
404
- cv2.imshow('test', np.zeros((1, 1, 3)))
405
- cv2.waitKey(1)
406
- cv2.destroyAllWindows()
407
- cv2.waitKey(1)
408
- return True
409
- except Exception as e:
410
- if warn:
411
- LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}')
412
- return False
413
-
414
-
415
- def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
416
- # Check file(s) for acceptable suffix
417
- if file and suffix:
418
- if isinstance(suffix, str):
419
- suffix = [suffix]
420
- for f in file if isinstance(file, (list, tuple)) else [file]:
421
- s = Path(f).suffix.lower() # file suffix
422
- if len(s):
423
- assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
424
-
425
-
426
- def check_yaml(file, suffix=('.yaml', '.yml')):
427
- # Search/download YAML file (if necessary) and return path, checking suffix
428
- return check_file(file, suffix)
429
-
430
-
431
- def check_file(file, suffix=''):
432
- # Search/download file (if necessary) and return path
433
- check_suffix(file, suffix) # optional
434
- file = str(file) # convert to str()
435
- if os.path.isfile(file) or not file: # exists
436
- return file
437
- elif file.startswith(('http:/', 'https:/')): # download
438
- url = file # warning: Pathlib turns :// -> :/
439
- file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
440
- if os.path.isfile(file):
441
- LOGGER.info(f'Found {url} locally at {file}') # file already exists
442
- else:
443
- LOGGER.info(f'Downloading {url} to {file}...')
444
- torch.hub.download_url_to_file(url, file)
445
- assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
446
- return file
447
- elif file.startswith('clearml://'): # ClearML Dataset ID
448
- assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'."
449
- return file
450
- else: # search
451
- files = []
452
- for d in 'data', 'models', 'utils': # search directories
453
- files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file
454
- assert len(files), f'File not found: {file}' # assert file was found
455
- assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
456
- return files[0] # return file
457
-
458
-
459
- def check_font(font=FONT, progress=False):
460
- # Download font to CONFIG_DIR if necessary
461
- font = Path(font)
462
- file = CONFIG_DIR / font.name
463
- if not font.exists() and not file.exists():
464
- url = f'https://ultralytics.com/assets/{font.name}'
465
- LOGGER.info(f'Downloading {url} to {file}...')
466
- torch.hub.download_url_to_file(url, str(file), progress=progress)
467
-
468
-
469
- def check_dataset(data, autodownload=True):
470
- # Download, check and/or unzip dataset if not found locally
471
-
472
- # Download (optional)
473
- extract_dir = ''
474
- if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
475
- download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1)
476
- data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))
477
- extract_dir, autodownload = data.parent, False
478
-
479
- # Read yaml (optional)
480
- if isinstance(data, (str, Path)):
481
- data = yaml_load(data) # dictionary
482
-
483
- # Checks
484
- for k in 'train', 'val', 'names':
485
- assert k in data, f"data.yaml '{k}:' field missing ❌"
486
- if isinstance(data['names'], (list, tuple)): # old array format
487
- data['names'] = dict(enumerate(data['names'])) # convert to dict
488
- data['nc'] = len(data['names'])
489
-
490
- # Resolve paths
491
- path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'
492
- if not path.is_absolute():
493
- path = (ROOT / path).resolve()
494
- data['path'] = path # download scripts
495
- for k in 'train', 'val', 'test':
496
- if data.get(k): # prepend path
497
- if isinstance(data[k], str):
498
- x = (path / data[k]).resolve()
499
- if not x.exists() and data[k].startswith('../'):
500
- x = (path / data[k][3:]).resolve()
501
- data[k] = str(x)
502
- else:
503
- data[k] = [str((path / x).resolve()) for x in data[k]]
504
-
505
- # Parse yaml
506
- train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
507
- if val:
508
- val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
509
- if not all(x.exists() for x in val):
510
- LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])
511
- if not s or not autodownload:
512
- raise Exception('Dataset not found ❌')
513
- t = time.time()
514
- if s.startswith('http') and s.endswith('.zip'): # URL
515
- f = Path(s).name # filename
516
- LOGGER.info(f'Downloading {s} to {f}...')
517
- torch.hub.download_url_to_file(s, f)
518
- Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root
519
- unzip_file(f, path=DATASETS_DIR) # unzip
520
- Path(f).unlink() # remove zip
521
- r = None # success
522
- elif s.startswith('bash '): # bash script
523
- LOGGER.info(f'Running {s} ...')
524
- r = os.system(s)
525
- else: # python script
526
- r = exec(s, {'yaml': data}) # return None
527
- dt = f'({round(time.time() - t, 1)}s)'
528
- s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌"
529
- LOGGER.info(f"Dataset download {s}")
530
- check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts
531
- return data # dictionary
532
-
533
-
534
- def check_amp(model):
535
- # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation
536
- from models.common import AutoShape, DetectMultiBackend
537
-
538
- def amp_allclose(model, im):
539
- # All close FP32 vs AMP results
540
- m = AutoShape(model, verbose=False) # model
541
- a = m(im).xywhn[0] # FP32 inference
542
- m.amp = True
543
- b = m(im).xywhn[0] # AMP inference
544
- return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance
545
-
546
- prefix = colorstr('AMP: ')
547
- device = next(model.parameters()).device # get model device
548
- if device.type in ('cpu', 'mps'):
549
- return False # AMP only used on CUDA devices
550
- f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check
551
- im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3))
552
- try:
553
- assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)
554
- LOGGER.info(f'{prefix}checks passed ✅')
555
- return True
556
- except Exception:
557
- help_url = 'https://github.com/ultralytics/yolov5/issues/7908'
558
- LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}')
559
- return False
560
-
561
-
562
- def yaml_load(file='data.yaml'):
563
- # Single-line safe yaml loading
564
- with open(file, errors='ignore') as f:
565
- return yaml.safe_load(f)
566
-
567
-
568
- def yaml_save(file='data.yaml', data={}):
569
- # Single-line safe yaml saving
570
- with open(file, 'w') as f:
571
- yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False)
572
-
573
-
574
- def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):
575
- # Unzip a *.zip file to path/, excluding files containing strings in exclude list
576
- if path is None:
577
- path = Path(file).parent # default path
578
- with ZipFile(file) as zipObj:
579
- for f in zipObj.namelist(): # list all archived filenames in the zip
580
- if all(x not in f for x in exclude):
581
- zipObj.extract(f, path=path)
582
-
583
-
584
- def url2file(url):
585
- # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
586
- url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
587
- return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
588
-
589
-
590
- def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):
591
- # Multithreaded file download and unzip function, used in data.yaml for autodownload
592
- def download_one(url, dir):
593
- # Download 1 file
594
- success = True
595
- if os.path.isfile(url):
596
- f = Path(url) # filename
597
- else: # does not exist
598
- f = dir / Path(url).name
599
- LOGGER.info(f'Downloading {url} to {f}...')
600
- for i in range(retry + 1):
601
- if curl:
602
- s = 'sS' if threads > 1 else '' # silent
603
- r = os.system(
604
- f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue
605
- success = r == 0
606
- else:
607
- torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download
608
- success = f.is_file()
609
- if success:
610
- break
611
- elif i < retry:
612
- LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...')
613
- else:
614
- LOGGER.warning(f'❌ Failed to download {url}...')
615
-
616
- if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)):
617
- LOGGER.info(f'Unzipping {f}...')
618
- if is_zipfile(f):
619
- unzip_file(f, dir) # unzip
620
- elif is_tarfile(f):
621
- os.system(f'tar xf {f} --directory {f.parent}') # unzip
622
- elif f.suffix == '.gz':
623
- os.system(f'tar xfz {f} --directory {f.parent}') # unzip
624
- if delete:
625
- f.unlink() # remove zip
626
-
627
- dir = Path(dir)
628
- dir.mkdir(parents=True, exist_ok=True) # make directory
629
- if threads > 1:
630
- pool = ThreadPool(threads)
631
- pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded
632
- pool.close()
633
- pool.join()
634
- else:
635
- for u in [url] if isinstance(url, (str, Path)) else url:
636
- download_one(u, dir)
637
-
638
-
639
- def make_divisible(x, divisor):
640
- # Returns nearest x divisible by divisor
641
- if isinstance(divisor, torch.Tensor):
642
- divisor = int(divisor.max()) # to int
643
- return math.ceil(x / divisor) * divisor
644
-
645
-
646
- def clean_str(s):
647
- # Cleans a string by replacing special characters with underscore _
648
- return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
649
-
650
-
651
- def one_cycle(y1=0.0, y2=1.0, steps=100):
652
- # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
653
- return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
654
-
655
-
656
- def colorstr(*input):
657
- # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
658
- *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
659
- colors = {
660
- 'black': '\033[30m', # basic colors
661
- 'red': '\033[31m',
662
- 'green': '\033[32m',
663
- 'yellow': '\033[33m',
664
- 'blue': '\033[34m',
665
- 'magenta': '\033[35m',
666
- 'cyan': '\033[36m',
667
- 'white': '\033[37m',
668
- 'bright_black': '\033[90m', # bright colors
669
- 'bright_red': '\033[91m',
670
- 'bright_green': '\033[92m',
671
- 'bright_yellow': '\033[93m',
672
- 'bright_blue': '\033[94m',
673
- 'bright_magenta': '\033[95m',
674
- 'bright_cyan': '\033[96m',
675
- 'bright_white': '\033[97m',
676
- 'end': '\033[0m', # misc
677
- 'bold': '\033[1m',
678
- 'underline': '\033[4m'}
679
- return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
680
-
681
-
682
- def labels_to_class_weights(labels, nc=80):
683
- # Get class weights (inverse frequency) from training labels
684
- if labels[0] is None: # no labels loaded
685
- return torch.Tensor()
686
-
687
- labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
688
- classes = labels[:, 0].astype(int) # labels = [class xywh]
689
- weights = np.bincount(classes, minlength=nc) # occurrences per class
690
-
691
- # Prepend gridpoint count (for uCE training)
692
- # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
693
- # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
694
-
695
- weights[weights == 0] = 1 # replace empty bins with 1
696
- weights = 1 / weights # number of targets per class
697
- weights /= weights.sum() # normalize
698
- return torch.from_numpy(weights).float()
699
-
700
-
701
- def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
702
- # Produces image weights based on class_weights and image contents
703
- # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample
704
- class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
705
- return (class_weights.reshape(1, nc) * class_counts).sum(1)
706
-
707
-
708
- def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
709
- # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
710
- # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
711
- # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
712
- # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
713
- # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
714
- return [
715
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
716
- 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
717
- 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
718
-
719
-
720
- def xyxy2xywh(x):
721
- # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
722
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
723
- y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
724
- y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
725
- y[:, 2] = x[:, 2] - x[:, 0] # width
726
- y[:, 3] = x[:, 3] - x[:, 1] # height
727
- return y
728
-
729
-
730
- def xywh2xyxy(x):
731
- # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
732
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
733
- y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
734
- y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
735
- y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
736
- y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
737
- return y
738
-
739
-
740
- def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
741
- # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
742
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
743
- y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
744
- y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
745
- y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
746
- y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
747
- return y
748
-
749
-
750
- def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
751
- # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
752
- if clip:
753
- clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip
754
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
755
- y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
756
- y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
757
- y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
758
- y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
759
- return y
760
-
761
-
762
- def xyn2xy(x, w=640, h=640, padw=0, padh=0):
763
- # Convert normalized segments into pixel segments, shape (n,2)
764
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
765
- y[:, 0] = w * x[:, 0] + padw # top left x
766
- y[:, 1] = h * x[:, 1] + padh # top left y
767
- return y
768
-
769
-
770
- def segment2box(segment, width=640, height=640):
771
- # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
772
- x, y = segment.T # segment xy
773
- inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
774
- x, y, = x[inside], y[inside]
775
- return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
776
-
777
-
778
- def segments2boxes(segments):
779
- # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
780
- boxes = []
781
- for s in segments:
782
- x, y = s.T # segment xy
783
- boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
784
- return xyxy2xywh(np.array(boxes)) # cls, xywh
785
-
786
-
787
- def resample_segments(segments, n=1000):
788
- # Up-sample an (n,2) segment
789
- for i, s in enumerate(segments):
790
- s = np.concatenate((s, s[0:1, :]), axis=0)
791
- x = np.linspace(0, len(s) - 1, n)
792
- xp = np.arange(len(s))
793
- segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
794
- return segments
795
-
796
-
797
- def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
798
- # Rescale boxes (xyxy) from img1_shape to img0_shape
799
- if ratio_pad is None: # calculate from img0_shape
800
- gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
801
- pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
802
- else:
803
- gain = ratio_pad[0][0]
804
- pad = ratio_pad[1]
805
-
806
- boxes[:, [0, 2]] -= pad[0] # x padding
807
- boxes[:, [1, 3]] -= pad[1] # y padding
808
- boxes[:, :4] /= gain
809
- clip_boxes(boxes, img0_shape)
810
- return boxes
811
-
812
-
813
- def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None):
814
- # Rescale coords (xyxy) from img1_shape to img0_shape
815
- if ratio_pad is None: # calculate from img0_shape
816
- gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
817
- pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
818
- else:
819
- gain = ratio_pad[0][0]
820
- pad = ratio_pad[1]
821
-
822
- segments[:, 0] -= pad[0] # x padding
823
- segments[:, 1] -= pad[1] # y padding
824
- segments /= gain
825
- clip_segments(segments, img0_shape)
826
- return segments
827
-
828
-
829
- def clip_boxes(boxes, shape):
830
- # Clip boxes (xyxy) to image shape (height, width)
831
- if isinstance(boxes, torch.Tensor): # faster individually
832
- boxes[:, 0].clamp_(0, shape[1]) # x1
833
- boxes[:, 1].clamp_(0, shape[0]) # y1
834
- boxes[:, 2].clamp_(0, shape[1]) # x2
835
- boxes[:, 3].clamp_(0, shape[0]) # y2
836
- else: # np.array (faster grouped)
837
- boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
838
- boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
839
-
840
-
841
- def clip_segments(boxes, shape):
842
- # Clip segments (xy1,xy2,...) to image shape (height, width)
843
- if isinstance(boxes, torch.Tensor): # faster individually
844
- boxes[:, 0].clamp_(0, shape[1]) # x
845
- boxes[:, 1].clamp_(0, shape[0]) # y
846
- else: # np.array (faster grouped)
847
- boxes[:, 0] = boxes[:, 0].clip(0, shape[1]) # x
848
- boxes[:, 1] = boxes[:, 1].clip(0, shape[0]) # y
849
-
850
-
851
- def non_max_suppression(
852
- prediction,
853
- conf_thres=0.25,
854
- iou_thres=0.45,
855
- classes=None,
856
- agnostic=False,
857
- multi_label=False,
858
- labels=(),
859
- max_det=300,
860
- nm=0, # number of masks
861
- ):
862
- """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections
863
-
864
- Returns:
865
- list of detections, on (n,6) tensor per image [xyxy, conf, cls]
866
- """
867
-
868
- if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out)
869
- prediction = prediction[0] # select only inference output
870
-
871
- device = prediction.device
872
- mps = 'mps' in device.type # Apple MPS
873
- if mps: # MPS not fully supported yet, convert tensors to CPU before NMS
874
- prediction = prediction.cpu()
875
- bs = prediction.shape[0] # batch size
876
- nc = prediction.shape[2] - nm - 5 # number of classes
877
- xc = prediction[..., 4] > conf_thres # candidates
878
-
879
- # Checks
880
- assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
881
- assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
882
-
883
- # Settings
884
- # min_wh = 2 # (pixels) minimum box width and height
885
- max_wh = 7680 # (pixels) maximum box width and height
886
- max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
887
- time_limit = 0.5 + 0.05 * bs # seconds to quit after
888
- redundant = True # require redundant detections
889
- multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
890
- merge = False # use merge-NMS
891
-
892
- t = time.time()
893
- mi = 5 + nc # mask start index
894
- output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
895
- for xi, x in enumerate(prediction): # image index, image inference
896
- # Apply constraints
897
- # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
898
- x = x[xc[xi]] # confidence
899
-
900
- # Cat apriori labels if autolabelling
901
- if labels and len(labels[xi]):
902
- lb = labels[xi]
903
- v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
904
- v[:, :4] = lb[:, 1:5] # box
905
- v[:, 4] = 1.0 # conf
906
- v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls
907
- x = torch.cat((x, v), 0)
908
-
909
- # If none remain process next image
910
- if not x.shape[0]:
911
- continue
912
-
913
- # Compute conf
914
- x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
915
-
916
- # Box/Mask
917
- box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2)
918
- mask = x[:, mi:] # zero columns if no masks
919
-
920
- # Detections matrix nx6 (xyxy, conf, cls)
921
- if multi_label:
922
- i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
923
- x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
924
- else: # best class only
925
- conf, j = x[:, 5:mi].max(1, keepdim=True)
926
- x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]
927
-
928
- # Filter by class
929
- if classes is not None:
930
- x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
931
-
932
- # Apply finite constraint
933
- # if not torch.isfinite(x).all():
934
- # x = x[torch.isfinite(x).all(1)]
935
-
936
- # Check shape
937
- n = x.shape[0] # number of boxes
938
- if not n: # no boxes
939
- continue
940
- elif n > max_nms: # excess boxes
941
- x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
942
- else:
943
- x = x[x[:, 4].argsort(descending=True)] # sort by confidence
944
-
945
- # Batched NMS
946
- c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
947
- boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
948
- i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
949
- if i.shape[0] > max_det: # limit detections
950
- i = i[:max_det]
951
- if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
952
- # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
953
- iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
954
- weights = iou * scores[None] # box weights
955
- x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
956
- if redundant:
957
- i = i[iou.sum(1) > 1] # require redundancy
958
-
959
- output[xi] = x[i]
960
- if mps:
961
- output[xi] = output[xi].to(device)
962
- if (time.time() - t) > time_limit:
963
- LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')
964
- break # time limit exceeded
965
-
966
- return output
967
-
968
-
969
- def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
970
- # Strip optimizer from 'f' to finalize training, optionally save as 's'
971
- x = torch.load(f, map_location=torch.device('cpu'))
972
- if x.get('ema'):
973
- x['model'] = x['ema'] # replace model with ema
974
- for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys
975
- x[k] = None
976
- x['epoch'] = -1
977
- x['model'].half() # to FP16
978
- for p in x['model'].parameters():
979
- p.requires_grad = False
980
- torch.save(x, s or f)
981
- mb = os.path.getsize(s or f) / 1E6 # filesize
982
- LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
983
-
984
-
985
- def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):
986
- evolve_csv = save_dir / 'evolve.csv'
987
- evolve_yaml = save_dir / 'hyp_evolve.yaml'
988
- keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps]
989
- keys = tuple(x.strip() for x in keys)
990
- vals = results + tuple(hyp.values())
991
- n = len(keys)
992
-
993
- # Download (optional)
994
- if bucket:
995
- url = f'gs://{bucket}/evolve.csv'
996
- if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):
997
- os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local
998
-
999
- # Log to evolve.csv
1000
- s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header
1001
- with open(evolve_csv, 'a') as f:
1002
- f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
1003
-
1004
- # Save yaml
1005
- with open(evolve_yaml, 'w') as f:
1006
- data = pd.read_csv(evolve_csv)
1007
- data = data.rename(columns=lambda x: x.strip()) # strip keys
1008
- i = np.argmax(fitness(data.values[:, :4])) #
1009
- generations = len(data)
1010
- f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' +
1011
- f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) +
1012
- '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
1013
- yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)
1014
-
1015
- # Print to screen
1016
- LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix +
1017
- ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}'
1018
- for x in vals) + '\n\n')
1019
-
1020
- if bucket:
1021
- os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
1022
-
1023
-
1024
- def apply_classifier(x, model, img, im0):
1025
- # Apply a second stage classifier to YOLO outputs
1026
- # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
1027
- im0 = [im0] if isinstance(im0, np.ndarray) else im0
1028
- for i, d in enumerate(x): # per image
1029
- if d is not None and len(d):
1030
- d = d.clone()
1031
-
1032
- # Reshape and pad cutouts
1033
- b = xyxy2xywh(d[:, :4]) # boxes
1034
- b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
1035
- b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
1036
- d[:, :4] = xywh2xyxy(b).long()
1037
-
1038
- # Rescale boxes from img_size to im0 size
1039
- scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)
1040
-
1041
- # Classes
1042
- pred_cls1 = d[:, 5].long()
1043
- ims = []
1044
- for a in d:
1045
- cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
1046
- im = cv2.resize(cutout, (224, 224)) # BGR
1047
-
1048
- im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
1049
- im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
1050
- im /= 255 # 0 - 255 to 0.0 - 1.0
1051
- ims.append(im)
1052
-
1053
- pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
1054
- x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
1055
-
1056
- return x
1057
-
1058
-
1059
- def increment_path(path, exist_ok=False, sep='', mkdir=False):
1060
- # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
1061
- path = Path(path) # os-agnostic
1062
- if path.exists() and not exist_ok:
1063
- path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
1064
-
1065
- # Method 1
1066
- for n in range(2, 9999):
1067
- p = f'{path}{sep}{n}{suffix}' # increment path
1068
- if not os.path.exists(p): #
1069
- break
1070
- path = Path(p)
1071
-
1072
- # Method 2 (deprecated)
1073
- # dirs = glob.glob(f"{path}{sep}*") # similar paths
1074
- # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
1075
- # i = [int(m.groups()[0]) for m in matches if m] # indices
1076
- # n = max(i) + 1 if i else 2 # increment number
1077
- # path = Path(f"{path}{sep}{n}{suffix}") # increment path
1078
-
1079
- if mkdir:
1080
- path.mkdir(parents=True, exist_ok=True) # make directory
1081
-
1082
- return path
1083
-
1084
-
1085
- # OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------
1086
- imshow_ = cv2.imshow # copy to avoid recursion errors
1087
-
1088
-
1089
- def imread(path, flags=cv2.IMREAD_COLOR):
1090
- return cv2.imdecode(np.fromfile(path, np.uint8), flags)
1091
-
1092
-
1093
- def imwrite(path, im):
1094
- try:
1095
- cv2.imencode(Path(path).suffix, im)[1].tofile(path)
1096
- return True
1097
- except Exception:
1098
- return False
1099
-
1100
-
1101
- def imshow(path, im):
1102
- imshow_(path.encode('unicode_escape').decode(), im)
1103
-
1104
-
1105
- cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine
1106
-
1107
- # Variables ------------------------------------------------------------------------------------------------------------
1108
- NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm
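As a small sanity check of the box-format helpers defined in the file above (assuming the YOLOv5 repo layout so `utils.general` is importable), `xyxy2xywh` and `xywh2xyxy` are exact inverses of each other:

```python
# Round-trip check of the box conversion helpers from utils/general.py above.
import numpy as np
from utils.general import xywh2xyxy, xyxy2xywh  # assumes the YOLOv5 repo root is on sys.path

boxes_xyxy = np.array([[10.0, 20.0, 110.0, 220.0]])   # x1, y1, x2, y2
boxes_xywh = xyxy2xywh(boxes_xyxy)                     # -> [[60., 120., 100., 200.]] center-x, center-y, w, h
assert np.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)  # the conversion is lossless
```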
 
spaces/Armored-Atom/DiFuse_Your_Thoughts/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: MagicPrompt Stable Diffusion
3
- emoji: 😻
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.3.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: Gustavosta/MagicPrompt-Stable-Diffusion
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/cache.py DELETED
@@ -1,222 +0,0 @@
1
- import os
2
- import textwrap
3
- from optparse import Values
4
- from typing import Any, List
5
-
6
- import pip._internal.utils.filesystem as filesystem
7
- from pip._internal.cli.base_command import Command
8
- from pip._internal.cli.status_codes import ERROR, SUCCESS
9
- from pip._internal.exceptions import CommandError, PipError
10
- from pip._internal.utils.logging import getLogger
11
-
12
- logger = getLogger(__name__)
13
-
14
-
15
- class CacheCommand(Command):
16
- """
17
- Inspect and manage pip's wheel cache.
18
-
19
- Subcommands:
20
-
21
- - dir: Show the cache directory.
22
- - info: Show information about the cache.
23
- - list: List filenames of packages stored in the cache.
24
- - remove: Remove one or more package from the cache.
25
- - purge: Remove all items from the cache.
26
-
27
- ``<pattern>`` can be a glob expression or a package name.
28
- """
29
-
30
- ignore_require_venv = True
31
- usage = """
32
- %prog dir
33
- %prog info
34
- %prog list [<pattern>] [--format=[human, abspath]]
35
- %prog remove <pattern>
36
- %prog purge
37
- """
38
-
39
- def add_options(self) -> None:
40
- self.cmd_opts.add_option(
41
- "--format",
42
- action="store",
43
- dest="list_format",
44
- default="human",
45
- choices=("human", "abspath"),
46
- help="Select the output format among: human (default) or abspath",
47
- )
48
-
49
- self.parser.insert_option_group(0, self.cmd_opts)
50
-
51
- def run(self, options: Values, args: List[str]) -> int:
52
- handlers = {
53
- "dir": self.get_cache_dir,
54
- "info": self.get_cache_info,
55
- "list": self.list_cache_items,
56
- "remove": self.remove_cache_items,
57
- "purge": self.purge_cache,
58
- }
59
-
60
- if not options.cache_dir:
61
- logger.error("pip cache commands can not function since cache is disabled.")
62
- return ERROR
63
-
64
- # Determine action
65
- if not args or args[0] not in handlers:
66
- logger.error(
67
- "Need an action (%s) to perform.",
68
- ", ".join(sorted(handlers)),
69
- )
70
- return ERROR
71
-
72
- action = args[0]
73
-
74
- # Error handling happens here, not in the action-handlers.
75
- try:
76
- handlers[action](options, args[1:])
77
- except PipError as e:
78
- logger.error(e.args[0])
79
- return ERROR
80
-
81
- return SUCCESS
82
-
83
- def get_cache_dir(self, options: Values, args: List[Any]) -> None:
84
- if args:
85
- raise CommandError("Too many arguments")
86
-
87
- logger.info(options.cache_dir)
88
-
89
- def get_cache_info(self, options: Values, args: List[Any]) -> None:
90
- if args:
91
- raise CommandError("Too many arguments")
-
- num_http_files = len(self._find_http_files(options))
- num_packages = len(self._find_wheels(options, "*"))
-
- http_cache_location = self._cache_dir(options, "http")
- wheels_cache_location = self._cache_dir(options, "wheels")
- http_cache_size = filesystem.format_directory_size(http_cache_location)
- wheels_cache_size = filesystem.format_directory_size(wheels_cache_location)
-
- message = (
- textwrap.dedent(
- """
- Package index page cache location: {http_cache_location}
- Package index page cache size: {http_cache_size}
- Number of HTTP files: {num_http_files}
- Locally built wheels location: {wheels_cache_location}
- Locally built wheels size: {wheels_cache_size}
- Number of locally built wheels: {package_count}
- """
- )
- .format(
- http_cache_location=http_cache_location,
- http_cache_size=http_cache_size,
- num_http_files=num_http_files,
- wheels_cache_location=wheels_cache_location,
- package_count=num_packages,
- wheels_cache_size=wheels_cache_size,
- )
- .strip()
- )
-
- logger.info(message)
-
- def list_cache_items(self, options: Values, args: List[Any]) -> None:
- if len(args) > 1:
- raise CommandError("Too many arguments")
-
- if args:
- pattern = args[0]
- else:
- pattern = "*"
-
- files = self._find_wheels(options, pattern)
- if options.list_format == "human":
- self.format_for_human(files)
- else:
- self.format_for_abspath(files)
-
- def format_for_human(self, files: List[str]) -> None:
- if not files:
- logger.info("No locally built wheels cached.")
- return
-
- results = []
- for filename in files:
- wheel = os.path.basename(filename)
- size = filesystem.format_file_size(filename)
- results.append(f" - {wheel} ({size})")
- logger.info("Cache contents:\n")
- logger.info("\n".join(sorted(results)))
-
- def format_for_abspath(self, files: List[str]) -> None:
- if not files:
- return
-
- results = []
- for filename in files:
- results.append(filename)
-
- logger.info("\n".join(sorted(results)))
-
- def remove_cache_items(self, options: Values, args: List[Any]) -> None:
- if len(args) > 1:
- raise CommandError("Too many arguments")
-
- if not args:
- raise CommandError("Please provide a pattern")
-
- files = self._find_wheels(options, args[0])
-
- no_matching_msg = "No matching packages"
- if args[0] == "*":
- # Only fetch http files if no specific pattern given
- files += self._find_http_files(options)
- else:
- # Add the pattern to the log message
- no_matching_msg += ' for pattern "{}"'.format(args[0])
-
- if not files:
- logger.warning(no_matching_msg)
-
- for filename in files:
- os.unlink(filename)
- logger.verbose("Removed %s", filename)
- logger.info("Files removed: %s", len(files))
-
- def purge_cache(self, options: Values, args: List[Any]) -> None:
- if args:
- raise CommandError("Too many arguments")
-
- return self.remove_cache_items(options, ["*"])
-
- def _cache_dir(self, options: Values, subdir: str) -> str:
- return os.path.join(options.cache_dir, subdir)
-
- def _find_http_files(self, options: Values) -> List[str]:
- http_dir = self._cache_dir(options, "http")
- return filesystem.find_files(http_dir, "*")
-
- def _find_wheels(self, options: Values, pattern: str) -> List[str]:
- wheel_dir = self._cache_dir(options, "wheels")
-
- # The wheel filename format, as specified in PEP 427, is:
- # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
- #
- # Additionally, non-alphanumeric values in the distribution are
- # normalized to underscores (_), meaning hyphens can never occur
- # before `-{version}`.
- #
- # Given that information:
- # - If the pattern we're given contains a hyphen (-), the user is
- # providing at least the version. Thus, we can just append `*.whl`
- # to match the rest of it.
- # - If the pattern we're given doesn't contain a hyphen (-), the
- # user is only providing the name. Thus, we append `-*.whl` to
- # match the hyphen before the version, followed by anything else.
- #
- # PEP 427: https://www.python.org/dev/peps/pep-0427/
- pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
-
- return filesystem.find_files(wheel_dir, pattern)
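Note: the glob construction at the end of `_find_wheels` is easy to sanity-check on its own. The sketch below is standalone (not part of pip); the package names are made-up examples.

```python
from fnmatch import fnmatch

def wheel_glob(pattern: str) -> str:
    # Mirrors _find_wheels: a hyphen means a version was given, so only
    # "*.whl" is appended; otherwise "-*.whl" anchors the name boundary.
    return pattern + ("*.whl" if "-" in pattern else "-*.whl")

assert wheel_glob("requests") == "requests-*.whl"
assert wheel_glob("requests-2.31.0") == "requests-2.31.0*.whl"

# The name boundary matters: the underscore in "requests_toolbelt" keeps it
# from matching a query for "requests".
assert fnmatch("requests-2.31.0-py3-none-any.whl", wheel_glob("requests"))
assert not fnmatch("requests_toolbelt-1.0.0-py2.py3-none-any.whl", wheel_glob("requests"))
```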
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py DELETED
@@ -1,70 +0,0 @@
- """
- pygments.console
- ~~~~~~~~~~~~~~~~
-
- Format colored console output.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
- """
-
- esc = "\x1b["
-
- codes = {}
- codes[""] = ""
- codes["reset"] = esc + "39;49;00m"
-
- codes["bold"] = esc + "01m"
- codes["faint"] = esc + "02m"
- codes["standout"] = esc + "03m"
- codes["underline"] = esc + "04m"
- codes["blink"] = esc + "05m"
- codes["overline"] = esc + "06m"
-
- dark_colors = ["black", "red", "green", "yellow", "blue",
- "magenta", "cyan", "gray"]
- light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
- "brightmagenta", "brightcyan", "white"]
-
- x = 30
- for d, l in zip(dark_colors, light_colors):
- codes[d] = esc + "%im" % x
- codes[l] = esc + "%im" % (60 + x)
- x += 1
-
- del d, l, x
-
- codes["white"] = codes["bold"]
-
-
- def reset_color():
- return codes["reset"]
-
-
- def colorize(color_key, text):
- return codes[color_key] + text + codes["reset"]
-
-
- def ansiformat(attr, text):
- """
- Format ``text`` with a color and/or some attributes::
-
- color normal color
- *color* bold color
- _color_ underlined color
- +color+ blinking color
- """
- result = []
- if attr[:1] == attr[-1:] == '+':
- result.append(codes['blink'])
- attr = attr[1:-1]
- if attr[:1] == attr[-1:] == '*':
- result.append(codes['bold'])
- attr = attr[1:-1]
- if attr[:1] == attr[-1:] == '_':
- result.append(codes['underline'])
- attr = attr[1:-1]
- result.append(codes[attr])
- result.append(text)
- result.append(codes['reset'])
- return ''.join(result)
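Note: typical use of the helpers above looks like this. A minimal sketch, assuming the module is imported from its vendored location inside pip (adjust the import path for a standalone Pygments install).

```python
from pip._vendor.pygments.console import ansiformat, colorize

# colorize() wraps text in one color code plus a reset.
print(colorize("red", "error: build failed"))

# ansiformat() additionally understands the markers from its docstring:
# *color* -> bold, _color_ -> underline, +color+ -> blink.
print(ansiformat("*red*", "bold red"))
print(ansiformat("_green_", "underlined green"))
```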
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/demo/predictor.py DELETED
@@ -1,220 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import atexit
3
- import bisect
4
- import multiprocessing as mp
5
- from collections import deque
6
- import cv2
7
- import torch
8
-
9
- from detectron2.data import MetadataCatalog
10
- from detectron2.engine.defaults import DefaultPredictor
11
- from detectron2.utils.video_visualizer import VideoVisualizer
12
- from detectron2.utils.visualizer import ColorMode, Visualizer
13
-
14
-
15
- class VisualizationDemo(object):
16
- def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
17
- """
18
- Args:
19
- cfg (CfgNode):
20
- instance_mode (ColorMode):
21
- parallel (bool): whether to run the model in different processes from visualization.
22
- Useful since the visualization logic can be slow.
23
- """
24
- self.metadata = MetadataCatalog.get(
25
- cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
26
- )
27
- self.cpu_device = torch.device("cpu")
28
- self.instance_mode = instance_mode
29
-
30
- self.parallel = parallel
31
- if parallel:
32
- num_gpu = torch.cuda.device_count()
33
- self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
34
- else:
35
- self.predictor = DefaultPredictor(cfg)
36
-
37
- def run_on_image(self, image):
38
- """
39
- Args:
40
- image (np.ndarray): an image of shape (H, W, C) (in BGR order).
41
- This is the format used by OpenCV.
42
-
43
- Returns:
44
- predictions (dict): the output of the model.
45
- vis_output (VisImage): the visualized image output.
46
- """
47
- vis_output = None
48
- predictions = self.predictor(image)
49
- # Convert image from OpenCV BGR format to Matplotlib RGB format.
50
- image = image[:, :, ::-1]
51
- visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
52
- if "panoptic_seg" in predictions:
53
- panoptic_seg, segments_info = predictions["panoptic_seg"]
54
- vis_output = visualizer.draw_panoptic_seg_predictions(
55
- panoptic_seg.to(self.cpu_device), segments_info
56
- )
57
- else:
58
- if "sem_seg" in predictions:
59
- vis_output = visualizer.draw_sem_seg(
60
- predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
61
- )
62
- if "instances" in predictions:
63
- instances = predictions["instances"].to(self.cpu_device)
64
- vis_output = visualizer.draw_instance_predictions(predictions=instances)
65
-
66
- return predictions, vis_output
67
-
68
- def _frame_from_video(self, video):
69
- while video.isOpened():
70
- success, frame = video.read()
71
- if success:
72
- yield frame
73
- else:
74
- break
75
-
76
- def run_on_video(self, video):
77
- """
78
- Visualizes predictions on frames of the input video.
79
-
80
- Args:
81
- video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
82
- either a webcam or a video file.
83
-
84
- Yields:
85
- ndarray: BGR visualizations of each video frame.
86
- """
87
- video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
88
-
89
- def process_predictions(frame, predictions):
90
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
91
- if "panoptic_seg" in predictions:
92
- panoptic_seg, segments_info = predictions["panoptic_seg"]
93
- vis_frame = video_visualizer.draw_panoptic_seg_predictions(
94
- frame, panoptic_seg.to(self.cpu_device), segments_info
95
- )
96
- elif "instances" in predictions:
97
- predictions = predictions["instances"].to(self.cpu_device)
98
- vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
99
- elif "sem_seg" in predictions:
100
- vis_frame = video_visualizer.draw_sem_seg(
101
- frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
102
- )
103
-
104
- # Converts Matplotlib RGB format to OpenCV BGR format
105
- vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
106
- return vis_frame
107
-
108
- frame_gen = self._frame_from_video(video)
109
- if self.parallel:
110
- buffer_size = self.predictor.default_buffer_size
111
-
112
- frame_data = deque()
113
-
114
- for cnt, frame in enumerate(frame_gen):
115
- frame_data.append(frame)
116
- self.predictor.put(frame)
117
-
118
- if cnt >= buffer_size:
119
- frame = frame_data.popleft()
120
- predictions = self.predictor.get()
121
- yield process_predictions(frame, predictions)
122
-
123
- while len(frame_data):
124
- frame = frame_data.popleft()
125
- predictions = self.predictor.get()
126
- yield process_predictions(frame, predictions)
127
- else:
128
- for frame in frame_gen:
129
- yield process_predictions(frame, self.predictor(frame))
130
-
131
-
132
- class AsyncPredictor:
133
- """
134
- A predictor that runs the model asynchronously, possibly on >1 GPUs.
135
- Because rendering the visualization takes considerably amount of time,
136
- this helps improve throughput a little bit when rendering videos.
137
- """
138
-
139
- class _StopToken:
140
- pass
141
-
142
- class _PredictWorker(mp.Process):
143
- def __init__(self, cfg, task_queue, result_queue):
144
- self.cfg = cfg
145
- self.task_queue = task_queue
146
- self.result_queue = result_queue
147
- super().__init__()
148
-
149
- def run(self):
150
- predictor = DefaultPredictor(self.cfg)
151
-
152
- while True:
153
- task = self.task_queue.get()
154
- if isinstance(task, AsyncPredictor._StopToken):
155
- break
156
- idx, data = task
157
- result = predictor(data)
158
- self.result_queue.put((idx, result))
159
-
160
- def __init__(self, cfg, num_gpus: int = 1):
161
- """
162
- Args:
163
- cfg (CfgNode):
164
- num_gpus (int): if 0, will run on CPU
165
- """
166
- num_workers = max(num_gpus, 1)
167
- self.task_queue = mp.Queue(maxsize=num_workers * 3)
168
- self.result_queue = mp.Queue(maxsize=num_workers * 3)
169
- self.procs = []
170
- for gpuid in range(max(num_gpus, 1)):
171
- cfg = cfg.clone()
172
- cfg.defrost()
173
- cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
174
- self.procs.append(
175
- AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
176
- )
177
-
178
- self.put_idx = 0
179
- self.get_idx = 0
180
- self.result_rank = []
181
- self.result_data = []
182
-
183
- for p in self.procs:
184
- p.start()
185
- atexit.register(self.shutdown)
186
-
187
- def put(self, image):
188
- self.put_idx += 1
189
- self.task_queue.put((self.put_idx, image))
190
-
191
- def get(self):
192
- self.get_idx += 1 # the index needed for this request
193
- if len(self.result_rank) and self.result_rank[0] == self.get_idx:
194
- res = self.result_data[0]
195
- del self.result_data[0], self.result_rank[0]
196
- return res
197
-
198
- while True:
199
- # make sure the results are returned in the correct order
200
- idx, res = self.result_queue.get()
201
- if idx == self.get_idx:
202
- return res
203
- insert = bisect.bisect(self.result_rank, idx)
204
- self.result_rank.insert(insert, idx)
205
- self.result_data.insert(insert, res)
206
-
207
- def __len__(self):
208
- return self.put_idx - self.get_idx
209
-
210
- def __call__(self, image):
211
- self.put(image)
212
- return self.get()
213
-
214
- def shutdown(self):
215
- for _ in self.procs:
216
- self.task_queue.put(AsyncPredictor._StopToken())
217
-
218
- @property
219
- def default_buffer_size(self):
220
- return len(self.procs) * 5
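Note: a rough usage sketch for the `VisualizationDemo` class above. It assumes a working detectron2 install; the config and weight paths are placeholders, and the class itself would be imported from this `demo/predictor.py` module.

```python
import cv2
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file("configs/my_model.yaml")   # placeholder config path
cfg.MODEL.WEIGHTS = "model_final.pth"          # placeholder weights path

demo = VisualizationDemo(cfg, parallel=False)  # parallel=True would spawn AsyncPredictor workers

image = cv2.imread("input.jpg")                # BGR, as run_on_image expects
predictions, vis = demo.run_on_image(image)
cv2.imwrite("output.jpg", vis.get_image()[:, :, ::-1])  # VisImage is RGB; convert back to BGR
```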
 
spaces/BAAI/vid2vid-zero/vid2vid_zero/p2p/ptp_utils.py DELETED
@@ -1,347 +0,0 @@
1
- # Copyright 2022 Google LLC
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import numpy as np
16
- import torch
17
- from PIL import Image, ImageDraw, ImageFont
18
- import cv2
19
- from typing import Optional, Union, Tuple, List, Callable, Dict
20
- from IPython.display import display
21
- from tqdm import tqdm
22
- import torch.nn.functional as F
23
-
24
-
25
- def text_under_image(image: np.ndarray, text: str, text_color: Tuple[int, int, int] = (0, 0, 0)):
26
- h, w, c = image.shape
27
- offset = int(h * .2)
28
- img = np.ones((h + offset, w, c), dtype=np.uint8) * 255
29
- font = cv2.FONT_HERSHEY_SIMPLEX
30
- # font = ImageFont.truetype("/usr/share/fonts/truetype/noto/NotoMono-Regular.ttf", font_size)
31
- img[:h] = image
32
- textsize = cv2.getTextSize(text, font, 1, 2)[0]
33
- text_x, text_y = (w - textsize[0]) // 2, h + offset - textsize[1] // 2
34
- cv2.putText(img, text, (text_x, text_y ), font, 1, text_color, 2)
35
- return img
36
-
37
-
38
- def view_images(images, num_rows=1, offset_ratio=0.02):
39
- if type(images) is list:
40
- num_empty = len(images) % num_rows
41
- elif images.ndim == 4:
42
- num_empty = images.shape[0] % num_rows
43
- else:
44
- images = [images]
45
- num_empty = 0
46
-
47
- empty_images = np.ones(images[0].shape, dtype=np.uint8) * 255
48
- images = [image.astype(np.uint8) for image in images] + [empty_images] * num_empty
49
- num_items = len(images)
50
-
51
- h, w, c = images[0].shape
52
- offset = int(h * offset_ratio)
53
- num_cols = num_items // num_rows
54
- image_ = np.ones((h * num_rows + offset * (num_rows - 1),
55
- w * num_cols + offset * (num_cols - 1), 3), dtype=np.uint8) * 255
56
- for i in range(num_rows):
57
- for j in range(num_cols):
58
- image_[i * (h + offset): i * (h + offset) + h:, j * (w + offset): j * (w + offset) + w] = images[
59
- i * num_cols + j]
60
-
61
- pil_img = Image.fromarray(image_)
62
- display(pil_img)
63
-
64
-
65
- def diffusion_step(model, controller, latents, context, t, guidance_scale, low_resource=False):
66
- if low_resource:
67
- noise_pred_uncond = model.unet(latents, t, encoder_hidden_states=context[0])["sample"]
68
- noise_prediction_text = model.unet(latents, t, encoder_hidden_states=context[1])["sample"]
69
- else:
70
- latents_input = torch.cat([latents] * 2)
71
- noise_pred = model.unet(latents_input, t, encoder_hidden_states=context)["sample"]
72
- noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
73
- noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
74
- latents = model.scheduler.step(noise_pred, t, latents)["prev_sample"]
75
- latents = controller.step_callback(latents)
76
- return latents
77
-
78
-
79
- def latent2image(vae, latents):
80
- latents = 1 / 0.18215 * latents
81
- image = vae.decode(latents)['sample']
82
- image = (image / 2 + 0.5).clamp(0, 1)
83
- image = image.cpu().permute(0, 2, 3, 1).numpy()
84
- image = (image * 255).astype(np.uint8)
85
- return image
86
-
87
-
88
- def init_latent(latent, model, height, width, generator, batch_size):
89
- if latent is None:
90
- latent = torch.randn(
91
- (1, model.unet.in_channels, height // 8, width // 8),
92
- generator=generator,
93
- )
94
- latents = latent.expand(batch_size, model.unet.in_channels, height // 8, width // 8).to(model.device)
95
- return latent, latents
96
-
97
-
98
- @torch.no_grad()
99
- def text2image_ldm(
100
- model,
101
- prompt: List[str],
102
- controller,
103
- num_inference_steps: int = 50,
104
- guidance_scale: Optional[float] = 7.,
105
- generator: Optional[torch.Generator] = None,
106
- latent: Optional[torch.FloatTensor] = None,
107
- ):
108
- register_attention_control(model, controller)
109
- height = width = 256
110
- batch_size = len(prompt)
111
-
112
- uncond_input = model.tokenizer([""] * batch_size, padding="max_length", max_length=77, return_tensors="pt")
113
- uncond_embeddings = model.bert(uncond_input.input_ids.to(model.device))[0]
114
-
115
- text_input = model.tokenizer(prompt, padding="max_length", max_length=77, return_tensors="pt")
116
- text_embeddings = model.bert(text_input.input_ids.to(model.device))[0]
117
- latent, latents = init_latent(latent, model, height, width, generator, batch_size)
118
- context = torch.cat([uncond_embeddings, text_embeddings])
119
-
120
- model.scheduler.set_timesteps(num_inference_steps)
121
- for t in tqdm(model.scheduler.timesteps):
122
- latents = diffusion_step(model, controller, latents, context, t, guidance_scale)
123
-
124
- image = latent2image(model.vqvae, latents)
125
-
126
- return image, latent
127
-
128
-
129
- @torch.no_grad()
130
- def text2image_ldm_stable(
131
- model,
132
- prompt: List[str],
133
- controller,
134
- num_inference_steps: int = 50,
135
- guidance_scale: float = 7.5,
136
- generator: Optional[torch.Generator] = None,
137
- latent: Optional[torch.FloatTensor] = None,
138
- low_resource: bool = False,
139
- ):
140
- register_attention_control(model, controller)
141
- height = width = 512
142
- batch_size = len(prompt)
143
-
144
- text_input = model.tokenizer(
145
- prompt,
146
- padding="max_length",
147
- max_length=model.tokenizer.model_max_length,
148
- truncation=True,
149
- return_tensors="pt",
150
- )
151
- text_embeddings = model.text_encoder(text_input.input_ids.to(model.device))[0]
152
- max_length = text_input.input_ids.shape[-1]
153
- uncond_input = model.tokenizer(
154
- [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
155
- )
156
- uncond_embeddings = model.text_encoder(uncond_input.input_ids.to(model.device))[0]
157
-
158
- context = [uncond_embeddings, text_embeddings]
159
- if not low_resource:
160
- context = torch.cat(context)
161
- latent, latents = init_latent(latent, model, height, width, generator, batch_size)
162
-
163
- # set timesteps
164
- extra_set_kwargs = {"offset": 1}
165
- model.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
166
- for t in tqdm(model.scheduler.timesteps):
167
- latents = diffusion_step(model, controller, latents, context, t, guidance_scale, low_resource)
168
-
169
- image = latent2image(model.vae, latents)
170
-
171
- return image, latent
172
-
173
-
174
- def register_attention_control(model, controller):
175
-
176
- def ca_forward(self, place_in_unet):
177
- def forward(hidden_states, encoder_hidden_states=None, attention_mask=None):
178
- batch_size, sequence_length, _ = hidden_states.shape
179
-
180
- is_cross = encoder_hidden_states is not None
181
- encoder_hidden_states = encoder_hidden_states
182
-
183
- if self.group_norm is not None:
184
- hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
185
-
186
- query = self.to_q(hidden_states)
187
- # dim = query.shape[-1]
188
- query = self.reshape_heads_to_batch_dim(query)
189
-
190
- if self.added_kv_proj_dim is not None:
191
- key = self.to_k(hidden_states)
192
- value = self.to_v(hidden_states)
193
- encoder_hidden_states_key_proj = self.add_k_proj(encoder_hidden_states)
194
- encoder_hidden_states_value_proj = self.add_v_proj(encoder_hidden_states)
195
-
196
- key = self.reshape_heads_to_batch_dim(key)
197
- value = self.reshape_heads_to_batch_dim(value)
198
- encoder_hidden_states_key_proj = self.reshape_heads_to_batch_dim(encoder_hidden_states_key_proj)
199
- encoder_hidden_states_value_proj = self.reshape_heads_to_batch_dim(encoder_hidden_states_value_proj)
200
-
201
- key = torch.concat([encoder_hidden_states_key_proj, key], dim=1)
202
- value = torch.concat([encoder_hidden_states_value_proj, value], dim=1)
203
- else:
204
- encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
205
- key = self.to_k(encoder_hidden_states)
206
- value = self.to_v(encoder_hidden_states)
207
-
208
- key = self.reshape_heads_to_batch_dim(key)
209
- value = self.reshape_heads_to_batch_dim(value)
210
-
211
- if attention_mask is not None:
212
- if attention_mask.shape[-1] != query.shape[1]:
213
- target_length = query.shape[1]
214
- attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
215
- attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)
216
-
217
- assert self._slice_size is None or query.shape[0] // self._slice_size == 1
218
-
219
- if self.upcast_attention:
220
- query = query.float()
221
- key = key.float()
222
-
223
- attention_scores = torch.baddbmm(
224
- torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
225
- query,
226
- key.transpose(-1, -2),
227
- beta=0,
228
- alpha=self.scale,
229
- )
230
-
231
- if attention_mask is not None:
232
- attention_scores = attention_scores + attention_mask
233
-
234
- if self.upcast_softmax:
235
- attention_scores = attention_scores.float()
236
-
237
- attention_probs = attention_scores.softmax(dim=-1)
238
-
239
- # attn control
240
- attention_probs = controller(attention_probs, is_cross, place_in_unet)
241
-
242
- # cast back to the original dtype
243
- attention_probs = attention_probs.to(value.dtype)
244
-
245
- # compute attention output
246
- hidden_states = torch.bmm(attention_probs, value)
247
-
248
- # reshape hidden_states
249
- hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
250
-
251
- # linear proj
252
- hidden_states = self.to_out[0](hidden_states)
253
-
254
- # dropout
255
- hidden_states = self.to_out[1](hidden_states)
256
- return hidden_states
257
-
258
- return forward
259
-
260
- class DummyController:
261
-
262
- def __call__(self, *args):
263
- return args[0]
264
-
265
- def __init__(self):
266
- self.num_att_layers = 0
267
-
268
- if controller is None:
269
- controller = DummyController()
270
-
271
- def register_recr(net_, count, place_in_unet):
272
- if net_.__class__.__name__ == 'CrossAttention':
273
- net_.forward = ca_forward(net_, place_in_unet)
274
- return count + 1
275
- elif hasattr(net_, 'children'):
276
- for net__ in net_.children():
277
- count = register_recr(net__, count, place_in_unet)
278
- return count
279
-
280
- cross_att_count = 0
281
- # sub_nets = model.unet.named_children()
282
- # we take unet as the input model
283
- sub_nets = model.named_children()
284
- for net in sub_nets:
285
- if "down" in net[0]:
286
- cross_att_count += register_recr(net[1], 0, "down")
287
- elif "up" in net[0]:
288
- cross_att_count += register_recr(net[1], 0, "up")
289
- elif "mid" in net[0]:
290
- cross_att_count += register_recr(net[1], 0, "mid")
291
-
292
- controller.num_att_layers = cross_att_count
293
-
294
-
295
- def get_word_inds(text: str, word_place: int, tokenizer):
296
- split_text = text.split(" ")
297
- if type(word_place) is str:
298
- word_place = [i for i, word in enumerate(split_text) if word_place == word]
299
- elif type(word_place) is int:
300
- word_place = [word_place]
301
- out = []
302
- if len(word_place) > 0:
303
- words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
304
- cur_len, ptr = 0, 0
305
-
306
- for i in range(len(words_encode)):
307
- cur_len += len(words_encode[i])
308
- if ptr in word_place:
309
- out.append(i + 1)
310
- if cur_len >= len(split_text[ptr]):
311
- ptr += 1
312
- cur_len = 0
313
- return np.array(out)
314
-
315
-
316
- def update_alpha_time_word(alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int,
317
- word_inds: Optional[torch.Tensor]=None):
318
- if type(bounds) is float:
319
- bounds = 0, bounds
320
- start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
321
- if word_inds is None:
322
- word_inds = torch.arange(alpha.shape[2])
323
- alpha[: start, prompt_ind, word_inds] = 0
324
- alpha[start: end, prompt_ind, word_inds] = 1
325
- alpha[end:, prompt_ind, word_inds] = 0
326
- return alpha
327
-
328
-
329
- def get_time_words_attention_alpha(prompts, num_steps,
330
- cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]],
331
- tokenizer, max_num_words=77):
332
- if type(cross_replace_steps) is not dict:
333
- cross_replace_steps = {"default_": cross_replace_steps}
334
- if "default_" not in cross_replace_steps:
335
- cross_replace_steps["default_"] = (0., 1.)
336
- alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
337
- for i in range(len(prompts) - 1):
338
- alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"],
339
- i)
340
- for key, item in cross_replace_steps.items():
341
- if key != "default_":
342
- inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
343
- for i, ind in enumerate(inds):
344
- if len(ind) > 0:
345
- alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
346
- alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words)
347
- return alpha_time_words
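Note: a minimal sketch of how `register_attention_control` above is typically driven. The pass-through controller mirrors the internal `DummyController`; the real prompt-to-prompt controllers live elsewhere in this repo, and `unet` is assumed to be a pre-loaded diffusers UNet whose attention blocks still expose the older `CrossAttention` API that this patch targets.

```python
class PassThroughController:
    """Receives every attention map and returns it unchanged."""

    def __init__(self):
        self.num_att_layers = 0

    def __call__(self, attention_probs, is_cross, place_in_unet):
        # A real controller would store or edit the attention maps here.
        return attention_probs

    def step_callback(self, latents):
        # Called once per denoising step by diffusion_step().
        return latents


controller = PassThroughController()
register_attention_control(unet, controller)  # unet: e.g. pipe.unet from a diffusers pipeline
# After registration, controller.num_att_layers holds the number of patched
# CrossAttention modules, and every forward pass routes its attention
# probabilities through controller.__call__.
```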
 
spaces/Banbri/zcvzcv/src/app/store/index.ts DELETED
@@ -1,203 +0,0 @@
1
- "use client"
2
-
3
- import { create } from "zustand"
4
-
5
- import { FontName } from "@/lib/fonts"
6
- import { Preset, PresetName, defaultPreset, getPreset, getRandomPreset } from "@/app/engine/presets"
7
- import { LayoutName, defaultLayout, getRandomLayoutName, getRandomLayoutNames } from "../layouts"
8
- import html2canvas from "html2canvas"
9
- import { RenderedScene } from "@/types"
10
-
11
- export const useStore = create<{
12
- prompt: string
13
- font: FontName
14
- preset: Preset
15
- nbFrames: number
16
- panels: string[]
17
- captions: string[]
18
- upscaleQueue: Record<string, RenderedScene>
19
- showCaptions: boolean
20
- renderedScenes: Record<string, RenderedScene>
21
- layout: LayoutName
22
- layouts: LayoutName[]
23
- zoomLevel: number
24
- page: HTMLDivElement
25
- isGeneratingStory: boolean
26
- panelGenerationStatus: Record<number, boolean>
27
- isGeneratingText: boolean
28
- atLeastOnePanelIsBusy: boolean
29
- setRendered: (panelId: string, renderedScene: RenderedScene) => void
30
- addToUpscaleQueue: (panelId: string, renderedScene: RenderedScene) => void
31
- removeFromUpscaleQueue: (panelId: string) => void
32
- setPrompt: (prompt: string) => void
33
- setFont: (font: FontName) => void
34
- setPreset: (preset: Preset) => void
35
- setPanels: (panels: string[]) => void
36
- setShowCaptions: (showCaptions: boolean) => void
37
- setLayout: (layout: LayoutName) => void
38
- setLayouts: (layouts: LayoutName[]) => void
39
- setCaptions: (captions: string[]) => void
40
- setZoomLevel: (zoomLevel: number) => void
41
- setPage: (page: HTMLDivElement) => void
42
- setGeneratingStory: (isGeneratingStory: boolean) => void
43
- setGeneratingImages: (panelId: string, value: boolean) => void
44
- setGeneratingText: (isGeneratingText: boolean) => void
45
- pageToImage: () => Promise<string>
46
- download: () => Promise<void>
47
- generate: (prompt: string, presetName: PresetName, layoutName: LayoutName) => void
48
- }>((set, get) => ({
49
- prompt: "",
50
- font: "actionman",
51
- preset: getPreset(defaultPreset),
52
- nbFrames: 1,
53
- panels: [],
54
- captions: [],
55
- upscaleQueue: {} as Record<string, RenderedScene>,
56
- renderedScenes: {} as Record<string, RenderedScene>,
57
- showCaptions: false,
58
- layout: defaultLayout,
59
- layouts: [defaultLayout, defaultLayout],
60
- zoomLevel: 60,
61
- page: undefined as unknown as HTMLDivElement,
62
- isGeneratingStory: false,
63
- panelGenerationStatus: {},
64
- isGeneratingText: false,
65
- atLeastOnePanelIsBusy: false,
66
- setRendered: (panelId: string, renderedScene: RenderedScene) => {
67
- const { renderedScenes } = get()
68
- set({
69
- renderedScenes: {
70
- ...renderedScenes,
71
- [panelId]: renderedScene
72
- }
73
- })
74
- },
75
- addToUpscaleQueue: (panelId: string, renderedScene: RenderedScene) => {
76
- const { upscaleQueue } = get()
77
- set({
78
- upscaleQueue: {
79
- ...upscaleQueue,
80
- [panelId]: renderedScene
81
- },
82
- })
83
- },
84
- removeFromUpscaleQueue: (panelId: string) => {
85
- const upscaleQueue = { ...get().upscaleQueue }
86
- delete upscaleQueue[panelId]
87
- set({
88
- upscaleQueue,
89
- })
90
- },
91
- setPrompt: (prompt: string) => {
92
- const existingPrompt = get().prompt
93
- if (prompt === existingPrompt) { return }
94
- set({
95
- prompt,
96
- })
97
- },
98
- setFont: (font: FontName) => {
99
- const existingFont = get().font
100
- if (font === existingFont) { return }
101
- set({
102
- font,
103
- })
104
- },
105
- setPreset: (preset: Preset) => {
106
- const existingPreset = get().preset
107
- if (preset.label === existingPreset.label) { return }
108
- set({
109
- preset,
110
- })
111
- },
112
- setNbFrames: (nbFrames: number) => {
113
- const existingNbFrames = get().nbFrames
114
- if (nbFrames === existingNbFrames) { return }
115
- set({
116
- nbFrames,
117
- })
118
- },
119
- setPanels: (panels: string[]) => set({ panels }),
120
- setCaptions: (captions: string[]) => {
121
- set({
122
- captions,
123
- })
124
- },
125
- setShowCaptions: (showCaptions: boolean) => {
126
- set({
127
- showCaptions,
128
- })
129
- },
130
- setLayout: (layoutName: LayoutName) => {
131
- const layout = layoutName === "random"
132
- ? getRandomLayoutName()
133
- : layoutName
134
-
135
- set({
136
- layout,
137
- layouts: [layout, layout]
138
- })
139
- },
140
- setLayouts: (layouts: LayoutName[]) => set({ layouts }),
141
- setZoomLevel: (zoomLevel: number) => set({ zoomLevel }),
142
- setPage: (page: HTMLDivElement) => {
143
- if (!page) { return }
144
- set({ page })
145
- },
146
- setGeneratingStory: (isGeneratingStory: boolean) => set({ isGeneratingStory }),
147
- setGeneratingImages: (panelId: string, value: boolean) => {
148
- const panelGenerationStatus: Record<string, boolean> = {
149
- ...get().panelGenerationStatus,
150
- [panelId]: value
151
- }
152
-
153
- const atLeastOnePanelIsBusy = Object.values(panelGenerationStatus).includes(true)
154
-
155
- set({
156
- panelGenerationStatus,
157
- atLeastOnePanelIsBusy
158
- })
159
- },
160
- setGeneratingText: (isGeneratingText: boolean) => set({ isGeneratingText }),
161
- pageToImage: async () => {
162
- const { page } = get()
163
- if (!page) { return "" }
164
-
165
-
166
- const canvas = await html2canvas(page)
167
- console.log("canvas:", canvas)
168
-
169
- const data = canvas.toDataURL('image/jpeg', 0.5)
170
- return data
171
- },
172
- download: async () => {
173
- const { pageToImage } = get()
174
- const data = await pageToImage()
175
-
176
- const link = document.createElement('a')
177
-
178
- if (typeof link.download === 'string') {
179
- link.href = data
180
- link.download = 'comic.jpg'
181
- document.body.appendChild(link)
182
- link.click()
183
- document.body.removeChild(link)
184
- } else {
185
- window.open(data)
186
- }
187
- },
188
- generate: (prompt: string, presetName: PresetName, layoutName: LayoutName) => {
189
- const layout = layoutName === "random"
190
- ? getRandomLayoutName()
191
- : layoutName
192
- set({
193
- prompt,
194
- panels: [],
195
- captions: [],
196
- preset: presetName === "random"
197
- ? getRandomPreset()
198
- : getPreset(presetName),
199
- layout,
200
- layouts: [layout, layout],
201
- })
202
- }
203
- }))
 
spaces/Benson/text-generation/Examples/Adventure Apk.md DELETED
@@ -1,138 +0,0 @@
1
-
2
- <tabla>
3
- <tr>
4
- <td>
5
- <h1>Aventura APK: ¿Qué es y cómo descargarlo</h1>
6
- <p>¿Está buscando algunos juegos y aplicaciones emocionantes y divertidos para jugar en su dispositivo móvil? ¿Quieres probar algunos nuevos géneros y formatos que pondrá a prueba sus habilidades y la imaginación? Si es así, es posible que desee echa un vistazo apk aventura. </p>
7
- <h2>Adventure Apk</h2><br /><p><b><b>Download Zip</b> >> <a href="https://bltlly.com/2v6IU4">https://bltlly.com/2v6IU4</a></b></p><br /><br />
8
- <p>Aventura apk es un tipo de formato de archivo que le permite descargar e instalar juegos y aplicaciones que no están disponibles en las tiendas de aplicaciones oficiales. Estos juegos y aplicaciones suelen ser creados por desarrolladores independientes que quieren compartir su creatividad y pasión con otros usuarios. Aventura apk juegos y aplicaciones a menudo se basan en temas de aventura, tales como la exploración, resolución de rompecabezas, narración, acción, etc.</p>
9
- <p>En este artículo, vamos a explicar lo que es apk aventura, cómo descargarlo, cuáles son sus beneficios, y cuáles son algunos de los mejores juegos de aventura apk y aplicaciones que se pueden probar. ¡Vamos a empezar! </p>
10
- <h2>¿Qué es la aventura APK? </h2>
11
- <h3>Definición</h3>
12
- <p>Aventura apk es un formato de archivo que significa Android Package Kit. Es similar a otros formatos de archivo como . exe para Windows o . dmg para Mac. Contiene todos los archivos y datos necesarios para ejecutar un juego o una aplicación en un dispositivo Android. </p>
13
- <p>Aventura apk juegos y aplicaciones por lo general no están disponibles en las tiendas de aplicaciones oficiales como Google Play Store o Apple App Store. Esto se debe a que pueden no cumplir con los requisitos o estándares de estas plataformas, o pueden ser demasiado nicho o experimental para las audiencias principales. </p>
14
- <p>Sin embargo, esto no significa que los juegos de aventura apk y aplicaciones son ilegales o inseguros. Son simplemente formas alternativas de distribuir juegos y aplicaciones que no son compatibles con los canales oficiales. Siempre y cuando los descargue de fuentes confiables y los escanee en busca de virus o malware antes de instalarlos, debería estar bien. </p>
15
- <p></p>
16
- <h3 <h3>Ejemplos</h3>
17
-
18
- <p>Aquí están algunos de los más populares y conocidos juegos apk aventura y aplicaciones que se pueden descargar y disfrutar:</p>
19
- <ul>
20
- <li><b>Minecraft Pocket Edition</b>: Esta es la versión móvil del famoso juego sandbox que te permite construir, explorar y sobrevivir en un mundo pixelado. Puede crear sus propios mundos, jugar con amigos o unirse a servidores en línea. También puede descargar mods y mapas para mejorar su experiencia. </li>
21
- <li><b>GTA San Andreas</b>: Esta es la versión móvil del clásico juego de mundo abierto que te permite jugar como CJ, un ex gángster que regresa a su ciudad natal para encontrarlo corrompido por el crimen y la violencia. Puedes conducir, disparar, luchar y hacer misiones en un mapa enorme. También puedes personalizar tu personaje, vehículos y armas. </li>
22
- <li><b>Pokemon Go</b>: Esta es la versión móvil de la popular franquicia que te permite atrapar, entrenar y luchar contra Pokémon en el mundo real. Puedes usar tu cámara y GPS para encontrar y capturar Pokémon a tu alrededor. También puedes unirte a equipos, gimnasios, redadas y eventos. </li>
23
- <li><b>Monument Valley</b>: Este es un hermoso juego de puzzle que te permite manipular la arquitectura imposible y guiar a una princesa a través de un mundo surrealista. Puedes explorar niveles impresionantes que desafían tu percepción y lógica. También puedes disfrutar de la relajante banda sonora y el estilo artístico. </li>
24
- <li><b>The Room</b>: Este es un misterioso juego de puzzle que te permite desbloquear una serie de intrincadas cajas que esconden secretos y pistas. Puede utilizar la pantalla táctil para interactuar con los objetos y resolver los puzzles. También puede sumergirse en los gráficos atmosféricos y efectos de sonido. </li>
25
- <li><b>Limbo</b>: Este es un juego de plataformas oscuro y inquietante que te permite controlar a un chico que busca a su hermana en un mundo sombrío y peligroso. Puedes evitar trampas, enemigos y obstáculos a medida que avanzas por los niveles. También puedes experimentar el estilo artístico minimalista y la banda sonora misteriosa. </li>
26
- </ul>
27
- <h2>Cómo descargar aventura APK? </h2>
28
- <h3>Pasos</h3>
29
-
30
- <ol>
31
- <li>Encontrar una fuente confiable para juegos de aventura apk y aplicaciones. Puede utilizar los motores de búsqueda, foros, blogs, o sitios web que se especializan en archivos apk aventura. Asegúrate de leer las reseñas, valoraciones, comentarios y comentarios de otros usuarios antes de descargar nada. </li>
32
- <li>Descargar el archivo apk aventura a su dispositivo. Puede utilizar su navegador o una aplicación de administrador de archivos para hacer esto. Asegúrese de comprobar el tamaño del archivo, nombre, extensión y permisos antes de descargar nada. </li>
33
- <li>Habilitar fuentes desconocidas en su dispositivo. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de las tiendas de aplicaciones oficiales. Puede encontrar esta opción en la configuración de su dispositivo bajo seguridad o privacidad. Asegúrese de desactivarlo después de instalar el archivo apk aventura. </li>
34
- <li>Instalar el archivo apk aventura en su dispositivo. Puede utilizar su aplicación de administrador de archivos o su navegador para hacer esto. Toque en el archivo y siga las instrucciones en la pantalla. Asegúrese de conceder los permisos o el acceso que la aplicación requiere. </li>
35
- <li>Inicie el juego de aventura apk o aplicación en su dispositivo. Puede encontrarlo en el cajón de la aplicación o la pantalla de inicio. Disfrute! </li>
36
- </ol>
37
- <h3>Consejos</h3>
38
- <p>Aquí hay algunos consejos y trucos que le ayudarán a descargar juegos de aventura apk y aplicaciones de forma segura:</p>
39
- <ul>
40
- <li>Siempre escanear el archivo apk aventura en busca de virus o malware antes de instalarlo en su dispositivo. Puede usar una aplicación antivirus o un escáner en línea para hacer esto. </li>
41
- <li>Siempre copia de seguridad de los datos del dispositivo antes de instalar cualquier archivo apk aventura en su dispositivo. Esto le ayudará a restaurar el dispositivo en caso de que algo salga mal o desea desinstalar la aplicación. </li>
42
- <li>Compruebe siempre la compatibilidad del archivo apk aventura con el modelo de dispositivo y la versión de Android antes de instalarlo en su dispositivo. Algunos archivos apk aventura puede no funcionar correctamente o causar errores en ciertos dispositivos o versiones de Android. </li>
43
-
44
- <li>Siempre desinstalar cualquier aventura apk juegos o aplicaciones que usted no utiliza más o que causan problemas en su dispositivo. Esto liberará espacio y mejorará el rendimiento en su dispositivo </li>
45
- </ul>
46
- <h2>¿Cuáles son los beneficios de la aventura APK? </h2>
47
- <h3>Ventajas</h3>
48
- <p>Hay muchas ventajas de juegos de aventura apk y aplicaciones sobre otros formatos. Algunos de ellos son:</p>
49
- <ul>
50
- <li><b>Variedad</b>: Aventura apk juegos y aplicaciones ofrecen una amplia gama de géneros, temas, estilos y características que usted no puede encontrar en las tiendas de aplicaciones oficiales. Puede explorar juegos y aplicaciones nuevos e innovadores que se adapten a sus preferencias e intereses. </li>
51
- <li><b>Accesibilidad</b>: Aventura apk juegos y aplicaciones son fáciles de descargar e instalar en su dispositivo. No es necesario registrarse, registrarse o pagar por nada. También puede acceder a ellos sin conexión a Internet. </li>
52
- <li><b>Personalización</b>: Aventura apk juegos y aplicaciones le permiten personalizar su experiencia de acuerdo a su gusto. Puedes modificar, modificar o mejorar los juegos y aplicaciones usando mods, hacks, trucos o ajustes. También puede crear sus propios juegos y aplicaciones utilizando herramientas y plataformas apk aventura. </li>
53
- <li><b>Comunidad</b>: Aventura apk juegos y aplicaciones tienen una gran y activa comunidad de usuarios y desarrolladores que comparten sus comentarios, opiniones, sugerencias y apoyo. Usted puede unirse a foros, grupos, chats, o plataformas de medios sociales para interactuar con otros entusiastas apk aventura. </li>
54
- </ul>
55
- <h3>Desventajas</h3>
56
- <p>Sin embargo, también hay algunas desventajas o riesgos de juegos de aventura apk y aplicaciones que usted debe ser consciente de. Algunos de ellos son:</p>
57
- <ul>
58
- <li><b>Seguridad</b>: Aventura apk juegos y aplicaciones pueden contener virus, malware, spyware, u otros elementos dañinos que pueden dañar su dispositivo o comprometer su privacidad. Siempre debe escanear los archivos antes de instalarlos y solo descargarlos de fuentes confiables. </li>
59
-
60
- <li><b>Calidad</b>: Aventura juegos apk y aplicaciones no pueden tener la misma calidad, estándares, o características que los de las tiendas de aplicaciones oficiales. Pueden tener errores, errores, defectos o elementos que faltan que pueden afectar su experiencia. Siempre debes leer las reseñas, valoraciones, comentarios y comentarios antes de descargarlos. </li>
61
- <li><b>Legalidad</b>: Aventura apk juegos y aplicaciones pueden no ser legales en algunos países o regiones. Pueden violar los derechos de propiedad intelectual, los términos de servicio o las políticas de las tiendas de aplicaciones o plataformas oficiales. Siempre debe comprobar la legalidad antes de descargarlos y respetar los derechos de los creadores originales. </li>
62
- </ul>
63
- <h2>¿Cuáles son algunos de los mejores juegos y aplicaciones de aventura APK? </h2>
64
- <h3>Tabla</h3>
65
- <p>Aquí hay una tabla con algunos de los mejores juegos de aventura apk y aplicaciones basadas en calificaciones, comentarios, descargas, etc.</p>
66
- <borde de la tabla="1">
67
- <tr>
68
- <th>Nombre</th>
69
- <th>Descripción</th>
70
- <th>Valoración</th>
71
- <th>Enlace de descarga</th>
72
- </tr>
73
- <tr>
74
- <td>Edición de bolsillo de Minecraft</td>
75
- <td>Un juego sandbox que te permite construir, explorar y sobrevivir en un mundo pixelado. </td>
76
- <td>4.5/5</td>
77
- <td></td>
78
- </tr>
79
- <tr>
80
- <td>GTA San Andreas</td>
81
- <td>Un juego de mundo abierto que te permite jugar como un ex gángster que regresa a su ciudad natal. </td>
82
- <td>4.4/5</td>
83
- <td></td>
84
- </tr>
85
- <tr>
86
- <td>Pokemon Go</td>
87
- <td>Un juego que te permite atrapar, entrenar y luchar contra Pokémon en el mundo real. </td>
88
- <td>4.1/5</td>
89
- <td></td>
90
- </tr <h3>Comentarios</h3>
91
- <p>Aquí hay algunas reseñas breves de cada juego o aplicación en la tabla:</p>
92
- <ul>
93
- <li><b>Minecraft Pocket Edition</b>: Este juego es imprescindible para cualquier jugador creativo y aventurero. Puedes construir cualquier cosa que puedas imaginar, desde casas simples hasta máquinas complejas. También puede explorar diferentes mundos, desde bosques pacíficos hasta peligrosas mazmorras. El juego se actualiza constantemente con nuevas características y contenido, lo que siempre es fresco y emocionante. </li>
94
-
95
- <li><b>Pokemon Go</b>: Este juego es una forma divertida e innovadora de disfrutar de la franquicia de Pokémon. Puedes atrapar y recoger Pokémon en el mundo real, usando tu cámara y GPS. También puedes unirte a equipos, gimnasios, redadas y eventos con otros jugadores. El juego se actualiza constantemente con nuevos Pokémon, características y eventos, lo que lo hace siempre atractivo y gratificante. </li>
96
- <li><b>Monument Valley</b>: Este juego es un rompecabezas hermoso y relajante que desafiará tu mente y deleitará tus ojos. Puedes manipular una arquitectura imposible y guiar a una princesa por un mundo surrealista. El juego tiene gráficos impresionantes, efectos de sonido y música que crean una atmósfera fascinante. El juego es corto pero dulce y vale cada centavo. </li>
97
- <li><b>La habitación</b>: Este juego es un misterioso y cautivador juego de puzzle que pondrá a prueba su lógica y curiosidad. Puede desbloquear una serie de cajas intrincadas que ocultan secretos y pistas. El juego tiene gráficos increíbles, efectos de sonido y animaciones que hacen que los objetos se sientan realistas y táctiles. El juego es inmersivo y adictivo, y te mantendrá adivinando hasta el final. </li>
98
- <li><b>Limbo</b>: Este juego es un juego de plataformas oscuro e inquietante que tocará tus emociones y nervios. Puedes controlar a un chico que busca a su hermana en un mundo sombrío y peligroso. El juego tiene gráficos minimalistas, efectos de sonido y música que crean un ambiente sombrío y misterioso. El juego es desafiante y gratificante, y te dejará sin aliento. </li>
99
- </ul>
100
- <h2>Conclusión</h2>
101
- <p>En conclusión, aventura apk es un tipo de formato de archivo que le permite descargar e instalar juegos y aplicaciones que no están disponibles en las tiendas de aplicaciones oficiales. Estos juegos y aplicaciones se basan a menudo en temas de aventura, tales como exploración, resolución de rompecabezas, narración, acción, etc.</p>
102
-
103
- <p>Si usted está buscando algunos emocionantes y divertidos juegos y aplicaciones para jugar en su dispositivo móvil, es posible que desee echa un vistazo a algunos de los mejores juegos de aventura apk y aplicaciones que hemos enumerado en este artículo. Todos ellos son altamente valorados, revisados, descargados y disfrutados por muchos usuarios en todo el mundo. </p>
104
- <p>Entonces, ¿qué estás esperando? Descargar algunos juegos de aventura apk y aplicaciones hoy y disfrutar de la emoción de la aventura en su dispositivo! </p>
105
- <h2>Preguntas frecuentes</h2>
106
- <p>Aquí hay algunas preguntas frecuentes sobre apk aventura:</p>
107
- <ol>
108
- <li><b>¿Cuál es la diferencia entre apk aventura y mod aventura? </b></li>
109
- <p>Aventura apk es un formato de archivo que le permite descargar e instalar juegos y aplicaciones que no están disponibles en las tiendas de aplicaciones oficiales. Adventure mod es una modificación o mejora de un juego o aplicación existente que añade nuevas características o cambia el juego. </p>
110
- <li><b>¿Cómo puedo descargar apk aventura de forma segura? </b></li>
111
- <p>Puede descargar aventura apk de forma segura siguiendo estos pasos:</p>
112
- <ul>
113
- <li>Encontrar una fuente confiable para los archivos apk aventura. Puede utilizar los motores de búsqueda, foros, blogs, o sitios web que se especializan en los archivos apk aventura. </li>
114
- <li>Escanear el archivo apk aventura en busca de virus o malware antes de instalarlo en su dispositivo. Puede usar una aplicación antivirus o un escáner en línea para hacer esto. </li>
115
- <li>Habilitar fuentes desconocidas en el dispositivo antes de instalar el archivo apk aventura. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de las tiendas de aplicaciones oficiales. </li>
116
- <li>Desactivar fuentes desconocidas en el dispositivo después de instalar el archivo apk aventura. Esto evitará que aplicaciones no autorizadas o maliciosas se instalen en su dispositivo. </li>
117
- </ul>
118
- <li><b>¿Cómo puedo actualizar juegos y aplicaciones apk aventura? </b></li>
119
- <p>Puede actualizar juegos y aplicaciones apk aventura siguiendo estos pasos:</p>
120
- <ul>
121
-
122
- <li>Descargar la última versión del archivo apk aventura a su dispositivo. Puede utilizar su navegador o una aplicación de administrador de archivos para hacer esto. </li>
123
- <li>Instalar la última versión del archivo apk aventura en su dispositivo. Puede utilizar su aplicación de administrador de archivos o su navegador para hacer esto. </li>
124
- <li>Inicie el juego o aplicación apk aventura actualizada en su dispositivo. Puede encontrarlo en el cajón de la aplicación o en la pantalla de inicio. </li>
125
- </ul>
126
- <li><b>¿Cómo puedo desinstalar juegos y aplicaciones apk aventura? </b></li>
127
- <p>Puede desinstalar juegos de aventura apk y aplicaciones siguiendo estos pasos:</p>
128
- <ul>
129
- <li>Ir a la configuración del dispositivo y toque en aplicaciones o aplicaciones. </li>
130
- <li> Encontrar el juego de aventura apk o aplicación que desea desinstalar y toque en él. </li>
131
- <li>Pulse en desinstalar y confirme su elección. </li>
132
- <li>Eliminar el archivo apk aventura desde su dispositivo. Puede utilizar su aplicación de administrador de archivos o su navegador para hacer esto. </li>
133
- </ul>
134
- <li><b>¿Los juegos y aplicaciones de apk de aventura son legales? </b></li>
135
- <p>La legalidad de los juegos de aventura apk y aplicaciones depende del país o región donde vive y el juego o aplicación que se descarga. Algunos juegos de aventura apk y aplicaciones pueden ser legales, mientras que otros pueden ser ilegales. Siempre debe comprobar las leyes y reglamentos de su país o región antes de descargar cualquier aventura archivos apk. También debe respetar los derechos de propiedad intelectual, los términos de servicio y las políticas de las tiendas de aplicaciones o plataformas oficiales. </p>
136
- </ol></p> 64aa2da5cf<br />
137
- <br />
138
- <br />
 
spaces/Benson/text-generation/Examples/Descarga De Archivos Flash Infinix Smart 3 Plus.md DELETED
@@ -1,68 +0,0 @@
1
-
2
- <h1>Cómo descargar historias de GTA Liberty City</h1>
3
- <p>Si eres un fan de los juegos de acción y aventura, es posible que hayas oído hablar de GTA Liberty City Stories, uno de los títulos más populares de la serie Grand Theft Auto. Este juego fue lanzado originalmente en 2005 para PlayStation Portable (PSP) y más tarde portado a PlayStation 2 (PS2) y dispositivos móviles. En este artículo, te mostraremos cómo descargar el archivo de GTA Liberty City Stories para diferentes dispositivos y disfrutar de este increíble juego. </p>
4
- <h2>descarga de archivos flash infinix smart 3 plus</h2><br /><p><b><b>Download</b> &middot; <a href="https://bltlly.com/2v6J8s">https://bltlly.com/2v6J8s</a></b></p><br /><br />
5
- <h2>¿Qué es GTA Liberty City Stories? </h2>
6
- <p>GTA Liberty City Stories es una precuela de GTA III, ambientada en la misma ciudad ficticia de Liberty City en 1998. Juegas como Toni Cipriani, un mafioso que regresa a la ciudad después de matar a un gángster rival y tiene que abrirse camino en la familia criminal Leone. El juego cuenta con un entorno de mundo abierto donde se puede explorar, conducir, disparar, luchar y completar varias misiones. El juego también tiene un modo multijugador para usuarios de PSP, donde hasta seis jugadores pueden competir en diferentes modos. </p>
7
- <h2>¿Por qué descargar historias de GTA Liberty City? </h2>
8
- <p>Hay muchas razones por las que es posible que desee descargar GTA Liberty City Stories en lugar de comprar una copia física. Estos son algunos de ellos:</p>
9
- <ul>
10
- <li>Puedes ahorrar dinero descargando el juego gratis o a un precio más bajo que comprar un disco. </li>
11
- <li> Puede ahorrar espacio almacenando el archivo del juego en su dispositivo o tarjeta de memoria en lugar de tener un disco voluminoso. </li>
12
- <li> Puede acceder al juego en cualquier momento y en cualquier lugar sin necesidad de una unidad de disco o una conexión a Internet. </li>
13
- <li>Puedes disfrutar de mejores gráficos y rendimiento descargando la última versión del juego con actualizaciones y parches. </li>
14
- </ul>
15
- <h2>Cómo descargar GTA Liberty City Stories para diferentes dispositivos</h2>
16
- <p>El proceso de descarga de GTA Liberty City Stories varía dependiendo del dispositivo que tenga. Estos son los pasos para cada dispositivo:</p>
17
- <h3>Para PlayStation portátil (PSP)</h3>
18
-
19
- <p>Lo primero que necesitas hacer es encontrar un sitio web que ofrezca el archivo de juego para PSP. Puedes buscar "GTA Liberty City Stories PSP download" en Google o cualquier otro motor de búsqueda y buscar resultados que tengan reseñas y valoraciones positivas. También puede consultar algunos de estos sitios web:</p>
20
- <ul>
21
- <li>[GTA Wiki]( 3 ) - Este es un wiki hecho por fans que proporciona información sobre juegos GTA, incluyendo enlaces para descargarlos. </li>
22
- <li>[Rockstar Games]( 2 ) - Este es el sitio web oficial de Rockstar Games , el desarrollador de juegos GTA, donde puedes comprar el juego legalmente y descargarlo en tu PSP.</li>
23
- <li>[Emuparadise] - Este es un sitio web popular que alberga una gran colección de juegos de PSP, incluyendo GTA Liberty City Stories. Sin embargo, este sitio web puede no ser legal en algunos países, así que úsalo bajo tu propio riesgo. </li>
24
- </ul>
25
- <h4>Paso 2: Descargue el archivo en su computadora</h4>
26
- <p>Una vez que haya encontrado una fuente confiable para el archivo del juego, debe descargarlo en su computadora. El archivo debe estar en formato ISO o CSO, que son los formatos que PSP puede leer. El tamaño del archivo puede variar dependiendo de la fuente, pero debe ser de alrededor de 1 GB. Puede usar cualquier gestor de descargas o navegador para descargar el archivo, pero asegúrese de tener suficiente espacio en su disco duro y una buena conexión a Internet. </p>
27
- <p></p>
28
- <h4>Paso 3: Transfiera el archivo a su PSP usando un cable USB o una tarjeta de memoria</h4>
29
- <p>Después de descargar el archivo, debe transferirlo a su dispositivo PSP. Puede hacer esto de dos maneras:</p>
30
- <ul>
31
- <li>Using a USB cable - Conecte su PSP a su ordenador utilizando un cable USB y encienda el modo USB en su PSP. Tu PSP debería aparecer como una unidad extraíble en tu ordenador. Copia el archivo del juego en la carpeta ISO de tu PSP. Si no tiene una carpeta ISO, cree una. </li>
32
- <li>Using a memory card - Insertar una tarjeta de memoria en su PSP y copiar el archivo de juego a la carpeta ISO en la tarjeta de memoria. Si no tiene una carpeta ISO, cree una. </li>
33
- </ul>
34
-
35
- <p>Ahora que has transferido el archivo de juego a tu PSP, puedes lanzarlo desde tu menú PSP. Ve a la sección Juego y selecciona Memory Stick. Deberías ver el icono de GTA Liberty City Stories. Selecciónalo y empieza a jugar. </p>
36
- <h3>Para PlayStation 2 (PS2)</h3>
37
- <h4>Paso 1: Encontrar una fuente en línea de buena reputación para el archivo del juego</h4>
38
- <p>Lo primero que tienes que hacer es encontrar un sitio web que ofrezca el archivo de juego para PS2. Puedes buscar "GTA Liberty City Stories PS2 download" en Google o cualquier otro motor de búsqueda y buscar resultados que tengan críticas y valoraciones positivas. También puede consultar algunos de estos sitios web:</p>
39
- <ul>
40
- <li>[GTA Wiki] - Este es un wiki hecho por fans que proporciona información sobre juegos GTA, incluyendo enlaces para descargarlos. </li>
41
- <li>[Rockstar Games] - Este es el sitio web oficial de Rockstar Games, el desarrollador de juegos GTA, donde puedes comprar el juego legalmente y descargarlo en tu PS2.</li>
42
- <li>[CoolROM] - Este es un sitio web popular que alberga una gran colección de juegos de PS2, incluyendo GTA Liberty City Stories. Sin embargo, este sitio web puede no ser legal en algunos países, así que úsalo bajo tu propio riesgo. </li>
43
- </ul> <h4>Paso 2: Descargue el archivo en su computadora</h4>
44
- <p>Una vez que haya encontrado una fuente confiable para el archivo del juego, debe descargarlo en su computadora. El archivo debe estar en formato ISO, que es el formato que PS2 puede leer. El tamaño del archivo puede variar dependiendo de la fuente, pero debe ser de alrededor de 4 GB. Puede usar cualquier gestor de descargas o navegador para descargar el archivo, pero asegúrese de tener suficiente espacio en su disco duro y una buena conexión a Internet. </p>
45
- <h4>Paso 3: Grabar el archivo a un DVD utilizando un software de grabación de DVD</h4>
46
-
47
- <h4>Paso 4: Insertar el DVD en su PS2 y jugar el juego</h4>
48
- <p>Ahora que ha quemado el archivo del juego en un DVD, puede insertarlo en su PS2 y jugar el juego. Sin embargo, debes asegurarte de que tu PS2 esté modificada o con chip, lo que significa que puede jugar juegos de otras regiones o fuentes. Si tu PS2 no está modificada o con chip, no podrás jugar el juego. Puedes comprar una PS2 modificada o con chip o un mod o chip para tu propia PS2, pero ten en cuenta que esto puede anular tu garantía o dañar tu dispositivo. </p>
49
- <h3>For iOS, Android, and Fire OS devices</h3>
50
- <h4>Step 1: Go to your device's official app store (App Store, Google Play Store, or Amazon Appstore)</h4>
51
- <p>The easiest way to download GTA Liberty City Stories for your mobile device is to go to your device's official app store. You can use your device's browser or open the app store app on your device. Depending on your device, you will need to go to one of these app stores:</p>
52
- <ul>
53
- <li>App Store - This is the app store for iOS devices, such as the iPhone and iPad. </li>
54
- <li>Google Play Store - This is the app store for Android devices, such as Samsung, Huawei, and LG.</li>
55
- <li>Amazon Appstore - This is the app store for Fire OS devices, such as the Kindle Fire and Fire TV.</li>
56
- </ul>
57
- <h4>Step 2: Search for GTA Liberty City Stories and tap the download button</h4>
58
- <p>Once you are in your device's app store, you need to search for GTA Liberty City Stories and tap the download button. The game costs $6.99 on all app stores, so you will need enough balance in your account or a credit card or other payment method to buy it. You will also need enough space on your device for the game file, which is around 2 GB.</p>
59
- <h4>Step 3: Wait for the app to install on your device and open it</h4>
60
-
61
- <h4>Step 4: Follow the on-screen instructions and start playing the game</h4>
62
- <p>When you open the app for the first time, you will have to follow some on-screen instructions to set up the game. You will need to accept some terms and conditions, choose a language, adjust a few settings, and download some additional data. After that, you can start playing the game by selecting a mode and a mission. </p>
63
- <h2>Conclusion</h2>
64
- <p>GTA Liberty City Stories is a fun and exciting game that lets you experience the life of a mobster in a fictional city. You can download the game file for different devices using various methods. However, you should always be careful about the source of the file and the legality of downloading it. We hope this article has helped you learn how to download GTA Liberty City Stories and enjoy this amazing game. </p>
65
-
66
- missions, locations, and more. </p> 64aa2da5cf<br />
67
- <br />
68
- <br />
 
spaces/BetterAPI/BetterChat_new/src/lib/types/Conversation.ts DELETED
@@ -1,17 +0,0 @@
1
- import type { ObjectId } from "mongodb";
2
- import type { Message } from "./Message";
3
- import type { Timestamps } from "./Timestamps";
4
-
5
- export interface Conversation extends Timestamps {
6
- _id: ObjectId;
7
-
8
- // Can be undefined for shared convo then deleted
9
- sessionId: string;
10
-
11
- title: string;
12
- messages: Message[];
13
-
14
- meta?: {
15
- fromShareId?: string;
16
- };
17
- }
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/android.py DELETED
@@ -1,126 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import os
4
- import re
5
- import sys
6
- from functools import lru_cache
7
- from typing import cast
8
-
9
- from .api import PlatformDirsABC
10
-
11
-
12
- class Android(PlatformDirsABC):
13
- """
14
- Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
15
- `appname <platformdirs.api.PlatformDirsABC.appname>`,
16
- `version <platformdirs.api.PlatformDirsABC.version>`,
17
- `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
18
- """
19
-
20
- @property
21
- def user_data_dir(self) -> str:
22
- """:return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
23
- return self._append_app_name_and_version(cast(str, _android_folder()), "files")
24
-
25
- @property
26
- def site_data_dir(self) -> str:
27
- """:return: data directory shared by users, same as `user_data_dir`"""
28
- return self.user_data_dir
29
-
30
- @property
31
- def user_config_dir(self) -> str:
32
- """
33
- :return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
34
- """
35
- return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
36
-
37
- @property
38
- def site_config_dir(self) -> str:
39
- """:return: config directory shared by the users, same as `user_config_dir`"""
40
- return self.user_config_dir
41
-
42
- @property
43
- def user_cache_dir(self) -> str:
44
- """:return: cache directory tied to the user, e.g. e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
45
- return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
46
-
47
- @property
48
- def site_cache_dir(self) -> str:
49
- """:return: cache directory shared by users, same as `user_cache_dir`"""
50
- return self.user_cache_dir
51
-
52
- @property
53
- def user_state_dir(self) -> str:
54
- """:return: state directory tied to the user, same as `user_data_dir`"""
55
- return self.user_data_dir
56
-
57
- @property
58
- def user_log_dir(self) -> str:
59
- """
60
- :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
61
- e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
62
- """
63
- path = self.user_cache_dir
64
- if self.opinion:
65
- path = os.path.join(path, "log")
66
- return path
67
-
68
- @property
69
- def user_documents_dir(self) -> str:
70
- """
71
- :return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``
72
- """
73
- return _android_documents_folder()
74
-
75
- @property
76
- def user_runtime_dir(self) -> str:
77
- """
78
- :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
79
- e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
80
- """
81
- path = self.user_cache_dir
82
- if self.opinion:
83
- path = os.path.join(path, "tmp")
84
- return path
85
-
86
-
87
- @lru_cache(maxsize=1)
88
- def _android_folder() -> str | None:
89
- """:return: base folder for the Android OS or None if cannot be found"""
90
- try:
91
- # First try to get path to android app via pyjnius
92
- from jnius import autoclass
93
-
94
- Context = autoclass("android.content.Context") # noqa: N806
95
- result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
96
- except Exception:
97
- # if fails find an android folder looking path on the sys.path
98
- pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
99
- for path in sys.path:
100
- if pattern.match(path):
101
- result = path.split("/files")[0]
102
- break
103
- else:
104
- result = None
105
- return result
106
-
107
-
108
- @lru_cache(maxsize=1)
109
- def _android_documents_folder() -> str:
110
- """:return: documents folder for the Android OS"""
111
- # Get directories with pyjnius
112
- try:
113
- from jnius import autoclass
114
-
115
- Context = autoclass("android.content.Context") # noqa: N806
116
- Environment = autoclass("android.os.Environment") # noqa: N806
117
- documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
118
- except Exception:
119
- documents_dir = "/storage/emulated/0/Documents"
120
-
121
- return documents_dir
122
-
123
-
124
- __all__ = [
125
- "Android",
126
- ]
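
For reference, a minimal sketch of the fallback logic used by `_android_folder` in the deleted file above: when pyjnius is not available, the helper scans `sys.path` for an Android-style app directory. The regex is copied verbatim from the file; the sample path below is hypothetical.

import re

# Pattern from _android_folder's fallback branch.
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")

# Hypothetical sys.path entry as it would appear inside an Android app sandbox.
sample_path = "/data/user/0/com.example.app/files/site-packages"

if pattern.match(sample_path):
    # Same post-processing as the deleted helper: keep everything before "/files".
    base_folder = sample_path.split("/files")[0]
    print(base_folder)  # -> /data/user/0/com.example.app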
 
spaces/CVH-vn1210/make_hair/minigpt4/models/mini_gpt4.py DELETED
@@ -1,263 +0,0 @@
1
- """
2
- Copyright (c) 2023, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
- import logging
8
- import random
9
- import os
10
- import torch
11
- from torch.cuda.amp import autocast as autocast
12
- import torch.nn as nn
13
-
14
- from minigpt4.common.registry import registry
15
- from minigpt4.models.blip2 import Blip2Base, disabled_train
16
- from minigpt4.models.modeling_llama import LlamaForCausalLM
17
- from transformers import LlamaTokenizer
18
-
19
-
20
- @registry.register_model("mini_gpt4")
21
- class MiniGPT4(Blip2Base):
22
- """
23
- BLIP2 GPT-LLAMA model.
24
- """
25
-
26
- PRETRAINED_MODEL_CONFIG_DICT = {
27
- "pretrain_vicuna": "configs/models/minigpt4.yaml",
28
- }
29
-
30
- def __init__(
31
- self,
32
- vit_model="eva_clip_g",
33
- q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth",
34
- img_size=224,
35
- drop_path_rate=0,
36
- use_grad_checkpoint=False,
37
- vit_precision="fp16",
38
- freeze_vit=True,
39
- freeze_qformer=True,
40
- num_query_token=32,
41
- llama_model="",
42
- llama_cache_dir='',
43
- prompt_path="",
44
- prompt_template="",
45
- max_txt_len=32,
46
- end_sym='\n',
47
- ):
48
- super().__init__()
49
-
50
- self.tokenizer = self.init_tokenizer()
51
-
52
- print('Loading VIT')
53
- self.visual_encoder, self.ln_vision = self.init_vision_encoder(
54
- vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
55
- )
56
- if freeze_vit:
57
- for name, param in self.visual_encoder.named_parameters():
58
- param.requires_grad = False
59
- self.visual_encoder = self.visual_encoder.eval()
60
- self.visual_encoder.train = disabled_train
61
- for name, param in self.ln_vision.named_parameters():
62
- param.requires_grad = False
63
- self.ln_vision = self.ln_vision.eval()
64
- self.ln_vision.train = disabled_train
65
- logging.info("freeze vision encoder")
66
- print('Loading VIT Done')
67
-
68
- print('Loading Q-Former')
69
- self.Qformer, self.query_tokens = self.init_Qformer(
70
- num_query_token, self.visual_encoder.num_features
71
- )
72
- self.Qformer.cls = None
73
- self.Qformer.bert.embeddings.word_embeddings = None
74
- self.Qformer.bert.embeddings.position_embeddings = None
75
- for layer in self.Qformer.bert.encoder.layer:
76
- layer.output = None
77
- layer.intermediate = None
78
- self.load_from_pretrained(url_or_filename=q_former_model)
79
-
80
- if freeze_qformer:
81
- for name, param in self.Qformer.named_parameters():
82
- param.requires_grad = False
83
- self.Qformer = self.Qformer.eval()
84
- self.Qformer.train = disabled_train
85
- self.query_tokens.requires_grad = False
86
- logging.info("freeze Qformer")
87
- print('Loading Q-Former Done')
88
-
89
- print('Loading LLAMA')
90
- self.llama_tokenizer = LlamaTokenizer.from_pretrained('AlekseyKorshuk/vicuna-7b', use_fast=False, use_auth_token=True)
91
- self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
92
-
93
- if llama_cache_dir:
94
- self.llama_model = LlamaForCausalLM.from_pretrained(
95
- 'AlekseyKorshuk/vicuna-7b', load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", use_auth_token=True
96
- )
97
- else:
98
- self.llama_model = LlamaForCausalLM.from_pretrained(
99
- 'AlekseyKorshuk/vicuna-7b', load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", use_auth_token=True
100
- )
101
- for name, param in self.llama_model.named_parameters():
102
- param.requires_grad = False
103
- print('Loading LLAMA Done')
104
-
105
- self.llama_proj = nn.Linear(
106
- self.Qformer.config.hidden_size, self.llama_model.config.hidden_size
107
- )
108
- self.max_txt_len = max_txt_len
109
- self.end_sym = end_sym
110
-
111
- if prompt_path:
112
- with open(prompt_path, 'r') as f:
113
- raw_prompts = f.read().splitlines()
114
- filted_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt]
115
- self.prompt_list = [prompt_template.format(p) for p in filted_prompts]
116
- print('Load {} training prompts'.format(len(self.prompt_list)))
117
- print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
118
- else:
119
- self.prompt_list = []
120
-
121
- def vit_to_cpu(self):
122
- self.ln_vision.to("cpu")
123
- self.ln_vision.float()
124
- self.visual_encoder.to("cpu")
125
- self.visual_encoder.float()
126
-
127
- def encode_img(self, image):
128
- device = image.device
129
- self.vit_to_cpu()
130
- image = image.to("cpu")
131
- with self.maybe_autocast():
132
- image_embeds = self.ln_vision(self.visual_encoder(image)).to(device)
133
- image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device)
134
-
135
- query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
136
- query_output = self.Qformer.bert(
137
- query_embeds=query_tokens,
138
- encoder_hidden_states=image_embeds,
139
- encoder_attention_mask=image_atts,
140
- return_dict=True,
141
- )
142
-
143
- inputs_llama = self.llama_proj(query_output.last_hidden_state)
144
- atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device)
145
- return inputs_llama, atts_llama
146
-
147
- def prompt_wrap(self, img_embeds, atts_img, prompt):
148
- if prompt:
149
- batch_size = img_embeds.shape[0]
150
- p_before, p_after = prompt.split('<ImageHere>')
151
- p_before_tokens = self.llama_tokenizer(
152
- p_before, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
153
- p_after_tokens = self.llama_tokenizer(
154
- p_after, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
155
- p_before_embeds = self.llama_model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1)
156
- p_after_embeds = self.llama_model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1)
157
- wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds, p_after_embeds], dim=1)
158
- wrapped_atts_img = atts_img[:, :1].expand(-1, wrapped_img_embeds.shape[1])
159
- return wrapped_img_embeds, wrapped_atts_img
160
- else:
161
- return img_embeds, atts_img
162
-
163
- def forward(self, samples):
164
- image = samples["image"]
165
- img_embeds, atts_img = self.encode_img(image)
166
- if hasattr(samples, 'question_split'): # VQA dataset
167
- print('VQA Batch')
168
- vqa_prompt = '###Human: <Img><ImageHere></Img> '
169
- img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, vqa_prompt)
170
- elif self.prompt_list:
171
- prompt = random.choice(self.prompt_list)
172
- img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompt)
173
-
174
- self.llama_tokenizer.padding_side = "right"
175
-
176
- text = [t + self.end_sym for t in samples["text_input"]]
177
-
178
- to_regress_tokens = self.llama_tokenizer(
179
- text,
180
- return_tensors="pt",
181
- padding="longest",
182
- truncation=True,
183
- max_length=self.max_txt_len,
184
- add_special_tokens=False
185
- ).to(image.device)
186
-
187
- targets = to_regress_tokens.input_ids.masked_fill(
188
- to_regress_tokens.input_ids == self.llama_tokenizer.pad_token_id, -100
189
- )
190
-
191
- empty_targets = (
192
- torch.ones([atts_img.shape[0], atts_img.shape[1]+1],
193
- dtype=torch.long).to(image.device).fill_(-100) # plus one for bos
194
- )
195
- targets = torch.cat([empty_targets, targets], dim=1)
196
-
197
- batch_size = img_embeds.shape[0]
198
- bos = torch.ones([batch_size, 1],
199
- dtype=to_regress_tokens.input_ids.dtype,
200
- device=to_regress_tokens.input_ids.device) * self.llama_tokenizer.bos_token_id
201
- bos_embeds = self.llama_model.model.embed_tokens(bos)
202
- atts_bos = atts_img[:, :1]
203
-
204
- to_regress_embeds = self.llama_model.model.embed_tokens(to_regress_tokens.input_ids)
205
- inputs_embeds = torch.cat([bos_embeds, img_embeds, to_regress_embeds], dim=1)
206
- attention_mask = torch.cat([atts_bos, atts_img, to_regress_tokens.attention_mask], dim=1)
207
-
208
- with self.maybe_autocast():
209
- outputs = self.llama_model(
210
- inputs_embeds=inputs_embeds,
211
- attention_mask=attention_mask,
212
- return_dict=True,
213
- labels=targets,
214
- )
215
- loss = outputs.loss
216
-
217
- return {"loss": loss}
218
-
219
- @classmethod
220
- def from_config(cls, cfg):
221
- vit_model = cfg.get("vit_model", "eva_clip_g")
222
- q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth")
223
- img_size = cfg.get("image_size")
224
- num_query_token = cfg.get("num_query_token")
225
- llama_model = cfg.get("llama_model")
226
-
227
- drop_path_rate = cfg.get("drop_path_rate", 0)
228
- use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
229
- vit_precision = cfg.get("vit_precision", "fp16")
230
- freeze_vit = cfg.get("freeze_vit", True)
231
- freeze_qformer = cfg.get("freeze_qformer", True)
232
- llama_cache_dir = cfg.get("llama_cache_dir", "")
233
-
234
- prompt_path = cfg.get("prompt_path", "")
235
- prompt_template = cfg.get("prompt_template", "")
236
- max_txt_len = cfg.get("max_txt_len", 32)
237
- end_sym = cfg.get("end_sym", '\n')
238
-
239
- model = cls(
240
- vit_model=vit_model,
241
- q_former_model=q_former_model,
242
- img_size=img_size,
243
- drop_path_rate=drop_path_rate,
244
- use_grad_checkpoint=use_grad_checkpoint,
245
- vit_precision=vit_precision,
246
- freeze_vit=freeze_vit,
247
- freeze_qformer=freeze_qformer,
248
- llama_cache_dir=llama_cache_dir,
249
- num_query_token=num_query_token,
250
- llama_model=llama_model,
251
- prompt_path=prompt_path,
252
- prompt_template=prompt_template,
253
- max_txt_len=max_txt_len,
254
- end_sym=end_sym
255
- )
256
-
257
- ckpt_path = cfg.get("ckpt", "") # load weights of MiniGPT-4
258
- if ckpt_path:
259
- print("Load BLIP2-LLM Checkpoint: {}".format(ckpt_path))
260
- ckpt = torch.load(ckpt_path, map_location="cpu")
261
- msg = model.load_state_dict(ckpt['model'], strict=False)
262
-
263
- return model
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/hooks.py DELETED
@@ -1,427 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
-
4
- import datetime
5
- import itertools
6
- import logging
7
- import os
8
- import tempfile
9
- import time
10
- from collections import Counter
11
- import torch
12
- from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
13
- from fvcore.common.file_io import PathManager
14
- from fvcore.common.timer import Timer
15
- from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
16
-
17
- import detectron2.utils.comm as comm
18
- from detectron2.evaluation.testing import flatten_results_dict
19
- from detectron2.utils.events import EventStorage, EventWriter
20
-
21
- from .train_loop import HookBase
22
-
23
- __all__ = [
24
- "CallbackHook",
25
- "IterationTimer",
26
- "PeriodicWriter",
27
- "PeriodicCheckpointer",
28
- "LRScheduler",
29
- "AutogradProfiler",
30
- "EvalHook",
31
- "PreciseBN",
32
- ]
33
-
34
-
35
- """
36
- Implement some common hooks.
37
- """
38
-
39
-
40
- class CallbackHook(HookBase):
41
- """
42
- Create a hook using callback functions provided by the user.
43
- """
44
-
45
- def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
46
- """
47
- Each argument is a function that takes one argument: the trainer.
48
- """
49
- self._before_train = before_train
50
- self._before_step = before_step
51
- self._after_step = after_step
52
- self._after_train = after_train
53
-
54
- def before_train(self):
55
- if self._before_train:
56
- self._before_train(self.trainer)
57
-
58
- def after_train(self):
59
- if self._after_train:
60
- self._after_train(self.trainer)
61
- # The functions may be closures that hold reference to the trainer
62
- # Therefore, delete them to avoid circular reference.
63
- del self._before_train, self._after_train
64
- del self._before_step, self._after_step
65
-
66
- def before_step(self):
67
- if self._before_step:
68
- self._before_step(self.trainer)
69
-
70
- def after_step(self):
71
- if self._after_step:
72
- self._after_step(self.trainer)
73
-
74
-
75
- class IterationTimer(HookBase):
76
- """
77
- Track the time spent for each iteration (each run_step call in the trainer).
78
- Print a summary at the end of training.
79
-
80
- This hook uses the time between the call to its :meth:`before_step`
81
- and :meth:`after_step` methods.
82
- Under the convention that :meth:`before_step` of all hooks should only
83
- take negligible amount of time, the :class:`IterationTimer` hook should be
84
- placed at the beginning of the list of hooks to obtain accurate timing.
85
- """
86
-
87
- def __init__(self, warmup_iter=3):
88
- """
89
- Args:
90
- warmup_iter (int): the number of iterations at the beginning to exclude
91
- from timing.
92
- """
93
- self._warmup_iter = warmup_iter
94
- self._step_timer = Timer()
95
- self._start_time = time.perf_counter()
96
- self._total_timer = Timer()
97
-
98
- def before_train(self):
99
- self._start_time = time.perf_counter()
100
- self._total_timer.reset()
101
- self._total_timer.pause()
102
-
103
- def after_train(self):
104
- logger = logging.getLogger(__name__)
105
- total_time = time.perf_counter() - self._start_time
106
- total_time_minus_hooks = self._total_timer.seconds()
107
- hook_time = total_time - total_time_minus_hooks
108
-
109
- num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
110
-
111
- if num_iter > 0 and total_time_minus_hooks > 0:
112
- # Speed is meaningful only after warmup
113
- # NOTE this format is parsed by grep in some scripts
114
- logger.info(
115
- "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
116
- num_iter,
117
- str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
118
- total_time_minus_hooks / num_iter,
119
- )
120
- )
121
-
122
- logger.info(
123
- "Total training time: {} ({} on hooks)".format(
124
- str(datetime.timedelta(seconds=int(total_time))),
125
- str(datetime.timedelta(seconds=int(hook_time))),
126
- )
127
- )
128
-
129
- def before_step(self):
130
- self._step_timer.reset()
131
- self._total_timer.resume()
132
-
133
- def after_step(self):
134
- # +1 because we're in after_step
135
- iter_done = self.trainer.iter - self.trainer.start_iter + 1
136
- if iter_done >= self._warmup_iter:
137
- sec = self._step_timer.seconds()
138
- self.trainer.storage.put_scalars(time=sec)
139
- else:
140
- self._start_time = time.perf_counter()
141
- self._total_timer.reset()
142
-
143
- self._total_timer.pause()
144
-
145
-
146
- class PeriodicWriter(HookBase):
147
- """
148
- Write events to EventStorage periodically.
149
-
150
- It is executed every ``period`` iterations and after the last iteration.
151
- """
152
-
153
- def __init__(self, writers, period=20):
154
- """
155
- Args:
156
- writers (list[EventWriter]): a list of EventWriter objects
157
- period (int):
158
- """
159
- self._writers = writers
160
- for w in writers:
161
- assert isinstance(w, EventWriter), w
162
- self._period = period
163
-
164
- def after_step(self):
165
- if (self.trainer.iter + 1) % self._period == 0 or (
166
- self.trainer.iter == self.trainer.max_iter - 1
167
- ):
168
- for writer in self._writers:
169
- writer.write()
170
-
171
- def after_train(self):
172
- for writer in self._writers:
173
- writer.close()
174
-
175
-
176
- class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
177
- """
178
- Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
179
-
180
- Note that when used as a hook,
181
- it is unable to save additional data other than what's defined
182
- by the given `checkpointer`.
183
-
184
- It is executed every ``period`` iterations and after the last iteration.
185
- """
186
-
187
- def before_train(self):
188
- self.max_iter = self.trainer.max_iter
189
-
190
- def after_step(self):
191
- # No way to use **kwargs
192
- self.step(self.trainer.iter)
193
-
194
-
195
- class LRScheduler(HookBase):
196
- """
197
- A hook which executes a torch builtin LR scheduler and summarizes the LR.
198
- It is executed after every iteration.
199
- """
200
-
201
- def __init__(self, optimizer, scheduler):
202
- """
203
- Args:
204
- optimizer (torch.optim.Optimizer):
205
- scheduler (torch.optim._LRScheduler)
206
- """
207
- self._optimizer = optimizer
208
- self._scheduler = scheduler
209
-
210
- # NOTE: some heuristics on what LR to summarize
211
- # summarize the param group with most parameters
212
- largest_group = max(len(g["params"]) for g in optimizer.param_groups)
213
-
214
- if largest_group == 1:
215
- # If all groups have one parameter,
216
- # then find the most common initial LR, and use it for summary
217
- lr_count = Counter([g["lr"] for g in optimizer.param_groups])
218
- lr = lr_count.most_common()[0][0]
219
- for i, g in enumerate(optimizer.param_groups):
220
- if g["lr"] == lr:
221
- self._best_param_group_id = i
222
- break
223
- else:
224
- for i, g in enumerate(optimizer.param_groups):
225
- if len(g["params"]) == largest_group:
226
- self._best_param_group_id = i
227
- break
228
-
229
- def after_step(self):
230
- lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
231
- self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
232
- self._scheduler.step()
233
-
234
-
235
- class AutogradProfiler(HookBase):
236
- """
237
- A hook which runs `torch.autograd.profiler.profile`.
238
-
239
- Examples:
240
-
241
- .. code-block:: python
242
-
243
- hooks.AutogradProfiler(
244
- lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR
245
- )
246
-
247
- The above example will run the profiler for iteration 10~20 and dump
248
- results to ``OUTPUT_DIR``. We did not profile the first few iterations
249
- because they are typically slower than the rest.
250
- The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
251
-
252
- Note:
253
- When used together with NCCL on older version of GPUs,
254
- autograd profiler may cause deadlock because it unnecessarily allocates
255
- memory on every device it sees. The memory management calls, if
256
- interleaved with NCCL calls, lead to deadlock on GPUs that do not
257
- support `cudaLaunchCooperativeKernelMultiDevice`.
258
- """
259
-
260
- def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
261
- """
262
- Args:
263
- enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
264
- and returns whether to enable the profiler.
265
- It will be called once every step, and can be used to select which steps to profile.
266
- output_dir (str): the output directory to dump tracing files.
267
- use_cuda (bool): same as in `torch.autograd.profiler.profile`.
268
- """
269
- self._enable_predicate = enable_predicate
270
- self._use_cuda = use_cuda
271
- self._output_dir = output_dir
272
-
273
- def before_step(self):
274
- if self._enable_predicate(self.trainer):
275
- self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
276
- self._profiler.__enter__()
277
- else:
278
- self._profiler = None
279
-
280
- def after_step(self):
281
- if self._profiler is None:
282
- return
283
- self._profiler.__exit__(None, None, None)
284
- PathManager.mkdirs(self._output_dir)
285
- out_file = os.path.join(
286
- self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
287
- )
288
- if "://" not in out_file:
289
- self._profiler.export_chrome_trace(out_file)
290
- else:
291
- # Support non-posix filesystems
292
- with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
293
- tmp_file = os.path.join(d, "tmp.json")
294
- self._profiler.export_chrome_trace(tmp_file)
295
- with open(tmp_file) as f:
296
- content = f.read()
297
- with PathManager.open(out_file, "w") as f:
298
- f.write(content)
299
-
300
-
301
- class EvalHook(HookBase):
302
- """
303
- Run an evaluation function periodically, and at the end of training.
304
-
305
- It is executed every ``eval_period`` iterations and after the last iteration.
306
- """
307
-
308
- def __init__(self, eval_period, eval_function):
309
- """
310
- Args:
311
- eval_period (int): the period to run `eval_function`.
312
- eval_function (callable): a function which takes no arguments, and
313
- returns a nested dict of evaluation metrics.
314
-
315
- Note:
316
- This hook must be enabled in all or none workers.
317
- If you would like only certain workers to perform evaluation,
318
- give other workers a no-op function (`eval_function=lambda: None`).
319
- """
320
- self._period = eval_period
321
- self._func = eval_function
322
-
323
- def _do_eval(self):
324
- results = self._func()
325
-
326
- if results:
327
- assert isinstance(
328
- results, dict
329
- ), "Eval function must return a dict. Got {} instead.".format(results)
330
-
331
- flattened_results = flatten_results_dict(results)
332
- for k, v in flattened_results.items():
333
- try:
334
- v = float(v)
335
- except Exception:
336
- raise ValueError(
337
- "[EvalHook] eval_function should return a nested dict of float. "
338
- "Got '{}: {}' instead.".format(k, v)
339
- )
340
- self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
341
-
342
- # Evaluation may take different time among workers.
343
- # A barrier make them start the next iteration together.
344
- comm.synchronize()
345
-
346
- def after_step(self):
347
- next_iter = self.trainer.iter + 1
348
- is_final = next_iter == self.trainer.max_iter
349
- if is_final or (self._period > 0 and next_iter % self._period == 0):
350
- self._do_eval()
351
-
352
- def after_train(self):
353
- # func is likely a closure that holds reference to the trainer
354
- # therefore we clean it to avoid circular reference in the end
355
- del self._func
356
-
357
-
358
- class PreciseBN(HookBase):
359
- """
360
- The standard implementation of BatchNorm uses EMA in inference, which is
361
- sometimes suboptimal.
362
- This class computes the true average of statistics rather than the moving average,
363
- and put true averages to every BN layer in the given model.
364
-
365
- It is executed every ``period`` iterations and after the last iteration.
366
- """
367
-
368
- def __init__(self, period, model, data_loader, num_iter):
369
- """
370
- Args:
371
- period (int): the period this hook is run, or 0 to not run during training.
372
- The hook will always run in the end of training.
373
- model (nn.Module): a module whose all BN layers in training mode will be
374
- updated by precise BN.
375
- Note that user is responsible for ensuring the BN layers to be
376
- updated are in training mode when this hook is triggered.
377
- data_loader (iterable): it will produce data to be run by `model(data)`.
378
- num_iter (int): number of iterations used to compute the precise
379
- statistics.
380
- """
381
- self._logger = logging.getLogger(__name__)
382
- if len(get_bn_modules(model)) == 0:
383
- self._logger.info(
384
- "PreciseBN is disabled because model does not contain BN layers in training mode."
385
- )
386
- self._disabled = True
387
- return
388
-
389
- self._model = model
390
- self._data_loader = data_loader
391
- self._num_iter = num_iter
392
- self._period = period
393
- self._disabled = False
394
-
395
- self._data_iter = None
396
-
397
- def after_step(self):
398
- next_iter = self.trainer.iter + 1
399
- is_final = next_iter == self.trainer.max_iter
400
- if is_final or (self._period > 0 and next_iter % self._period == 0):
401
- self.update_stats()
402
-
403
- def update_stats(self):
404
- """
405
- Update the model with precise statistics. Users can manually call this method.
406
- """
407
- if self._disabled:
408
- return
409
-
410
- if self._data_iter is None:
411
- self._data_iter = iter(self._data_loader)
412
-
413
- def data_loader():
414
- for num_iter in itertools.count(1):
415
- if num_iter % 100 == 0:
416
- self._logger.info(
417
- "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
418
- )
419
- # This way we can reuse the same iterator
420
- yield next(self._data_iter)
421
-
422
- with EventStorage(): # capture events in a new storage to discard them
423
- self._logger.info(
424
- "Running precise-BN for {} iterations... ".format(self._num_iter)
425
- + "Note that this could produce different statistics every time."
426
- )
427
- update_bn_stats(self._model, data_loader(), self._num_iter)
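
For reference, a minimal sketch of how the hooks defined in the deleted file above are typically attached to a trainer. This assumes detectron2 is installed; the evaluation callback and its metric names are hypothetical, and the config/trainer construction is elided.

from detectron2.engine import DefaultTrainer, hooks

def eval_fn():
    # Hypothetical evaluation callback: must return a (possibly nested) dict of floats.
    return {"bbox": {"AP": 0.0}}

extra_hooks = [
    hooks.IterationTimer(warmup_iter=3),                   # time each iteration, skipping warm-up
    hooks.EvalHook(5000, eval_fn),                         # run eval_fn every 5000 iters and at the end
    hooks.CallbackHook(after_step=lambda trainer: None),   # arbitrary per-step callback
]

# trainer = DefaultTrainer(cfg)   # cfg construction omitted in this sketch
# trainer.register_hooks(extra_hooks)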
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/utils/spec_tools.py DELETED
@@ -1,266 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by Matthew Walmer
5
-
6
- Tools for reading and writing spec files
7
- =========================================================================================
8
- """
9
- import csv
10
-
11
- SPEC_OUTLINE = {
12
- 'f': ['feat_id', 'trigger', 'scale', 'patch', 'pos', 'cb', 'cg', 'cr', 'detector', 'nb', 'f_seed', 'f_clean',
13
- 'op_use', 'op_size', 'op_sample', 'op_res', 'op_epochs'],
14
- 'd': ['data_id', 'feat_id', 'f_spec_file', 'perc', 'perc_i', 'perc_q', 'trig_word', 'target', 'd_seed', 'd_clean'],
15
- 'm': ['model_id', 'data_id', 'd_spec_file', 'model', 'm_seed']
16
- }
17
-
18
-
19
-
20
- def save_specs(file, spec_type, specs):
21
- assert spec_type in SPEC_OUTLINE
22
- print('saving to: ' + file)
23
- with open(file, 'w', newline='') as csvfile:
24
- writer = csv.DictWriter(csvfile, fieldnames=SPEC_OUTLINE[spec_type])
25
- writer.writeheader()
26
- for spec in specs:
27
- writer.writerow(spec)
28
-
29
-
30
-
31
- def load_specs(file, verbose=False):
32
- if verbose: print('loading file: ' + file)
33
- specs = []
34
- with open(file, 'r', newline='') as csvfile:
35
- reader = csv.DictReader(csvfile)
36
- for row in reader:
37
- specs.append(row)
38
- return specs
39
-
40
-
41
-
42
- def make_id2spec(u_specs):
43
- ret = {}
44
- for s in u_specs:
45
- s_id = get_id(s)
46
- ret[s_id] = s
47
- return ret
48
-
49
-
50
-
51
- def load_specs_dict(file):
52
- specs = load_specs(file)
53
- return make_id2spec(specs)
54
-
55
-
56
-
57
- def merge_and_proc_specs(f_spec, d_spec=None, m_spec=None):
58
- all_specs = [f_spec]
59
- # identify and test specs match
60
- if d_spec is not None:
61
- assert f_spec['feat_id'] == d_spec['feat_id']
62
- all_specs.append(d_spec)
63
- if m_spec is not None:
64
- assert d_spec['data_id'] == m_spec['data_id']
65
- all_specs.append(m_spec)
66
- # merge specs
67
- s = {}
68
- for spec in all_specs:
69
- for key in spec:
70
- s[key] = str(spec[key])
71
- # handle the clean flag overrides
72
- if f_spec['f_clean'] == '1':
73
- s['feat_id'] = 'clean'
74
- if d_spec is not None and d_spec['d_clean'] == '1':
75
- s['data_id'] = 'clean'
76
- # handle perc_i and perc_q match settings
77
- if d_spec is not None and d_spec['perc_i'] == 'match':
78
- s['perc_i'] = s['perc']
79
- if d_spec is not None and d_spec['perc_q'] == 'match':
80
- s['perc_q'] = s['perc']
81
- return s
82
-
83
-
84
-
85
- def get_spec_type(s):
86
- if 'd_spec_file' in s:
87
- return 'm'
88
- if 'f_spec_file' in s:
89
- return 'd'
90
- return 'f'
91
-
92
-
93
-
94
- def get_id(s):
95
- if 'd_spec_file' in s:
96
- return s['model_id']
97
- if 'f_spec_file' in s:
98
- return s['data_id']
99
- return s['feat_id']
100
-
101
-
102
-
103
- def get_connected(s):
104
- if 'd_spec_file' in s:
105
- return s['d_spec_file'], s['data_id']
106
- if 'f_spec_file' in s:
107
- return s['f_spec_file'], s['feat_id']
108
- return None, None
109
-
110
-
111
-
112
- def complete_spec(u_spec, id_2_fspec=None, id_2_dspec=None):
113
- spec_type = get_spec_type(u_spec)
114
- if spec_type == 'f':
115
- return merge_and_proc_specs(u_spec)
116
- if spec_type == 'd':
117
- f_id = u_spec['feat_id']
118
- f_spec = id_2_fspec[f_id]
119
- return merge_and_proc_specs(f_spec, u_spec)
120
- else:
121
- d_id = u_spec['data_id']
122
- d_spec = id_2_dspec[d_id]
123
- f_id = d_spec['feat_id']
124
- f_spec = id_2_fspec[f_id]
125
- return merge_and_proc_specs(f_spec, d_spec, u_spec)
126
-
127
-
128
-
129
- def parse_row_setting(rows):
130
- if isinstance(rows, list):
131
- return rows
132
- if rows == 'all':
133
- return rows
134
- if ',' in rows:
135
- rows = rows.split(',')
136
- ret = []
137
- for r in rows:
138
- ret.append(int(r))
139
- return ret
140
- if '-' in rows:
141
- start, end = rows.split('-')
142
- ret = []
143
- for i in range(int(start), int(end)+1):
144
- ret.append(i)
145
- return ret
146
- return [int(rows)]
147
-
148
-
149
-
150
- # load a spec file, and filter the specs based on a row or id list
151
- def load_and_select_specs(file, rows=None, ids=None):
152
- if rows is None and ids is None:
153
- # print('WARNING: rows and ids options both None, defaulting to load all')
154
- rows = 'all'
155
- all_specs = load_specs(file)
156
- if rows == 'all':
157
- specs = all_specs
158
- elif rows is not None: # row mode
159
- specs = []
160
- for r in parse_row_setting(rows):
161
- specs.append(all_specs[r])
162
- else: # id mode
163
- if not isinstance(ids, list):
164
- if ',' in ids:
165
- ids = ids.split(',')
166
- else:
167
- ids = [ids]
168
- specs = []
169
- for s in all_specs:
170
- s_id = get_id(s)
171
- if s_id in ids:
172
- specs.append(s)
173
- if len(specs) != len(ids):
174
- print('ERROR: did not find requested ids')
175
- print('ids requested:')
176
- print(ids)
177
- print('specs found:')
178
- print(specs)
179
- exit(-1)
180
- return specs
181
-
182
-
183
-
184
- '''
185
- Load a spec file of any type, select specified rows,
186
- and load other related specs files. Returns lists of
187
- f_specs, d_specs, and m_specs. Returns empty lists
188
- for any level that has no specs included.
189
-
190
- Instead of specifying rows, can specify ids to look
191
- for. The row setting overrides the ids settings
192
-
193
- the row settings can be given in several ways:
194
- - an int, or an int as a str
195
- - a str of comma-separated ints
196
- - a str of format '4-8'
197
- - 'all'
198
-
199
- the ids setting can be given in two ways:
200
- - a str with a single id
201
- - a str with a comma-separated list of ids
202
-
203
- In addition, can specify a list of model_id's
204
- to exclude. This helps orchestrator re-compute which
205
- jobs still need to be run
206
- '''
207
- def gather_specs(file, rows=None, ids=None, m_id_exclude=None):
208
- specs = load_and_select_specs(file, rows, ids)
209
- spec_type = get_spec_type(specs[0])
210
-
211
- # load connected specs
212
- if spec_type == 'm':
213
- if m_id_exclude is None:
214
- m_specs = specs
215
- else:
216
- # check for excluded specs
217
- m_specs = []
218
- for s in specs:
219
- if s['model_id'] not in m_id_exclude:
220
- m_specs.append(s)
221
- d_specs = []
222
- f_specs = []
223
- to_load = {}
224
- for s in m_specs:
225
- cfile, cid = get_connected(s)
226
- if cfile not in to_load: to_load[cfile] = []
227
- if cid not in to_load[cfile]: to_load[cfile].append(cid)
228
- for f in to_load:
229
- id2specs = load_specs_dict(f)
230
- for cid in to_load[f]:
231
- d_specs.append(id2specs[cid])
232
- elif spec_type == 'd':
233
- m_specs = []
234
- d_specs = specs
235
- f_specs = []
236
- if spec_type == 'm' or spec_type == 'd':
237
- to_load = {}
238
- for s in d_specs:
239
- cfile, cid = get_connected(s)
240
- if cfile not in to_load: to_load[cfile] = []
241
- if cid not in to_load[cfile]: to_load[cfile].append(cid)
242
- for f in to_load:
243
- id2specs = load_specs_dict(f)
244
- for cid in to_load[f]:
245
- f_specs.append(id2specs[cid])
246
- else:
247
- m_specs = []
248
- d_specs = []
249
- f_specs = specs
250
- return f_specs, d_specs, m_specs
251
-
252
-
253
-
254
- # gather and return completed m specs from an m spec file
255
- def gather_full_m_specs(m_file, rows=None, ids=None):
256
- f_specs, d_specs, m_specs = gather_specs(m_file, rows, ids)
257
- if len(m_specs) == 0:
258
- print('ERROR: must give a model spec file')
259
- exit(-1)
260
- id_2_fspec = make_id2spec(f_specs)
261
- id_2_dspec = make_id2spec(d_specs)
262
- full_specs = []
263
- for ms in m_specs:
264
- s = complete_spec(ms, id_2_fspec, id_2_dspec)
265
- full_specs.append(s)
266
- return full_specs
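
For reference, a minimal sketch of how the row-selection helper in the deleted file above interprets its argument, assuming the module is importable as `spec_tools` (a hypothetical import path):

from spec_tools import parse_row_setting

print(parse_row_setting("4-8"))    # inclusive range  -> [4, 5, 6, 7, 8]
print(parse_row_setting("1,3,7"))  # comma-separated  -> [1, 3, 7]
print(parse_row_setting("5"))      # single row       -> [5]
print(parse_row_setting("all"))    # passed through   -> 'all'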
 
spaces/CVPR/LIVE/thrust/thrust/detail/execute_with_dependencies.h DELETED
@@ -1,267 +0,0 @@
1
- /*
2
- * Copyright 2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/cpp11_required.h>
21
-
22
- #if THRUST_CPP_DIALECT >= 2011
23
-
24
- #include <thrust/detail/type_deduction.h>
25
- #include <thrust/type_traits/remove_cvref.h>
26
-
27
- #include <tuple>
28
- #include <type_traits>
29
-
30
- namespace thrust
31
- {
32
- namespace detail
33
- {
34
-
35
- struct capture_as_dependency_fn
36
- {
37
- template<typename Dependency>
38
- auto operator()(Dependency&& dependency) const
39
- THRUST_DECLTYPE_RETURNS(capture_as_dependency(THRUST_FWD(dependency)))
40
- };
41
-
42
- // Default implementation: universal forwarding.
43
- template<typename Dependency>
44
- auto capture_as_dependency(Dependency&& dependency)
45
- THRUST_DECLTYPE_RETURNS(THRUST_FWD(dependency))
46
-
47
- template<typename... Dependencies>
48
- auto capture_as_dependency(std::tuple<Dependencies...>& dependencies)
49
- THRUST_DECLTYPE_RETURNS(
50
- tuple_for_each(THRUST_FWD(dependencies), capture_as_dependency_fn{})
51
- )
52
-
53
- template<template<typename> class BaseSystem, typename... Dependencies>
54
- struct execute_with_dependencies
55
- : BaseSystem<execute_with_dependencies<BaseSystem, Dependencies...>>
56
- {
57
- private:
58
- using super_t = BaseSystem<execute_with_dependencies<BaseSystem, Dependencies...>>;
59
-
60
- std::tuple<remove_cvref_t<Dependencies>...> dependencies;
61
-
62
- public:
63
- __host__
64
- execute_with_dependencies(super_t const &super, Dependencies && ...dependencies)
65
- : super_t(super), dependencies(std::forward<Dependencies>(dependencies)...)
66
- {
67
- }
68
-
69
- template <typename... UDependencies>
70
- __host__
71
- execute_with_dependencies(super_t const &super, UDependencies && ...deps)
72
- : super_t(super), dependencies(THRUST_FWD(deps)...)
73
- {
74
- }
75
-
76
- template <typename... UDependencies>
77
- __host__
78
- execute_with_dependencies(UDependencies && ...deps)
79
- : dependencies(THRUST_FWD(deps)...)
80
- {
81
- }
82
-
83
- template <typename... UDependencies>
84
- __host__
85
- execute_with_dependencies(super_t const &super, std::tuple<UDependencies...>&& deps)
86
- : super_t(super), dependencies(std::move(deps))
87
- {
88
- }
89
-
90
- template <typename... UDependencies>
91
- __host__
92
- execute_with_dependencies(std::tuple<UDependencies...>&& deps)
93
- : dependencies(std::move(deps))
94
- {
95
- }
96
-
97
- std::tuple<remove_cvref_t<Dependencies>...>
98
- __host__
99
- extract_dependencies()
100
- {
101
- return std::move(dependencies);
102
- }
103
-
104
- // Rebinding.
105
- template<typename ...UDependencies>
106
- __host__
107
- execute_with_dependencies<BaseSystem, UDependencies...>
108
- rebind_after(UDependencies&& ...udependencies) const
109
- {
110
- return { capture_as_dependency(THRUST_FWD(udependencies))... };
111
- }
112
-
113
- // Rebinding.
114
- template<typename ...UDependencies>
115
- __host__
116
- execute_with_dependencies<BaseSystem, UDependencies...>
117
- rebind_after(std::tuple<UDependencies...>& udependencies) const
118
- {
119
- return { capture_as_dependency(udependencies) };
120
- }
121
- template<typename ...UDependencies>
122
- __host__
123
- execute_with_dependencies<BaseSystem, UDependencies...>
124
- rebind_after(std::tuple<UDependencies...>&& udependencies) const
125
- {
126
- return { capture_as_dependency(std::move(udependencies)) };
127
- }
128
- };
129
-
130
- template<
131
- typename Allocator,
132
- template<typename> class BaseSystem,
133
- typename... Dependencies
134
- >
135
- struct execute_with_allocator_and_dependencies
136
- : BaseSystem<
137
- execute_with_allocator_and_dependencies<
138
- Allocator,
139
- BaseSystem,
140
- Dependencies...
141
- >
142
- >
143
- {
144
- private:
145
- using super_t = BaseSystem<
146
- execute_with_allocator_and_dependencies<
147
- Allocator,
148
- BaseSystem,
149
- Dependencies...
150
- >
151
- >;
152
-
153
- std::tuple<remove_cvref_t<Dependencies>...> dependencies;
154
- Allocator alloc;
155
-
156
- public:
157
- template <typename... UDependencies>
158
- __host__
159
- execute_with_allocator_and_dependencies(super_t const &super, Allocator a, UDependencies && ...deps)
160
- : super_t(super), dependencies(THRUST_FWD(deps)...), alloc(a)
161
- {
162
- }
163
-
164
- template <typename... UDependencies>
165
- __host__
166
- execute_with_allocator_and_dependencies(Allocator a, UDependencies && ...deps)
167
- : dependencies(THRUST_FWD(deps)...), alloc(a)
168
- {
169
- }
170
-
171
- template <typename... UDependencies>
172
- __host__
173
- execute_with_allocator_and_dependencies(super_t const &super, Allocator a, std::tuple<UDependencies...>&& deps)
174
- : super_t(super), dependencies(std::move(deps)), alloc(a)
175
- {
176
- }
177
-
178
- template <typename... UDependencies>
179
- __host__
180
- execute_with_allocator_and_dependencies(Allocator a, std::tuple<UDependencies...>&& deps)
181
- : dependencies(std::move(deps)), alloc(a)
182
- {
183
- }
184
-
185
- std::tuple<remove_cvref_t<Dependencies>...>
186
- __host__
187
- extract_dependencies()
188
- {
189
- return std::move(dependencies);
190
- }
191
-
192
- __host__
193
- typename std::add_lvalue_reference<Allocator>::type
194
- get_allocator()
195
- {
196
- return alloc;
197
- }
198
-
199
- // Rebinding.
200
- template<typename ...UDependencies>
201
- __host__
202
- execute_with_allocator_and_dependencies<Allocator, BaseSystem, UDependencies...>
203
- rebind_after(UDependencies&& ...udependencies) const
204
- {
205
- return { alloc, capture_as_dependency(THRUST_FWD(udependencies))... };
206
- }
207
-
208
- // Rebinding.
209
- template<typename ...UDependencies>
210
- __host__
211
- execute_with_allocator_and_dependencies<Allocator, BaseSystem, UDependencies...>
212
- rebind_after(std::tuple<UDependencies...>& udependencies) const
213
- {
214
- return { alloc, capture_as_dependency(udependencies) };
215
- }
216
- template<typename ...UDependencies>
217
- __host__
218
- execute_with_allocator_and_dependencies<Allocator, BaseSystem, UDependencies...>
219
- rebind_after(std::tuple<UDependencies...>&& udependencies) const
220
- {
221
- return { alloc, capture_as_dependency(std::move(udependencies)) };
222
- }
223
- };
224
-
225
- template<template<typename> class BaseSystem, typename ...Dependencies>
226
- __host__
227
- std::tuple<remove_cvref_t<Dependencies>...>
228
- extract_dependencies(thrust::detail::execute_with_dependencies<BaseSystem, Dependencies...>&& system)
229
- {
230
- return std::move(system).extract_dependencies();
231
- }
232
- template<template<typename> class BaseSystem, typename ...Dependencies>
233
- __host__
234
- std::tuple<remove_cvref_t<Dependencies>...>
235
- extract_dependencies(thrust::detail::execute_with_dependencies<BaseSystem, Dependencies...>& system)
236
- {
237
- return std::move(system).extract_dependencies();
238
- }
239
-
240
- template<typename Allocator, template<typename> class BaseSystem, typename ...Dependencies>
241
- __host__
242
- std::tuple<remove_cvref_t<Dependencies>...>
243
- extract_dependencies(thrust::detail::execute_with_allocator_and_dependencies<Allocator, BaseSystem, Dependencies...>&& system)
244
- {
245
- return std::move(system).extract_dependencies();
246
- }
247
- template<typename Allocator, template<typename> class BaseSystem, typename ...Dependencies>
248
- __host__
249
- std::tuple<remove_cvref_t<Dependencies>...>
250
- extract_dependencies(thrust::detail::execute_with_allocator_and_dependencies<Allocator, BaseSystem, Dependencies...>& system)
251
- {
252
- return std::move(system).extract_dependencies();
253
- }
254
-
255
- template<typename System>
256
- __host__
257
- std::tuple<>
258
- extract_dependencies(System &&)
259
- {
260
- return std::tuple<>{};
261
- }
262
-
263
- } // end detail
264
- } // end thrust
265
-
266
- #endif // THRUST_CPP_DIALECT >= 2011
267
-
 
spaces/CVPR/LIVE/thrust/thrust/detail/tuple_transform.h DELETED
@@ -1,418 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/tuple.h>
20
- #include <thrust/detail/tuple_meta_transform.h>
21
-
22
- namespace thrust
23
- {
24
-
25
- namespace detail
26
- {
27
-
28
- template<typename Tuple,
29
- template<typename> class UnaryMetaFunction,
30
- typename UnaryFunction,
31
- unsigned int sz = thrust::tuple_size<Tuple>::value>
32
- struct tuple_transform_functor;
33
-
34
-
35
- template<typename Tuple,
36
- template<typename> class UnaryMetaFunction,
37
- typename UnaryFunction>
38
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,0>
39
- {
40
- static __host__
41
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
42
- do_it_on_the_host(const Tuple &, UnaryFunction)
43
- {
44
- return thrust::null_type();
45
- }
46
-
47
- static __host__ __device__
48
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
49
- do_it_on_the_host_or_device(const Tuple &, UnaryFunction)
50
- {
51
- return thrust::null_type();
52
- }
53
- };
54
-
55
-
56
- template<typename Tuple,
57
- template<typename> class UnaryMetaFunction,
58
- typename UnaryFunction>
59
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,1>
60
- {
61
- static __host__
62
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
63
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
64
- {
65
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
66
-
67
- return XfrmTuple(f(thrust::get<0>(t)));
68
- }
69
-
70
- static __host__ __device__
71
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
72
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
73
- {
74
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
75
-
76
- return XfrmTuple(f(thrust::get<0>(t)));
77
- }
78
- };
79
-
80
-
81
- template<typename Tuple,
82
- template<typename> class UnaryMetaFunction,
83
- typename UnaryFunction>
84
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,2>
85
- {
86
- static __host__
87
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
88
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
89
- {
90
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
91
-
92
- return XfrmTuple(f(thrust::get<0>(t)),
93
- f(thrust::get<1>(t)));
94
- }
95
-
96
- static __host__ __device__
97
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
98
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
99
- {
100
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
101
-
102
- return XfrmTuple(f(thrust::get<0>(t)),
103
- f(thrust::get<1>(t)));
104
- }
105
- };
106
-
107
-
108
- template<typename Tuple,
109
- template<typename> class UnaryMetaFunction,
110
- typename UnaryFunction>
111
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,3>
112
- {
113
- static __host__
114
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
115
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
116
- {
117
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
118
-
119
- return XfrmTuple(f(thrust::get<0>(t)),
120
- f(thrust::get<1>(t)),
121
- f(thrust::get<2>(t)));
122
- }
123
-
124
- static __host__ __device__
125
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
126
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
127
- {
128
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
129
-
130
- return XfrmTuple(f(thrust::get<0>(t)),
131
- f(thrust::get<1>(t)),
132
- f(thrust::get<2>(t)));
133
- }
134
- };
135
-
136
-
137
- template<typename Tuple,
138
- template<typename> class UnaryMetaFunction,
139
- typename UnaryFunction>
140
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,4>
141
- {
142
- static __host__
143
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
144
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
145
- {
146
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
147
-
148
- return XfrmTuple(f(thrust::get<0>(t)),
149
- f(thrust::get<1>(t)),
150
- f(thrust::get<2>(t)),
151
- f(thrust::get<3>(t)));
152
- }
153
-
154
- static __host__ __device__
155
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
156
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
157
- {
158
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
159
-
160
- return XfrmTuple(f(thrust::get<0>(t)),
161
- f(thrust::get<1>(t)),
162
- f(thrust::get<2>(t)),
163
- f(thrust::get<3>(t)));
164
- }
165
- };
166
-
167
-
168
- template<typename Tuple,
169
- template<typename> class UnaryMetaFunction,
170
- typename UnaryFunction>
171
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,5>
172
- {
173
- static __host__
174
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
175
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
176
- {
177
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
178
-
179
- return XfrmTuple(f(thrust::get<0>(t)),
180
- f(thrust::get<1>(t)),
181
- f(thrust::get<2>(t)),
182
- f(thrust::get<3>(t)),
183
- f(thrust::get<4>(t)));
184
- }
185
-
186
- static __host__ __device__
187
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
188
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
189
- {
190
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
191
-
192
- return XfrmTuple(f(thrust::get<0>(t)),
193
- f(thrust::get<1>(t)),
194
- f(thrust::get<2>(t)),
195
- f(thrust::get<3>(t)),
196
- f(thrust::get<4>(t)));
197
- }
198
- };
199
-
200
-
201
- template<typename Tuple,
202
- template<typename> class UnaryMetaFunction,
203
- typename UnaryFunction>
204
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,6>
205
- {
206
- static __host__
207
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
208
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
209
- {
210
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
211
-
212
- return XfrmTuple(f(thrust::get<0>(t)),
213
- f(thrust::get<1>(t)),
214
- f(thrust::get<2>(t)),
215
- f(thrust::get<3>(t)),
216
- f(thrust::get<4>(t)),
217
- f(thrust::get<5>(t)));
218
- }
219
-
220
- static __host__ __device__
221
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
222
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
223
- {
224
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
225
-
226
- return XfrmTuple(f(thrust::get<0>(t)),
227
- f(thrust::get<1>(t)),
228
- f(thrust::get<2>(t)),
229
- f(thrust::get<3>(t)),
230
- f(thrust::get<4>(t)),
231
- f(thrust::get<5>(t)));
232
- }
233
- };
234
-
235
-
236
- template<typename Tuple,
237
- template<typename> class UnaryMetaFunction,
238
- typename UnaryFunction>
239
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,7>
240
- {
241
- static __host__
242
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
243
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
244
- {
245
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
246
-
247
- return XfrmTuple(f(thrust::get<0>(t)),
248
- f(thrust::get<1>(t)),
249
- f(thrust::get<2>(t)),
250
- f(thrust::get<3>(t)),
251
- f(thrust::get<4>(t)),
252
- f(thrust::get<5>(t)),
253
- f(thrust::get<6>(t)));
254
- }
255
-
256
- static __host__ __device__
257
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
258
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
259
- {
260
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
261
-
262
- return XfrmTuple(f(thrust::get<0>(t)),
263
- f(thrust::get<1>(t)),
264
- f(thrust::get<2>(t)),
265
- f(thrust::get<3>(t)),
266
- f(thrust::get<4>(t)),
267
- f(thrust::get<5>(t)),
268
- f(thrust::get<6>(t)));
269
- }
270
- };
271
-
272
-
273
- template<typename Tuple,
274
- template<typename> class UnaryMetaFunction,
275
- typename UnaryFunction>
276
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,8>
277
- {
278
- static __host__
279
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
280
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
281
- {
282
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
283
-
284
- return XfrmTuple(f(thrust::get<0>(t)),
285
- f(thrust::get<1>(t)),
286
- f(thrust::get<2>(t)),
287
- f(thrust::get<3>(t)),
288
- f(thrust::get<4>(t)),
289
- f(thrust::get<5>(t)),
290
- f(thrust::get<6>(t)),
291
- f(thrust::get<7>(t)));
292
- }
293
-
294
- static __host__ __device__
295
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
296
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
297
- {
298
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
299
-
300
- return XfrmTuple(f(thrust::get<0>(t)),
301
- f(thrust::get<1>(t)),
302
- f(thrust::get<2>(t)),
303
- f(thrust::get<3>(t)),
304
- f(thrust::get<4>(t)),
305
- f(thrust::get<5>(t)),
306
- f(thrust::get<6>(t)),
307
- f(thrust::get<7>(t)));
308
- }
309
- };
310
-
311
-
312
- template<typename Tuple,
313
- template<typename> class UnaryMetaFunction,
314
- typename UnaryFunction>
315
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,9>
316
- {
317
- static __host__
318
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
319
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
320
- {
321
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
322
-
323
- return XfrmTuple(f(thrust::get<0>(t)),
324
- f(thrust::get<1>(t)),
325
- f(thrust::get<2>(t)),
326
- f(thrust::get<3>(t)),
327
- f(thrust::get<4>(t)),
328
- f(thrust::get<5>(t)),
329
- f(thrust::get<6>(t)),
330
- f(thrust::get<7>(t)),
331
- f(thrust::get<8>(t)));
332
- }
333
-
334
- static __host__ __device__
335
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
336
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
337
- {
338
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
339
-
340
- return XfrmTuple(f(thrust::get<0>(t)),
341
- f(thrust::get<1>(t)),
342
- f(thrust::get<2>(t)),
343
- f(thrust::get<3>(t)),
344
- f(thrust::get<4>(t)),
345
- f(thrust::get<5>(t)),
346
- f(thrust::get<6>(t)),
347
- f(thrust::get<7>(t)),
348
- f(thrust::get<8>(t)));
349
- }
350
- };
351
-
352
-
353
- template<typename Tuple,
354
- template<typename> class UnaryMetaFunction,
355
- typename UnaryFunction>
356
- struct tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction,10>
357
- {
358
- static __host__
359
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
360
- do_it_on_the_host(const Tuple &t, UnaryFunction f)
361
- {
362
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
363
-
364
- return XfrmTuple(f(thrust::get<0>(t)),
365
- f(thrust::get<1>(t)),
366
- f(thrust::get<2>(t)),
367
- f(thrust::get<3>(t)),
368
- f(thrust::get<4>(t)),
369
- f(thrust::get<5>(t)),
370
- f(thrust::get<6>(t)),
371
- f(thrust::get<7>(t)),
372
- f(thrust::get<8>(t)),
373
- f(thrust::get<9>(t)));
374
- }
375
-
376
- static __host__ __device__
377
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
378
- do_it_on_the_host_or_device(const Tuple &t, UnaryFunction f)
379
- {
380
- typedef typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type XfrmTuple;
381
-
382
- return XfrmTuple(f(thrust::get<0>(t)),
383
- f(thrust::get<1>(t)),
384
- f(thrust::get<2>(t)),
385
- f(thrust::get<3>(t)),
386
- f(thrust::get<4>(t)),
387
- f(thrust::get<5>(t)),
388
- f(thrust::get<6>(t)),
389
- f(thrust::get<7>(t)),
390
- f(thrust::get<8>(t)),
391
- f(thrust::get<9>(t)));
392
- }
393
- };
394
-
395
-
396
- template<template<typename> class UnaryMetaFunction,
397
- typename Tuple,
398
- typename UnaryFunction>
399
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
400
- tuple_host_transform(const Tuple &t, UnaryFunction f)
401
- {
402
- return tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction>::do_it_on_the_host(t,f);
403
- }
404
-
405
- template<template<typename> class UnaryMetaFunction,
406
- typename Tuple,
407
- typename UnaryFunction>
408
- typename tuple_meta_transform<Tuple,UnaryMetaFunction>::type
409
- __host__ __device__
410
- tuple_host_device_transform(const Tuple &t, UnaryFunction f)
411
- {
412
- return tuple_transform_functor<Tuple,UnaryMetaFunction,UnaryFunction>::do_it_on_the_host_or_device(t,f);
413
- }
414
-
415
- } // end detail
416
-
417
- } // end thrust
418
-
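The ten hand-written specializations in this deleted header exist because it predates variadic templates: each arity needs its own overload that applies `f` to every tuple element and packs the results into the tuple type computed by `tuple_meta_transform`. As a rough, standalone C++14 sketch of the same idea (this is not Thrust code; `tuple_transform` and `to_double` are illustrative names), the whole family collapses into a single index-sequence expansion:

```cpp
#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

// Example metafunction: map every element type to double.
template <typename T> struct to_double { using type = double; };

template <template <typename> class MetaFn, typename Tuple, typename F, std::size_t... I>
auto tuple_transform_impl(const Tuple& t, F f, std::index_sequence<I...>)
    -> std::tuple<typename MetaFn<typename std::tuple_element<I, Tuple>::type>::type...>
{
  // Apply f to every element -- what each XfrmTuple(f(get<0>(t)), f(get<1>(t)), ...) spells out.
  return { f(std::get<I>(t))... };
}

template <template <typename> class MetaFn, typename... Ts, typename F>
auto tuple_transform(const std::tuple<Ts...>& t, F f)
{
  return tuple_transform_impl<MetaFn>(t, f, std::index_sequence_for<Ts...>{});
}

int main() {
  auto doubled = tuple_transform<to_double>(
      std::make_tuple(1, 2.5f, 3L),
      [](auto x) { return static_cast<double>(x) * 2.0; });
  std::cout << std::get<0>(doubled) << ' '
            << std::get<1>(doubled) << ' '
            << std::get<2>(doubled) << '\n';   // prints: 2 5 6
}
```

The enumerated form in the header is presumably kept for compatibility with older host compilers and with Thrust's fixed-arity tuple, which historically tops out at ten elements.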
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/partition.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the partition.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch partition
24
-
25
- #include <thrust/system/detail/sequential/partition.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/partition.h>
32
- #include <thrust/system/cuda/detail/partition.h>
33
- #include <thrust/system/omp/detail/partition.h>
34
- #include <thrust/system/tbb/detail/partition.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_PARTITION_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/partition.h>
38
- #include __THRUST_HOST_SYSTEM_PARTITION_HEADER
39
- #undef __THRUST_HOST_SYSTEM_PARTITION_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_PARTITION_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/partition.h>
42
- #include __THRUST_DEVICE_SYSTEM_PARTITION_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_PARTITION_HEADER
44
-
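This deleted header exists so that a later unqualified call to `partition`, made on a backend's execution-policy argument, can be resolved through argument-dependent lookup: including it pulls every configured backend's `partition` overloads into scope. A minimal standalone sketch of that dispatch pattern (illustrative names only, not Thrust's real ones):

```cpp
#include <iostream>

namespace host_backend {
  struct tag {};
  void do_partition(tag) { std::cout << "host partition\n"; }
}

namespace device_backend {
  struct tag {};
  void do_partition(tag) { std::cout << "device partition\n"; }
}

// Generic frontend: the unqualified call is resolved by ADL on the tag's
// namespace -- but only if that backend's header has been included, which is
// exactly what adl/partition.h guarantees for the real Thrust backends.
template <typename Tag>
void partition_dispatch(Tag t) { do_partition(t); }

int main() {
  partition_dispatch(host_backend::tag{});    // prints "host partition"
  partition_dispatch(device_backend::tag{});  // prints "device partition"
}
```

In Thrust the role of the tags is played by execution policies, and the macro block at the end of the header includes whichever backend headers the host and device systems are configured to use.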
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reduce_intervals.h DELETED
@@ -1,125 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/system/tbb/detail/execution_policy.h>
21
- #include <thrust/detail/seq.h>
22
-
23
- #include <tbb/parallel_for.h>
24
- #include <thrust/iterator/iterator_traits.h>
25
- #include <thrust/detail/minmax.h>
26
- #include <thrust/system/cpp/memory.h>
27
- #include <thrust/reduce.h>
28
- #include <cassert>
29
-
30
- namespace thrust
31
- {
32
- namespace system
33
- {
34
- namespace tbb
35
- {
36
- namespace detail
37
- {
38
- namespace reduce_intervals_detail
39
- {
40
-
41
-
42
- template<typename L, typename R>
43
- inline L divide_ri(const L x, const R y)
44
- {
45
- return (x + (y - 1)) / y;
46
- }
47
-
48
-
49
- template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename Size, typename BinaryFunction>
50
- struct body
51
- {
52
- RandomAccessIterator1 first;
53
- RandomAccessIterator2 result;
54
- Size n, interval_size;
55
- BinaryFunction binary_op;
56
-
57
- body(RandomAccessIterator1 first, RandomAccessIterator2 result, Size n, Size interval_size, BinaryFunction binary_op)
58
- : first(first), result(result), n(n), interval_size(interval_size), binary_op(binary_op)
59
- {}
60
-
61
- void operator()(const ::tbb::blocked_range<Size> &r) const
62
- {
63
- assert(r.size() == 1);
64
-
65
- Size interval_idx = r.begin();
66
-
67
- Size offset_to_first = interval_size * interval_idx;
68
- Size offset_to_last = thrust::min(n, offset_to_first + interval_size);
69
-
70
- RandomAccessIterator1 my_first = first + offset_to_first;
71
- RandomAccessIterator1 my_last = first + offset_to_last;
72
-
73
- // carefully pass the init value for the interval with raw_reference_cast
74
- typedef typename BinaryFunction::result_type sum_type;
75
- result[interval_idx] =
76
- thrust::reduce(thrust::seq, my_first + 1, my_last, sum_type(thrust::raw_reference_cast(*my_first)), binary_op);
77
- }
78
- };
79
-
80
-
81
- template<typename RandomAccessIterator1, typename RandomAccessIterator2, typename Size, typename BinaryFunction>
82
- body<RandomAccessIterator1,RandomAccessIterator2,Size,BinaryFunction>
83
- make_body(RandomAccessIterator1 first, RandomAccessIterator2 result, Size n, Size interval_size, BinaryFunction binary_op)
84
- {
85
- return body<RandomAccessIterator1,RandomAccessIterator2,Size,BinaryFunction>(first, result, n, interval_size, binary_op);
86
- }
87
-
88
-
89
- } // end reduce_intervals_detail
90
-
91
-
92
- template<typename DerivedPolicy, typename RandomAccessIterator1, typename Size, typename RandomAccessIterator2, typename BinaryFunction>
93
- void reduce_intervals(thrust::tbb::execution_policy<DerivedPolicy> &,
94
- RandomAccessIterator1 first,
95
- RandomAccessIterator1 last,
96
- Size interval_size,
97
- RandomAccessIterator2 result,
98
- BinaryFunction binary_op)
99
- {
100
- typename thrust::iterator_difference<RandomAccessIterator1>::type n = last - first;
101
-
102
- Size num_intervals = reduce_intervals_detail::divide_ri(n, interval_size);
103
-
104
- ::tbb::parallel_for(::tbb::blocked_range<Size>(0, num_intervals, 1), reduce_intervals_detail::make_body(first, result, Size(n), interval_size, binary_op), ::tbb::simple_partitioner());
105
- }
106
-
107
-
108
- template<typename DerivedPolicy, typename RandomAccessIterator1, typename Size, typename RandomAccessIterator2>
109
- void reduce_intervals(thrust::tbb::execution_policy<DerivedPolicy> &exec,
110
- RandomAccessIterator1 first,
111
- RandomAccessIterator1 last,
112
- Size interval_size,
113
- RandomAccessIterator2 result)
114
- {
115
- typedef typename thrust::iterator_value<RandomAccessIterator1>::type value_type;
116
-
117
- return thrust::system::tbb::detail::reduce_intervals(exec, first, last, interval_size, result, thrust::plus<value_type>());
118
- }
119
-
120
-
121
- } // end detail
122
- } // end tbb
123
- } // end system
124
- } // end thrust
125
-
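The deleted `reduce_intervals` splits `[first, last)` into `ceil(n / interval_size)` chunks, reduces each chunk seeded with the chunk's first element, and writes one value per chunk to `result`; the TBB body above simply runs one chunk per `parallel_for` task under a `simple_partitioner`. A serial, standalone sketch of the same computation (it mirrors the header only loosely and uses hypothetical names):

```cpp
#include <algorithm>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

template <typename It, typename Out, typename Size, typename BinaryOp>
void reduce_intervals(It first, It last, Size interval_size, Out result, BinaryOp op) {
  Size n = static_cast<Size>(std::distance(first, last));
  Size num_intervals = (n + interval_size - 1) / interval_size;  // divide_ri
  for (Size i = 0; i < num_intervals; ++i) {
    Size lo = i * interval_size;
    Size hi = std::min(n, lo + interval_size);
    // Seed the reduction with the interval's first element, as the TBB body does.
    result[i] = std::accumulate(first + lo + 1, first + hi, *(first + lo), op);
  }
}

int main() {
  std::vector<int> data{1, 2, 3, 4, 5, 6, 7};
  std::vector<int> sums(3);
  reduce_intervals(data.begin(), data.end(), 3, sums.begin(), std::plus<int>());
  for (int s : sums) std::cout << s << ' ';   // prints: 6 15 7
  std::cout << '\n';
}
```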
spaces/CVPR/regionclip-demo/detectron2/utils/visualizer.py DELETED
@@ -1,1219 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import colorsys
3
- import logging
4
- import math
5
- import numpy as np
6
- from enum import Enum, unique
7
- import cv2
8
- import matplotlib as mpl
9
- import matplotlib.colors as mplc
10
- import matplotlib.figure as mplfigure
11
- import pycocotools.mask as mask_util
12
- import torch
13
- from matplotlib.backends.backend_agg import FigureCanvasAgg
14
- from PIL import Image
15
-
16
- from detectron2.data import MetadataCatalog
17
- from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
18
- from detectron2.utils.file_io import PathManager
19
-
20
- from .colormap import random_color
21
-
22
- logger = logging.getLogger(__name__)
23
-
24
- __all__ = ["ColorMode", "VisImage", "Visualizer"]
25
-
26
-
27
- _SMALL_OBJECT_AREA_THRESH = 1000
28
- _LARGE_MASK_AREA_THRESH = 120000
29
- _OFF_WHITE = (1.0, 1.0, 240.0 / 255)
30
- _BLACK = (0, 0, 0)
31
- _RED = (1.0, 0, 0)
32
-
33
- _KEYPOINT_THRESHOLD = 0.05
34
-
35
-
36
- @unique
37
- class ColorMode(Enum):
38
- """
39
- Enum of different color modes to use for instance visualizations.
40
- """
41
-
42
- IMAGE = 0
43
- """
44
- Picks a random color for every instance and overlay segmentations with low opacity.
45
- """
46
- SEGMENTATION = 1
47
- """
48
- Let instances of the same category have similar colors
49
- (from metadata.thing_colors), and overlay them with
50
- high opacity. This provides more attention on the quality of segmentation.
51
- """
52
- IMAGE_BW = 2
53
- """
54
- Same as IMAGE, but convert all areas without masks to gray-scale.
55
- Only available for drawing per-instance mask predictions.
56
- """
57
-
58
-
59
- class GenericMask:
60
- """
61
- Attribute:
62
- polygons (list[ndarray]): polygons for this mask.
63
- Each ndarray has format [x, y, x, y, ...]
64
- mask (ndarray): a binary mask
65
- """
66
-
67
- def __init__(self, mask_or_polygons, height, width):
68
- self._mask = self._polygons = self._has_holes = None
69
- self.height = height
70
- self.width = width
71
-
72
- m = mask_or_polygons
73
- if isinstance(m, dict):
74
- # RLEs
75
- assert "counts" in m and "size" in m
76
- if isinstance(m["counts"], list): # uncompressed RLEs
77
- h, w = m["size"]
78
- assert h == height and w == width
79
- m = mask_util.frPyObjects(m, h, w)
80
- self._mask = mask_util.decode(m)[:, :]
81
- return
82
-
83
- if isinstance(m, list): # list[ndarray]
84
- self._polygons = [np.asarray(x).reshape(-1) for x in m]
85
- return
86
-
87
- if isinstance(m, np.ndarray): # assumed to be a binary mask
88
- assert m.shape[1] != 2, m.shape
89
- assert m.shape == (height, width), m.shape
90
- self._mask = m.astype("uint8")
91
- return
92
-
93
- raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
94
-
95
- @property
96
- def mask(self):
97
- if self._mask is None:
98
- self._mask = self.polygons_to_mask(self._polygons)
99
- return self._mask
100
-
101
- @property
102
- def polygons(self):
103
- if self._polygons is None:
104
- self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
105
- return self._polygons
106
-
107
- @property
108
- def has_holes(self):
109
- if self._has_holes is None:
110
- if self._mask is not None:
111
- self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
112
- else:
113
- self._has_holes = False # if original format is polygon, does not have holes
114
- return self._has_holes
115
-
116
- def mask_to_polygons(self, mask):
117
- # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
118
- # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
119
- # Internal contours (holes) are placed in hierarchy-2.
120
- # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
121
- mask = np.ascontiguousarray(mask) # some versions of cv2 do not support non-contiguous arrays
122
- res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
123
- hierarchy = res[-1]
124
- if hierarchy is None: # empty mask
125
- return [], False
126
- has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
127
- res = res[-2]
128
- res = [x.flatten() for x in res]
129
- # These coordinates from OpenCV are integers in range [0, W-1 or H-1].
130
- # We add 0.5 to turn them into real-value coordinate space. A better solution
131
- # would be to first +0.5 and then dilate the returned polygon by 0.5.
132
- res = [x + 0.5 for x in res if len(x) >= 6]
133
- return res, has_holes
134
-
135
- def polygons_to_mask(self, polygons):
136
- rle = mask_util.frPyObjects(polygons, self.height, self.width)
137
- rle = mask_util.merge(rle)
138
- return mask_util.decode(rle)[:, :]
139
-
140
- def area(self):
141
- return self.mask.sum()
142
-
143
- def bbox(self):
144
- p = mask_util.frPyObjects(self.polygons, self.height, self.width)
145
- p = mask_util.merge(p)
146
- bbox = mask_util.toBbox(p)
147
- bbox[2] += bbox[0]
148
- bbox[3] += bbox[1]
149
- return bbox
150
-
151
-
152
- class _PanopticPrediction:
153
- """
154
- Unify different panoptic annotation/prediction formats
155
- """
156
-
157
- def __init__(self, panoptic_seg, segments_info, metadata=None):
158
- if segments_info is None:
159
- assert metadata is not None
160
- # If "segments_info" is None, we assume "panoptic_img" is a
161
- # H*W int32 image storing the panoptic_id in the format of
162
- # category_id * label_divisor + instance_id. We reserve -1 for
163
- # VOID label.
164
- label_divisor = metadata.label_divisor
165
- segments_info = []
166
- for panoptic_label in np.unique(panoptic_seg.numpy()):
167
- if panoptic_label == -1:
168
- # VOID region.
169
- continue
170
- pred_class = panoptic_label // label_divisor
171
- isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
172
- segments_info.append(
173
- {
174
- "id": int(panoptic_label),
175
- "category_id": int(pred_class),
176
- "isthing": bool(isthing),
177
- }
178
- )
179
- del metadata
180
-
181
- self._seg = panoptic_seg
182
-
183
- self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
184
- segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
185
- areas = areas.numpy()
186
- sorted_idxs = np.argsort(-areas)
187
- self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
188
- self._seg_ids = self._seg_ids.tolist()
189
- for sid, area in zip(self._seg_ids, self._seg_areas):
190
- if sid in self._sinfo:
191
- self._sinfo[sid]["area"] = float(area)
192
-
193
- def non_empty_mask(self):
194
- """
195
- Returns:
196
- (H, W) array, a mask for all pixels that have a prediction
197
- """
198
- empty_ids = []
199
- for id in self._seg_ids:
200
- if id not in self._sinfo:
201
- empty_ids.append(id)
202
- if len(empty_ids) == 0:
203
- return np.zeros(self._seg.shape, dtype=np.uint8)
204
- assert (
205
- len(empty_ids) == 1
206
- ), ">1 ids corresponds to no labels. This is currently not supported"
207
- return (self._seg != empty_ids[0]).numpy().astype(np.bool)
208
-
209
- def semantic_masks(self):
210
- for sid in self._seg_ids:
211
- sinfo = self._sinfo.get(sid)
212
- if sinfo is None or sinfo["isthing"]:
213
- # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
214
- continue
215
- yield (self._seg == sid).numpy().astype(np.bool), sinfo
216
-
217
- def instance_masks(self):
218
- for sid in self._seg_ids:
219
- sinfo = self._sinfo.get(sid)
220
- if sinfo is None or not sinfo["isthing"]:
221
- continue
222
- mask = (self._seg == sid).numpy().astype(np.bool)
223
- if mask.sum() > 0:
224
- yield mask, sinfo
225
-
226
-
227
- def _create_text_labels(classes, scores, class_names, is_crowd=None):
228
- """
229
- Args:
230
- classes (list[int] or None):
231
- scores (list[float] or None):
232
- class_names (list[str] or None):
233
- is_crowd (list[bool] or None):
234
-
235
- Returns:
236
- list[str] or None
237
- """
238
- labels = None
239
- if classes is not None:
240
- if class_names is not None and len(class_names) > 0:
241
- labels = [class_names[i] for i in classes]
242
- else:
243
- labels = [str(i) for i in classes]
244
- if scores is not None:
245
- if labels is None:
246
- labels = ["{:.0f}%".format(s * 100) for s in scores]
247
- else:
248
- labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
249
- if labels is not None and is_crowd is not None:
250
- labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
251
- return labels
252
-
253
-
254
- class VisImage:
255
- def __init__(self, img, scale=1.0):
256
- """
257
- Args:
258
- img (ndarray): an RGB image of shape (H, W, 3).
259
- scale (float): scale the input image
260
- """
261
- self.img = img
262
- self.scale = scale
263
- self.width, self.height = img.shape[1], img.shape[0]
264
- self._setup_figure(img)
265
-
266
- def _setup_figure(self, img):
267
- """
268
- Args:
269
- Same as in :meth:`__init__()`.
270
-
271
- Returns:
272
- fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
273
- ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
274
- """
275
- fig = mplfigure.Figure(frameon=False)
276
- self.dpi = fig.get_dpi()
277
- # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
278
- # (https://github.com/matplotlib/matplotlib/issues/15363)
279
- fig.set_size_inches(
280
- (self.width * self.scale + 1e-2) / self.dpi,
281
- (self.height * self.scale + 1e-2) / self.dpi,
282
- )
283
- self.canvas = FigureCanvasAgg(fig)
284
- # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
285
- ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
286
- ax.axis("off")
287
- # Need to imshow this first so that other patches can be drawn on top
288
- ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
289
-
290
- self.fig = fig
291
- self.ax = ax
292
-
293
- def save(self, filepath):
294
- """
295
- Args:
296
- filepath (str): a string that contains the absolute path, including the file name, where
297
- the visualized image will be saved.
298
- """
299
- self.fig.savefig(filepath)
300
-
301
- def get_image(self):
302
- """
303
- Returns:
304
- ndarray:
305
- the visualized image of shape (H, W, 3) (RGB) in uint8 type.
306
- The shape is scaled w.r.t the input image using the given `scale` argument.
307
- """
308
- canvas = self.canvas
309
- s, (width, height) = canvas.print_to_buffer()
310
- # buf = io.BytesIO() # works for cairo backend
311
- # canvas.print_rgba(buf)
312
- # width, height = self.width, self.height
313
- # s = buf.getvalue()
314
-
315
- buffer = np.frombuffer(s, dtype="uint8")
316
-
317
- img_rgba = buffer.reshape(height, width, 4)
318
- rgb, alpha = np.split(img_rgba, [3], axis=2)
319
- return rgb.astype("uint8")
320
-
321
-
322
- class Visualizer:
323
- """
324
- Visualizer that draws data about detection/segmentation on images.
325
-
326
- It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
327
- that draw primitive objects to images, as well as high-level wrappers like
328
- `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
329
- that draw composite data in some pre-defined style.
330
-
331
- Note that the exact visualization style for the high-level wrappers are subject to change.
332
- Style such as color, opacity, label contents, visibility of labels, or even the visibility
333
- of objects themselves (e.g. when the object is too small) may change according
334
- to different heuristics, as long as the results still look visually reasonable.
335
-
336
- To obtain a consistent style, you can implement custom drawing functions with the
337
- abovementioned primitive methods instead. If you need more customized visualization
338
- styles, you can process the data yourself following their format documented in
339
- tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
340
- intend to satisfy everyone's preference on drawing styles.
341
-
342
- This visualizer focuses on high rendering quality rather than performance. It is not
343
- designed to be used for real-time applications.
344
- """
345
-
346
- # TODO implement a fast, rasterized version using OpenCV
347
-
348
- def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
349
- """
350
- Args:
351
- img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
352
- the height and width of the image respectively. C is the number of
353
- color channels. The image is required to be in RGB format since that
354
- is a requirement of the Matplotlib library. The image is also expected
355
- to be in the range [0, 255].
356
- metadata (Metadata): dataset metadata (e.g. class names and colors)
357
- instance_mode (ColorMode): defines one of the pre-defined style for drawing
358
- instances on an image.
359
- """
360
- self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
361
- if metadata is None:
362
- metadata = MetadataCatalog.get("__nonexist__")
363
- self.metadata = metadata
364
- self.output = VisImage(self.img, scale=scale)
365
- self.cpu_device = torch.device("cpu")
366
-
367
- # too small texts are useless, therefore clamp to 9
368
- self._default_font_size = max(
369
- np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
370
- )
371
- self._instance_mode = instance_mode
372
-
373
- def draw_instance_predictions(self, predictions):
374
- """
375
- Draw instance-level prediction results on an image.
376
-
377
- Args:
378
- predictions (Instances): the output of an instance detection/segmentation
379
- model. Following fields will be used to draw:
380
- "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
381
-
382
- Returns:
383
- output (VisImage): image object with visualizations.
384
- """
385
- boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
386
- scores = predictions.scores if predictions.has("scores") else None
387
- classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
388
- labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
389
- keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
390
-
391
- if predictions.has("pred_masks"):
392
- masks = np.asarray(predictions.pred_masks)
393
- masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
394
- else:
395
- masks = None
396
-
397
- if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
398
- colors = [
399
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
400
- ]
401
- alpha = 0.8
402
- else:
403
- colors = None
404
- alpha = 0.5
405
-
406
- if self._instance_mode == ColorMode.IMAGE_BW:
407
- self.output.img = self._create_grayscale_image(
408
- (predictions.pred_masks.any(dim=0) > 0).numpy()
409
- if predictions.has("pred_masks")
410
- else None
411
- )
412
- alpha = 0.3
413
-
414
- self.overlay_instances(
415
- masks=masks,
416
- boxes=boxes,
417
- labels=labels,
418
- keypoints=keypoints,
419
- assigned_colors=colors,
420
- alpha=alpha,
421
- )
422
- return self.output
423
-
424
- def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
425
- """
426
- Draw semantic segmentation predictions/labels.
427
-
428
- Args:
429
- sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
430
- Each value is the integer label of the pixel.
431
- area_threshold (int): segments with less than `area_threshold` are not drawn.
432
- alpha (float): the larger it is, the more opaque the segmentations are.
433
-
434
- Returns:
435
- output (VisImage): image object with visualizations.
436
- """
437
- if isinstance(sem_seg, torch.Tensor):
438
- sem_seg = sem_seg.numpy()
439
- labels, areas = np.unique(sem_seg, return_counts=True)
440
- sorted_idxs = np.argsort(-areas).tolist()
441
- labels = labels[sorted_idxs]
442
- for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
443
- try:
444
- mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
445
- except (AttributeError, IndexError):
446
- mask_color = None
447
-
448
- binary_mask = (sem_seg == label).astype(np.uint8)
449
- text = self.metadata.stuff_classes[label]
450
- self.draw_binary_mask(
451
- binary_mask,
452
- color=mask_color,
453
- edge_color=_OFF_WHITE,
454
- text=text,
455
- alpha=alpha,
456
- area_threshold=area_threshold,
457
- )
458
- return self.output
459
-
460
- def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
461
- """
462
- Draw panoptic prediction annotations or results.
463
-
464
- Args:
465
- panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
466
- segment.
467
- segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
468
- If it is a ``list[dict]``, each dict contains keys "id", "category_id".
469
- If None, category id of each pixel is computed by
470
- ``pixel // metadata.label_divisor``.
471
- area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
472
-
473
- Returns:
474
- output (VisImage): image object with visualizations.
475
- """
476
- pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
477
-
478
- if self._instance_mode == ColorMode.IMAGE_BW:
479
- self.output.img = self._create_grayscale_image(pred.non_empty_mask())
480
-
481
- # draw mask for all semantic segments first i.e. "stuff"
482
- for mask, sinfo in pred.semantic_masks():
483
- category_idx = sinfo["category_id"]
484
- try:
485
- mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
486
- except AttributeError:
487
- mask_color = None
488
-
489
- text = self.metadata.stuff_classes[category_idx]
490
- self.draw_binary_mask(
491
- mask,
492
- color=mask_color,
493
- edge_color=_OFF_WHITE,
494
- text=text,
495
- alpha=alpha,
496
- area_threshold=area_threshold,
497
- )
498
-
499
- # draw mask for all instances second
500
- all_instances = list(pred.instance_masks())
501
- if len(all_instances) == 0:
502
- return self.output
503
- masks, sinfo = list(zip(*all_instances))
504
- category_ids = [x["category_id"] for x in sinfo]
505
-
506
- try:
507
- scores = [x["score"] for x in sinfo]
508
- except KeyError:
509
- scores = None
510
- labels = _create_text_labels(
511
- category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
512
- )
513
-
514
- try:
515
- colors = [
516
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
517
- ]
518
- except AttributeError:
519
- colors = None
520
- self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
521
-
522
- return self.output
523
-
524
- draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility
525
-
526
- def draw_dataset_dict(self, dic):
527
- """
528
- Draw annotations/segmentations in Detectron2 Dataset format.
529
-
530
- Args:
531
- dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
532
-
533
- Returns:
534
- output (VisImage): image object with visualizations.
535
- """
536
- annos = dic.get("annotations", None)
537
- if annos:
538
- if "segmentation" in annos[0]:
539
- masks = [x["segmentation"] for x in annos]
540
- else:
541
- masks = None
542
- if "keypoints" in annos[0]:
543
- keypts = [x["keypoints"] for x in annos]
544
- keypts = np.array(keypts).reshape(len(annos), -1, 3)
545
- else:
546
- keypts = None
547
-
548
- boxes = [
549
- BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
550
- if len(x["bbox"]) == 4
551
- else x["bbox"]
552
- for x in annos
553
- ]
554
-
555
- colors = None
556
- category_ids = [x["category_id"] for x in annos]
557
- if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
558
- colors = [
559
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
560
- for c in category_ids
561
- ]
562
- names = self.metadata.get("thing_classes", None)
563
- labels = _create_text_labels(
564
- category_ids,
565
- scores=None,
566
- class_names=names,
567
- is_crowd=[x.get("iscrowd", 0) for x in annos],
568
- )
569
- self.overlay_instances(
570
- labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
571
- )
572
-
573
- sem_seg = dic.get("sem_seg", None)
574
- if sem_seg is None and "sem_seg_file_name" in dic:
575
- with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
576
- sem_seg = Image.open(f)
577
- sem_seg = np.asarray(sem_seg, dtype="uint8")
578
- if sem_seg is not None:
579
- self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
580
-
581
- pan_seg = dic.get("pan_seg", None)
582
- if pan_seg is None and "pan_seg_file_name" in dic:
583
- with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
584
- pan_seg = Image.open(f)
585
- pan_seg = np.asarray(pan_seg)
586
- from panopticapi.utils import rgb2id
587
-
588
- pan_seg = rgb2id(pan_seg)
589
- if pan_seg is not None:
590
- segments_info = dic["segments_info"]
591
- pan_seg = torch.tensor(pan_seg)
592
- self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
593
- return self.output
594
-
595
- def overlay_instances(
596
- self,
597
- *,
598
- boxes=None,
599
- labels=None,
600
- masks=None,
601
- keypoints=None,
602
- assigned_colors=None,
603
- alpha=0.5
604
- ):
605
- """
606
- Args:
607
- boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
608
- or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
609
- or a :class:`RotatedBoxes`,
610
- or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
611
- for the N objects in a single image,
612
- labels (list[str]): the text to be displayed for each instance.
613
- masks (masks-like object): Supported types are:
614
-
615
- * :class:`detectron2.structures.PolygonMasks`,
616
- :class:`detectron2.structures.BitMasks`.
617
- * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
618
- The first level of the list corresponds to individual instances. The second
619
- level to all the polygons that compose the instance, and the third level
620
- to the polygon coordinates. The third level should have the format of
621
- [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
622
- * list[ndarray]: each ndarray is a binary mask of shape (H, W).
623
- * list[dict]: each dict is a COCO-style RLE.
624
- keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
625
- where the N is the number of instances and K is the number of keypoints.
626
- The last dimension corresponds to (x, y, visibility or score).
627
- assigned_colors (list[matplotlib.colors]): a list of colors, where each color
628
- corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
629
- for full list of formats that the colors are accepted in.
630
-
631
- Returns:
632
- output (VisImage): image object with visualizations.
633
- """
634
- num_instances = 0
635
- if boxes is not None:
636
- boxes = self._convert_boxes(boxes)
637
- num_instances = len(boxes)
638
- if masks is not None:
639
- masks = self._convert_masks(masks)
640
- if num_instances:
641
- assert len(masks) == num_instances
642
- else:
643
- num_instances = len(masks)
644
- if keypoints is not None:
645
- if num_instances:
646
- assert len(keypoints) == num_instances
647
- else:
648
- num_instances = len(keypoints)
649
- keypoints = self._convert_keypoints(keypoints)
650
- if labels is not None:
651
- assert len(labels) == num_instances
652
- if assigned_colors is None:
653
- assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
654
- if num_instances == 0:
655
- return self.output
656
- if boxes is not None and boxes.shape[1] == 5:
657
- return self.overlay_rotated_instances(
658
- boxes=boxes, labels=labels, assigned_colors=assigned_colors
659
- )
660
-
661
- # Display in largest to smallest order to reduce occlusion.
662
- areas = None
663
- if boxes is not None:
664
- areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
665
- elif masks is not None:
666
- areas = np.asarray([x.area() for x in masks])
667
-
668
- if areas is not None:
669
- sorted_idxs = np.argsort(-areas).tolist()
670
- # Re-order overlapped instances in descending order.
671
- boxes = boxes[sorted_idxs] if boxes is not None else None
672
- labels = [labels[k] for k in sorted_idxs] if labels is not None else None
673
- masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
674
- assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
675
- keypoints = keypoints[sorted_idxs] if keypoints is not None else None
676
-
677
- for i in range(num_instances):
678
- color = assigned_colors[i]
679
- if boxes is not None:
680
- self.draw_box(boxes[i], edge_color=color)
681
-
682
- if masks is not None:
683
- for segment in masks[i].polygons:
684
- self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
685
-
686
- if labels is not None:
687
- # first get a box
688
- if boxes is not None:
689
- x0, y0, x1, y1 = boxes[i]
690
- text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
691
- horiz_align = "left"
692
- elif masks is not None:
693
- # skip small mask without polygon
694
- if len(masks[i].polygons) == 0:
695
- continue
696
-
697
- x0, y0, x1, y1 = masks[i].bbox()
698
-
699
- # draw text in the center (defined by median) when box is not drawn
700
- # median is less sensitive to outliers.
701
- text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
702
- horiz_align = "center"
703
- else:
704
- continue # drawing the box confidence for keypoints isn't very useful.
705
- # for small objects, draw text at the side to avoid occlusion
706
- instance_area = (y1 - y0) * (x1 - x0)
707
- if (
708
- instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
709
- or y1 - y0 < 40 * self.output.scale
710
- ):
711
- if y1 >= self.output.height - 5:
712
- text_pos = (x1, y0)
713
- else:
714
- text_pos = (x0, y1)
715
-
716
- height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
717
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
718
- font_size = (
719
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
720
- * 0.5
721
- * self._default_font_size
722
- )
723
- self.draw_text(
724
- labels[i],
725
- text_pos,
726
- color=lighter_color,
727
- horizontal_alignment=horiz_align,
728
- font_size=font_size,
729
- )
730
-
731
- # draw keypoints
732
- if keypoints is not None:
733
- for keypoints_per_instance in keypoints:
734
- self.draw_and_connect_keypoints(keypoints_per_instance)
735
-
736
- return self.output
737
-
738
- def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
739
- """
740
- Args:
741
- boxes (ndarray): an Nx5 numpy array of
742
- (x_center, y_center, width, height, angle_degrees) format
743
- for the N objects in a single image.
744
- labels (list[str]): the text to be displayed for each instance.
745
- assigned_colors (list[matplotlib.colors]): a list of colors, where each color
746
- corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
747
- for full list of formats that the colors are accepted in.
748
-
749
- Returns:
750
- output (VisImage): image object with visualizations.
751
- """
752
- num_instances = len(boxes)
753
-
754
- if assigned_colors is None:
755
- assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
756
- if num_instances == 0:
757
- return self.output
758
-
759
- # Display in largest to smallest order to reduce occlusion.
760
- if boxes is not None:
761
- areas = boxes[:, 2] * boxes[:, 3]
762
-
763
- sorted_idxs = np.argsort(-areas).tolist()
764
- # Re-order overlapped instances in descending order.
765
- boxes = boxes[sorted_idxs]
766
- labels = [labels[k] for k in sorted_idxs] if labels is not None else None
767
- colors = [assigned_colors[idx] for idx in sorted_idxs]
768
-
769
- for i in range(num_instances):
770
- self.draw_rotated_box_with_label(
771
- boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
772
- )
773
-
774
- return self.output
775
-
776
- def draw_and_connect_keypoints(self, keypoints):
777
- """
778
- Draws keypoints of an instance and follows the rules for keypoint connections
779
- to draw lines between appropriate keypoints. This follows color heuristics for
780
- line color.
781
-
782
- Args:
783
- keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
784
- and the last dimension corresponds to (x, y, probability).
785
-
786
- Returns:
787
- output (VisImage): image object with visualizations.
788
- """
789
- visible = {}
790
- keypoint_names = self.metadata.get("keypoint_names")
791
- for idx, keypoint in enumerate(keypoints):
792
- # draw keypoint
793
- x, y, prob = keypoint
794
- if prob > _KEYPOINT_THRESHOLD:
795
- self.draw_circle((x, y), color=_RED)
796
- if keypoint_names:
797
- keypoint_name = keypoint_names[idx]
798
- visible[keypoint_name] = (x, y)
799
-
800
- if self.metadata.get("keypoint_connection_rules"):
801
- for kp0, kp1, color in self.metadata.keypoint_connection_rules:
802
- if kp0 in visible and kp1 in visible:
803
- x0, y0 = visible[kp0]
804
- x1, y1 = visible[kp1]
805
- color = tuple(x / 255.0 for x in color)
806
- self.draw_line([x0, x1], [y0, y1], color=color)
807
-
808
- # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
809
- # Note that this strategy is specific to person keypoints.
810
- # For other keypoints, it should just do nothing
811
- try:
812
- ls_x, ls_y = visible["left_shoulder"]
813
- rs_x, rs_y = visible["right_shoulder"]
814
- mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
815
- except KeyError:
816
- pass
817
- else:
818
- # draw line from nose to mid-shoulder
819
- nose_x, nose_y = visible.get("nose", (None, None))
820
- if nose_x is not None:
821
- self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
822
-
823
- try:
824
- # draw line from mid-shoulder to mid-hip
825
- lh_x, lh_y = visible["left_hip"]
826
- rh_x, rh_y = visible["right_hip"]
827
- except KeyError:
828
- pass
829
- else:
830
- mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
831
- self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
832
- return self.output
833
-
834
- """
835
- Primitive drawing functions:
836
- """
837
-
838
- def draw_text(
839
- self,
840
- text,
841
- position,
842
- *,
843
- font_size=None,
844
- color="g",
845
- horizontal_alignment="center",
846
- rotation=0
847
- ):
848
- """
849
- Args:
850
- text (str): class label
851
- position (tuple): a tuple of the x and y coordinates to place text on image.
852
- font_size (int, optional): font size of the text. If not provided, a font size
853
- proportional to the image width is calculated and used.
854
- color: color of the text. Refer to `matplotlib.colors` for full list
855
- of formats that are accepted.
856
- horizontal_alignment (str): see `matplotlib.text.Text`
857
- rotation: rotation angle in degrees CCW
858
-
859
- Returns:
860
- output (VisImage): image object with text drawn.
861
- """
862
- if not font_size:
863
- font_size = self._default_font_size
864
-
865
- # since the text background is dark, we don't want the text to be dark
866
- color = np.maximum(list(mplc.to_rgb(color)), 0.2)
867
- color[np.argmax(color)] = max(0.8, np.max(color))
868
-
869
- x, y = position
870
- self.output.ax.text(
871
- x,
872
- y,
873
- text,
874
- size=font_size * self.output.scale,
875
- family="sans-serif",
876
- bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
877
- verticalalignment="top",
878
- horizontalalignment=horizontal_alignment,
879
- color=color,
880
- zorder=10,
881
- rotation=rotation,
882
- )
883
- return self.output
884
-
885
- def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
886
- """
887
- Args:
888
- box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
889
- are the coordinates of the image's top left corner. x1 and y1 are the
890
- coordinates of the image's bottom right corner.
891
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
892
- edge_color: color of the outline of the box. Refer to `matplotlib.colors`
893
- for full list of formats that are accepted.
894
- line_style (string): the string to use to create the outline of the boxes.
895
-
896
- Returns:
897
- output (VisImage): image object with box drawn.
898
- """
899
- x0, y0, x1, y1 = box_coord
900
- width = x1 - x0
901
- height = y1 - y0
902
-
903
- linewidth = max(self._default_font_size / 4, 5)
904
-
905
- self.output.ax.add_patch(
906
- mpl.patches.Rectangle(
907
- (x0, y0),
908
- width,
909
- height,
910
- fill=False,
911
- edgecolor=edge_color,
912
- linewidth=linewidth * self.output.scale,
913
- alpha=alpha,
914
- linestyle=line_style,
915
- )
916
- )
917
- return self.output
918
-
919
- def draw_rotated_box_with_label(
920
- self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
921
- ):
922
- """
923
- Draw a rotated box with label on its top-left corner.
924
-
925
- Args:
926
- rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
927
- where cnt_x and cnt_y are the center coordinates of the box.
928
- w and h are the width and height of the box. angle represents how
929
- many degrees the box is rotated CCW with regard to the 0-degree box.
930
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
931
- edge_color: color of the outline of the box. Refer to `matplotlib.colors`
932
- for full list of formats that are accepted.
933
- line_style (string): the string to use to create the outline of the boxes.
934
- label (string): label for rotated box. It will not be rendered when set to None.
935
-
936
- Returns:
937
- output (VisImage): image object with box drawn.
938
- """
939
- cnt_x, cnt_y, w, h, angle = rotated_box
940
- area = w * h
941
- # use thinner lines when the box is small
942
- linewidth = self._default_font_size / (
943
- 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
944
- )
945
-
946
- theta = angle * math.pi / 180.0
947
- c = math.cos(theta)
948
- s = math.sin(theta)
949
- rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
950
- # x: left->right ; y: top->down
951
- rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
952
- for k in range(4):
953
- j = (k + 1) % 4
954
- self.draw_line(
955
- [rotated_rect[k][0], rotated_rect[j][0]],
956
- [rotated_rect[k][1], rotated_rect[j][1]],
957
- color=edge_color,
958
- linestyle="--" if k == 1 else line_style,
959
- linewidth=linewidth,
960
- )
961
-
962
- if label is not None:
963
- text_pos = rotated_rect[1] # topleft corner
964
-
965
- height_ratio = h / np.sqrt(self.output.height * self.output.width)
966
- label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
967
- font_size = (
968
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
969
- )
970
- self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
971
-
972
- return self.output
973
-
974
- def draw_circle(self, circle_coord, color, radius=3):
975
- """
976
- Args:
977
- circle_coord (list(int) or tuple(int)): contains the x and y coordinates
978
- of the center of the circle.
979
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
980
- formats that are accepted.
981
- radius (int): radius of the circle.
982
-
983
- Returns:
984
- output (VisImage): image object with box drawn.
985
- """
986
- x, y = circle_coord
987
- self.output.ax.add_patch(
988
- mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
989
- )
990
- return self.output
991
-
992
- def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
993
- """
994
- Args:
995
- x_data (list[int]): a list containing x values of all the points being drawn.
996
- Length of list should match the length of y_data.
997
- y_data (list[int]): a list containing y values of all the points being drawn.
998
- Length of list should match the length of x_data.
999
- color: color of the line. Refer to `matplotlib.colors` for a full list of
1000
- formats that are accepted.
1001
- linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
1002
- for a full list of formats that are accepted.
1003
- linewidth (float or None): width of the line. When it's None,
1004
- a default value will be computed and used.
1005
-
1006
- Returns:
1007
- output (VisImage): image object with line drawn.
1008
- """
1009
- if linewidth is None:
1010
- linewidth = self._default_font_size / 3
1011
- linewidth = max(linewidth, 1)
1012
- self.output.ax.add_line(
1013
- mpl.lines.Line2D(
1014
- x_data,
1015
- y_data,
1016
- linewidth=linewidth * self.output.scale,
1017
- color=color,
1018
- linestyle=linestyle,
1019
- )
1020
- )
1021
- return self.output
1022
-
1023
- def draw_binary_mask(
1024
- self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=0
1025
- ):
1026
- """
1027
- Args:
1028
- binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
1029
- W is the image width. Each value in the array is either a 0 or 1 value of uint8
1030
- type.
1031
- color: color of the mask. Refer to `matplotlib.colors` for a full list of
1032
- formats that are accepted. If None, will pick a random color.
1033
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
1034
- full list of formats that are accepted.
1035
- text (str): if not None, will be drawn in the object's center of mass.
1036
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
1037
- area_threshold (float): a connected component smaller than this will not be shown.
1038
-
1039
- Returns:
1040
- output (VisImage): image object with mask drawn.
1041
- """
1042
- if color is None:
1043
- color = random_color(rgb=True, maximum=1)
1044
- color = mplc.to_rgb(color)
1045
-
1046
- has_valid_segment = False
1047
- binary_mask = binary_mask.astype("uint8") # opencv needs uint8
1048
- mask = GenericMask(binary_mask, self.output.height, self.output.width)
1049
- shape2d = (binary_mask.shape[0], binary_mask.shape[1])
1050
-
1051
- if not mask.has_holes:
1052
- # draw polygons for regular masks
1053
- for segment in mask.polygons:
1054
- area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
1055
- if area < (area_threshold or 0):
1056
- continue
1057
- has_valid_segment = True
1058
- segment = segment.reshape(-1, 2)
1059
- self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
1060
- else:
1061
- # TODO: Use Path/PathPatch to draw vector graphics:
1062
- # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
1063
- rgba = np.zeros(shape2d + (4,), dtype="float32")
1064
- rgba[:, :, :3] = color
1065
- rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
1066
- has_valid_segment = True
1067
- self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
1068
-
1069
- if text is not None and has_valid_segment:
1070
- # TODO sometimes drawn on wrong objects. the heuristics here can improve.
1071
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
1072
- _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
1073
- largest_component_id = np.argmax(stats[1:, -1]) + 1
1074
-
1075
- # draw text on the largest component, as well as other very large components.
1076
- for cid in range(1, _num_cc):
1077
- if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
1078
- # median is more stable than centroid
1079
- # center = centroids[largest_component_id]
1080
- center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
1081
- self.draw_text(text, center, color=lighter_color)
1082
- return self.output
1083
-
1084
- def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
1085
- """
1086
- Args:
1087
- segment: numpy array of shape Nx2, containing all the points in the polygon.
1088
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
1089
- formats that are accepted.
1090
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
1091
- full list of formats that are accepted. If not provided, a darker shade
1092
- of the polygon color will be used instead.
1093
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
1094
-
1095
- Returns:
1096
- output (VisImage): image object with polygon drawn.
1097
- """
1098
- if edge_color is None:
1099
- # make edge color darker than the polygon color
1100
- if alpha > 0.8:
1101
- edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
1102
- else:
1103
- edge_color = color
1104
- edge_color = mplc.to_rgb(edge_color) + (1,)
1105
-
1106
- polygon = mpl.patches.Polygon(
1107
- segment,
1108
- fill=True,
1109
- facecolor=mplc.to_rgb(color) + (alpha,),
1110
- edgecolor=edge_color,
1111
- linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
1112
- )
1113
- self.output.ax.add_patch(polygon)
1114
- return self.output
1115
-
1116
- """
1117
- Internal methods:
1118
- """
1119
-
1120
- def _jitter(self, color):
1121
- """
1122
- Randomly modifies given color to produce a slightly different color than the color given.
1123
-
1124
- Args:
1125
- color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
1126
- picked. The values in the list are in the [0.0, 1.0] range.
1127
-
1128
- Returns:
1129
- jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
1130
- color after being jittered. The values in the list are in the [0.0, 1.0] range.
1131
- """
1132
- color = mplc.to_rgb(color)
1133
- vec = np.random.rand(3)
1134
- # better to do it in another color space
1135
- vec = vec / np.linalg.norm(vec) * 0.5
1136
- res = np.clip(vec + color, 0, 1)
1137
- return tuple(res)
1138
-
1139
- def _create_grayscale_image(self, mask=None):
1140
- """
1141
- Create a grayscale version of the original image.
1142
- The colors in masked area, if given, will be kept.
1143
- """
1144
- img_bw = self.img.astype("f4").mean(axis=2)
1145
- img_bw = np.stack([img_bw] * 3, axis=2)
1146
- if mask is not None:
1147
- img_bw[mask] = self.img[mask]
1148
- return img_bw
1149
-
1150
- def _change_color_brightness(self, color, brightness_factor):
1151
- """
1152
- Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
1153
- less or more saturation than the original color.
1154
-
1155
- Args:
1156
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
1157
- formats that are accepted.
1158
- brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
1159
- 0 will correspond to no change, a factor in [-1.0, 0) range will result in
1160
- a darker color and a factor in (0, 1.0] range will result in a lighter color.
1161
-
1162
- Returns:
1163
- modified_color (tuple[double]): a tuple containing the RGB values of the
1164
- modified color. Each value in the tuple is in the [0.0, 1.0] range.
1165
- """
1166
- assert brightness_factor >= -1.0 and brightness_factor <= 1.0
1167
- color = mplc.to_rgb(color)
1168
- polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
1169
- modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
1170
- modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
1171
- modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
1172
- modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
1173
- return modified_color
1174
-
1175
- def _convert_boxes(self, boxes):
1176
- """
1177
- Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
1178
- """
1179
- if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
1180
- return boxes.tensor.numpy()
1181
- else:
1182
- return np.asarray(boxes)
1183
-
1184
- def _convert_masks(self, masks_or_polygons):
1185
- """
1186
- Convert different format of masks or polygons to a tuple of masks and polygons.
1187
-
1188
- Returns:
1189
- list[GenericMask]:
1190
- """
1191
-
1192
- m = masks_or_polygons
1193
- if isinstance(m, PolygonMasks):
1194
- m = m.polygons
1195
- if isinstance(m, BitMasks):
1196
- m = m.tensor.numpy()
1197
- if isinstance(m, torch.Tensor):
1198
- m = m.numpy()
1199
- ret = []
1200
- for x in m:
1201
- if isinstance(x, GenericMask):
1202
- ret.append(x)
1203
- else:
1204
- ret.append(GenericMask(x, self.output.height, self.output.width))
1205
- return ret
1206
-
1207
- def _convert_keypoints(self, keypoints):
1208
- if isinstance(keypoints, Keypoints):
1209
- keypoints = keypoints.tensor
1210
- keypoints = np.asarray(keypoints)
1211
- return keypoints
1212
-
1213
- def get_output(self):
1214
- """
1215
- Returns:
1216
- output (VisImage): the image output containing the visualizations added
1217
- to the image.
1218
- """
1219
- return self.output
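
For orientation, here is a minimal usage sketch of the mask- and polygon-drawing API documented above. It is not part of the deleted file: it assumes these methods belong to a detectron2-style `Visualizer` (as in `detectron2.utils.visualizer`), so the import path, constructor arguments, and file names below are assumptions.

```python
# Hedged sketch: assumes the deleted file mirrors detectron2's Visualizer API.
import numpy as np
from detectron2.utils.visualizer import Visualizer  # assumed upstream equivalent

img = np.zeros((480, 640, 3), dtype=np.uint8)        # placeholder RGB image
vis = Visualizer(img, metadata=None, scale=1.0)

mask = np.zeros((480, 640), dtype=np.uint8)          # binary mask: 1 inside the object
mask[100:200, 150:300] = 1
vis.draw_binary_mask(mask, color="g", text="object", alpha=0.5)

triangle = np.array([[10, 10], [60, 10], [35, 60]])  # Nx2 polygon points
vis.draw_polygon(triangle, color="r", alpha=0.4)

vis.get_output().save("overlay.png")                 # VisImage returned by get_output()
```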
spaces/CVPR/transfiner/configs/common/optim.py DELETED
@@ -1,15 +0,0 @@
- import torch
-
- from detectron2.config import LazyCall as L
- from detectron2.solver.build import get_default_optimizer_params
-
- SGD = L(torch.optim.SGD)(
-     params=L(get_default_optimizer_params)(
-         # params.model is meant to be set to the model object, before instantiating
-         # the optimizer.
-         weight_decay_norm=0.0
-     ),
-     lr=0.02,
-     momentum=0.9,
-     weight_decay=1e-4,
- )
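
As a hedged aside (not part of the commit), a config like this is normally consumed through detectron2's LazyConfig machinery: the `params.model` field mentioned in the comment is filled in with the model object before the optimizer node is instantiated. The config path and the `nn.Linear` placeholder below are assumptions.

```python
# Hedged sketch of the usual LazyConfig workflow for the SGD node defined above.
import torch.nn as nn
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/common/optim.py")  # path is an assumption
model = nn.Linear(10, 2)                          # placeholder for the real model
cfg.SGD.params.model = model                      # as the comment in optim.py requires
optimizer = instantiate(cfg.SGD)                  # builds torch.optim.SGD(lr=0.02, ...)
```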
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/__init__.py DELETED
@@ -1,5 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
spaces/Chintan-Donda/KKMS-KSSW-HF/src/langchain_utils.py DELETED
@@ -1,891 +0,0 @@
1
- import src.constants as constants_utils
2
- import src.data_loader as data_loader_utils
3
- import src.utils as utils
4
-
5
- from langchain.llms import OpenAI
6
- from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
7
- from langchain.chains.summarize import load_summarize_chain
8
- from langchain.docstore.document import Document
9
- from langchain.embeddings.openai import OpenAIEmbeddings
10
- import openai
11
- from langchain.vectorstores import Chroma
12
- import chromadb
13
- from langchain.chains.question_answering import load_qa_chain
14
- from langchain.chains.qa_with_sources import load_qa_with_sources_chain
15
- from langchain.prompts import PromptTemplate
16
- from llama_index import GPTVectorStoreIndex, GPTListIndex
17
- from langchain.vectorstores import FAISS
18
-
19
- import pickle
20
- import shutil
21
- from typing import Dict, List, Optional
22
-
23
- import os
24
- os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
25
-
26
- import logging
27
- logger = logging.getLogger(__name__)
28
- logging.basicConfig(
29
- format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
30
- )
31
-
32
- import warnings
33
- warnings.filterwarnings('ignore')
34
-
35
-
36
-
37
- class LANGCHAIN_UTILS:
38
- def __init__(self,
39
- index_type=constants_utils.INDEX_TYPE,
40
- load_from_existing_index_store=constants_utils.LOAD_FROM_EXISTING_INDEX_STORE
41
- ):
42
- self.index_type = index_type
43
- self.load_from_existing_index_store = load_from_existing_index_store
44
-
45
- # Temporary index in the current context for the doc_type in consideration
46
- self.index = None
47
- # Master index which contains data from multiple sources (PDF, Online PDF, Text files, URLs, etc. It gets updated on Uploading the data from new files/urls without downtime of the application on-demand.)
48
- self.master_index = None
49
-
50
- # Data source wise index
51
- self.index_category_doc_type_wise_index = dict(
52
- (ic, dict(
53
- (ds, None) for ds in list(constants_utils.DATA_SOURCES.values()))
54
- ) for ic in constants_utils.INDEX_CATEGORY)
55
- # Initialize master index for each INDEX_CATEGORY
56
- for ic in constants_utils.INDEX_CATEGORY:
57
- self.index_category_doc_type_wise_index[ic][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = None
58
-
59
- # Data loaded as a Document format in the current context for the doc_type in consideration
60
- self.documents = []
61
-
62
- # Instantiate data_loader_utils class object
63
- self.data_loader_utils_obj = data_loader_utils.DATA_LOADER()
64
- # Instantiate UTILS class object
65
- self.utils_obj = utils.UTILS()
66
-
67
- # Initialize embeddings (we can also use other embeddings)
68
- self.embeddings = OpenAIEmbeddings(openai_api_key=os.getenv('OPENAI_API_KEY'))
69
-
70
- # Global history for AgGPT widget
71
- self.global_history = [
72
- {
73
- "role": "assistant",
74
- "content": "Hi, I am a chatbot. I can converse in English. I can answer your questions about farming in India. Ask me anything!"
75
- }
76
- ]
77
-
78
-
79
- def generate_prompt_template(
80
- self,
81
- prompt_type='general'
82
- ):
83
- prompt_template = ''
84
-
85
- if prompt_type == 'general':
86
- prompt_template = """Write a concise summary of the following:
87
-
88
- {text}
89
-
90
- SUMMARIZE IN ENGLISH:"""
91
-
92
- elif prompt_type == 'weather':
93
- prompt_template = """
94
- What would be the weather based on the below data:
95
- {text}
96
- """
97
-
98
- return prompt_template
99
-
100
-
101
- def user(
102
- self,
103
- user_message,
104
- history
105
- ):
106
- history = history + [[user_message, None]]
107
- self.global_history = self.global_history + [{"role": "user", "content": user_message}]
108
- return "", history
109
-
110
-
111
- def get_chatgpt_response(
112
- self,
113
- history
114
- ):
115
- output = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=history)
116
- history.append({"role": "assistant", "content": output.choices[0].message.content})
117
- return output.choices[0].message.content, history
118
-
119
-
120
- def bot(
121
- self,
122
- history
123
- ):
124
- response, self.global_history = self.get_chatgpt_response(self.global_history)
125
- history[-1][1] = response
126
- return history
127
-
128
-
129
- def clear_history(
130
- self,
131
- lang="English"
132
- ):
133
- self.global_history = [{"role": "assistant", "content": "Hi, I am a chatbot. I can converse in {}. I can answer your questions about farming in India. Ask me anything!".format(lang)}]
134
- return None
135
-
136
-
137
- def get_textual_summary(
138
- self,
139
- text,
140
- chain_type="stuff",
141
- custom_prompt=True,
142
- prompt_type='general'
143
- ):
144
- texts = [text]
145
- docs = [Document(page_content=t) for t in texts[:3]]
146
-
147
- llm = OpenAI(temperature=0)
148
- if custom_prompt:
149
- prompt_template = self.generate_prompt_template(prompt_type)
150
- PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
151
- chain = load_summarize_chain(llm, chain_type=chain_type, prompt=PROMPT)
152
- else:
153
- chain = load_summarize_chain(llm, chain_type=chain_type)
154
-
155
- text_summary = chain.run(docs)
156
- return text_summary
157
-
158
-
159
- def get_weather_forecast_summary(
160
- self,
161
- text,
162
- chain_type="stuff"
163
- ):
164
- text = f"""
165
- What would be the weather based on the below data:
166
- {text}
167
-
168
- Give simple response without technical numbers which can be explained to human.
169
- """
170
- texts = [text]
171
- docs = [Document(page_content=t) for t in texts[:3]]
172
-
173
- llm = OpenAI(temperature=0)
174
- chain = load_summarize_chain(llm, chain_type=chain_type)
175
- text_summary = chain.run(docs)
176
-
177
- return text_summary
178
-
179
-
180
- def get_answer_from_para(
181
- self,
182
- para,
183
- question,
184
- chain_type="stuff",
185
- custom_prompt=True
186
- ):
187
- # Prepare data (Split paragraph into chunks of small documents)
188
- text_splitter = CharacterTextSplitter(
189
- chunk_size=constants_utils.TEXT_SPLITTER_CHUNK_SIZE,
190
- chunk_overlap=constants_utils.TEXT_SPLITTER_CHUNK_OVERLAP,
191
- separator=constants_utils.TEXT_SPLITTER_SEPARATOR
192
- )
193
- texts = text_splitter.split_text(para)
194
-
195
- if self.index_type == 'FAISS':
196
- # Find similar docs that are relevant to the question
197
- docsearch = FAISS.from_texts(
198
- texts, self.embeddings,
199
- metadatas=[{"source": str(i)} for i in range(len(texts))]
200
- )
201
-
202
- elif self.index_type == 'Chroma':
203
- # Find similar docs that are relevant to the question
204
- docsearch = Chroma.from_texts(
205
- texts, self.embeddings,
206
- metadatas=[{"source": str(i)} for i in range(len(texts))]
207
- )
208
-
209
- # Search for the similar docs
210
- docs = docsearch.similarity_search(question, k=1)
211
-
212
- llm = OpenAI(temperature=0)
213
- # Create a Chain for question answering
214
- if custom_prompt:
215
- prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
216
-
217
- {context}
218
-
219
- Question: {question}
220
- Answer in English:"""
221
-
222
- PROMPT = PromptTemplate(
223
- template=prompt_template, input_variables=["context", "question"]
224
- )
225
- chain = load_qa_chain(llm, chain_type=chain_type, prompt=PROMPT)
226
- else:
227
- # chain = load_qa_with_sources_chain(llm, chain_type=chain_type)
228
- chain = load_qa_chain(llm, chain_type=chain_type)
229
- # chain.run(input_documents=docs, question=question)
230
-
231
- out_dict = chain({"input_documents": docs, "question": question}, return_only_outputs=True)
232
- return out_dict['output_text']
233
-
234
-
235
- def load_documents(
236
- self,
237
- doc_type,
238
- doc_filepath='',
239
- urls=[]
240
- ):
241
- """
242
- Load data in Document format of the given doc_type from either doc_filepath or list of urls.
243
- It can load multiple files/urls in one shot.
244
-
245
- Args:
246
- doc_type: can be any of [pdf, online_pdf, urls, textfile]
247
- doc_filepath: can be a directory or a filepath
248
- urls: list of urls
249
- """
250
-
251
- logger.info(f'Loading {doc_type} data into Documents format')
252
-
253
- if doc_type == 'pdf':
254
- # Load data from PDFs stored in local directory
255
- self.documents.extend(
256
- self.data_loader_utils_obj.load_documents_from_pdf(
257
- doc_filepath=doc_filepath,
258
- doc_type=doc_type
259
- ))
260
-
261
- elif doc_type == 'online_pdf':
262
- # Load data from PDFs stored in local directory
263
- self.documents.extend(
264
- self.data_loader_utils_obj.load_documents_from_pdf(
265
- urls=urls,
266
- doc_type=doc_type
267
- ))
268
-
269
- elif doc_type == 'urls':
270
- # Load data from URLs
271
- self.documents.extend(
272
- self.data_loader_utils_obj.load_documents_from_urls(
273
- urls=urls,
274
- doc_type=doc_type
275
- ))
276
-
277
- elif doc_type == 'textfile':
278
- # Load data from text files & Convert texts into Document format
279
- self.documents.extend(
280
- self.data_loader_utils_obj.load_documents_from_text(
281
- doc_filepath=doc_filepath,
282
- doc_type=doc_type
283
- ))
284
-
285
- elif doc_type == 'directory':
286
- # Load data from local directory
287
- self.documents.extend(
288
- self.data_loader_utils_obj.load_documents_from_directory(
289
- doc_filepath=doc_filepath,
290
- doc_type=doc_type
291
- ))
292
-
293
- logger.info(f'{doc_type} data into Documents format loaded successfully!')
294
-
295
-
296
- def create_index(
297
- self
298
- ):
299
- if not self.documents:
300
- logger.warning(f'Empty documents. Index cannot be created!')
301
- return None
302
-
303
- logger.info(f'Creating index')
304
-
305
- text_splitter = CharacterTextSplitter(
306
- chunk_size=constants_utils.TEXT_SPLITTER_CHUNK_SIZE,
307
- chunk_overlap=constants_utils.TEXT_SPLITTER_CHUNK_OVERLAP,
308
- separator=constants_utils.TEXT_SPLITTER_SEPARATOR
309
- )
310
- self.documents = text_splitter.split_documents(self.documents)
311
-
312
- ############## Build the Vector store for docs ##############
313
- # Vector store using Facebook AI Similarity Search
314
- if self.index_type == 'FAISS':
315
- self.index = FAISS.from_documents(
316
- self.documents,
317
- self.embeddings
318
- )
319
-
320
- # Vector store using Chroma DB
321
- elif self.index_type == 'Chroma':
322
- if not os.path.exists(self.index_filepath):
323
- os.makedirs(self.index_filepath)
324
-
325
- self.index = Chroma.from_documents(
326
- self.documents,
327
- self.embeddings,
328
- persist_directory=self.index_filepath
329
- )
330
-
331
- # Vector store using GPT vector index
332
- elif self.index_type == 'GPTVectorStoreIndex':
333
- self.index = GPTVectorStoreIndex.from_documents(self.documents)
334
-
335
- logger.info(f'Index created successfully!')
336
- return self.index
337
-
338
-
339
- def get_index_filepath(
340
- self,
341
- index_category,
342
- doc_type
343
- ):
344
- if doc_type == 'master':
345
- self.index_filepath = os.path.join(
346
- constants_utils.OUTPUT_PATH, f'index_{index_category}') if self.index_type in ['FAISS', 'Chroma'] else os.path.join(constants_utils.OUTPUT_PATH, f'index_{index_category}.json')
347
- else:
348
- self.index_filepath = os.path.join(
349
- constants_utils.OUTPUT_PATH, f'index_{index_category}', f'index_{doc_type}') if self.index_type in ['FAISS', 'Chroma'] else os.path.join(constants_utils.OUTPUT_PATH, f'index_{index_category}', f'index_{doc_type}.json')
350
-
351
- return self.index_filepath
352
-
353
-
354
- def load_master_doctype_indices_for_index_category(
355
- self,
356
- index_category
357
- ):
358
- logger.info(f'Loading master and doc_type indices for: {index_category}')
359
-
360
- # Set master index of index_category = None
361
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = None
362
-
363
- for doc_type in self.index_category_doc_type_wise_index[index_category].keys():
364
- self.index = None
365
- self.index_filepath = self.get_index_filepath(
366
- index_category=index_category,
367
- doc_type=doc_type
368
- )
369
- self.load_index()
370
- # Set master/doc_type index
371
- self.index_category_doc_type_wise_index[index_category][doc_type] = self.index
372
-
373
- logger.info(f'Master and doc_type indices for: {index_category} loaded successfully!')
374
-
375
-
376
- def load_create_index(
377
- self
378
- ):
379
- logger.info(f'Loading/Creating index for each index_category')
380
-
381
- for index_category in constants_utils.INDEX_CATEGORY:
382
- # Load master index_category index if self.load_from_existing_index_store == True
383
- if self.load_from_existing_index_store:
384
- self.load_master_doctype_indices_for_index_category(index_category)
385
-
386
- # For any reason, if master index is not loaded then create the new index/vector store
387
- if not self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE]:
388
- logger.info(f'Creating a new Vector/Index store for: {index_category}')
389
-
390
- doc_filepath = os.path.join(constants_utils.DATA_PATH, index_category)
391
- urls = []
392
-
393
- # Build the Vector/Index store
394
- for doc_type in list(constants_utils.DATA_SOURCES.values()):
395
- logger.info(f'Creating a new Vector/Index store for: {index_category} from data source: {doc_type}')
396
-
397
- index = None
398
- if doc_type in ['pdf', 'textfile']:
399
- index = self.create_store_index(
400
- doc_type=doc_type,
401
- doc_filepath=doc_filepath,
402
- index_category=index_category
403
- )
404
- else:
405
- # Build the Vector/Index store from web urls
406
- index = self.create_store_index(
407
- doc_type=doc_type,
408
- urls=urls,
409
- index_category=index_category
410
- )
411
-
412
- if index:
413
- self.index_category_doc_type_wise_index[index_category][doc_type] = index
414
-
415
- logger.info(f'New Vector/Index store for: {index_category} from data source: {doc_type} created successfully!')
416
-
417
- logger.info(f'New Vector/Index store for: {index_category} created successfully!')
418
-
419
- # Merge index of each doc_type into a single index_category
420
- self.merge_store_master_index(
421
- index_category=index_category
422
- )
423
-
424
- logger.info(f'Index for each index_category loaded successfully!')
425
-
426
-
427
- def create_store_index(
428
- self,
429
- doc_type='pdf',
430
- doc_filepath=constants_utils.DATA_PATH,
431
- urls=[],
432
- index_category=constants_utils.INDEX_CATEGORY[0]
433
- ):
434
- logger.info(f'Creating and storing {doc_type} index')
435
-
436
- self.documents = []
437
- self.index = None
438
-
439
- self.index_filepath = self.get_index_filepath(
440
- index_category=index_category,
441
- doc_type=doc_type
442
- )
443
-
444
- # Delete the old index file
445
- shutil.rmtree(self.index_filepath, ignore_errors=True)
446
- logger.info(f'{self.index_filepath} deleted.')
447
-
448
- # Load data in Documents format that can be consumed for index creation
449
- self.load_documents(
450
- doc_type,
451
- doc_filepath,
452
- urls
453
- )
454
-
455
- # Create the index from documents for search/retrieval
456
- self.index = self.create_index()
457
-
458
- # Store index
459
- self.store_index(
460
- index=self.index,
461
- index_filepath=self.index_filepath
462
- )
463
-
464
- logger.info(f'{doc_type} index created and stored successfully!')
465
- # Return the index of the given doc_type (this is an index for a single doc_type). Indices from multiple doc_types should be merged later on in the master index so that query could be made from a single index.
466
- return self.index
467
-
468
-
469
- def store_index(
470
- self,
471
- index,
472
- index_filepath
473
- ):
474
- if not index:
475
- logger.warning(f'Cannot write an empty index to: {index_filepath}!')
476
- return
477
-
478
- logger.info(f'Saving index to: {index_filepath}')
479
-
480
- if not os.path.exists(index_filepath) and os.path.isdir(index_filepath):
481
- os.makedirs(index_filepath)
482
-
483
- if self.index_type == 'FAISS':
484
- index.save_local(index_filepath)
485
-
486
- elif self.index_type == 'Chroma':
487
- index.persist()
488
-
489
- elif self.index_type == 'GPTVectorStoreIndex':
490
- index.save_to_disk(index_filepath)
491
-
492
- elif self.index_type == 'pickle':
493
- with open(index_filepath, "wb") as f:
494
- pickle.dump(index, f)
495
-
496
- logger.info(f'Index saved to: {index_filepath} successfully!')
497
-
498
-
499
- def load_index(
500
- self
501
- ):
502
- logger.info(f'Loading index from: {self.index_filepath}')
503
-
504
- if not os.path.exists(self.index_filepath):
505
- logger.warning(f"Cannot load index from {self.index_filepath} as the path does not exist!")
506
- return
507
-
508
- if self.index_type == 'FAISS':
509
- self.index = FAISS.load_local(self.index_filepath, self.embeddings)
510
-
511
- elif self.index_type == 'Chroma':
512
- self.index = Chroma(
513
- persist_directory=self.index_filepath,
514
- embedding_function=self.embeddings
515
- )
516
-
517
- elif self.index_type == 'GPTVectorStoreIndex':
518
- self.index = GPTVectorStoreIndex.load_from_disk(self.index_filepath)
519
-
520
- elif self.index_type == 'pickle':
521
- with open(self.index_filepath, "rb") as f:
522
- self.index = pickle.load(f)
523
-
524
- logger.info(f'Index loaded from: {self.index_filepath} successfully!')
525
-
526
-
527
- def convert_text_to_documents(
528
- self,
529
- text_list=[]
530
- ):
531
- """
532
- Converts the list of text data to Documents format that can be feed to GPT API to build the Vector store
533
- """
534
-
535
- from llama_index import Document
536
- documents = [Document(t) for t in text_list]
537
- return documents
538
-
539
-
540
- def merge_documents_from_different_sources(
541
- self,
542
- doc_documents,
543
- url_documents
544
- ):
545
- # Build the Vector store for docs
546
- doc_index = GPTVectorStoreIndex.from_documents(doc_documents)
547
- # Build the Vector store for URLs
548
- url_index = GPTVectorStoreIndex.from_documents(url_documents)
549
-
550
- # Set summary of each index
551
- doc_index.set_text("index_from_docs")
552
- url_index.set_text("index_from_urls")
553
-
554
- # Merge index of different data sources
555
- index = GPTListIndex([doc_index, url_index])
556
-
557
- return index
558
-
559
-
560
- def merge_store_master_index(
561
- self,
562
- index_category
563
- ):
564
- """
565
- Merge multiple doc_type indices into a single master index. Query/search would be performed on this merged index.
566
-
567
- Args:
568
- index_category: index_category (can be any of: [crops, fruits, pest_management, govt_policy, soil, etc.])
569
- """
570
- logger.info('Merging doc_type indices of different index categories into a master index')
571
-
572
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = None
573
- doc_type_indices = self.index_category_doc_type_wise_index[index_category]
574
-
575
- if self.index_type == 'FAISS':
576
- for doc_type, index in doc_type_indices.items():
577
- if doc_type == constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE:
578
- # Only merge the non-master doc_type_indices
579
- continue
580
- if not index or not isinstance(index, FAISS):
581
- logger.warning(f'{doc_type} index to be merged is not an instance of type langchain.vectorstores.faiss.FAISS')
582
- continue
583
- if not self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE]:
584
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = index
585
- else:
586
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE].merge_from(index)
587
-
588
- elif self.index_type == 'Chroma':
589
- for doc_type, index in doc_type_indices.items():
590
- if not index or not isinstance(index, Chroma):
591
- logger.warning(f'{doc_type} index to be merged is not an instance of type langchain.vectorstores.Chroma')
592
- continue
593
- raise NotImplementedError
594
-
595
- elif self.index_type == 'GPTVectorStoreIndex':
596
- for doc_type, index in doc_type_indices.items():
597
- if not index or not isinstance(index, GPTVectorStoreIndex):
598
- logger.warning(f'{doc_type} index to be merged is not an instance of type llama_index.GPTVectorStoreIndex')
599
- continue
600
- import pdb; pdb.set_trace()
601
- raise NotImplementedError
602
-
603
- # Store index_category master index
604
- self.store_index(
605
- index=self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE],
606
- index_filepath=self.get_index_filepath(
607
- index_category=index_category,
608
- doc_type=constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE
609
- )
610
- )
611
-
612
- logger.info('doc_type indices of different index categories into a master index merged successfully!')
613
-
614
-
615
- def init_chromadb(self):
616
- logger.info('Initializing Chroma DB')
617
-
618
- if not os.path.exists(self.index_filepath):
619
- os.makedirs(self.index_filepath)
620
-
621
- client_settings = chromadb.config.Settings(
622
- chroma_db_impl="duckdb+parquet",
623
- persist_directory=self.index_filepath,
624
- anonymized_telemetry=False
625
- )
626
-
627
- self.index = Chroma(
628
- collection_name="langchain_store",
629
- embedding_function=self.embeddings,
630
- client_settings=client_settings,
631
- persist_directory=self.index_filepath,
632
- )
633
-
634
- logger.info('Chroma DB initialized successfully!')
635
-
636
-
637
- def query_chromadb(
638
- self,
639
- question,
640
- k=1
641
- ):
642
- return self.index.similarity_search(query=question, k=k)
643
-
644
-
645
- def query(self,
646
- question,
647
- question_category,
648
- mode=constants_utils.MODE,
649
- response_mode=constants_utils.RESPONSE_MODE,
650
- similarity_top_k=constants_utils.SIMILARITY_TOP_K,
651
- required_keywords=[],
652
- exclude_keywords=[],
653
- verbose=False
654
- ):
655
- '''
656
- Args:
657
- mode: can be any of [default, embedding]
658
- response_mode: can be any of [default, compact, tree_summarize]
659
- '''
660
- logger.info(f'question category: {question_category}; question: {question}')
661
-
662
- response = None
663
-
664
- # Get the index of the given question_category
665
- index = self.index_category_doc_type_wise_index[question_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE]
666
-
667
- if self.index_type == 'FAISS':
668
- response = index.similarity_search(
669
- question,
670
- k=similarity_top_k
671
- )
672
-
673
- elif self.index_type == 'Chroma':
674
- response = index.similarity_search(
675
- question,
676
- k=similarity_top_k
677
- )
678
-
679
- elif self.index_type == 'GPTVectorStoreIndex':
680
- # Querying the index
681
- response = index.query(
682
- question,
683
- mode=mode,
684
- response_mode=response_mode,
685
- similarity_top_k=similarity_top_k,
686
- required_keywords=required_keywords,
687
- exclude_keywords=exclude_keywords,
688
- verbose=verbose
689
- )
690
-
691
- return response
692
-
693
-
694
- def load_uploaded_documents(
695
- self,
696
- doc_type,
697
- files_or_urls
698
- ):
699
- logger.info(f'Loading uploaded documents from: {doc_type}')
700
-
701
- if doc_type == 'pdf':
702
- if not isinstance(files_or_urls, list):
703
- files_or_urls = [files_or_urls]
704
- for pdf in files_or_urls:
705
- if not pdf.name.endswith('.pdf'):
706
- logger.warning(f'Found a file other than .pdf format. Cannot load {pdf.name} file!')
707
- continue
708
- logger.info(f'Loading PDF from: {pdf.name}')
709
- # Load PDF as documents
710
- self.documents.extend(
711
- self.data_loader_utils_obj.load_documents_from_pdf(
712
- doc_filepath=pdf.name,
713
- doc_type=doc_type
714
- )
715
- )
716
-
717
- elif doc_type == 'textfile':
718
- if not isinstance(files_or_urls, list):
719
- files_or_urls = [files_or_urls]
720
- for text_file in files_or_urls:
721
- if not text_file.name.endswith('.txt'):
722
- logger.warning(f'Found a file other than .txt format. Cannot load {text_file.name} file!')
723
- continue
724
- logger.info(f'Loading textfile from: {text_file.name}')
725
- # Load textfile as documents
726
- self.documents.extend(
727
- self.data_loader_utils_obj.load_documents_from_text(
728
- doc_filepath=text_file.name,
729
- doc_type=doc_type
730
- )
731
- )
732
-
733
- elif doc_type == 'online_pdf':
734
- files_or_urls = self.utils_obj.split_text(files_or_urls)
735
- # Load online_pdfs as documents
736
- self.documents.extend(
737
- self.data_loader_utils_obj.load_documents_from_pdf(
738
- doc_type=doc_type,
739
- urls=files_or_urls
740
- )
741
- )
742
-
743
- elif doc_type == 'urls':
744
- files_or_urls = self.utils_obj.split_text(files_or_urls)
745
- # Load URLs as documents
746
- self.documents.extend(
747
- self.data_loader_utils_obj.load_documents_from_urls(
748
- doc_type=doc_type,
749
- urls=files_or_urls
750
- )
751
- )
752
-
753
- logger.info(f'Uploaded documents from: {doc_type} loaded successfully!')
754
-
755
-
756
- def upload_data(
757
- self,
758
- doc_type,
759
- files_or_urls,
760
- index_category
761
- ):
762
- logger.info(f'Uploading data for: {index_category}; from: {doc_type}')
763
-
764
- self.documents = []
765
- self.index = None
766
-
767
- # Create documents of the uploaded files
768
- self.load_uploaded_documents(
769
- doc_type,
770
- files_or_urls
771
- )
772
-
773
- # Create the index from documents for search/retrieval
774
- self.index = self.create_index()
775
-
776
- # Update the existing index with the newly data
777
- self.upsert_index(
778
- doc_type=doc_type,
779
- index_category=index_category
780
- )
781
-
782
- logger.info(f'{index_category}-{doc_type} data uploaded successfully!')
783
-
784
-
785
- def upsert_index(
786
- self,
787
- doc_type,
788
- index_category
789
- ):
790
- """
791
- Updates the index of the given index_category-doc_type, if present.
792
- Creates a new index if index_category-doc_type index is not present.
793
- Also updates the master index for the given index_category.
794
- """
795
- if not self.index:
796
- return
797
-
798
- logger.info(f'Upserting index for: {index_category}-{doc_type}')
799
-
800
- if not self.index_category_doc_type_wise_index.get(index_category, None):
801
- """
802
- If index_category index does not exists
803
- Steps:
804
- - set index_category index
805
- - set doc_type index
806
- - Store new index_category index as master
807
- - Store new doc_type index
808
- """
809
- logger.info(f'Master index does not exist for: {index_category}. A new {index_category} master index & {doc_type} index would be created.')
810
- self.index_category_doc_type_wise_index.setdefault(index_category, {})
811
- # Set a master index only if it doesn't exist. Else keep it's value as-it-is.
812
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = self.index
813
- # Set an index for the given doc_type only if it doesn't exist. Else keep it's value as-it-is.
814
- self.index_category_doc_type_wise_index[index_category][doc_type] = self.index
815
-
816
- elif not self.index_category_doc_type_wise_index[index_category].get(doc_type, None):
817
- """
818
- If doc_type index does not exists
819
- Steps:
820
- - set doc_type index
821
- - if master index does not exist for the index_category - set a master index
822
- - if master index exists - update the master index to merge it with doc_type index
823
- - Store new/updated index_category index as master
824
- - Store new doc_type index
825
- """
826
- logger.info(f'{doc_type} index does not exist for: {index_category}-{doc_type}. A new {doc_type} index would be created.')
827
- # create doc_type index
828
- self.index_category_doc_type_wise_index[index_category][doc_type] = self.index
829
- # if master index does not exist for the index_category - create a master index
830
- if not self.index_category_doc_type_wise_index[index_category].get(constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE, None):
831
- logger.info(f'Master index does not exist for: {index_category}-{doc_type}. A new master index would be created.')
832
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = self.index
833
-
834
- else:
835
- """
836
- If the new document is of the existing index_category & doc_type
837
- Steps:
838
- - if master index does not exist for the index_category - set a master index
839
- - if master index exists - update the master index to merge it with doc_type index
840
- - update the doc_type index
841
- - Store updated index_category index as master
842
- - Store updated doc_type index
843
- """
844
- # if master index does not exist for the index_category - create a master index
845
- if not self.index_category_doc_type_wise_index[index_category].get(constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE, None):
846
- logger.info(f'Master index does not exist for: {index_category}-{doc_type}. A new master index would be created.')
847
- self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = self.index
848
- # Merge new self.index with existing doc_type index
849
- self.index_category_doc_type_wise_index[index_category][doc_type].merge_from(self.index)
850
- # Update self.index to store/overwrite the existing index with the updated index
851
- self.index = self.index_category_doc_type_wise_index[index_category][doc_type]
852
-
853
-
854
- # Store newly created/merged index
855
- self.store_index(
856
- index=self.index,
857
- index_filepath=self.get_index_filepath(
858
- index_category=index_category,
859
- doc_type=doc_type
860
- )
861
- )
862
-
863
- # Merge and store master index for index_category
864
- self.merge_store_master_index(
865
- index_category=index_category
866
- )
867
-
868
- logger.info(f'Index for: {index_category}-{doc_type} upserted successfully!')
869
-
870
-
871
- def delete_index(
872
- self,
873
- ids: Optional[List[str]] = None,
874
- # filter: Optional[DocumentMetadataFilter] = None,
875
- delete_all: Optional[bool] = None,
876
- ):
877
- """
878
- Removes vectors by ids, filter, or everything in the datastore.
879
- Multiple parameters can be used at once.
880
- Returns whether the operation was successful.
881
- """
882
- logger.info(f'Deleting index')
883
-
884
- raise NotImplementedError
885
-
886
- # NOTE: we can delete a specific collection
887
- self.index.delete_collection()
888
- self.index.persist()
889
-
890
- # Or just nuke the persist directory
891
- # !rm -rf self.index_filepath
spaces/Clebersla/RVC_V2_Huggingface_Version/README.md DELETED
@@ -1,57 +0,0 @@
- ---
- title: RVC V2
- emoji: 💻
- colorFrom: blue
- colorTo: purple
- sdk: gradio
- sdk_version: 3.42.0
- app_file: app.py
- pinned: false
- license: lgpl-3.0
- ---
-
- ## 🔧 Pre-requisites
-
- Before running the project, you must have the following tool installed on your machine:
- * [Python v3.8.0](https://www.python.org/downloads/release/python-380/)
-
- Also, you will need to clone the repository:
-
- ```bash
- # Clone the repository
- git clone https://huggingface.co/spaces/mateuseap/magic-vocals/
- # Enter the root directory
- cd magic-vocals
- ```
-
- ## 🚀 How to run
-
- After you've cloned the repository and entered the root directory, run the following commands:
-
- ```bash
- # Create and activate a virtual environment (make sure you're using Python v3.8.0 to do it)
- python -m venv venv
- . venv/bin/activate
-
- # Change mode and execute a shell script to configure and run the application
- chmod +x run.sh
- ./run.sh
- ```
-
- After the shell script finishes, the application will be running at http://127.0.0.1:7860! Open the link in a browser to use the app:
-
- ![Magic Vocals](https://i.imgur.com/V55oKv8.png)
-
- **You only need to execute `run.sh` once.** After that, just activate the virtual environment and run the command below to start the app again:
-
- ```bash
- python app.py
- ```
-
- **THE `run.sh` SCRIPT IS SUPPORTED ON THE FOLLOWING OPERATING SYSTEMS:**
-
- | OS | Supported |
- |-----------|:---------:|
- | `Windows` | ❌ |
- | `Ubuntu` | ✅ |
spaces/CuriousDolphin/MobileSAM/utils/tools_gradio.py DELETED
@@ -1,192 +0,0 @@
- import cv2
- import matplotlib.pyplot as plt
- import numpy as np
- import torch
- from PIL import Image
-
-
- def fast_process(
-     annotations,
-     image,
-     device,
-     scale,
-     better_quality=False,
-     mask_random_color=True,
-     bbox=None,
-     use_retina=True,
-     withContours=True,
- ):
-     if isinstance(annotations[0], dict):
-         annotations = [annotation["segmentation"] for annotation in annotations]
-
-     original_h = image.height
-     original_w = image.width
-     if better_quality:
-         if isinstance(annotations[0], torch.Tensor):
-             annotations = np.array(annotations.cpu())
-         for i, mask in enumerate(annotations):
-             mask = cv2.morphologyEx(
-                 mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8)
-             )
-             annotations[i] = cv2.morphologyEx(
-                 mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8)
-             )
-     if device == "cpu":
-         annotations = np.array(annotations)
-         inner_mask = fast_show_mask(
-             annotations,
-             plt.gca(),
-             random_color=mask_random_color,
-             bbox=bbox,
-             retinamask=use_retina,
-             target_height=original_h,
-             target_width=original_w,
-         )
-     else:
-         if isinstance(annotations[0], np.ndarray):
-             annotations = np.array(annotations)
-             annotations = torch.from_numpy(annotations)
-         inner_mask = fast_show_mask_gpu(
-             annotations,
-             plt.gca(),
-             random_color=mask_random_color,
-             bbox=bbox,
-             retinamask=use_retina,
-             target_height=original_h,
-             target_width=original_w,
-         )
-     if isinstance(annotations, torch.Tensor):
-         annotations = annotations.cpu().numpy()
-
-     if withContours:
-         contour_all = []
-         temp = np.zeros((original_h, original_w, 1))
-         for i, mask in enumerate(annotations):
-             if type(mask) == dict:
-                 mask = mask["segmentation"]
-             annotation = mask.astype(np.uint8)
-             if use_retina == False:
-                 annotation = cv2.resize(
-                     annotation,
-                     (original_w, original_h),
-                     interpolation=cv2.INTER_NEAREST,
-                 )
-             contours, _ = cv2.findContours(
-                 annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
-             )
-             for contour in contours:
-                 contour_all.append(contour)
-         cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2 // scale)
-         color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
-         contour_mask = temp / 255 * color.reshape(1, 1, -1)
-
-     image = image.convert("RGBA")
-     overlay_inner = Image.fromarray((inner_mask * 255).astype(np.uint8), "RGBA")
-     image.paste(overlay_inner, (0, 0), overlay_inner)
-
-     if withContours:
-         overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), "RGBA")
-         image.paste(overlay_contour, (0, 0), overlay_contour)
-
-     return image
-
-
- # CPU post process
- def fast_show_mask(
-     annotation,
-     ax,
-     random_color=False,
-     bbox=None,
-     retinamask=True,
-     target_height=960,
-     target_width=960,
- ):
-     mask_sum = annotation.shape[0]
-     height = annotation.shape[1]
-     weight = annotation.shape[2]
-     # sort the annotations by mask area
-     areas = np.sum(annotation, axis=(1, 2))
-     sorted_indices = np.argsort(areas)[::1]
-     annotation = annotation[sorted_indices]
-
-     index = (annotation != 0).argmax(axis=0)
-     if random_color == True:
-         color = np.random.random((mask_sum, 1, 1, 3))
-     else:
-         color = np.ones((mask_sum, 1, 1, 3)) * np.array(
-             [30 / 255, 144 / 255, 255 / 255]
-         )
-     transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
-     visual = np.concatenate([color, transparency], axis=-1)
-     mask_image = np.expand_dims(annotation, -1) * visual
-
-     mask = np.zeros((height, weight, 4))
-
-     h_indices, w_indices = np.meshgrid(
-         np.arange(height), np.arange(weight), indexing="ij"
-     )
-     indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
-
-     mask[h_indices, w_indices, :] = mask_image[indices]
-     if bbox is not None:
-         x1, y1, x2, y2 = bbox
-         ax.add_patch(
-             plt.Rectangle(
-                 (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
-             )
-         )
-
-     if retinamask == False:
-         mask = cv2.resize(
-             mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST
-         )
-
-     return mask
-
-
- def fast_show_mask_gpu(
-     annotation,
-     ax,
-     random_color=False,
-     bbox=None,
-     retinamask=True,
-     target_height=960,
-     target_width=960,
- ):
-     device = annotation.device
-     mask_sum = annotation.shape[0]
-     height = annotation.shape[1]
-     weight = annotation.shape[2]
-     areas = torch.sum(annotation, dim=(1, 2))
-     sorted_indices = torch.argsort(areas, descending=False)
-     annotation = annotation[sorted_indices]
-     # find the index of the first non-zero mask at each pixel position
-     index = (annotation != 0).to(torch.long).argmax(dim=0)
-     if random_color == True:
-         color = torch.rand((mask_sum, 1, 1, 3)).to(device)
-     else:
-         color = torch.ones((mask_sum, 1, 1, 3)).to(device) * torch.tensor(
-             [30 / 255, 144 / 255, 255 / 255]
-         ).to(device)
-     transparency = torch.ones((mask_sum, 1, 1, 1)).to(device) * 0.6
-     visual = torch.cat([color, transparency], dim=-1)
-     mask_image = torch.unsqueeze(annotation, -1) * visual
-     # gather values by index: at each pixel, take the value from the mask selected by index,
-     # collapsing mask_image into a single overlay image
-     mask = torch.zeros((height, weight, 4)).to(device)
-     h_indices, w_indices = torch.meshgrid(torch.arange(height), torch.arange(weight))
-     indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
-     # use vectorized indexing to fill in the overlay values
-     mask[h_indices, w_indices, :] = mask_image[indices]
-     mask_cpu = mask.cpu().numpy()
-     if bbox is not None:
-         x1, y1, x2, y2 = bbox
-         ax.add_patch(
-             plt.Rectangle(
-                 (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
-             )
-         )
-     if retinamask == False:
-         mask_cpu = cv2.resize(
-             mask_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
-         )
-     return mask_cpu
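
A hedged usage sketch for the helper above (not part of the deleted file): it overlays one SAM-style segmentation on a PIL image on CPU. The import path assumes the space's own repository layout, and the image path and dummy mask are placeholders.

```python
# Hedged sketch: overlay one dummy segmentation with fast_process on CPU.
import numpy as np
from PIL import Image
from utils.tools_gradio import fast_process  # path assumes the space's repo layout

image = Image.open("example.jpg").convert("RGB")  # placeholder input image
h, w = image.height, image.width
dummy = np.zeros((h, w), dtype=bool)
dummy[h // 4: h // 2, w // 4: w // 2] = True      # one rectangular "mask"
annotations = [{"segmentation": dummy}]

overlay = fast_process(
    annotations,
    image,
    device="cpu",
    scale=1,
    mask_random_color=True,
    withContours=True,
)
overlay.save("overlay.png")                       # RGBA result from fast_process
```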
spaces/Cvandi/remake/scripts/generate_meta_info.py DELETED
@@ -1,58 +0,0 @@
- import argparse
- import cv2
- import glob
- import os
-
-
- def main(args):
-     txt_file = open(args.meta_info, 'w')
-     for folder, root in zip(args.input, args.root):
-         img_paths = sorted(glob.glob(os.path.join(folder, '*')))
-         for img_path in img_paths:
-             status = True
-             if args.check:
-                 # read the image once for check, as some images may have errors
-                 try:
-                     img = cv2.imread(img_path)
-                 except (IOError, OSError) as error:
-                     print(f'Read {img_path} error: {error}')
-                     status = False
-                 if img is None:
-                     status = False
-                     print(f'Img is None: {img_path}')
-             if status:
-                 # get the relative path
-                 img_name = os.path.relpath(img_path, root)
-                 print(img_name)
-                 txt_file.write(f'{img_name}\n')
-
-
- if __name__ == '__main__':
-     """Generate meta info (txt file) for only Ground-Truth images.
-
-     It can also generate meta info from several folders into one txt file.
-     """
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         '--input',
-         nargs='+',
-         default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
-         help='Input folder, can be a list')
-     parser.add_argument(
-         '--root',
-         nargs='+',
-         default=['datasets/DF2K', 'datasets/DF2K'],
-         help='Folder root, should have the length as input folders')
-     parser.add_argument(
-         '--meta_info',
-         type=str,
-         default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
-         help='txt path for meta info')
-     parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
-     args = parser.parse_args()
-
-     assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
-                                                f'{len(args.input)} and {len(args.root)}.')
-     os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
-
-     main(args)
- main(args)