parquet-converter committed
Commit ce24e1f · 1 Parent(s): a4657c0

Update parquet files (step 49 of 397)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set of changes.
Files changed (50):
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD OEM 2017 Crack Universal Product Key Free !!EXCLUSIVE!!.md +0 -128
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cimatron E11 Download Crack Software.md +0 -66
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Ontrack Disk Manager 10.46 ISO and Overcome BIOS Limitations on Your Hard Drive.md +0 -178
  4. spaces/1gistliPinn/ChatGPT4/Examples/DoneEx XCell Compiler 1.8.1 NEW.rar Utorrent.md +0 -6
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Cookie Run Kingdom Now and Meet the Cutest Cookies Ever.md +0 -123
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Getting Over It for Free and Experience the Ultimate Challenge.md +0 -108
  7. spaces/1phancelerku/anime-remove-background/Explore Hunt and Collect Dinosaurs in Dinosaur Hunter 3D.md +0 -115
  8. spaces/2ndelement/voicevox/test/test_word_types.py +0 -9
  9. spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/__init__.py +0 -2
  10. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192/__init__.py +0 -0
  11. spaces/AUBADA-ALARABI/poetry20233/app.py +0 -53
  12. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ails.py +0 -106
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.d.ts +0 -7
  14. spaces/AlStable/AlPrompt/README.md +0 -12
  15. spaces/Alpaca233/SadTalker/src/face3d/util/load_mats.py +0 -120
  16. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/test_examples.py +0 -1422
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/README.md +0 -3
  18. spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/trident_faster_rcnn.py +0 -66
  19. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/css/chat_style-cai-chat.css +0 -59
  20. spaces/Anustup/NS_AI_LABS/tests/vad_test.py +0 -66
  21. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/image_generation.py +0 -363
  22. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/isatty_test.py +0 -57
  23. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/dist.py +0 -1286
  24. spaces/Awesimo/jojogan/op/fused_bias_act.cpp +0 -21
  25. spaces/Banbri/zcvzcv/src/components/ui/alert.tsx +0 -59
  26. spaces/Benson/text-generation/Examples/Android Mini Block Craft Apk.md +0 -90
  27. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/docstring.py +0 -77
  28. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/exceptions.py +0 -126
  29. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/structures.py +0 -99
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/request.py +0 -170
  31. spaces/CVH-vn1210/make_hair/minigpt4/common/logger.py +0 -195
  32. spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/process.py +0 -18
  33. spaces/CVPR/GFPGAN-example/gfpgan/utils.py +0 -130
  34. spaces/CVPR/LIVE/thrust/README.md +0 -161
  35. spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/operator_adaptors.h +0 -137
  36. spaces/CVPR/LIVE/thrust/thrust/reverse.h +0 -215
  37. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/per_device_resource.h +0 -22
  38. spaces/CVPR/LIVE/thrust/thrust/system/system_error.h +0 -179
  39. spaces/CVPR/unicl-zero-shot-img-recog/app.py +0 -163
  40. spaces/Chintan-Donda/KKMS-KSSW-HF/src/web_crawler.py +0 -58
  41. spaces/CikeyQI/meme-api/meme_generator/manager.py +0 -104
  42. spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/jpge.cpp +0 -1049
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/decorators.py +0 -565
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/memory.py +0 -293
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/__init__.py +0 -99
  46. spaces/DorisB/streamlit-app/pages/02_Recommendation_system.py +0 -433
  47. spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/persistence.py +0 -262
  48. spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/include/kalmanFilter.h +0 -31
  49. spaces/ECCV2022/bytetrack/tutorials/ctracker/generate_half_csv.py +0 -37
  50. spaces/Eddevs/brian-challenge/app.py +0 -98
spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD OEM 2017 Crack Universal Product Key Free !!EXCLUSIVE!!.md DELETED
@@ -1,128 +0,0 @@
1
-
2
- <h1>AutoCAD OEM 2017 Crack Universal Product Key Free</h1>
3
- <p>If you are looking for a powerful, customizable, and scalable CAD platform that can help you create, edit, and share your designs, then you might be interested in AutoCAD OEM 2017. This is a software development toolkit that allows you to build your own branded applications based on the core functionality of AutoCAD, the world's leading CAD software. However, to use AutoCAD OEM 2017, you need a valid product key that can activate the software. And since this is not a cheap product, you might be tempted to look for a crack or a universal product key that can bypass the activation process. In this article, we will show you how to download, install, crack, and activate AutoCAD OEM 2017 for free using X-force 2017, a keygen tool that can generate product keys for all Autodesk products.</p>
4
- <h2>Features of AutoCAD OEM 2017</h2>
5
- <p>AutoCAD OEM 2017 is a software development toolkit that allows you to create your own branded applications based on the core functionality of AutoCAD. With AutoCAD OEM 2017, you can:</p>
6
- <h2>AutoCAD OEM 2017 Crack Universal Product Key Free</h2><br /><p><b><b>Download</b> &#10038;&#10038;&#10038; <a href="https://byltly.com/2uKw6r">https://byltly.com/2uKw6r</a></b></p><br /><br />
7
- <ul>
8
- <li><b>Customize and scale your CAD platform</b>: You can tailor your application to your specific needs and preferences, such as adding or removing features, changing the user interface, modifying commands and menus, creating custom objects and entities, etc. You can also scale your application to support different platforms, devices, languages, and markets.</li>
9
- <li><b>Use powerful drawing and editing tools</b>: You can access the same drawing and editing tools that are available in AutoCAD, such as lines, arcs, circles, polylines, hatches, blocks, dimensions, text, etc. You can also use advanced tools such as parametric constraints, dynamic blocks, associative arrays, etc.</li>
10
- <li><b>Support various file formats and standards</b>: You can read and write DWG files, the native file format of AutoCAD, as well as other common file formats such as DXF, DWF, PDF, etc. You can also comply with industry standards such as ISO, ANSI, DIN, etc.</li>
11
- <li><b>Integrate with other Autodesk products and cloud services</b>: You can leverage the power of other Autodesk products and cloud services that are compatible with AutoCAD OEM 2017, such as Inventor, Revit, Fusion 360, BIM 360, etc. You can also use Autodesk APIs and SDKs to extend the functionality of your application.</li>
12
- </ul>
13
- <h2>How to download and install AutoCAD OEM 2017</h2>
14
- <p>To download and install AutoCAD OEM 2017 on your computer, you need to follow these steps:</p>
15
- <ol>
16
- <li><b>Check the system requirements and compatibility</b>: Before you download AutoCAD OEM 2017, make sure that your computer meets the minimum system requirements for running the software. You can find the system requirements here. You also need to check if your operating system is compatible with AutoCAD OEM 2017. The software supports Windows 10 (64-bit), Windows 8.1 (64-bit), Windows 8 (64-bit), Windows 7 SP1 (64-bit), Windows Server 2016 (64-bit), Windows Server R2 (64-bit), Windows Server R2 SP1 (64-bit), Windows Server R2 SP2 (64-bit), Windows Server R2 SP3 (64-bit), Windows Server R2 SP4 (64-bit), Windows Server R2 SP5 (64-bit), Windows Server R2 SP6 (64-bit), Windows Server R2 SP7 (64-bit), Windows Server R2 SP8 (64-bit), Windows Server R2 SP9 (64-bit), Windows Server R2 SP10 (64-bit).</li>
17
- <h2>How to crack and activate AutoCAD OEM 2017</h2>
18
- <p>To crack and activate AutoCAD OEM 2017, you need to use a tool called X-force 2017, which is a keygen that can generate product keys for all Autodesk products. Here is how to use X-force 2017 to crack and activate AutoCAD OEM 2017:</p>
19
- <p>How to activate AutoCAD OEM 2017 with crack and keygen<br />
20
- AutoCAD OEM 2017 license code generator download free<br />
21
- Crack AutoCAD OEM 2017 for lifetime activation without product key<br />
22
- AutoCAD OEM 2017 serial number and activation code free download<br />
23
- AutoCAD OEM 2017 full version cracked software free download<br />
24
- Download AutoCAD OEM 2017 crack patch keygen torrent<br />
25
- AutoCAD OEM 2017 offline installer with crack and product key<br />
26
- AutoCAD OEM 2017 registration code and license key free download<br />
27
- AutoCAD OEM 2017 crack only download no survey<br />
28
- AutoCAD OEM 2017 activation key and crack free download<br />
29
- AutoCAD OEM 2017 crack file download for windows 10<br />
30
- AutoCAD OEM 2017 product key generator online free<br />
31
- AutoCAD OEM 2017 crack and keygen download for mac<br />
32
- AutoCAD OEM 2017 license key and crack free download<br />
33
- AutoCAD OEM 2017 crack software download for pc<br />
34
- AutoCAD OEM 2017 keygen and crack free download<br />
35
- AutoCAD OEM 2017 activation code and product key free download<br />
36
- AutoCAD OEM 2017 crack download for windows 7<br />
37
- AutoCAD OEM 2017 product key and crack free download<br />
38
- AutoCAD OEM 2017 serial key and crack free download<br />
39
- AutoCAD OEM 2017 crack tool download for windows 8<br />
40
- AutoCAD OEM 2017 license code and product key free download<br />
41
- AutoCAD OEM 2017 full crack download for windows xp<br />
42
- AutoCAD OEM 2017 activation key generator online free<br />
43
- AutoCAD OEM 2017 crack and serial number free download<br />
44
- AutoCAD OEM 2017 product key finder online free<br />
45
- AutoCAD OEM 2017 crack and patch download for linux<br />
46
- AutoCAD OEM 2017 license key finder online free<br />
47
- AutoCAD OEM 2017 full version with crack and product key<br />
48
- AutoCAD OEM 2017 serial number generator online free<br />
49
- AutoCAD OEM 2017 crack and license code free download<br />
50
- AutoCAD OEM 2017 product key checker online free<br />
51
- AutoCAD OEM 2017 crack and activation key free download<br />
52
- AutoCAD OEM 2017 license code checker online free<br />
53
- AutoCAD OEM 2017 full version with crack and serial number<br />
54
- AutoCAD OEM 2017 serial number checker online free<br />
55
- AutoCAD OEM 2017 crack and registration code free download<br />
56
- AutoCAD OEM 2017 license code finder online free<br />
57
- AutoCAD OEM 2017 full version with crack and license key<br />
58
- AutoCAD OEM 2017 serial number finder online free<br />
59
- AutoCAD OEM 2017 crack and license key free download<br />
60
- AutoCAD OEM 2017 product key generator offline free<br />
61
- AutoCAD OEM 2017 full version with crack and activation code<br />
62
- AutoCAD OEM 2017 activation code generator offline free<br />
63
- AutoCAD OEM 2017 crack and serial key free download<br />
64
- AutoCAD OEM 2017 license code generator offline free<br />
65
- AutoCAD OEM 2017 full version with crack and registration code<br />
66
- AutoCAD OEM 2017 registration code generator offline free<br />
67
- AutoCAD OEM 2017 crack and product key free download</p>
68
- <ol>
69
- <li><b>What is X-force 2017 and how does it work?</b>: X-force 2017 is a software that can generate product keys for all Autodesk products, including AutoCAD OEM 2017. It works by creating a code that matches the specific product and version that you want to activate. The code is then entered into the activation window of the software, and the software is activated.</li>
70
- <li><b>How to use X-force 2017 to generate a product key</b>: To use X-force 2017 to generate a product key for AutoCAD OEM 2017, you need to follow these steps: <ul>
71
- <li>Download X-force 2017 from one of these links . Make sure you download the correct version for your operating system (32-bit or 64-bit).</li>
72
- <li>Extract the downloaded file and run X-force 2017 as administrator.</li>
73
- <li>Select "AutoCAD OEM 2017" from the drop-down list and click on "Generate".</li>
74
- <li>Copy the generated product key and paste it into the activation window of AutoCAD OEM 2017.</li>
75
- </ul>
76
- </li>
77
- <li><b>How to enter the product key and activate AutoCAD OEM 2017</b>: To enter the product key and activate AutoCAD OEM 2017, you need to follow these steps: <ul>
78
- <li>Open AutoCAD OEM 2017 and click on "Enter a Serial Number".</li>
79
- <li>Select "I have an activation code from Autodesk" and click on "Next".</li>
80
- <li>Paste the product key that you generated with X-force 2017 into the "Product Key" field.</li>
81
- <li>Click on "Next" and then on "Finish".</li>
82
- <li>Enjoy your activated AutoCAD OEM 2017.</li>
83
- </ul>
84
- </li>
85
- </ol>
86
- <h2>Benefits of using AutoCAD OEM 2017 crack and product key</h2>
87
- <p>By using AutoCAD OEM 2017 crack and product key, you can enjoy several benefits, such as:</p>
88
- <ul>
89
- <li><b>Access to full features and updates</b>: You can use all the features and functions of AutoCAD OEM 2017 without any limitations or restrictions. You can also get access to the latest updates and patches that can improve the performance and stability of the software.</li>
90
- <li><b>Save money and time</b>: You can save money by not having to buy a license or subscription for AutoCAD OEM 2017. You can also save time by not having to go through a complicated registration or activation process.</li>
91
- <li><b>Avoid malware and viruses</b>: You can avoid malware and viruses that might come with other cracks or hacks that claim to activate AutoCAD OEM 2017. X-force 2017 is a safe and reliable tool that has been tested and verified by many users.</li>
92
- </ul>
93
- <h2>Conclusion</h2>
94
- <p>In conclusion, AutoCAD OEM 2017 is a powerful, customizable, and scalable CAD platform that allows you to create your own branded applications based on the core functionality of AutoCAD. However, to use AutoCAD OEM 2017, you need a valid product key that can activate the software. And since this is not a cheap product, you might be tempted to look for a crack or a universal product key that can bypass the activation process. In this article, we showed you how to download, install, crack, and activate AutoCAD OEM 2017 for free using X-force 2017, a keygen tool that can generate product keys for all Autodesk products. By using this method, you can enjoy several benefits, such as access to full features and updates, saving money and time, and avoiding malware and viruses. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.</p>
95
- <h3>Frequently Asked Questions</h3>
96
- <ol>
97
- <li><b>What is AutoCAD OEM?</b>: AutoCAD OEM is a software development toolkit that allows you to create your own branded applications based on the core functionality of AutoCAD.</li>
98
- <li><b>What is X-force 2017?</b>: X-force 2017 is a software that can generate product keys for all Autodesk products, including AutoCAD OEM 2017.</li>
99
- <li><b>How do I download AutoCAD OEM 2017?</b>: You can download AutoCAD OEM 2017 from the official Autodesk website or from one of these links . You will need to sign in with your Autodesk account or create one if you don't have one.</li>
100
- <li><b>How do I install AutoCAD OEM 2017?</b>: You can install AutoCAD OEM 2017 by following these steps: <ul>
101
- <li>Check the system requirements and compatibility.</li>
102
- <li>Download AutoCAD OEM 2017 from one of these links .</li>
103
- <li>Extract the downloaded file and run setup.exe as administrator.</li>
104
- <li>Follow the installation wizard and accept the license agreement.</li>
105
- <li>Choose your installation type and options.</li>
106
- <li>Click on "Install" and wait for the installation to complete.</li>
107
- <li>Click on "Finish" and restart your computer.</li>
108
- </ul>
109
- </li>
110
- <li><b>How do I crack and activate AutoCAD OEM 2017?</b>: You can crack and activate AutoCAD OEM 2017 by using X-force 2017, a keygen tool that can generate product keys for all Autodesk products. You can follow these steps: <ul>
111
- <li>Download X-force 2017 from one of these links .</li>
112
- <li>Extract the downloaded file and run X-force 2017 as administrator.</li>
113
- <li>Select "AutoCAD OEM 2017" from the drop-down list and click on "Generate".</li>
114
- <li>Copy the generated product key and paste it into the activation window of AutoCAD OEM 2017.</li>
115
- <li>Click on "Next" and then on "Finish".</li>
116
- <li>Enjoy your activated AutoCAD OEM 2017.</li>
117
- </ul>
118
- </li>
119
- <li><b>What are the benefits of using AutoCAD OEM 2017 crack and product key?</b>: By using AutoCAD OEM 2017 crack and product key, you can enjoy several benefits, such as: <ul>
120
- <li>Access to full features and updates.</li>
121
- <li>Save money and time.</li>
122
- <li>Avoid malware and viruses.</li>
123
- </ul>
124
- </li>
125
- </ol>
126
- </p> 0a6ba089eb<br />
127
- <br />
128
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cimatron E11 Download Crack Software.md DELETED
@@ -1,66 +0,0 @@
1
-
2
- <h1>Cimatron E11 Download Crack Software</h1>
3
- <p>If you are looking for a powerful CAD/CAM software for tooling and manufacturing, you might have heard of Cimatron E11. This software is designed to help you create high-quality tools of any complexity or size, as well as optimize your CNC machining processes. However, you might also be wondering how to download Cimatron E11 crack software for free, without paying for a license or subscription. In this article, we will explain what Cimatron E11 is, why people want to download crack software, what are the risks of doing so, how to download it safely, and what are the alternatives to downloading crack software.</p>
4
- <h2>What is Cimatron E11?</h2>
5
- <p>Cimatron E11 is a CAD/CAM software that provides an end-to-end solution for designing and manufacturing tools, including molds, dies, electrodes, plates, and discrete parts. It also offers a full range of CNC technologies, from simple 2.5-axis milling and drilling to complex 5-axis machining.</p>
6
- <h2>Cimatron E11 Download Crack Software</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658; <a href="https://byltly.com/2uKvV5">https://byltly.com/2uKvV5</a></b></p><br /><br />
7
- <p>Some of the features and benefits of Cimatron E11 are:</p>
8
- <ul>
9
- <li>It has a single, integrated, dedicated solution for tooling that boosts productivity and quality.</li>
10
- <li>It can handle any geometry, from solids and surfaces to meshes and STL files.</li>
11
- <li>It can machine any part, from simple prismatic shapes to intricate freeform surfaces.</li>
12
- <li>It has local training and support from tooling experts.</li>
13
- <li>It can shorten tool delivery time by up to 70 percent.</li>
14
- <li>It can easily handle engineering changes and updates.</li>
15
- </ul>
16
- <h2>Why do people want to download crack software?</h2>
17
- <p>Crack software is any software that has been modified or hacked to bypass its original security features, such as activation codes, license keys, or digital rights management. People who download crack software usually do so for one or more of the following reasons:</p>
18
- <ul>
19
- <li>To save money and avoid paying for licensing fees or subscriptions.</li>
20
- <li>To access premium features and updates that are otherwise restricted or unavailable.</li>
21
- <li>To bypass geo-re - To bypass geo-restrictions and censorship that may limit their access to certain software or content.</li>
22
- </ul>
23
- <p>However, downloading crack software is not only illegal, but also risky and unethical. Here are some of the dangers of downloading crack software.</p>
24
- <h2>What are the risks of downloading crack software?</h2>
25
- <p>Downloading crack software may seem like a good idea at first, but it can have serious consequences for you and your computer. Some of the risks of downloading crack software are:</p>
26
- <ul>
27
- <li>Legal issues and penalties. Downloading crack software is a form of piracy, which is a violation of intellectual property rights. Depending on the laws of your country, you may face fines, lawsuits, or even jail time for using crack software.</li>
28
- <li>Malware and viruses. Crack software often contains malicious code that can infect your computer with malware, viruses, spyware, ransomware, or other threats. These can damage your files, steal your data, compromise your security, or even take over your system.</li>
29
- <li>Poor performance and compatibility. Crack software may not work properly or at all on your computer, as it may be outdated, corrupted, or incompatible with your hardware or software. It may also cause errors, crashes, freezes, or slowdowns on your system.</li>
30
- <li>Lack of support and warranty. Crack software does not come with any technical support or warranty from the original developer or vendor. If you encounter any problems or issues with the software, you will not be able to get any help or assistance. You will also lose any rights or benefits that come with a legitimate license.</li>
31
- </ul>
32
- <h2>How to download Cimatron E11 crack software safely?</h2>
33
- <p>If you still want to download Cimatron E11 crack software despite the risks, you should at least take some precautions to protect yourself and your computer. Here are some tips on how to download Cimatron E11 crack software safely:</p>
34
- <p></p>
35
- <ul>
36
- <li>Use a reputable torrent site. Torrent sites are platforms where users can share and download files using peer-to-peer technology. However, not all torrent sites are trustworthy or reliable. Some may contain fake, infected, or low-quality files. To avoid these, you should use a reputable torrent site that has positive reviews, ratings, comments, and feedback from other users.</li>
37
- <li>Use a VPN to protect your privacy and security. A VPN (virtual private network) is a service that encrypts your internet traffic and hides your IP address and location from prying eyes. This can help you avoid being tracked, monitored, or blocked by your ISP (internet service provider), government, or hackers when downloading crack software. A VPN can also help you access geo-restricted or censored content.</li>
38
- <li>Scan the downloaded file with an antivirus program. Before you open or install the downloaded file, you should scan it with an antivirus program to check for any malware or viruses. You should also update your antivirus program regularly to keep it effective against new threats.</li>
39
- <li>Backup your data and system before installing. Installing crack software can cause irreversible damage to your data and system. To prevent losing your important files or settings, you should backup your data and system before installing the crack software. You can use an external hard drive, a cloud service, or a recovery tool to backup your data and system.</li>
40
- </ul>
41
- <h2>What are the alternatives to downloading crack software?</h2>
42
- <p>Downloading crack software is not worth the risk and hassle. There are better and safer ways to get CAD/CAM software without breaking the law or compromising your computer. Some of the alternatives to downloading crack software are:</p>
43
- <ul>
44
- <li>Use a free or open source CAD/CAM software. There are many free or open source CAD/CAM software that you can use for tooling and manufacturing without paying anything. Some examples are FreeCAD, LibreCAD, OpenSCAD, Blender, and G-Code. These software may not have all the features and functions of Cimatron E11, but they can still help you create and machine 2D and 3D models.</li>
45
- <li>Use a trial or demo version of Cimatron E11. If you want to try Cimatron E11 before buying it, you can use a trial or demo version of the software that is available on the official website of Cimatron. The trial or demo version will let you use some of the features and functions of Cimatron E11 for a limited time or with some limitations.</li>
46
- <li>Buy a legitimate license of Cimatron E11 from an authorized dealer. The best and safest way to get Cimatron E11 is - Buy a legitimate license of Cimatron E11 from an authorized dealer. The best and safest way to get Cimatron E11 is to buy a legitimate license of the software from an authorized dealer. This way, you will get the full version of the software with all the features and updates, as well as technical support and warranty. You will also avoid any legal issues or penalties for using crack software. The price of Cimatron E11 may vary depending on the type and number of licenses, as well as the region and currency. You can contact a local dealer for a quote.</li>
47
- </ul>
48
- <h2>Conclusion</h2>
49
- <p>Cimatron E11 is a CAD/CAM software that provides an end-to-end solution for designing and manufacturing tools, including molds, dies, electrodes, plates, and discrete parts. It also offers a full range of CNC technologies, from simple 2.5-axis milling and drilling to complex 5-axis machining. However, downloading Cimatron E11 crack software is not a good idea, as it can expose you to legal issues, malware, poor performance, and lack of support. Instead, you should consider using a free or open source CAD/CAM software, a trial or demo version of Cimatron E11, or buying a legitimate license of Cimatron E11 from an authorized dealer. By doing so, you will be able to enjoy the benefits of Cimatron E11 without risking your computer or breaking the law.</p>
50
- <p>If you are interested in learning more about Cimatron E11 or finding a dealer near you, you can visit the official website of Cimatron at <a href="">https://www.cimatron.com/</a>. You can also access online tutorials, videos, manuals, and forums on the website to help you get started with Cimatron E11.</p>
51
- <h2>FAQs</h2>
52
- <p>Here are some frequently asked questions about Cimatron E11 and crack software:</p>
53
- <ol>
54
- <li><b>What is the difference between CAD and CAM software?</b></li>
55
- <p>CAD software is used to design 2D and 3D models, while CAM software is used to program CNC machines to make the models.</p>
56
- <li><b>How much does Cimatron E11 cost?</b></li>
57
- <p>The price of Cimatron E11 depends on the type and number of licenses, as well as the region and currency. You can contact a local dealer for a quote.</p>
58
- <li><b>Is Cimatron E11 compatible with Windows 10?</b></li>
59
- <p>Yes, Cimatron E11 is compatible with Windows 10, as well as Windows 7 and Windows 8.1.</p>
60
- <li><b>How can I learn how to use Cimatron E11?</b></li>
61
- <p>You can access online tutorials, videos, manuals, and forums on the official website of Cimatron. You can also enroll in training courses offered by Cimatron or its partners.</p>
62
- <li><b>Where can I get support for Cimatron E11?</b></li>
63
- <p>You can contact the technical support team of Cimatron by phone, email, or online chat. You can also visit the support portal for FAQs, downloads, updates, and tips.</p>
64
- </ol></p> b2dd77e56b<br />
65
- <br />
66
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Ontrack Disk Manager 10.46 ISO and Overcome BIOS Limitations on Your Hard Drive.md DELETED
@@ -1,178 +0,0 @@
1
-
2
- <h1>Download Ontrack Disk Manager 10.46 ISO</h1>
3
- <p>If you are looking for a reliable and easy-to-use tool to manage your hard drives, you might want to check out Ontrack Disk Manager 10.46 ISO. This is a powerful software that can help you partition, format, backup, restore, repair, and erase your hard drives in a few simple steps. In this article, we will show you what Ontrack Disk Manager is, how to download it, and how to use it effectively.</p>
4
- <h2>Download ontrack disk manager 10.46 iso</h2><br /><p><b><b>Download File</b> &#10002; <a href="https://byltly.com/2uKyIa">https://byltly.com/2uKyIa</a></b></p><br /><br />
5
- <h2>What is Ontrack Disk Manager?</h2>
6
- <p>Ontrack Disk Manager is a software that was developed by Ontrack Data Recovery, a company that specializes in data recovery and disk management solutions. Ontrack Disk Manager is designed to help users create, delete, resize, and format partitions on their hard drives, as well as perform various maintenance tasks such as backup, restore, repair, and erase.</p>
7
- <p>Ontrack Disk Manager can work with different types of hard drives, such as IDE, SATA, SCSI, USB, and FireWire. It can also support various file systems, such as FAT16, FAT32, NTFS, EXT2, EXT3, and EXT4. It can handle hard drives up to 4 TB in size.</p>
8
- <h3>Features of Ontrack Disk Manager</h3>
9
- <p>Some of the main features of Ontrack Disk Manager are:</p>
10
- <ul>
11
- <li>It can create up to four primary partitions and unlimited logical partitions on a hard drive.</li>
12
- <li>It can format partitions with different file systems and cluster sizes.</li>
13
- <li>It can copy partitions from one hard drive to another.</li>
14
- <li>It can backup and restore partitions or entire hard drives.</li>
15
- <li>It can repair damaged or corrupted partitions or hard drives.</li>
16
- <li>It can erase partitions or entire hard drives securely.</li>
17
- <li>It can hide or unhide partitions.</li>
18
- <li>It can change the drive letter or label of partitions.</li>
19
- <li>It can check the disk surface for errors and bad sectors.</li>
20
- <li>It can defragment partitions or entire hard drives.</li>
21
- </ul>
22
- <h3>Benefits of Ontrack Disk Manager</h3>
23
- <p>Some of the benefits of using Ontrack Disk Manager are:</p>
24
- <p>How to download ontrack disk manager 10.46 iso for free<br />
25
- Ontrack disk manager 10.46 iso download link<br />
26
- Download ontrack disk manager 10.46 iso full version<br />
27
- Ontrack disk manager 10.46 iso torrent download<br />
28
- Download ontrack disk manager 10.46 iso with crack<br />
29
- Ontrack disk manager 10.46 iso bootable usb download<br />
30
- Download ontrack disk manager 10.46 iso for windows 10<br />
31
- Ontrack disk manager 10.46 iso direct download<br />
32
- Download ontrack disk manager 10.46 iso from official site<br />
33
- Ontrack disk manager 10.46 iso online download<br />
34
- Download ontrack disk manager 10.46 iso for mac<br />
35
- Ontrack disk manager 10.46 iso cd download<br />
36
- Download ontrack disk manager 10.46 iso for linux<br />
37
- Ontrack disk manager 10.46 iso google drive download<br />
38
- Download ontrack disk manager 10.46 iso without registration<br />
39
- Ontrack disk manager 10.46 iso mega download<br />
40
- Download ontrack disk manager 10.46 iso for android<br />
41
- Ontrack disk manager 10.46 iso dvd download<br />
42
- Download ontrack disk manager 10.46 iso for windows 7<br />
43
- Ontrack disk manager 10.46 iso zip download<br />
44
- Download ontrack disk manager 10.46 iso for windows xp<br />
45
- Ontrack disk manager 10.46 iso rar download<br />
46
- Download ontrack disk manager 10.46 iso for windows 8<br />
47
- Ontrack disk manager 10.46 iso mediafire download<br />
48
- Download ontrack disk manager 10.46 iso without survey<br />
49
- Ontrack disk manager 10.46 iso dropbox download<br />
50
- Download ontrack disk manager 10.46 iso for windows vista<br />
51
- Ontrack disk manager 10.46 iso zippyshare download<br />
52
- Download ontrack disk manager 10.46 iso with serial key<br />
53
- Ontrack disk manager 10.46 iso filehippo download<br />
54
- Download ontrack disk manager 10.46 iso with license key<br />
55
- Ontrack disk manager 10.46 iso softpedia download<br />
56
- Download ontrack disk manager 10.46 iso with activation code<br />
57
- Ontrack disk manager 10.46 iso cnet download<br />
58
- Download ontrack disk manager 10.46 iso with keygen<br />
59
- Ontrack disk manager 10.46 iso filefactory download<br />
60
- Download ontrack disk manager 10.46 iso with patch<br />
61
- Ontrack disk manager 10.46 iso uptobox download<br />
62
- Download ontrack disk manager 10.46 iso with registration code<br />
63
- Ontrack disk manager 10.46 iso rapidshare download<br />
64
- Download ontrack disk manager 10.46 iso with product key<br />
65
- Ontrack disk manager 10.46 iso sendspace download<br />
66
- Download ontrack disk manager 10.46 iso with crack file<br />
67
- Ontrack disk manager 10.46 iso turbobit download<br />
68
- Download ontrack disk manager 10.46 iso with serial number<br />
69
- Ontrack disk manager 10.46 iso uploaded download<br />
70
- Download ontrack disk manager 10.46 iso with activation key<br />
71
- Ontrack disk manager 10.46 iso depositfiles download<br />
72
- Download ontrack disk manager 10.46 iso with crack folder<br />
73
- Ontrack disk manager 10.46 iso hotfile download</p>
74
- <ul>
75
- <li>It can help you optimize the performance and storage space of your hard drives.</li>
76
- <li>It can help you protect your data from loss or damage by creating backups and restoring them when needed.</li>
77
- <li>It can help you recover your data from damaged or corrupted hard drives by repairing them.</li>
78
- <li>It can help you securely erase your data from your hard drives when you want to dispose of them or sell them.</li>
79
- <li>It can help you troubleshoot and fix various disk-related problems.</li>
80
- </ul>
81
- <h2>How to Download Ontrack Disk Manager 10.46 ISO?</h2>
82
- <p>If you want to use Ontrack Disk Manager, you need to download its ISO file first. An ISO file is a disk image file that contains all the files and folders of a CD or DVD. You can use an ISO file to create a bootable CD or USB drive that you can use to run Ontrack Disk Manager without installing it on your computer.</p>
83
- <h3>Requirements for Downloading Ontrack Disk Manager 10.46 ISO</h3>
84
- <p>To download Ontrack Disk Manager 10.46 ISO, you need the following:</p>
85
- <ul>
86
- <li>A computer with an internet connection.</li>
87
- <li>A web browser that supports downloading large files.</li>
88
- <li>A blank CD or USB drive with at least 700 MB of free space.</li>
89
- <li>A CD/DVD burner software or a USB creator software that can burn or write ISO files.</li>
90
- </ul>
91
- <h3>Steps for Downloading Ontrack Disk Manager 10.46 ISO</h3>
92
- <p>To download Ontrack Disk Manager 10.46 ISO, follow these steps:</p>
93
- <ol>
94
- <li>Open your web browser and go to this link: <a href="https://archive.org/details/OnTrackDiskManager_201801">https://archive.org/details/OnTrackDiskManager_201801</a>.</li>
95
- <li>Click on the "DOWNLOAD OPTIONS" section and select "ISO IMAGE".</li>
96
- <li>Click on the "OnTrack_Disk_Manager_10.46.iso" file and save it to your computer.</li>
97
- <li>The download may take some time depending on your internet speed and the size of the file (about 688 MB).</li>
98
- </ol>
99
- <h2>How to Use Ontrack Disk Manager 10.46 ISO?</h2>
100
- <p>After downloading Ontrack Disk Manager 10.46 ISO, you need to burn it to a CD or USB drive and boot from it to run the software. Here are the steps for doing that:</p>
101
- <h3>How to Burn Ontrack Disk Manager 10.46 ISO to a CD or USB Drive</h3>
102
- <p>To burn Ontrack Disk Manager 10.46 ISO to a CD or USB drive, you need a CD/DVD burner software or a USB creator software that can handle ISO files. There are many free and paid software available online that you can use for this purpose. Some examples are ImgBurn, CDBurnerXP, Rufus, UNetbootin, etc.</p>
103
- <p>The exact steps for burning an ISO file may vary depending on the software you use, but generally they are similar to these:</p>
104
- <ol>
105
- <li>Insert a blank CD or USB drive into your computer.</li>
106
- <li>Open the CD/DVD burner software or the USB creator software and select the option to burn or write an ISO file.</li>
107
- <li>Browse and select the OnTrack_Disk_Manager_10.46.iso file that you downloaded earlier.</li>
108
- <li>Select the destination drive (the CD or USB drive) where you want to burn or write the ISO file.</li>
109
- <li>Start the burning or writing process and wait until it is completed.</li>
110
- </ol>
111
- <h3>How to Boot from Ontrack Disk Manager 10.46 ISO</h3>
112
- <p>To boot from Ontrack Disk Manager 10.46 ISO, you need to change the boot order in your computer's BIOS settings so that it prioritizes the CD or USB drive over the hard drive. The exact steps for doing this may vary depending on your computer model and BIOS version, but generally they are similar to these:</p>
113
- <ol>
114
- <li>Restart your computer and press the appropriate key (usually F2, F12, Del, Esc) to enter the BIOS setup menu.</li>
115
- <li>Navigate to the boot options section and change the boot order so that the CD or USB drive is listed first before the hard drive.</li>
116
- <li>Save the changes and exit the BIOS setup menu.</li>
117
- <li>Your computer will restart again and boot from the CD or USB drive where you burned or wrote the OnTrack_Disk_Manager_10.46.iso file.</li>
118
- </ol>
119
- <h3>How to Partition and Format a Hard Drive with Ontrack Disk Manager 10.46 ISO</h3>
120
- <p>To partition and format a hard drive with OnTrack_Disk_Manager_10.46.iso , follow these steps:</p>
121
- <ol>
122
- <li>After booting from the CD or USB drive where you burned or wrote the OnTrack_Disk_Manager_10.46.iso file , you will see a welcome screen with some options . Choose "Start Program" . </li>
123
- <li>You will see a main menu with some options . Choose "Disk Utilities" . </li>
124
- <li>You will see a list of all the hard drives detected by the software . Choose the one that you want to partition and format . </li>
125
- format , hide , unhide , or change the drive letter or label of the partitions . You can also use the "Auto" option to let the software automatically partition and format the hard drive for you . </li>
126
- <li>After making the changes that you want , click on "Apply" to confirm them . The software will ask you to reboot your computer to complete the process . </li>
127
- <li>After rebooting your computer , you will see your new partitions and file systems on your hard drive . </li>
128
- </ol>
129
- <h2>Tips and Tricks for Using Ontrack Disk Manager 10.46 ISO</h2>
130
- <p>Here are some tips and tricks for using Ontrack Disk Manager 10.46 ISO effectively:</p>
131
- <h3>How to Backup and Restore a Hard Drive with Ontrack Disk Manager 10.46 ISO</h3>
132
- <p>To backup and restore a hard drive with Ontrack Disk Manager 10.46 ISO , follow these steps:</p>
133
- <ol>
134
- <li>Boot from the CD or USB drive where you burned or wrote the OnTrack_Disk_Manager_10.46.iso file . </li>
135
- <li>From the main menu , choose "Backup/Restore" . </li>
136
- <li>You will see two options : "Backup" and "Restore" . Choose the one that you want to do . </li>
137
- <li>If you choose "Backup" , you will see a list of all the hard drives detected by the software . Choose the one that you want to backup . You will also need to choose a destination drive where you want to save the backup file . The destination drive can be another hard drive , a CD/DVD , or a network location . You can also choose to compress or encrypt the backup file for security or space reasons . </li>
138
- <li>If you choose "Restore" , you will need to locate and select the backup file that you want to restore . You will also need to choose a target drive where you want to restore the backup file . The target drive can be the same as the original drive or a different one . You can also choose to overwrite or append the existing data on the target drive . </li>
139
- <li>After making your choices , click on "Start" to begin the backup or restore process . The software will show you a progress bar and some details about the process . Wait until it is completed . </li>
140
- </ol>
141
- <h3>How to Repair a Damaged or Corrupted Hard Drive with Ontrack Disk Manager 10.46 ISO</h3>
142
- <p>To repair a damaged or corrupted hard drive with Ontrack Disk Manager 10.46 ISO , follow these steps:</p>
143
- <ol>
144
- <li>Boot from the CD or USB drive where you burned or wrote the OnTrack_Disk_Manager_10.46.iso file . </li>
145
- <li>From the main menu , choose "Disk Utilities" . </li>
146
- <li>You will see a list of all the hard drives detected by the software . Choose the one that you want to repair . </li>
147
- <li>You will see a graphical representation of the hard drive with its current partitions . You can use the mouse or keyboard to select any partition that you want to repair . You can also use the "Select All" option to select all partitions on the hard drive . </li>
148
- <li>Click on "Repair" to start the repair process . The software will scan and fix any errors or bad sectors on the selected partitions . It will also try to recover any lost or deleted data on them . The software will show you a progress bar and some details about the process . Wait until it is completed . </li>
149
- </ol>
150
- <h3>How to Erase a Hard Drive with Ontrack Disk Manager 10.46 ISO</h3>
151
- <p>To erase a hard drive with Ontrack Disk Manager 10.46 ISO , follow these steps:</p>
152
- <ol>
153
- <li>Boot from the CD or USB drive where you burned or wrote the OnTrack_Disk_Manager_10.46.iso file . </li>
154
- <li>From the main menu , choose "Disk Utilities" . </li>
155
- <li>You will see a list of all the hard drives detected by the software . Choose the one that you want to erase . </li>
156
- the "Select All" option to select all partitions on the hard drive . </li>
157
- <li>Click on "Erase" to start the erase process . The software will ask you to confirm your choice and warn you that all data on the selected partitions will be permanently deleted . Click on "Yes" to proceed . </li>
158
- <li>The software will overwrite all data on the selected partitions with zeros or random data , depending on the level of security that you choose . You can choose from three levels of security : "Quick" , "Normal" , or "Secure" . The higher the level of security , the longer the erase process will take . The software will show you a progress bar and some details about the process . Wait until it is completed . </li>
159
- </ol>
160
- <h2>Conclusion</h2>
161
- <p>Ontrack Disk Manager 10.46 ISO is a powerful and easy-to-use software that can help you manage your hard drives in various ways . You can use it to partition , format , backup , restore , repair , and erase your hard drives in a few simple steps . You can also use it to troubleshoot and fix various disk-related problems . You can download it for free from this link: <a href="https://archive.org/details/OnTrackDiskManager_201801">https://archive.org/details/OnTrackDiskManager_201801</a> and burn it to a CD or USB drive to run it without installing it on your computer . We hope this article has helped you learn more about Ontrack Disk Manager 10.46 ISO and how to use it effectively . If you have any questions or feedback , please feel free to leave a comment below . </p>
162
- <h2>FAQs</h2>
163
- <p>Here are some frequently asked questions about Ontrack Disk Manager 10.46 ISO :</p>
164
- <ol>
165
- <li>Q: Is Ontrack Disk Manager 10.46 ISO compatible with Windows 10?</li>
166
- <li>A: Yes, Ontrack Disk Manager 10.46 ISO is compatible with Windows 10 and other versions of Windows, such as Windows 8, Windows 7, Windows Vista, and Windows XP.</li>
167
- <li>Q: Can I use Ontrack Disk Manager 10.46 ISO to clone a hard drive?</li>
168
- <li>A: Yes, you can use Ontrack Disk Manager 10.46 ISO to clone a hard drive by using the "Copy" option in the "Disk Utilities" section. You can copy a partition or an entire hard drive to another hard drive.</li>
169
- <li>Q: Can I use Ontrack Disk Manager 10.46 ISO to recover deleted files?</li>
170
- <li>A: Yes, you can use Ontrack Disk Manager 10.46 ISO to recover deleted files by using the "Repair" option in the "Disk Utilities" section. The software will try to recover any lost or deleted data on the selected partitions.</li>
171
- <li>Q: Can I use Ontrack Disk Manager 10.46 ISO to create bootable disks?</li>
172
- <li>A: Yes, you can use Ontrack Disk Manager 10.46 ISO to create bootable disks by using the "Create Bootable Disk" option in the main menu. You can create bootable disks for different operating systems, such as DOS, Windows, Linux, etc.</li>
173
- <li>Q: Can I use Ontrack Disk Manager 10.46 ISO to resize partitions?</li>
174
- <li>A: Yes, you can use Ontrack Disk Manager 10.46 ISO to resize partitions by using the "Resize" option in the "Disk Utilities" section. You can increase or decrease the size of any partition on your hard drive.</li>
175
- </ol>
176
- </p> 0a6ba089eb<br />
177
- <br />
178
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/DoneEx XCell Compiler 1.8.1 NEW.rar Utorrent.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>DoneEx XCell Compiler 1.8.1 NEW.rar Utorrent</h2><br /><p><b><b>Download File</b> &#128504;&#128504;&#128504; <a href="https://imgfil.com/2uy0mz">https://imgfil.com/2uy0mz</a></b></p><br /><br />
2
- <br />
3
- ... ://cracknets.net/v/d/t/harry+potter+and+the+half+blood+prince+reloaded+rar+password/ ... /v/b/e/Adobe+Photoshop+Elements+15+Crack+With+Latest+Serial+Key+Free+Download/ ... monthly 0.5 https://cracknets.net/m/o/s/Puzzle+Hero+v+1.8.1/ ... monthly 0.5 https://cracknets.net/q/z/x/DoneEx+XCell+Compiler+2.4.1.5/ ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Cookie Run Kingdom Now and Meet the Cutest Cookies Ever.md DELETED
@@ -1,123 +0,0 @@
1
-
2
- <h1>Where to Download Cookie Run: Kingdom</h1>
3
- <p>If you are looking for a sweet and addictive mobile game that combines action, strategy, and city-building, you might want to check out Cookie Run: Kingdom. This game is the latest installment in the Cookie Run series by Devsisters, and it has become a huge hit since its global launch in January 2021. In this article, we will tell you everything you need to know about Cookie Run: Kingdom, including what it is, what are its features, where and how to download it, what are some tips and tricks for beginners, and what are some pros and cons of playing it.</p>
4
- <h2>What is Cookie Run: Kingdom and why is it popular?</h2>
5
- <p>Cookie Run: Kingdom is a game that mixes real-time battle strategy and city-building, with a wide cast of unique cookies and a customizable kingdom. It tells the story of cookies who create a kingdom of their own to call home. Throughout their adventures, they explore other ancient kingdoms, battle fierce adversaries of the darkness, and unravel the mysteries of the ancient heroes who disappeared from the world.</p>
6
- <h2>where to download cookie run kingdom</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://urlin.us/2uSXSu">https://urlin.us/2uSXSu</a></b></p><br /><br />
7
- <p>Cookie Run: Kingdom is popular because it offers a lot of fun and variety for players of all ages and preferences. It has an intriguing RPG story mode, a player-versus-player (PvP) battle mode, and many ways to export the goods that you're making in your kingdom. It also has adorable graphics, catchy music, and charming voice acting. The game is free to play, but it also offers optional in-app purchases for players who want to enhance their experience.</p>
8
- <h2>What are the main features of Cookie Run: Kingdom and how to play it?</h2>
9
- <p>Cookie Run: Kingdom has two main modes: adventure and kingdom. In adventure mode, you can select levels to play that consist of platforming and battling. You can create a team of up to five cookies with different roles, such as attackers, defenders, healers, and supporters. You can also equip them with toppings that boost their stats and abilities. You can control your cookies manually or let them fight automatically. As you beat levels, you earn rewards such as coins, crystals, star jellies, soulstones, treasures, toppings, materials, and new cookies.</p>
10
- <p>In kingdom mode, you can design and build your own kingdom with various buildings and decorations. You can produce materials, craft items, arrange activities, and collect resources from your kingdom. You can also interact with your cookies and other characters in your kingdom. Your kingdom serves as the hub area for most of your activities and a way to help your cookies grow.</p>
11
- <h2>Where and how to download Cookie Run: Kingdom for different devices?</h2>
12
- <p>Cookie Run: Kingdom is available for both iOS and Android devices. You can download it from the App Store or Google Play Store depending on your device. The game requires iOS 13.0 or later or Android 4.4 or later to run. The game also supports iPadOS 13.0 or later and macOS 11.0 or later with Apple M1 chip or later.</p>
13
- <p>To download Cookie Run: Kingdom from the App Store or Google Play Store, follow these steps:</p>
14
- <ol>
15
- <li>Open the App Store or Google Play Store app on your device.</li>
16
- <li>Search for "Cookie Run: Kingdom" in the search bar.</li>
17
- <li>Tap on the game icon that appears in the results.</li>
18
- <li>Tap on the "Get" or "Install" button to start downloading the game.</li>
19
- <li>Wait for the download to finish and then tap on the "Open" button to launch the game.</li>
20
- </ol>
21
- <p>You can also use these links to download Cookie Run: Kingdom directly from your device:</p>
22
- <ul>
23
- <li><a href="(^3^)">Download Cookie Run: Kingdom from the App Store</a></li>
24
- <li><a href="(^2^)">Download Cookie Run: Kingdom from Google Play Store</a></li>
25
- </ <h2>What are some tips and tricks for beginners in Cookie Run: Kingdom?</h2>
26
- <p>If you are new to Cookie Run: Kingdom, you might feel overwhelmed by the amount of things to do and learn in the game. Don't worry, we have some tips and tricks to help you get started and enjoy the game more. Here are some of them:</p>
27
- <ul>
28
- <li>Follow the main story quests and side quests to progress in the game and unlock new features. You can also get rewards such as coins, crystals, star jellies, soulstones, treasures, toppings, materials, and new cookies by completing quests.</li>
29
- <li>Upgrade your cookies and toppings regularly to increase their power and performance. You can use soulstones to level up your cookies and star jellies to enhance your toppings. You can also use treasures to give your cookies special effects.</li>
30
- <li>Build and upgrade your kingdom buildings to produce more resources and items. You can use coins and materials to construct and improve your buildings. You can also use crystals to speed up the process or buy more slots.</li>
31
- <li>Explore the world map and discover new areas and secrets. You can find hidden chests, events, bosses, and other surprises by exploring the map. You can also earn rewards such as coins, crystals, star jellies, soulstones, treasures, toppings, materials, and new cookies by clearing areas.</li>
32
- <li>Join a guild and cooperate with other players. You can chat with your guild members, exchange gifts, request help, participate in guild battles, and access exclusive guild features. You can also earn rewards such as coins, crystals, star jellies, soulstones, treasures, toppings, materials, and new cookies by contributing to your guild.</li>
33
- </ul>
34
- <h2>What are some pros and cons of Cookie Run: Kingdom and how does it compare to other games in the genre?</h2>
35
- <p>Cookie Run: Kingdom is a game that has many pros and cons that might appeal or deter different players. Here are some of them:</p>
36
- <p>How to download cookie run kingdom on android<br />
37
- Cookie run kingdom download for ios devices<br />
38
- Cookie run kingdom apk download latest version<br />
39
- Cookie run kingdom pc download free<br />
40
- Cookie run kingdom mac download with m1 chip<br />
41
- Best site to download cookie run kingdom safely<br />
42
- Cookie run kingdom download size and requirements<br />
43
- Cookie run kingdom download error and how to fix it<br />
44
- Cookie run kingdom download link for google play store<br />
45
- Cookie run kingdom download link for app store<br />
46
- Cookie run kingdom download guide for beginners<br />
47
- Cookie run kingdom download tips and tricks<br />
48
- Cookie run kingdom download rewards and benefits<br />
49
- Cookie run kingdom download review and rating<br />
50
- Cookie run kingdom download problems and solutions<br />
51
- How to update cookie run kingdom after downloading<br />
52
- How to uninstall cookie run kingdom from your device<br />
53
- How to transfer cookie run kingdom data to another device<br />
54
- How to play cookie run kingdom offline without downloading<br />
55
- How to play cookie run kingdom online with friends<br />
56
- How to install cookie run kingdom on windows 10<br />
57
- How to install cookie run kingdom on macos 11.0 or later<br />
58
- How to install cookie run kingdom on chromebook<br />
59
- How to install cookie run kingdom on fire tablet<br />
60
- How to install cookie run kingdom on smart tv<br />
61
- How to get cookie run kingdom without downloading<br />
62
- How to get cookie run kingdom for free without paying<br />
63
- How to get cookie run kingdom on steam or epic games store<br />
64
- How to get cookie run kingdom on nintendo switch or ps4<br />
65
- How to get cookie run kingdom on xbox one or xbox series x/s<br />
66
- Why you should download cookie run kingdom today<br />
67
- Why you should not download cookie run kingdom now<br />
68
- Why is cookie run kingdom not available for download in my country<br />
69
- Why is cookie run kingdom taking so long to download or update<br />
70
- Why is cookie run kingdom crashing or freezing after downloading<br />
71
- What is cookie run kingdom and how to download it<br />
72
- What is new in cookie run kingdom latest update and how to download it<br />
73
- What is the best way to download cookie run kingdom fast and easy<br />
74
- What is the best device to play cookie run kingdom after downloading it<br />
75
- What is the best treasure and topping combination in cookie run kingdom after downloading it</p>
76
- <table>
77
- <tr>
78
- <th>Pros</th>
79
- <th>Cons</th>
80
- </tr>
81
- <tr>
82
- <td>- Cute and colorful graphics</td>
83
- <td>- Requires internet connection</td>
84
- </tr>
85
- <tr>
86
- <td>- Engaging and diverse gameplay</td>
87
- <td>- Can be repetitive or grindy</td>
88
- </tr>
89
- <tr>
90
- <td>- Lovable and diverse characters</td>
91
- <td>- Can be pay-to-win or gacha-based</td>
92
- </tr>
93
- <tr>
94
- <td>- Immersive and rich story</td>
95
- <td>- Can be buggy or laggy</td>
96
- </tr>
97
- <tr>
98
- <td>- Fun and social features</td>
99
- <td>- Can be addictive or time-consuming</td>
100
- </tr>
101
- </table>
102
- <p>Cookie Run: Kingdom is a game that can be compared to other games in the action-strategy-city-building genre, such as Clash of Clans, Rise of Kingdoms, or Lords Mobile. However, Cookie Run: Kingdom has its own unique charm and style that sets it apart from other games. It has a more whimsical and lighthearted tone, a more casual and accessible gameplay, a more diverse and customizable content, and a more loyal and friendly community.</p>
103
- <h2>Conclusion</h2>
104
- <p>Cookie Run: Kingdom is a game that offers a lot of fun and variety for players who love action, strategy, city-building, and cookies. It has an intriguing RPG story mode, a player-versus-player (PvP) battle mode, and many ways to export the goods that you're making in your kingdom. It also has adorable graphics, catchy music, and charming voice acting. The game is free to play, but it also offers optional in-app purchases for players who want to enhance their experience.</p>
105
- <p>If you are interested in playing Cookie Run: Kingdom, you can download it from the App Store or Google Play Store depending on your device. The game requires iOS 13.0 or later or Android 4.4 or later to run. The game also supports iPadOS 13.0 or later and macOS 11.0 or later with Apple M1 chip or later.</p>
106
- <p>We hope this article has helped you learn more about Cookie Run: Kingdom and how to download it. If you have any questions or feedback about the game or the article, feel free to leave a comment below. Happy gaming!</p>
107
- <h2>FAQs</h2>
108
- <p>Here are some frequently asked questions about Cookie Run: Kingdom:</p>
109
- <ol>
110
- <li><b>How do I get more cookies in Cookie Run: Kingdom?</b></li>
111
- <p>You can get more cookies in Cookie Run: Kingdom by completing quests, clearing areas on the world map, opening chests or gacha boxes, participating in events or promotions, joining a guild or inviting friends, or buying them with crystals.</p>
112
- <li><b>How do I upgrade my kingdom in Cookie Run: Kingdom?</b></li>
113
- <p>You can upgrade your kingdom in Cookie Run: Kingdom by building and upgrading various buildings and decorations. You can use coins and materials to construct and improve your buildings. You can also use crystals to speed up the process or buy more slots. You can also unlock new areas and features by increasing your kingdom level and population.</p>
114
- <li><b>How do I win battles in Cookie Run: Kingdom?</b></li>
115
- <p>You can win battles in Cookie Run: Kingdom by creating a balanced and powerful team of cookies with different roles, such as attackers, defenders, healers, and supporters. You can also equip them with toppings that boost their stats and abilities. You can also use treasures to give your cookies special effects. You can control your cookies manually or let them fight automatically. You can also use skills and items to help your cookies during battles.</p>
116
- <li><b>How do I play with other players in Cookie Run: Kingdom?</b></li>
117
- <p>You can play with other players in Cookie Run: Kingdom by joining a guild and cooperating with your guild members. You can chat with your guild members, exchange gifts, request help, participate in guild battles, and access exclusive guild features. You can also earn rewards such as coins, crystals, star jellies, soulstones, treasures, toppings, materials, and new cookies by contributing to your guild.</p>
118
- <li><b>How do I get more crystals in Cookie Run: Kingdom?</b></li>
119
- <p>You can get more crystals in Cookie Run: Kingdom by completing quests, clearing areas on the world map, opening chests or gacha boxes, participating in events or promotions, joining a guild or inviting friends, or buying them with real money.</p>
120
- <li><b>Is Cookie Run: Kingdom safe for kids?</b></li>
121
- <p>Cookie Run: Kingdom is a game that is suitable for kids of all ages. It has cute and colorful graphics, engaging and diverse gameplay, lovable and diverse characters, and an immersive and rich story. It also has fun and social features that allow kids to interact with other players in a friendly and respectful manner. However, parents should be aware that the game also has some elements that might require parental guidance or supervision, such as violence, gambling, spending, or addiction. Parents should also monitor their kids' screen time and online activity to ensure their safety and well-being.</p> 197e85843d<br />
122
- <br />
123
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Getting Over It for Free and Experience the Ultimate Challenge.md DELETED
@@ -1,108 +0,0 @@
1
-
2
- <h1>Getting Over It Free Download 2022 Latest Version</h1>
3
- <p>Have you ever heard of <strong>Getting Over It with Bennett Foddy</strong>? If not, you are missing out on one of the most unique, frustrating, hilarious, and philosophical games ever made. If yes, you probably know how hard it is to beat this game, let alone get it for free. But don't worry, in this article, I will tell you everything you need to know about this game, why you should play it, how to get it for free, and how to play it better. So sit back, relax, and get ready to climb some mountains with nothing but a hammer and a pot.</p>
4
- <h2>getting over it free download 2022 latest version</h2><br /><p><b><b>Download</b> &#10031;&#10031;&#10031; <a href="https://urlin.us/2uSWpX">https://urlin.us/2uSWpX</a></b></p><br /><br />
5
- <h2>What is Getting Over It with Bennett Foddy?</h2>
6
- <p>Getting Over It with Bennett Foddy is a game that was released in 2017 by <strong>Bennett Foddy</strong>, an Australian game designer who is also known for creating other games like QWOP, GIRP, CLOP, and Pole Riders. He describes his game as "a game I made for a certain kind of person. To hurt them." Sounds intriguing, right? Let's see what this game is all about.</p>
7
- <h3>A brief introduction to the game and its developer</h3>
8
- <p>The game is inspired by <strong>Sexy Hiking</strong>, a 2002 B-Game classic by Jazzuo, where you control a man who tries to climb a mountain using only a hammer. Foddy decided to make his own version of this game as a homage and as an experiment. He wanted to create a game that would challenge the players' patience, skill, perseverance, and sanity. He also wanted to explore the themes of frustration, failure, progress, reward, philosophy, humor, art, and culture in video games.</p>
9
- <h3>The gameplay and the controls</h3>
10
- <p>The gameplay is very simple: you play as <strong>Diogenes</strong>, a man who sits in a metal pot and holds a sledgehammer. Your goal is to climb up an enormous mountain that is filled with various obstacles like rocks, trees, pipes, furniture, buildings, etc. You move the hammer with your mouse or trackpad (or your finger if you play on mobile), and that's all.</p> <p>The game has no checkpoints, no saves, no levels, no tutorials, no hints, no maps, no menus, no options, no scores, no achievements, no rewards, no endings. It's just you and the mountain. And the hammer. And the pot. And the gravity. And the physics. And the bugs. And the glitches. And the lag. And the rage.</p>
11
- <h3>The difficulty and the frustration</h3>
12
- <p>The game is extremely hard. Not because of the complexity or the design, but because of the simplicity and the execution. The game relies on your mouse movement and your muscle memory to control the hammer. The slightest mistake or miscalculation can send you flying back to the bottom of the mountain, losing hours of progress in seconds. The game is unforgiving, unpredictable, and unfair. It will test your limits, your skills, your patience, your willpower, your emotions, and your sanity.</p>
13
- <h3>The narration and the philosophy</h3>
14
- <p>The game is not silent. As you play, you will hear the voice of <strong>Bennett Foddy</strong> himself, who narrates your journey with his calm and soothing voice. He will comment on your actions, your failures, your successes, your thoughts, and your feelings. He will also share with you some quotes, anecdotes, stories, jokes, facts, opinions, and insights about various topics related to the game and life in general. He will make you laugh, he will make you think, he will make you question, he will make you angry, he will make you sad, he will make you curious, he will make you inspired.</p>
15
- <p>getting over it with bennett foddy free download 2022<br />
16
- how to download getting over it for free on android 2022<br />
17
- getting over it apk free download latest version 2022<br />
18
- getting over it pc game free download full version 2022<br />
19
- getting over it free download for windows 10 2022<br />
20
- getting over it free download for mac 2022<br />
21
- getting over it free download no virus 2022<br />
22
- getting over it free download google drive 2022<br />
23
- getting over it free download mega.nz 2022<br />
24
- getting over it free download steamunlocked 2022<br />
25
- getting over it free download ocean of games 2022<br />
26
- getting over it free download igg games 2022<br />
27
- getting over it free download skidrow reloaded 2022<br />
28
- getting over it free download fitgirl repack 2022<br />
29
- getting over it free download highly compressed 2022<br />
30
- getting over it free download crack only 2022<br />
31
- getting over it free download update patch 2022<br />
32
- getting over it free download mod apk 2022<br />
33
- getting over it free download unlimited hammer 2022<br />
34
- getting over it free download cheat engine 2022<br />
35
- getting over it free download speedrun mode 2022<br />
36
- getting over it free download multiplayer mod 2022<br />
37
- getting over it free download custom maps 2022<br />
38
- getting over it free download new levels 2022<br />
39
- getting over it free download new music 2022<br />
40
- getting over it free download no commentary 2022<br />
41
- getting over it free download walkthrough guide 2022<br />
42
- getting over it free download tips and tricks 2022<br />
43
- getting over it free download best settings 2022<br />
44
- getting over it free download system requirements 2022<br />
45
- getting over it free download review and rating 2022<br />
46
- getting over it free download gameplay video 2022<br />
47
- getting over it free download trailer and teaser 2022<br />
48
- getting over it free download official website 2022<br />
49
- getting over it free download developer blog 2022<br />
50
- getting over it free download news and updates 2022<br />
51
- getting over it free download release date and time 2022<br />
52
- getting over it free download pre order and bonus 2022<br />
53
- getting over it free download discount and coupon code 2022<br />
54
- getting over it free download giveaway and contest 2022<br />
55
- getting over it free download fan art and memes 2022<br />
56
- getting over it free download merchandise and accessories 2022<br />
57
- getting over it free download soundtrack and theme song 2022<br />
58
- getting over it free download easter eggs and secrets 2022<br />
59
- getting over it free download achievements and trophies 2022<br />
60
- getting over it free download leaderboard and ranking 2022<br />
61
- getting over it free download community and forum 2022<br />
62
- getting over it free download feedback and support 2022</p>
63
- <h2>Why should you play Getting Over It with Bennett Foddy?</h2>
64
- <p>Now that you know what this game is and how it works, you might be wondering: why should I play this game? What's the point? What's the fun? What's the reward? Well, there are many reasons why you should play this game, depending on what kind of person you are and what kind of experience you are looking for. Here are some of them:</p>
65
- <h3>The challenge and the reward</h3>
66
- <p>If you are a person who loves a good challenge and a sense of accomplishment, this game is for you. This game is one of the hardest games ever made, and beating it is a feat that only a few people in the world have achieved. This game will push you to your limits and beyond, and it will make you feel every emotion possible along the way. This game will make you suffer, but it will also make you grow. This game will make you hate it, but it will also make you love it. This game will make you cry, but it will also make you smile. This game will make you quit, but it will also make you come back. This game will make you lose everything, but it will also make you gain something priceless: <strong>the satisfaction of overcoming yourself</strong>.</p>
67
- <h3>The humor and the references</h3>
68
- <p>If you are a person who loves a good laugh and a dose of culture, this game is for you. This game is full of humor and references that will tickle your funny bone and stimulate your brain. The game is full of jokes and puns that are related to the gameplay and the theme of frustration. The game is full of references and homages to other games, movies, books, songs, art, history, philosophy, and more. The game is full of surprises and secrets that will reward your curiosity and exploration. The game is full of irony and sarcasm that will make you laugh at yourself and the world. The game is full of wisdom and insight that will make you think and learn. The game is full of fun and entertainment that will make you enjoy and appreciate.</p>
69
- <h3>The exploration and the secrets</h3>
70
- <p>If you are a person who loves to discover new things and uncover hidden mysteries, this game is for you. This game is not just a linear climb up a mountain. It is also a nonlinear journey through a rich and diverse world that is full of secrets and Easter eggs. The game has many paths, branches, shortcuts, detours, loops, dead ends, and hidden areas that you can explore and find. The game has many objects, items, characters, sounds, music, and dialogues that you can interact with and learn from. The game has many secrets, puzzles, codes, clues, hints, messages, and meanings that you can uncover and decipher. The game has many layers, dimensions, levels, modes, endings, and outcomes that you can experience and achieve.</p>
71
- <h3>The community and the speedruns</h3>
72
- <p>If you are a person who loves to share your experiences and compete with others, this game is for you. This game has a huge and active community of players who are passionate about this game and who support each other through their struggles and successes. You can join this community online through various platforms like YouTube, Twitch, Discord, Reddit, Steam, etc., where you can watch, chat, stream, comment, like, subscribe, follow, donate, etc., with other players who are playing this game or who have played this game before. You can also participate in this community offline through various events like conventions, meetups, workshops, etc., where you can meet, talk, play, learn, teach, etc., with other players who are interested in this game or who are experts in this game.</p>
73
- <p>One of the most popular ways to enjoy this game with the community is to do <strong>speedruns</strong>, which are attempts to complete the game as fast as possible using various techniques and strategies. Speedruns are a form of art and sport that showcase the skill and creativity of the players who perform them. Speedruns are also a form of entertainment and education that inspire and teach the viewers who watch them. Speedruns are also a form of challenge and competition that motivate and reward the participants who achieve them.</p>
74
- <p>There are many categories of speedruns for this game, such as <strong>Any%</strong>, which is the fastest way to complete the game by any means necessary, <strong>Glitchless</strong>, which is the fastest way to complete the game without using any glitches or exploits, <strong>Space%</strong>, which is the fastest way to launch yourself into space and escape the game world, and more. The current world record for Any% is 56.717 seconds by Lumord, for Glitchless is 1 minute 13.2 seconds by Blastbolt, and for Space% is 1 minute 8.7 seconds by Hitachihex. You can watch these amazing speedruns on YouTube and learn from their strategies and skills. You can also try to beat their times and submit your own speedruns to Speedrun.com, where you can compare your results with other players and see your ranking on the leaderboard.</p>
75
- <h2>How to get Getting Over It with Bennett Foddy for free?</h2>
76
- <p>Now that you know why you should play this game, you might be wondering: how can I get this game for free? Well, there are a few ways to do that, but they are not all legal, safe, or ethical. So before you decide to download this game for free, you should be aware of the risks and the precautions that you should take. Here are some of the options that you have:</p>
77
- <h3>The official platforms and prices</h3>
78
- <p>The game is officially available on several platforms, such as Steam, Humble Bundle, Epic Games Store, itch.io, Google Play Store, and Apple App Store. The game costs $7.99 on Steam, Humble Bundle, Epic Games Store, and itch.io, $4.99 on Google Play Store, and $3.99 on Apple App Store. However, sometimes the game goes on sale or is offered for free on some of these platforms. For example, in December 2020, the game was free on Epic Games Store for a limited time. So if you want to get this game for free legally and ethically, you should keep an eye on these platforms and wait for a good deal or a giveaway.</p>
79
- <h3>The alternative platforms and sources</h3>
80
- <p>The game is also available on some alternative platforms and sources that are not official or authorized by the developer. These include torrent sites, file-sharing sites, modded APK sites, emulator sites, etc. These platforms and sources allow you to download the game for free without paying anything or going through any verification process. However, these platforms and sources are also illegal, unsafe, and unethical. They violate the intellectual property rights of the developer and the publisher of the game. They expose you to the risk of malware, viruses, spyware, adware, etc., that can harm your device or your data. They also deprive the developer and the publisher of the revenue that they deserve for their hard work and creativity.</p>
81
- <h3>The risks and the precautions</h3>
82
- <p>If you decide to download this game for free from an alternative platform or source, you should be aware of the risks and the precautions that you should take. Here are some of them:</p> <p><strong>The risks:</strong></p> <ul><li>You could get sued or fined for piracy or copyright infringement.</li><li>You could get infected with malware or viruses that could damage your device or steal your data.</li><li>You could get banned or suspended from the official platforms or services that you use to play the game.</li><li>You could miss out on the updates, patches, bug fixes, features, content, etc., that the developer provides for the game.</li><li>You could have a poor or incomplete gaming experience due to glitches, errors, crashes, etc., that are not fixed or optimized by the developer.</li></ul> <p><strong>The precautions:</strong></p> <ul><li>You should use a VPN or a proxy to hide your IP address and location from the authorities or the hackers.</li><li>You should use an antivirus or a firewall to protect your device and your data from malware or viruses.</li><li>You should backup your device and your data regularly in case of any damage or loss.</li><li>You should check the reviews, ratings, comments, feedback, etc., of the platform or source that you use to download the game to make sure it is reliable and trustworthy.</li><li>You should scan the file or the app that you download for any malware or viruses before installing or running it.</li></ul> <h2>How to play Getting Over It with Bennett Foddy better?</h2>
83
- <p>Finally, if you have managed to get this game for free (or paid for it) and want to play it better, here are some tips and tricks that can help you improve your skills and performance. These tips and tricks are based on my own experience and research, as well as the advice and guidance of other players who have mastered this game. Here they are:</p>
84
- <h3>The tips and tricks for beginners</h3>
85
- <p>If you are new to this game or have not played it much, here are some tips and tricks that can help you get started and make some progress:</p>
86
- <ul><li><strong>Practice.</strong> This is the most important tip for this game. The only way to get better at this game is to practice a lot. The more you play, the more you learn, the more you improve. Practice makes perfect, or at least better.</li><li><strong>Be patient.</strong> This is another crucial tip for this game. This game is not meant to be easy or fast. It is meant to be hard and slow. It will take you a long time to beat this game, if ever. So don't rush, don't panic, don't give up. Be patient with yourself and with the game.</li><li><strong>Be calm.</strong> This is also a vital tip for this game. This game is designed to frustrate you and make you angry. It will test your nerves and your emotions. So don't let it get to you. Be calm and composed. Breathe deeply, relax your muscles, clear your mind. Don't let the game control you, control yourself.</li><li><strong>Be positive.</strong> This is also a helpful tip for this game. This game is full of negativity and pessimism. It will make you feel bad and hopeless. So don't let it affect you. Be positive and optimistic. Focus on the good things, not the bad things. Celebrate your achievements, not your failures. Enjoy the journey, not the destination.</li><li><strong>Be creative.</strong> This is also a fun tip for this game. This game is full of possibilities and opportunities. It will let you explore and experiment with different ways of playing and moving. So don't be afraid to try new things and be creative. Use your imagination and your intuition. Find your own style and your own solutions.</li></ul> <h3>The advanced techniques for experts</h3>
87
- <p>If you are already familiar with this game or have played it a lot, here are some advanced techniques that can help you play faster and better:</p>
88
- <ul><li><strong>Pogoing.</strong> This is a technique that involves using the hammer as a spring to bounce yourself up in the air. This technique can help you gain height and speed quickly and easily. To do this technique, you need to swing the hammer downwards behind you while lifting yourself up with the pot, then release the hammer when it hits the ground to launch yourself up in the air.</li><li><strong>Flying.</strong> This is a technique that involves using the hammer as a propeller to fly yourself across long distances. This technique can help you skip large sections of the mountain and save time and effort. To do this technique, you need to swing the hammer in circles around you while moving yourself forward with the pot, then adjust the angle and direction of the hammer to steer yourself in the air.</li><li><strong>Hooking.</strong> This is a technique that involves using the hammer as a hook to grab onto objects and pull yourself towards them. This technique can help you climb steep slopes and overcome tricky obstacles. To do this technique, you need to swing the hammer towards an object that you want to hook onto, then hold the hammer when it touches the object to attach yourself to it, then pull the hammer towards you while pushing yourself forward with the pot to move yourself closer to the object.</li><li><strong>Flipping.</strong> This is a technique that involves using the hammer as a lever to flip yourself over objects or gaps. This technique can help you avoid falling or getting stuck in certain situations. To do this technique, you need to swing the hammer upwards in front of you while lowering yourself down with the pot, then release the hammer when it reaches the top of its arc to flip yourself over in the air, then catch yourself with the hammer on the other side of the object or gap.</li><li><strong>Sliding.</strong> This is a technique that involves using the hammer as a brake to slide down slopes or surfaces. This technique can help you control your speed and direction when descending or moving horizontally. To do this technique, you need to swing the hammer downwards in front of you while moving yourself down or sideways with the pot, then hold the hammer when it touches the ground or the surface to slow yourself down and steer yourself.</li></ul> <h3>The resources and guides for learning</h3>
89
- <p>If you want to learn more about these techniques and other aspects of this game, there are many resources and guides that you can use to improve your knowledge and skills. Here are some of them:</p>
90
- <ul><li>The official website of the game, where you can find information about the game, the developer, the platforms, the updates, etc.</li><li>The official wiki of the game, where you can find information about the gameplay, the controls, the narration, the references, etc.</li><li>The official subreddit of the game, where you can find discussions, questions, answers, tips, tricks, videos, memes, fan art, etc., related to the game.</li><li>The official YouTube channel of the developer, where you can find videos of him playing and talking about his games, including this one.</li><li>The unofficial YouTube channels of other players, where you can find videos of them playing and speedrunning this game, as well as tutorials and guides on how to play better.</li><li>The unofficial Discord server of the game, where you can chat and voice chat with other players who are playing or have played this game.</li><li>The unofficial Steam community hub of the game, where you can find reviews, ratings, comments, feedback, screenshots, videos, etc., related to the game.</li></ul> <h2>Conclusion</h2>
91
- <p>Getting Over It with Bennett Foddy is a game that is not for everyone. It is a game that will make you love it or hate it. It is a game that will make you happy or sad. It is a game that will make you laugh or cry. It is a game that will make you think or feel. It is a game that will make you succeed or fail. It is a game that will make you get over it or not.</p>
92
- <p>But whatever your reaction or outcome is, this game is worth trying. It is a game that will challenge you and reward you. It is a game that will entertain you and educate you. It is a game that will surprise you and inspire you. It is a game that will change you and stay with you.</p>
93
- <p>So if you are interested in this game, I hope this article has helped you learn more about it and how to get it for free and play it better. If not, I hope this article has at least made you curious and amused. Either way, I thank you for reading this article and I wish you all the best in your gaming adventures.</p>
94
- <p>Now go ahead and try Getting Over It with Bennett Foddy for yourself. And remember: don't hate the player, don't hate the game, just hate yourself.</p>
95
- <h2>FAQs</h2>
96
- <p>Here are some frequently asked questions about Getting Over It with Bennett Foddy:</p>
97
- <h3>Q: Is there an end to this game?</h3>
98
- <p>A: Yes, there is an end to this game. There is a final obstacle at the top of the mountain that marks the end of the game. However, reaching the end of the game is not easy, and it requires a lot of skill and luck. Also, the end of the game is not the same for everyone, as it depends on your choices and actions. There are different endings that you can get, depending on what you do at the end of the game. Some endings are more satisfying than others, some endings are more secret than others, and some endings are more meta than others. I won't spoil them for you, but I will say that they are worth seeing for yourself.</p>
99
- <h3>Q: What happens if you fall down the mountain?</h3>
100
- <p>A: If you fall down the mountain, you will lose some or all of your progress, depending on how far you fall and where you land. You will also hear Bennett Foddy's voice commenting on your fall and giving you some words of encouragement or discouragement. Sometimes, he will also play some music or sound effects to accompany your fall. The music and sound effects are usually related to the theme or the mood of your fall, and they can be either soothing or annoying. You can mute the music and sound effects if you want, but you can't mute Bennett Foddy's voice.</p>
101
- <h3>Q: How long does it take to beat this game?</h3>
102
- <p>A: The time it takes to beat this game varies from person to person, depending on their skill level, experience, strategy, luck, etc. Some people can beat this game in less than a minute, some people can beat this game in a few hours, some people can beat this game in a few days, some people can beat this game in a few weeks, some people can beat this game in a few months, some people can beat this game in a few years, and some people can never beat this game at all. The average time it takes to beat this game is around 5 hours, but that doesn't mean that you will beat this game in 5 hours. You might beat this game faster or slower than that, or you might not beat this game at all.</p>
103
- <h3>Q: Is this game based on a true story?</h3>
104
- <p>A: No, this game is not based on a true story. This game is a fictional work of art that is inspired by other works of art and culture. However, some elements of this game are based on or related to real-life facts or events. For example, the main character of this game, Diogenes, is named after a famous Greek philosopher who lived in a barrel and rejected social norms. The hammer that he uses is a reference to Thor's hammer from Norse mythology and Marvel comics. The pot that he sits in is a reference to a Chinese legend about a man who was trapped in a bronze pot by his enemies. The mountain that he climbs is a collage of various objects and scenes from different games, movies, books, songs, art, history, philosophy, and more. The narration that he hears is a mix of original and quoted texts from various sources and authors. The game itself is a homage to Sexy Hiking, a 2002 B-Game classic by Jazzuo. So while this game is not based on a true story, it is based on a lot of true stories.</p>
105
- <h3>Q: Is this game a joke or a serious game?</h3>
106
- <p>A: This game is both a joke and a serious game. It is a joke because it is full of humor and absurdity that makes fun of itself and other games. It is also a serious game because it is full of meaning and depth that explores various themes and topics related to gaming and life. This game is a paradox and a contradiction that defies easy categorization and interpretation. It is a game that makes you laugh and cry, think and feel, love and hate, succeed and fail, get over it or not.</p> 197e85843d<br />
107
- <br />
108
- <br />
spaces/1phancelerku/anime-remove-background/Explore Hunt and Collect Dinosaurs in Dinosaur Hunter 3D.md DELETED
@@ -1,115 +0,0 @@
1
-
2
- <h1>Dinosaur Hunter 3D Game Download: A Guide for Dino Lovers</h1>
3
- <p>Do you love dinosaurs? Do you want to experience the thrill of hunting them in a realistic 3D environment? If yes, then you should try Dinosaur Hunter 3D, one of the best dinosaur hunting games available on Android and iOS devices. In this article, we will tell you everything you need to know about this amazing game, including how to download and install it, how to play it, and some tips and tricks to help you become a master dinosaur hunter.</p>
4
- <h2>dinosaur hunter 3d game download</h2><br /><p><b><b>Download</b> &#10037; <a href="https://jinyurl.com/2uNT8d">https://jinyurl.com/2uNT8d</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <p>Dinosaurs are fascinating creatures that have captivated the imagination of many people for centuries. They were the dominant animals on Earth for millions of years, until they went extinct about 65 million years ago. However, thanks to modern technology, we can now bring them back to life in the form of video games. One of these games is Dinosaur Hunter 3D, a hunting simulation game that lets you explore different environments and hunt down various types of dinosaurs.</p>
7
- <h3>What is Dinosaur Hunter 3D?</h3>
8
- <p>Dinosaur Hunter 3D is a free-to-play game developed by ZG Games. It is a realistic and immersive hunting game that features stunning graphics, realistic sounds, and smooth controls. You can choose from different modes, such as survival, campaign, or free hunt, and hunt in different environments, such as jungle, desert, or snow. You can also select from a wide range of weapons and equipment, such as rifles, shotguns, bows, grenades, night vision goggles, camouflage, and more. You can hunt different kinds of dinosaurs, such as T-Rex, Velociraptor, Triceratops, Spinosaurus, and more. You can also collect coins and trophies for your achievements and use them to upgrade your weapons and equipment.</p>
9
- <h3>Why should you play Dinosaur Hunter 3D?</h3>
10
- <p>Dinosaur Hunter 3D is a game that will appeal to anyone who loves dinosaurs, hunting, or adventure. It is a game that will challenge your skills, test your reflexes, and stimulate your senses. It is a game that will make you feel like you are in a real dinosaur world, where you have to survive and hunt these majestic beasts. It is a game that will provide you with hours of fun and entertainment. Here are some of the reasons why you should play Dinosaur Hunter 3D:</p>
11
- <ul>
12
- <li>It is free to download and play.</li>
13
- <li>It has amazing graphics and sounds that create a realistic atmosphere.</li>
14
- <li>It has different modes and environments that offer variety and replay value.</li>
15
- <li>It has a wide range of weapons and equipment that suit different preferences and styles.</li>
16
- <li>It has different types of dinosaurs that have different behaviors and characteristics.</li>
17
- <li>It has coins and trophies that reward your performance and allow you to upgrade your gear.</li>
18
- </ul>
19
- <h2>How to download and install Dinosaur Hunter 3D?</h2>
20
- <p>Dinosaur Hunter 3D is available for both Android and iOS devices. You can download and install it easily by following these simple steps:</p>
21
- <h3>For Android devices</h3>
22
- <h4>Step 1: Go to Google Play Store</h4>
23
- <p>Open the Google Play Store app on your Android device and make sure you are signed in with your Google account.</p>
24
- <h4>Step 2: Search for Dinosaur Hunter 3D</h4>
25
- <p>Type "Dinosaur Hunter 3D" in the search bar and tap on the game icon that appears in the results.</p>
26
- <p>dinosaur hunter 3d game download for pc<br />
27
- dinosaur hunter 3d game download for android<br />
28
- dinosaur hunter 3d game download apk<br />
29
- dinosaur hunter 3d game download free<br />
30
- dinosaur hunter 3d game download offline<br />
31
- dinosaur hunter 3d game download mod apk<br />
32
- dinosaur hunter 3d game download for windows 10<br />
33
- dinosaur hunter 3d game download for ios<br />
34
- dinosaur hunter 3d game download full version<br />
35
- dinosaur hunter 3d game download for laptop<br />
36
- dinosaur hunter 3d game download online<br />
37
- dinosaur hunter 3d game download latest version<br />
38
- dinosaur hunter 3d game download for mac<br />
39
- dinosaur hunter 3d game download without internet<br />
40
- dinosaur hunter 3d game download hack<br />
41
- dinosaur hunter 3d game download play store<br />
42
- dinosaur hunter 3d game download for pc windows 7<br />
43
- dinosaur hunter 3d game download unlimited money<br />
44
- dinosaur hunter 3d game download highly compressed<br />
45
- dinosaur hunter 3d game download uptodown<br />
46
- dinosaur hunter 3d game download review<br />
47
- dinosaur hunter 3d game download for pc windows 10 free<br />
48
- dinosaur hunter 3d game download size<br />
49
- dinosaur hunter 3d game download best graphics<br />
50
- dinosaur hunter 3d game download new update<br />
51
- dinosaur hunter 3d game download cheats<br />
52
- dinosaur hunter 3d game download for pc windows xp<br />
53
- dinosaur hunter 3d game download no ads<br />
54
- dinosaur hunter 3d game download rexdl<br />
55
- dinosaur hunter 3d game download tips and tricks<br />
56
- dinosaur hunter 3d game download for pc windows 8.1<br />
57
- dinosaur hunter 3d game download features<br />
58
- dinosaur hunter 3d game download system requirements<br />
59
- dinosaur hunter 3d game download gameplay<br />
60
- dinosaur hunter 3d game download trailer<br />
61
- dinosaur hunter 3d game download for pc windows vista<br />
62
- dinosaur hunter 3d game download guide<br />
63
- dinosaur hunter 3d game download how to play<br />
64
- dinosaur hunter 3d game download screenshots<br />
65
- dinosaur hunter 3d game download rating</p>
66
- <h4>Step 3: Tap on Install and wait for the download to finish</h4>
67
- <p>Tap on the green Install button and accept the permissions required by the game. Wait for the download to finish, which may take a few minutes depending on your internet speed and device storage.</p>
68
- <h4>Step 4: Open the game and enjoy hunting dinosaurs</h4>
69
- <p>Once the installation is complete, you can open the game by tapping on the Open button or by finding it in your app drawer. You can now start playing Dinosaur Hunter 3D and have fun hunting dinosaurs.</p>
70
- <h3>For iOS devices</h3>
71
- <h4>Step 1: Go to App Store</h4>
72
- <p>Open the App Store app on your iOS device and make sure you are signed in with your Apple ID.</p>
73
- <h4>Step 2: Search for Dinosaur Hunter 3D</h4>
74
- <p>Type "Dinosaur Hunter 3D" in the search bar and tap on the game icon that appears in the results.</p>
75
- <h4>Step 3: Tap on Get and wait for the download to finish</h4>
76
- <p>Tap on the blue Get button and enter your Apple ID password or use Touch ID or Face ID to confirm. Wait for the download to finish, which may take a few minutes depending on your internet speed and device storage.</p>
77
- <h4>Step 4: Open the game and enjoy hunting dinosaurs</h4>
78
- <p>Once the installation is complete, you can open the game by tapping on the Open button or by finding it in your home screen. You can now start playing Dinosaur Hunter 3D and have fun hunting dinosaurs.</p>
79
- <h2>How to play Dinosaur Hunter 3D?</h2>
80
- <p>Dinosaur Hunter 3D is a simple and intuitive game that anyone can play. Here are some of the basic steps to play the game:</p>
81
- <h3>Choose your mode and environment</h3>
82
- <p>When you launch the game, you will see three options: Survival, Campaign, and Free Hunt. You can choose any of them depending on your preference and mood. Survival mode is where you have to survive as long as possible against waves of dinosaurs. Campaign mode is where you have to complete different missions and objectives in various environments. Free Hunt mode is where you can hunt any dinosaur you want without any restrictions or goals. You can also choose from different environments, such as jungle, desert, or snow, each with its own challenges and scenery.</p>
83
- <h3>Select your weapon and equipment</h3>
84
- <p>Before you start hunting, you have to select your weapon and equipment from the inventory. You can choose from a variety of weapons, such as rifles, shotguns, bows, grenades, etc., each with its own advantages and disadvantages. You can also choose from different equipment, such as night vision goggles, camouflage, medkits, etc., each with its own uses and benefits. You can upgrade your weapons and equipment using coins that you earn from hunting dinosaurs.</p>
85
- <h3>Hunt down different types of dinosaurs</h3>
86
- <p>Once you are ready, you can start hunting dinosaurs in your chosen mode and environment. You will see a radar on the top left corner of your screen that shows you the location of nearby dinosaurs. You can also use binoculars to zoom in and spot them from a distance. You have to aim carefully and shoot them before they notice you or run away. You can also use grenades or other explosives to cause more damage or lure them into traps. You have to be careful not to get too close to them or they will attack you back. You can use medkits to heal yourself if you get injured.</p>
87
- <h3>Earn coins and trophies for your achievements</h3>
88
- <p>As you hunt dinosaurs, you will earn coins and trophies for your achievements. Coins are used to upgrade your weapons and equipment, while trophies are used to unlock new modes and environments. You can also compare your scores and achievements with other players on the leaderboard and challenge your friends to beat your records. You can also share your hunting screenshots and videos on social media and show off your skills.</p>
89
- <h2>Tips and tricks for playing Dinosaur Hunter 3D</h2>
90
- <p>Dinosaur Hunter 3D is a game that requires strategy, skill, and patience. Here are some tips and tricks that will help you improve your game and become a better dinosaur hunter:</p>
91
- <h3>Use the radar to locate your prey</h3>
92
- <p>The radar is a very useful tool that shows you the direction and distance of the nearest dinosaurs. You can use it to plan your approach and avoid wasting time and ammo. You can also use it to avoid dangerous dinosaurs that are too big or too fast for you to handle.</p>
93
- <h3>Aim for the head or the heart for a quick kill</h3>
94
- <p>The best way to kill a dinosaur is to aim for its vital organs, such as the head or the heart. This will cause more damage and make them die faster. You can also use the binoculars to zoom in and see where these organs are located on different dinosaurs. However, be careful not to miss or hit the wrong spot, as this will alert them and make them run away or attack you.</p>
95
- <h3>Avoid getting too close to the dinosaurs or they will attack you</h3>
96
- <p>Dinosaurs are not friendly creatures and they will not hesitate to attack you if they sense your presence. You have to keep a safe distance from them and use your weapons wisely. If you get too close, they will charge at you, bite you, or stomp on you, causing you to lose health or die. You can use grenades or other explosives to create some distance or distract them, but be careful not to hurt yourself in the process.</p>
97
- <h3>Upgrade your weapons and equipment to improve your performance</h3>
98
- <p>As you progress in the game, you will face more challenging dinosaurs that require more powerful weapons and equipment. You can use the coins that you earn from hunting dinosaurs to upgrade your weapons and equipment in the inventory. You can increase their damage, accuracy, range, capacity, reload speed, etc. You can also buy new weapons and equipment that suit your style and preference.</p>
99
- <h2>Conclusion</h2>
100
- <p>Dinosaur Hunter 3D is a game that will satisfy your curiosity and passion for dinosaurs. It is a game that will let you experience the thrill and excitement of hunting these ancient creatures in a realistic 3D environment. It is a game that will challenge your skills, test your reflexes, and stimulate your senses. It is a game that will provide you with hours of fun and entertainment. If you are looking for a game that combines adventure, action, and simulation, then Dinosaur Hunter 3D is the game for you. Download it now and start hunting dinosaurs!</p>
101
- <h2>FAQs</h2>
102
- <p>Here are some of the frequently asked questions about Dinosaur Hunter 3D:</p>
103
- <ul>
104
- <li><b>Q: Is Dinosaur Hunter 3D free to play?</b></li>
105
- <li>A: Yes, Dinosaur Hunter 3D is free to download and play on both Android and iOS devices. However, it contains ads and in-app purchases that can enhance your gaming experience.</li>
106
- <li><b>Q: How many dinosaurs are there in Dinosaur Hunter 3D?</b></li>
107
- <li>A: There are more than 20 different types of dinosaurs in Dinosaur Hunter 3D, each with its own appearance, behavior, and difficulty level. Some of them are T-Rex, Velociraptor, Triceratops, Spinosaurus, Brachiosaurus, etc.</li>
108
- <li><b>Q: How many modes and environments are there in Dinosaur Hunter 3D?</b></li>
109
- <li>A: There are three modes in Dinosaur Hunter 3D: Survival, Campaign, and Free Hunt. Survival mode is where you have to survive as long as possible against waves of dinosaurs. Campaign mode is where you have to complete different missions and objectives in various environments. Free Hunt mode is where you can hunt any dinosaur you want without any restrictions or goals. There are also three environments in Dinosaur Hunter 3D: Jungle, Desert, and Snow. Each environment has its own challenges and scenery.</li>
110
- <li><b>Q: How do I upgrade my weapons and equipment in Dinosaur Hunter 3D?</b></li>
111
- <li>A: You can upgrade your weapons and equipment in the inventory using coins that you earn from hunting dinosaurs. You can increase their damage, accuracy, range, capacity, reload speed, etc. You can also buy new weapons and equipment that suit your style and preference.</li>
112
- <li><b>Q: How do I share my hunting screenshots and videos on social media?</b></li>
113
- <li>A: You can share your hunting screenshots and videos on social media by tapping on the Share button on the top right corner of your screen. You can choose from different platforms, such as Facebook, Twitter, Instagram, etc. You can also add captions and hashtags to your posts and tag your friends. You can also invite your friends to play Dinosaur Hunter 3D and challenge them to beat your scores and achievements.</p> 401be4b1e0<br />
114
- <br />
115
- <br />
spaces/2ndelement/voicevox/test/test_word_types.py DELETED
@@ -1,9 +0,0 @@
1
- from unittest import TestCase
2
-
3
- from voicevox_engine.model import WordTypes
4
- from voicevox_engine.part_of_speech_data import part_of_speech_data
5
-
6
-
7
- class TestWordTypes(TestCase):
8
- def test_word_types(self):
9
- self.assertCountEqual(list(WordTypes), list(part_of_speech_data.keys()))
spaces/AIFILMS/StyleGANEX/models/mtcnn/mtcnn_pytorch/src/__init__.py DELETED
@@ -1,2 +0,0 @@
1
- from .visualization_utils import show_bboxes
2
- from .detector import detect_faces
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb8-150e_deepfashion2_short_sleeved_outwear_256x192/__init__.py DELETED
File without changes
spaces/AUBADA-ALARABI/poetry20233/app.py DELETED
@@ -1,53 +0,0 @@
1
- import gc
2
- import gradio as gr
3
- from transformers import pipeline, set_seed
4
-
5
- pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')
6
- #gc.collect()
7
- samples = [['أنت'
8
- ,1.0, 50, 1.0, 1.0, 114],['هل غادر'
9
- ,1.0, 50, 1.0, 1.0, 114 ],['ألا ليت'
10
- ,1.0, 50, 1.0, 1.0, 114 ],['يا قدس'
11
- ,1.0, 50, 1.0, 1.0, 114],['عيد بأية حال'
12
- ,1.0, 50, 1.0, 1.0, 114],['لكل شيء إذا ما'
13
- ,1.0, 50, 1.0, 1.0, 114 ],['.'
14
- ,1.0, 50, 1.0, 1.0, 114]]
15
-
16
- notes = """
17
- - Enter a short prompt or select (click) one of the examples and click SEND
18
- - Adjust parameters (temperture, top k, top p and penalty) through the slider (keep close to default values).
19
- - For the same seed (randomness), the same output is regenerated if other parameters are fixed
20
- - Clear and enter new prompt or select another example and SEND to regenerate
21
- - The '.' means start a new line from no prompt (your prompt need not be long)
22
- - Be patient: this runs on CPU (free tier)
23
- - Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859)
24
- - Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk.
25
- """
26
- def sayPoetry(prompt, temp=1.0, topk = 50, topp = 1.0, penalty=1.0, seed=114):
27
- if not int(seed) >= 0: seed=114
28
- set_seed(seed)
29
- gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty,
30
- min_length = 64, no_repeat_ngram_size = 3, return_full_text=True,
31
- num_beams=5, num_return_sequences=1)[0]["generated_text"]
32
- poetry =""
33
- for line in gen.split('.')[:-1]:
34
- poetry += line #+ "\n"
35
- return poetry
36
- poetry = gr.Interface(fn=sayPoetry,
37
- inputs=[
38
- gr.Textbox(label="Enter short prompt or select from examples:"),
39
- gr.Slider(0.70, 1.2, step=0.01,value=1.0, label='control temperature'),
40
- gr.Slider(25, 100, step=1,value=50, label='control top k'),
41
- gr.Slider(0.80, 1.0, step=0.01,value=1.0, label='control top p'),
42
- gr.Slider(0.90, 1.50, step=0.01,value=1.0, label='control penalty'),
43
- gr.Number(value=139750, precision=0, label='Seed'),
44
- ],
45
- outputs=[gr.Textbox(label="Generated Poetry:")],
46
-
47
- allow_flagging='never',
48
- title='Arabic Poetry Generation Demo (updated Jan. 2023)',
49
- description = "A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)",
50
- examples=samples,
51
- cache_examples=False,
52
- article = notes)
53
- poetry.launch() # show_error = True, debug=True
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Ails.py DELETED
@@ -1,106 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import hashlib
4
- import time
5
- import uuid
6
- import json
7
- from datetime import datetime
8
- from aiohttp import ClientSession
9
-
10
- from ..typing import SHA256, AsyncGenerator
11
- from .base_provider import AsyncGeneratorProvider
12
-
13
-
14
- class Ails(AsyncGeneratorProvider):
15
- url: str = "https://ai.ls"
16
- working = True
17
- supports_gpt_35_turbo = True
18
-
19
- @staticmethod
20
- async def create_async_generator(
21
- model: str,
22
- messages: list[dict[str, str]],
23
- stream: bool,
24
- proxy: str = None,
25
- **kwargs
26
- ) -> AsyncGenerator:
27
- headers = {
28
- "authority": "api.caipacity.com",
29
- "accept": "*/*",
30
- "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
31
- "authorization": "Bearer free",
32
- "client-id": str(uuid.uuid4()),
33
- "client-v": "0.1.278",
34
- "content-type": "application/json",
35
- "origin": "https://ai.ls",
36
- "referer": "https://ai.ls/",
37
- "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
38
- "sec-ch-ua-mobile": "?0",
39
- "sec-ch-ua-platform": '"Windows"',
40
- "sec-fetch-dest": "empty",
41
- "sec-fetch-mode": "cors",
42
- "sec-fetch-site": "cross-site",
43
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
44
- "from-url": "https://ai.ls/?chat=1"
45
- }
46
- async with ClientSession(
47
- headers=headers
48
- ) as session:
49
- timestamp = _format_timestamp(int(time.time() * 1000))
50
- json_data = {
51
- "model": "gpt-3.5-turbo",
52
- "temperature": kwargs.get("temperature", 0.6),
53
- "stream": True,
54
- "messages": messages,
55
- "d": datetime.now().strftime("%Y-%m-%d"),
56
- "t": timestamp,
57
- "s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
58
- }
59
- async with session.post(
60
- "https://api.caipacity.com/v1/chat/completions",
61
- proxy=proxy,
62
- json=json_data
63
- ) as response:
64
- response.raise_for_status()
65
- start = "data: "
66
- async for line in response.content:
67
- line = line.decode('utf-8')
68
- if line.startswith(start) and line != "data: [DONE]":
69
- line = line[len(start):-1]
70
- line = json.loads(line)
71
- token = line["choices"][0]["delta"].get("content")
72
- if token:
73
- if "ai.ls" in token or "ai.ci" in token:
74
- raise Exception("Response Error: " + token)
75
- yield token
76
-
77
-
78
- @classmethod
79
- @property
80
- def params(cls):
81
- params = [
82
- ("model", "str"),
83
- ("messages", "list[dict[str, str]]"),
84
- ("stream", "bool"),
85
- ("temperature", "float"),
86
- ]
87
- param = ", ".join([": ".join(p) for p in params])
88
- return f"g4f.provider.{cls.__name__} supports: ({param})"
89
-
90
-
91
- def _hash(json_data: dict[str, str]) -> SHA256:
92
- base_string: str = "%s:%s:%s:%s" % (
93
- json_data["t"],
94
- json_data["m"],
95
- "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf",
96
- len(json_data["m"]),
97
- )
98
-
99
- return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
100
-
101
-
102
- def _format_timestamp(timestamp: int) -> str:
103
- e = timestamp
104
- n = e % 10
105
- r = n + 1 if n % 2 == 0 else n
106
- return str(e - n + r)
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Factory.d.ts DELETED
@@ -1,7 +0,0 @@
1
- // import * as Phaser from 'phaser';
2
- import Press from "./Press";
3
-
4
- export default function (
5
- gameObject: Phaser.GameObjects.GameObject | Phaser.Scene,
6
- config?: Press.IConfig
7
- ): Press;
spaces/AlStable/AlPrompt/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Al prompt
3
- emoji: 🤗
4
- colorFrom: indigo
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Alpaca233/SadTalker/src/face3d/util/load_mats.py DELETED
@@ -1,120 +0,0 @@
1
- """This script is to load 3D face model for Deep3DFaceRecon_pytorch
2
- """
3
-
4
- import numpy as np
5
- from PIL import Image
6
- from scipy.io import loadmat, savemat
7
- from array import array
8
- import os.path as osp
9
-
10
- # load expression basis
11
- def LoadExpBasis(bfm_folder='BFM'):
12
- n_vertex = 53215
13
-     Expbin = open(osp.join(bfm_folder, 'Exp_Pca.bin'), 'rb')
-     exp_dim = array('i')
-     exp_dim.fromfile(Expbin, 1)
-     expMU = array('f')
-     expPC = array('f')
-     expMU.fromfile(Expbin, 3*n_vertex)
-     expPC.fromfile(Expbin, 3*exp_dim[0]*n_vertex)
-     Expbin.close()
-
-     expPC = np.array(expPC)
-     expPC = np.reshape(expPC, [exp_dim[0], -1])
-     expPC = np.transpose(expPC)
-
-     expEV = np.loadtxt(osp.join(bfm_folder, 'std_exp.txt'))
-
-     return expPC, expEV
-
-
- # transfer original BFM09 to our face model
- def transferBFM09(bfm_folder='BFM'):
-     print('Transfer BFM09 to BFM_model_front......')
-     original_BFM = loadmat(osp.join(bfm_folder, '01_MorphableModel.mat'))
-     shapePC = original_BFM['shapePC']  # shape basis
-     shapeEV = original_BFM['shapeEV']  # corresponding eigen value
-     shapeMU = original_BFM['shapeMU']  # mean face
-     texPC = original_BFM['texPC']  # texture basis
-     texEV = original_BFM['texEV']  # eigen value
-     texMU = original_BFM['texMU']  # mean texture
-
-     expPC, expEV = LoadExpBasis(bfm_folder)
-
-     # transfer BFM09 to our face model
-
-     idBase = shapePC*np.reshape(shapeEV, [-1, 199])
-     idBase = idBase/1e5  # unify the scale to decimeter
-     idBase = idBase[:, :80]  # use only first 80 basis
-
-     exBase = expPC*np.reshape(expEV, [-1, 79])
-     exBase = exBase/1e5  # unify the scale to decimeter
-     exBase = exBase[:, :64]  # use only first 64 basis
-
-     texBase = texPC*np.reshape(texEV, [-1, 199])
-     texBase = texBase[:, :80]  # use only first 80 basis
-
-     # our face model is cropped along face landmarks and contains only 35709 vertex.
-     # original BFM09 contains 53490 vertex, and expression basis provided by Guo et al. contains 53215 vertex.
-     # thus we select corresponding vertex to get our face model.
-
-     index_exp = loadmat(osp.join(bfm_folder, 'BFM_front_idx.mat'))
-     index_exp = index_exp['idx'].astype(np.int32) - 1  # starts from 0 (to 53215)
-
-     index_shape = loadmat(osp.join(bfm_folder, 'BFM_exp_idx.mat'))
-     index_shape = index_shape['trimIndex'].astype(
-         np.int32) - 1  # starts from 0 (to 53490)
-     index_shape = index_shape[index_exp]
-
-     idBase = np.reshape(idBase, [-1, 3, 80])
-     idBase = idBase[index_shape, :, :]
-     idBase = np.reshape(idBase, [-1, 80])
-
-     texBase = np.reshape(texBase, [-1, 3, 80])
-     texBase = texBase[index_shape, :, :]
-     texBase = np.reshape(texBase, [-1, 80])
-
-     exBase = np.reshape(exBase, [-1, 3, 64])
-     exBase = exBase[index_exp, :, :]
-     exBase = np.reshape(exBase, [-1, 64])
-
-     meanshape = np.reshape(shapeMU, [-1, 3])/1e5
-     meanshape = meanshape[index_shape, :]
-     meanshape = np.reshape(meanshape, [1, -1])
-
-     meantex = np.reshape(texMU, [-1, 3])
-     meantex = meantex[index_shape, :]
-     meantex = np.reshape(meantex, [1, -1])
-
-     # other info contains triangles, region used for computing photometric loss,
-     # region used for skin texture regularization, and 68 landmarks index etc.
-     other_info = loadmat(osp.join(bfm_folder, 'facemodel_info.mat'))
-     frontmask2_idx = other_info['frontmask2_idx']
-     skinmask = other_info['skinmask']
-     keypoints = other_info['keypoints']
-     point_buf = other_info['point_buf']
-     tri = other_info['tri']
-     tri_mask2 = other_info['tri_mask2']
-
-     # save our face model
-     savemat(osp.join(bfm_folder, 'BFM_model_front.mat'), {'meanshape': meanshape, 'meantex': meantex, 'idBase': idBase, 'exBase': exBase, 'texBase': texBase,
-             'tri': tri, 'point_buf': point_buf, 'tri_mask2': tri_mask2, 'keypoints': keypoints, 'frontmask2_idx': frontmask2_idx, 'skinmask': skinmask})
-
-
- # load landmarks for standard face, which is used for image preprocessing
- def load_lm3d(bfm_folder):
-
-     Lm3D = loadmat(osp.join(bfm_folder, 'similarity_Lm3D_all.mat'))
-     Lm3D = Lm3D['lm']
-
-     # calculate 5 facial landmarks using 68 landmarks
-     lm_idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1
-     Lm3D = np.stack([Lm3D[lm_idx[0], :], np.mean(Lm3D[lm_idx[[1, 2]], :], 0), np.mean(
-         Lm3D[lm_idx[[3, 4]], :], 0), Lm3D[lm_idx[5], :], Lm3D[lm_idx[6], :]], axis=0)
-     Lm3D = Lm3D[[1, 2, 0, 3, 4], :]
-
-     return Lm3D
-
-
- if __name__ == '__main__':
-     transferBFM09()
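A minimal usage sketch for the two helpers above; the import path and the presence of the BFM assets (01_MorphableModel.mat, Exp_Pca.bin, std_exp.txt, ...) under ./BFM are assumptions about the local checkout, not facts stated in this file:

# Sketch: build BFM_model_front.mat once, then load the 5-point landmark template.
# The module path "load_mats" is a placeholder for wherever this file lives locally.
from load_mats import transferBFM09, load_lm3d

transferBFM09(bfm_folder='BFM')   # writes BFM/BFM_model_front.mat
lm3d_std = load_lm3d('BFM')       # (5, 3) reference landmarks used for face alignment
print(lm3d_std.shape)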
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/test_examples.py DELETED
@@ -1,1422 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc..
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
-
17
- import logging
18
- import os
19
- import shutil
20
- import subprocess
21
- import sys
22
- import tempfile
23
- import unittest
24
- from typing import List
25
-
26
- import torch
27
- from accelerate.utils import write_basic_config
28
-
29
- from diffusers import DiffusionPipeline, UNet2DConditionModel
30
-
31
-
32
- logging.basicConfig(level=logging.DEBUG)
33
-
34
- logger = logging.getLogger()
35
-
36
-
37
- # These utils relate to ensuring the right error message is received when running scripts
38
- class SubprocessCallException(Exception):
39
- pass
40
-
41
-
42
- def run_command(command: List[str], return_stdout=False):
43
- """
44
- Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
45
- if an error occurred while running `command`
46
- """
47
- try:
48
- output = subprocess.check_output(command, stderr=subprocess.STDOUT)
49
- if return_stdout:
50
- if hasattr(output, "decode"):
51
- output = output.decode("utf-8")
52
- return output
53
- except subprocess.CalledProcessError as e:
54
- raise SubprocessCallException(
55
- f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
56
- ) from e
57
-
58
-
59
- stream_handler = logging.StreamHandler(sys.stdout)
60
- logger.addHandler(stream_handler)
61
-
62
-
63
- class ExamplesTestsAccelerate(unittest.TestCase):
64
- @classmethod
65
- def setUpClass(cls):
66
- super().setUpClass()
67
- cls._tmpdir = tempfile.mkdtemp()
68
- cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
69
-
70
- write_basic_config(save_location=cls.configPath)
71
- cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
72
-
73
- @classmethod
74
- def tearDownClass(cls):
75
- super().tearDownClass()
76
- shutil.rmtree(cls._tmpdir)
77
-
78
- def test_train_unconditional(self):
79
- with tempfile.TemporaryDirectory() as tmpdir:
80
- test_args = f"""
81
- examples/unconditional_image_generation/train_unconditional.py
82
- --dataset_name hf-internal-testing/dummy_image_class_data
83
- --model_config_name_or_path diffusers/ddpm_dummy
84
- --resolution 64
85
- --output_dir {tmpdir}
86
- --train_batch_size 2
87
- --num_epochs 1
88
- --gradient_accumulation_steps 1
89
- --ddpm_num_inference_steps 2
90
- --learning_rate 1e-3
91
- --lr_warmup_steps 5
92
- """.split()
93
-
94
- run_command(self._launch_args + test_args, return_stdout=True)
95
- # save_pretrained smoke test
96
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin")))
97
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json")))
98
-
99
- def test_textual_inversion(self):
100
- with tempfile.TemporaryDirectory() as tmpdir:
101
- test_args = f"""
102
- examples/textual_inversion/textual_inversion.py
103
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
104
- --train_data_dir docs/source/en/imgs
105
- --learnable_property object
106
- --placeholder_token <cat-toy>
107
- --initializer_token a
108
- --validation_prompt <cat-toy>
109
- --validation_steps 1
110
- --save_steps 1
111
- --num_vectors 2
112
- --resolution 64
113
- --train_batch_size 1
114
- --gradient_accumulation_steps 1
115
- --max_train_steps 2
116
- --learning_rate 5.0e-04
117
- --scale_lr
118
- --lr_scheduler constant
119
- --lr_warmup_steps 0
120
- --output_dir {tmpdir}
121
- """.split()
122
-
123
- run_command(self._launch_args + test_args)
124
- # save_pretrained smoke test
125
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "learned_embeds.bin")))
126
-
127
- def test_dreambooth(self):
128
- with tempfile.TemporaryDirectory() as tmpdir:
129
- test_args = f"""
130
- examples/dreambooth/train_dreambooth.py
131
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
132
- --instance_data_dir docs/source/en/imgs
133
- --instance_prompt photo
134
- --resolution 64
135
- --train_batch_size 1
136
- --gradient_accumulation_steps 1
137
- --max_train_steps 2
138
- --learning_rate 5.0e-04
139
- --scale_lr
140
- --lr_scheduler constant
141
- --lr_warmup_steps 0
142
- --output_dir {tmpdir}
143
- """.split()
144
-
145
- run_command(self._launch_args + test_args)
146
- # save_pretrained smoke test
147
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin")))
148
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json")))
149
-
150
- def test_dreambooth_if(self):
151
- with tempfile.TemporaryDirectory() as tmpdir:
152
- test_args = f"""
153
- examples/dreambooth/train_dreambooth.py
154
- --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe
155
- --instance_data_dir docs/source/en/imgs
156
- --instance_prompt photo
157
- --resolution 64
158
- --train_batch_size 1
159
- --gradient_accumulation_steps 1
160
- --max_train_steps 2
161
- --learning_rate 5.0e-04
162
- --scale_lr
163
- --lr_scheduler constant
164
- --lr_warmup_steps 0
165
- --output_dir {tmpdir}
166
- --pre_compute_text_embeddings
167
- --tokenizer_max_length=77
168
- --text_encoder_use_attention_mask
169
- """.split()
170
-
171
- run_command(self._launch_args + test_args)
172
- # save_pretrained smoke test
173
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin")))
174
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json")))
175
-
176
- def test_dreambooth_checkpointing(self):
177
- instance_prompt = "photo"
178
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
179
-
180
- with tempfile.TemporaryDirectory() as tmpdir:
181
- # Run training script with checkpointing
182
- # max_train_steps == 5, checkpointing_steps == 2
183
- # Should create checkpoints at steps 2, 4
184
-
185
- initial_run_args = f"""
186
- examples/dreambooth/train_dreambooth.py
187
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
188
- --instance_data_dir docs/source/en/imgs
189
- --instance_prompt {instance_prompt}
190
- --resolution 64
191
- --train_batch_size 1
192
- --gradient_accumulation_steps 1
193
- --max_train_steps 5
194
- --learning_rate 5.0e-04
195
- --scale_lr
196
- --lr_scheduler constant
197
- --lr_warmup_steps 0
198
- --output_dir {tmpdir}
199
- --checkpointing_steps=2
200
- --seed=0
201
- """.split()
202
-
203
- run_command(self._launch_args + initial_run_args)
204
-
205
- # check can run the original fully trained output pipeline
206
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
207
- pipe(instance_prompt, num_inference_steps=2)
208
-
209
- # check checkpoint directories exist
210
- self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-2")))
211
- self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-4")))
212
-
213
- # check can run an intermediate checkpoint
214
- unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet")
215
- pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None)
216
- pipe(instance_prompt, num_inference_steps=2)
217
-
218
- # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming
219
- shutil.rmtree(os.path.join(tmpdir, "checkpoint-2"))
220
-
221
- # Run training script for 7 total steps resuming from checkpoint 4
222
-
223
- resume_run_args = f"""
224
- examples/dreambooth/train_dreambooth.py
225
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
226
- --instance_data_dir docs/source/en/imgs
227
- --instance_prompt {instance_prompt}
228
- --resolution 64
229
- --train_batch_size 1
230
- --gradient_accumulation_steps 1
231
- --max_train_steps 7
232
- --learning_rate 5.0e-04
233
- --scale_lr
234
- --lr_scheduler constant
235
- --lr_warmup_steps 0
236
- --output_dir {tmpdir}
237
- --checkpointing_steps=2
238
- --resume_from_checkpoint=checkpoint-4
239
- --seed=0
240
- """.split()
241
-
242
- run_command(self._launch_args + resume_run_args)
243
-
244
- # check can run new fully trained pipeline
245
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
246
- pipe(instance_prompt, num_inference_steps=2)
247
-
248
- # check old checkpoints do not exist
249
- self.assertFalse(os.path.isdir(os.path.join(tmpdir, "checkpoint-2")))
250
-
251
- # check new checkpoints exist
252
- self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-4")))
253
- self.assertTrue(os.path.isdir(os.path.join(tmpdir, "checkpoint-6")))
254
-
255
- def test_dreambooth_lora(self):
256
- with tempfile.TemporaryDirectory() as tmpdir:
257
- test_args = f"""
258
- examples/dreambooth/train_dreambooth_lora.py
259
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
260
- --instance_data_dir docs/source/en/imgs
261
- --instance_prompt photo
262
- --resolution 64
263
- --train_batch_size 1
264
- --gradient_accumulation_steps 1
265
- --max_train_steps 2
266
- --learning_rate 5.0e-04
267
- --scale_lr
268
- --lr_scheduler constant
269
- --lr_warmup_steps 0
270
- --output_dir {tmpdir}
271
- """.split()
272
-
273
- run_command(self._launch_args + test_args)
274
- # save_pretrained smoke test
275
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin")))
276
-
277
- # make sure the state_dict has the correct naming in the parameters.
278
- lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin"))
279
- is_lora = all("lora" in k for k in lora_state_dict.keys())
280
- self.assertTrue(is_lora)
281
-
282
- # when not training the text encoder, all the parameters in the state dict should start
283
- # with `"unet"` in their names.
284
- starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
285
- self.assertTrue(starts_with_unet)
286
-
287
- def test_dreambooth_lora_with_text_encoder(self):
288
- with tempfile.TemporaryDirectory() as tmpdir:
289
- test_args = f"""
290
- examples/dreambooth/train_dreambooth_lora.py
291
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
292
- --instance_data_dir docs/source/en/imgs
293
- --instance_prompt photo
294
- --resolution 64
295
- --train_batch_size 1
296
- --gradient_accumulation_steps 1
297
- --max_train_steps 2
298
- --learning_rate 5.0e-04
299
- --scale_lr
300
- --lr_scheduler constant
301
- --lr_warmup_steps 0
302
- --train_text_encoder
303
- --output_dir {tmpdir}
304
- """.split()
305
-
306
- run_command(self._launch_args + test_args)
307
- # save_pretrained smoke test
308
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin")))
309
-
310
- # check `text_encoder` is present at all.
311
- lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin"))
312
- keys = lora_state_dict.keys()
313
- is_text_encoder_present = any(k.startswith("text_encoder") for k in keys)
314
- self.assertTrue(is_text_encoder_present)
315
-
316
- # the names of the keys of the state dict should either start with `unet`
317
- # or `text_encoder`.
318
- is_correct_naming = all(k.startswith("unet") or k.startswith("text_encoder") for k in keys)
319
- self.assertTrue(is_correct_naming)
320
-
321
- def test_dreambooth_lora_if_model(self):
322
- with tempfile.TemporaryDirectory() as tmpdir:
323
- test_args = f"""
324
- examples/dreambooth/train_dreambooth_lora.py
325
- --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe
326
- --instance_data_dir docs/source/en/imgs
327
- --instance_prompt photo
328
- --resolution 64
329
- --train_batch_size 1
330
- --gradient_accumulation_steps 1
331
- --max_train_steps 2
332
- --learning_rate 5.0e-04
333
- --scale_lr
334
- --lr_scheduler constant
335
- --lr_warmup_steps 0
336
- --output_dir {tmpdir}
337
- --pre_compute_text_embeddings
338
- --tokenizer_max_length=77
339
- --text_encoder_use_attention_mask
340
- """.split()
341
-
342
- run_command(self._launch_args + test_args)
343
- # save_pretrained smoke test
344
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin")))
345
-
346
- # make sure the state_dict has the correct naming in the parameters.
347
- lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin"))
348
- is_lora = all("lora" in k for k in lora_state_dict.keys())
349
- self.assertTrue(is_lora)
350
-
351
- # when not training the text encoder, all the parameters in the state dict should start
352
- # with `"unet"` in their names.
353
- starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
354
- self.assertTrue(starts_with_unet)
355
-
356
- def test_dreambooth_lora_sdxl(self):
357
- with tempfile.TemporaryDirectory() as tmpdir:
358
- test_args = f"""
359
- examples/dreambooth/train_dreambooth_lora_sdxl.py
360
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
361
- --instance_data_dir docs/source/en/imgs
362
- --instance_prompt photo
363
- --resolution 64
364
- --train_batch_size 1
365
- --gradient_accumulation_steps 1
366
- --max_train_steps 2
367
- --learning_rate 5.0e-04
368
- --scale_lr
369
- --lr_scheduler constant
370
- --lr_warmup_steps 0
371
- --output_dir {tmpdir}
372
- """.split()
373
-
374
- run_command(self._launch_args + test_args)
375
- # save_pretrained smoke test
376
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin")))
377
-
378
- # make sure the state_dict has the correct naming in the parameters.
379
- lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin"))
380
- is_lora = all("lora" in k for k in lora_state_dict.keys())
381
- self.assertTrue(is_lora)
382
-
383
- # when not training the text encoder, all the parameters in the state dict should start
384
- # with `"unet"` in their names.
385
- starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
386
- self.assertTrue(starts_with_unet)
387
-
388
- def test_dreambooth_lora_sdxl_with_text_encoder(self):
389
- with tempfile.TemporaryDirectory() as tmpdir:
390
- test_args = f"""
391
- examples/dreambooth/train_dreambooth_lora_sdxl.py
392
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
393
- --instance_data_dir docs/source/en/imgs
394
- --instance_prompt photo
395
- --resolution 64
396
- --train_batch_size 1
397
- --gradient_accumulation_steps 1
398
- --max_train_steps 2
399
- --learning_rate 5.0e-04
400
- --scale_lr
401
- --lr_scheduler constant
402
- --lr_warmup_steps 0
403
- --output_dir {tmpdir}
404
- --train_text_encoder
405
- """.split()
406
-
407
- run_command(self._launch_args + test_args)
408
- # save_pretrained smoke test
409
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.bin")))
410
-
411
- # make sure the state_dict has the correct naming in the parameters.
412
- lora_state_dict = torch.load(os.path.join(tmpdir, "pytorch_lora_weights.bin"))
413
- is_lora = all("lora" in k for k in lora_state_dict.keys())
414
- self.assertTrue(is_lora)
415
-
416
- # when not training the text encoder, all the parameters in the state dict should start
417
- # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names.
418
- keys = lora_state_dict.keys()
419
- starts_with_unet = all(
420
- k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys
421
- )
422
- self.assertTrue(starts_with_unet)
423
-
424
- def test_custom_diffusion(self):
425
- with tempfile.TemporaryDirectory() as tmpdir:
426
- test_args = f"""
427
- examples/custom_diffusion/train_custom_diffusion.py
428
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
429
- --instance_data_dir docs/source/en/imgs
430
- --instance_prompt <new1>
431
- --resolution 64
432
- --train_batch_size 1
433
- --gradient_accumulation_steps 1
434
- --max_train_steps 2
435
- --learning_rate 1.0e-05
436
- --scale_lr
437
- --lr_scheduler constant
438
- --lr_warmup_steps 0
439
- --modifier_token <new1>
440
- --output_dir {tmpdir}
441
- """.split()
442
-
443
- run_command(self._launch_args + test_args)
444
- # save_pretrained smoke test
445
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin")))
446
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin")))
447
-
448
- def test_text_to_image(self):
449
- with tempfile.TemporaryDirectory() as tmpdir:
450
- test_args = f"""
451
- examples/text_to_image/train_text_to_image.py
452
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
453
- --dataset_name hf-internal-testing/dummy_image_text_data
454
- --resolution 64
455
- --center_crop
456
- --random_flip
457
- --train_batch_size 1
458
- --gradient_accumulation_steps 1
459
- --max_train_steps 2
460
- --learning_rate 5.0e-04
461
- --scale_lr
462
- --lr_scheduler constant
463
- --lr_warmup_steps 0
464
- --output_dir {tmpdir}
465
- """.split()
466
-
467
- run_command(self._launch_args + test_args)
468
- # save_pretrained smoke test
469
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.bin")))
470
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json")))
471
-
472
- def test_text_to_image_checkpointing(self):
473
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
474
- prompt = "a prompt"
475
-
476
- with tempfile.TemporaryDirectory() as tmpdir:
477
- # Run training script with checkpointing
478
- # max_train_steps == 5, checkpointing_steps == 2
479
- # Should create checkpoints at steps 2, 4
480
-
481
- initial_run_args = f"""
482
- examples/text_to_image/train_text_to_image.py
483
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
484
- --dataset_name hf-internal-testing/dummy_image_text_data
485
- --resolution 64
486
- --center_crop
487
- --random_flip
488
- --train_batch_size 1
489
- --gradient_accumulation_steps 1
490
- --max_train_steps 5
491
- --learning_rate 5.0e-04
492
- --scale_lr
493
- --lr_scheduler constant
494
- --lr_warmup_steps 0
495
- --output_dir {tmpdir}
496
- --checkpointing_steps=2
497
- --seed=0
498
- """.split()
499
-
500
- run_command(self._launch_args + initial_run_args)
501
-
502
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
503
- pipe(prompt, num_inference_steps=2)
504
-
505
- # check checkpoint directories exist
506
- self.assertEqual(
507
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
508
- {"checkpoint-2", "checkpoint-4"},
509
- )
510
-
511
- # check can run an intermediate checkpoint
512
- unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet")
513
- pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None)
514
- pipe(prompt, num_inference_steps=2)
515
-
516
- # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming
517
- shutil.rmtree(os.path.join(tmpdir, "checkpoint-2"))
518
-
519
- # Run training script for 7 total steps resuming from checkpoint 4
520
-
521
- resume_run_args = f"""
522
- examples/text_to_image/train_text_to_image.py
523
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
524
- --dataset_name hf-internal-testing/dummy_image_text_data
525
- --resolution 64
526
- --center_crop
527
- --random_flip
528
- --train_batch_size 1
529
- --gradient_accumulation_steps 1
530
- --max_train_steps 7
531
- --learning_rate 5.0e-04
532
- --scale_lr
533
- --lr_scheduler constant
534
- --lr_warmup_steps 0
535
- --output_dir {tmpdir}
536
- --checkpointing_steps=2
537
- --resume_from_checkpoint=checkpoint-4
538
- --seed=0
539
- """.split()
540
-
541
- run_command(self._launch_args + resume_run_args)
542
-
543
- # check can run new fully trained pipeline
544
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
545
- pipe(prompt, num_inference_steps=2)
546
-
547
- self.assertEqual(
548
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
549
- {
550
- # no checkpoint-2 -> check old checkpoints do not exist
551
- # check new checkpoints exist
552
- "checkpoint-4",
553
- "checkpoint-6",
554
- },
555
- )
556
-
557
- def test_text_to_image_checkpointing_use_ema(self):
558
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
559
- prompt = "a prompt"
560
-
561
- with tempfile.TemporaryDirectory() as tmpdir:
562
- # Run training script with checkpointing
563
- # max_train_steps == 5, checkpointing_steps == 2
564
- # Should create checkpoints at steps 2, 4
565
-
566
- initial_run_args = f"""
567
- examples/text_to_image/train_text_to_image.py
568
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
569
- --dataset_name hf-internal-testing/dummy_image_text_data
570
- --resolution 64
571
- --center_crop
572
- --random_flip
573
- --train_batch_size 1
574
- --gradient_accumulation_steps 1
575
- --max_train_steps 5
576
- --learning_rate 5.0e-04
577
- --scale_lr
578
- --lr_scheduler constant
579
- --lr_warmup_steps 0
580
- --output_dir {tmpdir}
581
- --checkpointing_steps=2
582
- --use_ema
583
- --seed=0
584
- """.split()
585
-
586
- run_command(self._launch_args + initial_run_args)
587
-
588
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
589
- pipe(prompt, num_inference_steps=2)
590
-
591
- # check checkpoint directories exist
592
- self.assertEqual(
593
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
594
- {"checkpoint-2", "checkpoint-4"},
595
- )
596
-
597
- # check can run an intermediate checkpoint
598
- unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet")
599
- pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None)
600
- pipe(prompt, num_inference_steps=2)
601
-
602
- # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming
603
- shutil.rmtree(os.path.join(tmpdir, "checkpoint-2"))
604
-
605
- # Run training script for 7 total steps resuming from checkpoint 4
606
-
607
- resume_run_args = f"""
608
- examples/text_to_image/train_text_to_image.py
609
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
610
- --dataset_name hf-internal-testing/dummy_image_text_data
611
- --resolution 64
612
- --center_crop
613
- --random_flip
614
- --train_batch_size 1
615
- --gradient_accumulation_steps 1
616
- --max_train_steps 7
617
- --learning_rate 5.0e-04
618
- --scale_lr
619
- --lr_scheduler constant
620
- --lr_warmup_steps 0
621
- --output_dir {tmpdir}
622
- --checkpointing_steps=2
623
- --resume_from_checkpoint=checkpoint-4
624
- --use_ema
625
- --seed=0
626
- """.split()
627
-
628
- run_command(self._launch_args + resume_run_args)
629
-
630
- # check can run new fully trained pipeline
631
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
632
- pipe(prompt, num_inference_steps=2)
633
-
634
- self.assertEqual(
635
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
636
- {
637
- # no checkpoint-2 -> check old checkpoints do not exist
638
- # check new checkpoints exist
639
- "checkpoint-4",
640
- "checkpoint-6",
641
- },
642
- )
643
-
644
- def test_text_to_image_checkpointing_checkpoints_total_limit(self):
645
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
646
- prompt = "a prompt"
647
-
648
- with tempfile.TemporaryDirectory() as tmpdir:
649
- # Run training script with checkpointing
650
- # max_train_steps == 7, checkpointing_steps == 2, checkpoints_total_limit == 2
651
- # Should create checkpoints at steps 2, 4, 6
652
- # with checkpoint at step 2 deleted
653
-
654
- initial_run_args = f"""
655
- examples/text_to_image/train_text_to_image.py
656
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
657
- --dataset_name hf-internal-testing/dummy_image_text_data
658
- --resolution 64
659
- --center_crop
660
- --random_flip
661
- --train_batch_size 1
662
- --gradient_accumulation_steps 1
663
- --max_train_steps 7
664
- --learning_rate 5.0e-04
665
- --scale_lr
666
- --lr_scheduler constant
667
- --lr_warmup_steps 0
668
- --output_dir {tmpdir}
669
- --checkpointing_steps=2
670
- --checkpoints_total_limit=2
671
- --seed=0
672
- """.split()
673
-
674
- run_command(self._launch_args + initial_run_args)
675
-
676
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
677
- pipe(prompt, num_inference_steps=2)
678
-
679
- # check checkpoint directories exist
680
- self.assertEqual(
681
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
682
- # checkpoint-2 should have been deleted
683
- {"checkpoint-4", "checkpoint-6"},
684
- )
685
-
686
- def test_text_to_image_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
687
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
688
- prompt = "a prompt"
689
-
690
- with tempfile.TemporaryDirectory() as tmpdir:
691
- # Run training script with checkpointing
692
- # max_train_steps == 9, checkpointing_steps == 2
693
- # Should create checkpoints at steps 2, 4, 6, 8
694
-
695
- initial_run_args = f"""
696
- examples/text_to_image/train_text_to_image.py
697
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
698
- --dataset_name hf-internal-testing/dummy_image_text_data
699
- --resolution 64
700
- --center_crop
701
- --random_flip
702
- --train_batch_size 1
703
- --gradient_accumulation_steps 1
704
- --max_train_steps 9
705
- --learning_rate 5.0e-04
706
- --scale_lr
707
- --lr_scheduler constant
708
- --lr_warmup_steps 0
709
- --output_dir {tmpdir}
710
- --checkpointing_steps=2
711
- --seed=0
712
- """.split()
713
-
714
- run_command(self._launch_args + initial_run_args)
715
-
716
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
717
- pipe(prompt, num_inference_steps=2)
718
-
719
- # check checkpoint directories exist
720
- self.assertEqual(
721
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
722
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
723
- )
724
-
725
- # resume and we should try to checkpoint at 10, where we'll have to remove
726
- # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint
727
-
728
- resume_run_args = f"""
729
- examples/text_to_image/train_text_to_image.py
730
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
731
- --dataset_name hf-internal-testing/dummy_image_text_data
732
- --resolution 64
733
- --center_crop
734
- --random_flip
735
- --train_batch_size 1
736
- --gradient_accumulation_steps 1
737
- --max_train_steps 11
738
- --learning_rate 5.0e-04
739
- --scale_lr
740
- --lr_scheduler constant
741
- --lr_warmup_steps 0
742
- --output_dir {tmpdir}
743
- --checkpointing_steps=2
744
- --resume_from_checkpoint=checkpoint-8
745
- --checkpoints_total_limit=3
746
- --seed=0
747
- """.split()
748
-
749
- run_command(self._launch_args + resume_run_args)
750
-
751
- pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None)
752
- pipe(prompt, num_inference_steps=2)
753
-
754
- # check checkpoint directories exist
755
- self.assertEqual(
756
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
757
- {"checkpoint-6", "checkpoint-8", "checkpoint-10"},
758
- )
759
-
760
- def test_text_to_image_lora_checkpointing_checkpoints_total_limit(self):
761
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
762
- prompt = "a prompt"
763
-
764
- with tempfile.TemporaryDirectory() as tmpdir:
765
- # Run training script with checkpointing
766
- # max_train_steps == 7, checkpointing_steps == 2, checkpoints_total_limit == 2
767
- # Should create checkpoints at steps 2, 4, 6
768
- # with checkpoint at step 2 deleted
769
-
770
- initial_run_args = f"""
771
- examples/text_to_image/train_text_to_image_lora.py
772
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
773
- --dataset_name hf-internal-testing/dummy_image_text_data
774
- --resolution 64
775
- --center_crop
776
- --random_flip
777
- --train_batch_size 1
778
- --gradient_accumulation_steps 1
779
- --max_train_steps 7
780
- --learning_rate 5.0e-04
781
- --scale_lr
782
- --lr_scheduler constant
783
- --lr_warmup_steps 0
784
- --output_dir {tmpdir}
785
- --checkpointing_steps=2
786
- --checkpoints_total_limit=2
787
- --seed=0
788
- --num_validation_images=0
789
- """.split()
790
-
791
- run_command(self._launch_args + initial_run_args)
792
-
793
- pipe = DiffusionPipeline.from_pretrained(
794
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
795
- )
796
- pipe.load_lora_weights(tmpdir)
797
- pipe(prompt, num_inference_steps=2)
798
-
799
- # check checkpoint directories exist
800
- self.assertEqual(
801
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
802
- # checkpoint-2 should have been deleted
803
- {"checkpoint-4", "checkpoint-6"},
804
- )
805
-
806
- def test_text_to_image_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
807
- pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe"
808
- prompt = "a prompt"
809
-
810
- with tempfile.TemporaryDirectory() as tmpdir:
811
- # Run training script with checkpointing
812
- # max_train_steps == 9, checkpointing_steps == 2
813
- # Should create checkpoints at steps 2, 4, 6, 8
814
-
815
- initial_run_args = f"""
816
- examples/text_to_image/train_text_to_image_lora.py
817
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
818
- --dataset_name hf-internal-testing/dummy_image_text_data
819
- --resolution 64
820
- --center_crop
821
- --random_flip
822
- --train_batch_size 1
823
- --gradient_accumulation_steps 1
824
- --max_train_steps 9
825
- --learning_rate 5.0e-04
826
- --scale_lr
827
- --lr_scheduler constant
828
- --lr_warmup_steps 0
829
- --output_dir {tmpdir}
830
- --checkpointing_steps=2
831
- --seed=0
832
- --num_validation_images=0
833
- """.split()
834
-
835
- run_command(self._launch_args + initial_run_args)
836
-
837
- pipe = DiffusionPipeline.from_pretrained(
838
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
839
- )
840
- pipe.load_lora_weights(tmpdir)
841
- pipe(prompt, num_inference_steps=2)
842
-
843
- # check checkpoint directories exist
844
- self.assertEqual(
845
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
846
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
847
- )
848
-
849
- # resume and we should try to checkpoint at 10, where we'll have to remove
850
- # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint
851
-
852
- resume_run_args = f"""
853
- examples/text_to_image/train_text_to_image_lora.py
854
- --pretrained_model_name_or_path {pretrained_model_name_or_path}
855
- --dataset_name hf-internal-testing/dummy_image_text_data
856
- --resolution 64
857
- --center_crop
858
- --random_flip
859
- --train_batch_size 1
860
- --gradient_accumulation_steps 1
861
- --max_train_steps 11
862
- --learning_rate 5.0e-04
863
- --scale_lr
864
- --lr_scheduler constant
865
- --lr_warmup_steps 0
866
- --output_dir {tmpdir}
867
- --checkpointing_steps=2
868
- --resume_from_checkpoint=checkpoint-8
869
- --checkpoints_total_limit=3
870
- --seed=0
871
- --num_validation_images=0
872
- """.split()
873
-
874
- run_command(self._launch_args + resume_run_args)
875
-
876
- pipe = DiffusionPipeline.from_pretrained(
877
- "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
878
- )
879
- pipe.load_lora_weights(tmpdir)
880
- pipe(prompt, num_inference_steps=2)
881
-
882
- # check checkpoint directories exist
883
- self.assertEqual(
884
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
885
- {"checkpoint-6", "checkpoint-8", "checkpoint-10"},
886
- )
887
-
888
- def test_unconditional_checkpointing_checkpoints_total_limit(self):
889
- with tempfile.TemporaryDirectory() as tmpdir:
890
- initial_run_args = f"""
891
- examples/unconditional_image_generation/train_unconditional.py
892
- --dataset_name hf-internal-testing/dummy_image_class_data
893
- --model_config_name_or_path diffusers/ddpm_dummy
894
- --resolution 64
895
- --output_dir {tmpdir}
896
- --train_batch_size 1
897
- --num_epochs 1
898
- --gradient_accumulation_steps 1
899
- --ddpm_num_inference_steps 2
900
- --learning_rate 1e-3
901
- --lr_warmup_steps 5
902
- --checkpointing_steps=2
903
- --checkpoints_total_limit=2
904
- """.split()
905
-
906
- run_command(self._launch_args + initial_run_args)
907
-
908
- # check checkpoint directories exist
909
- self.assertEqual(
910
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
911
- # checkpoint-2 should have been deleted
912
- {"checkpoint-4", "checkpoint-6"},
913
- )
914
-
915
- def test_unconditional_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
916
- with tempfile.TemporaryDirectory() as tmpdir:
917
- initial_run_args = f"""
918
- examples/unconditional_image_generation/train_unconditional.py
919
- --dataset_name hf-internal-testing/dummy_image_class_data
920
- --model_config_name_or_path diffusers/ddpm_dummy
921
- --resolution 64
922
- --output_dir {tmpdir}
923
- --train_batch_size 1
924
- --num_epochs 1
925
- --gradient_accumulation_steps 1
926
- --ddpm_num_inference_steps 2
927
- --learning_rate 1e-3
928
- --lr_warmup_steps 5
929
- --checkpointing_steps=1
930
- """.split()
931
-
932
- run_command(self._launch_args + initial_run_args)
933
-
934
- # check checkpoint directories exist
935
- self.assertEqual(
936
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
937
- {"checkpoint-1", "checkpoint-2", "checkpoint-3", "checkpoint-4", "checkpoint-5", "checkpoint-6"},
938
- )
939
-
940
- resume_run_args = f"""
941
- examples/unconditional_image_generation/train_unconditional.py
942
- --dataset_name hf-internal-testing/dummy_image_class_data
943
- --model_config_name_or_path diffusers/ddpm_dummy
944
- --resolution 64
945
- --output_dir {tmpdir}
946
- --train_batch_size 1
947
- --num_epochs 2
948
- --gradient_accumulation_steps 1
949
- --ddpm_num_inference_steps 2
950
- --learning_rate 1e-3
951
- --lr_warmup_steps 5
952
- --resume_from_checkpoint=checkpoint-6
953
- --checkpointing_steps=2
954
- --checkpoints_total_limit=3
955
- """.split()
956
-
957
- run_command(self._launch_args + resume_run_args)
958
-
959
- # check checkpoint directories exist
960
- self.assertEqual(
961
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
962
- {"checkpoint-8", "checkpoint-10", "checkpoint-12"},
963
- )
964
-
965
- def test_textual_inversion_checkpointing(self):
966
- with tempfile.TemporaryDirectory() as tmpdir:
967
- test_args = f"""
968
- examples/textual_inversion/textual_inversion.py
969
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
970
- --train_data_dir docs/source/en/imgs
971
- --learnable_property object
972
- --placeholder_token <cat-toy>
973
- --initializer_token a
974
- --validation_prompt <cat-toy>
975
- --validation_steps 1
976
- --save_steps 1
977
- --num_vectors 2
978
- --resolution 64
979
- --train_batch_size 1
980
- --gradient_accumulation_steps 1
981
- --max_train_steps 3
982
- --learning_rate 5.0e-04
983
- --scale_lr
984
- --lr_scheduler constant
985
- --lr_warmup_steps 0
986
- --output_dir {tmpdir}
987
- --checkpointing_steps=1
988
- --checkpoints_total_limit=2
989
- """.split()
990
-
991
- run_command(self._launch_args + test_args)
992
-
993
- # check checkpoint directories exist
994
- self.assertEqual(
995
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
996
- {"checkpoint-2", "checkpoint-3"},
997
- )
998
-
999
- def test_textual_inversion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
1000
- with tempfile.TemporaryDirectory() as tmpdir:
1001
- test_args = f"""
1002
- examples/textual_inversion/textual_inversion.py
1003
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
1004
- --train_data_dir docs/source/en/imgs
1005
- --learnable_property object
1006
- --placeholder_token <cat-toy>
1007
- --initializer_token a
1008
- --validation_prompt <cat-toy>
1009
- --validation_steps 1
1010
- --save_steps 1
1011
- --num_vectors 2
1012
- --resolution 64
1013
- --train_batch_size 1
1014
- --gradient_accumulation_steps 1
1015
- --max_train_steps 3
1016
- --learning_rate 5.0e-04
1017
- --scale_lr
1018
- --lr_scheduler constant
1019
- --lr_warmup_steps 0
1020
- --output_dir {tmpdir}
1021
- --checkpointing_steps=1
1022
- """.split()
1023
-
1024
- run_command(self._launch_args + test_args)
1025
-
1026
- # check checkpoint directories exist
1027
- self.assertEqual(
1028
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1029
- {"checkpoint-1", "checkpoint-2", "checkpoint-3"},
1030
- )
1031
-
1032
- resume_run_args = f"""
1033
- examples/textual_inversion/textual_inversion.py
1034
- --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
1035
- --train_data_dir docs/source/en/imgs
1036
- --learnable_property object
1037
- --placeholder_token <cat-toy>
1038
- --initializer_token a
1039
- --validation_prompt <cat-toy>
1040
- --validation_steps 1
1041
- --save_steps 1
1042
- --num_vectors 2
1043
- --resolution 64
1044
- --train_batch_size 1
1045
- --gradient_accumulation_steps 1
1046
- --max_train_steps 4
1047
- --learning_rate 5.0e-04
1048
- --scale_lr
1049
- --lr_scheduler constant
1050
- --lr_warmup_steps 0
1051
- --output_dir {tmpdir}
1052
- --checkpointing_steps=1
1053
- --resume_from_checkpoint=checkpoint-3
1054
- --checkpoints_total_limit=2
1055
- """.split()
1056
-
1057
- run_command(self._launch_args + resume_run_args)
1058
-
1059
- # check checkpoint directories exist
1060
- self.assertEqual(
1061
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1062
- {"checkpoint-3", "checkpoint-4"},
1063
- )
1064
-
1065
- def test_instruct_pix2pix_checkpointing_checkpoints_total_limit(self):
1066
- with tempfile.TemporaryDirectory() as tmpdir:
1067
- test_args = f"""
1068
- examples/instruct_pix2pix/train_instruct_pix2pix.py
1069
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1070
- --dataset_name=hf-internal-testing/instructpix2pix-10-samples
1071
- --resolution=64
1072
- --random_flip
1073
- --train_batch_size=1
1074
- --max_train_steps=7
1075
- --checkpointing_steps=2
1076
- --checkpoints_total_limit=2
1077
- --output_dir {tmpdir}
1078
- --seed=0
1079
- """.split()
1080
-
1081
- run_command(self._launch_args + test_args)
1082
-
1083
- self.assertEqual(
1084
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1085
- {"checkpoint-4", "checkpoint-6"},
1086
- )
1087
-
1088
- def test_instruct_pix2pix_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
1089
- with tempfile.TemporaryDirectory() as tmpdir:
1090
- test_args = f"""
1091
- examples/instruct_pix2pix/train_instruct_pix2pix.py
1092
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1093
- --dataset_name=hf-internal-testing/instructpix2pix-10-samples
1094
- --resolution=64
1095
- --random_flip
1096
- --train_batch_size=1
1097
- --max_train_steps=9
1098
- --checkpointing_steps=2
1099
- --output_dir {tmpdir}
1100
- --seed=0
1101
- """.split()
1102
-
1103
- run_command(self._launch_args + test_args)
1104
-
1105
- # check checkpoint directories exist
1106
- self.assertEqual(
1107
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1108
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
1109
- )
1110
-
1111
- resume_run_args = f"""
1112
- examples/instruct_pix2pix/train_instruct_pix2pix.py
1113
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1114
- --dataset_name=hf-internal-testing/instructpix2pix-10-samples
1115
- --resolution=64
1116
- --random_flip
1117
- --train_batch_size=1
1118
- --max_train_steps=11
1119
- --checkpointing_steps=2
1120
- --output_dir {tmpdir}
1121
- --seed=0
1122
- --resume_from_checkpoint=checkpoint-8
1123
- --checkpoints_total_limit=3
1124
- """.split()
1125
-
1126
- run_command(self._launch_args + resume_run_args)
1127
-
1128
- # check checkpoint directories exist
1129
- self.assertEqual(
1130
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1131
- {"checkpoint-6", "checkpoint-8", "checkpoint-10"},
1132
- )
1133
-
1134
- def test_dreambooth_checkpointing_checkpoints_total_limit(self):
1135
- with tempfile.TemporaryDirectory() as tmpdir:
1136
- test_args = f"""
1137
- examples/dreambooth/train_dreambooth.py
1138
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1139
- --instance_data_dir=docs/source/en/imgs
1140
- --output_dir={tmpdir}
1141
- --instance_prompt=prompt
1142
- --resolution=64
1143
- --train_batch_size=1
1144
- --gradient_accumulation_steps=1
1145
- --max_train_steps=6
1146
- --checkpoints_total_limit=2
1147
- --checkpointing_steps=2
1148
- """.split()
1149
-
1150
- run_command(self._launch_args + test_args)
1151
-
1152
- self.assertEqual(
1153
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1154
- {"checkpoint-4", "checkpoint-6"},
1155
- )
1156
-
1157
- def test_dreambooth_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
1158
- with tempfile.TemporaryDirectory() as tmpdir:
1159
- test_args = f"""
1160
- examples/dreambooth/train_dreambooth.py
1161
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1162
- --instance_data_dir=docs/source/en/imgs
1163
- --output_dir={tmpdir}
1164
- --instance_prompt=prompt
1165
- --resolution=64
1166
- --train_batch_size=1
1167
- --gradient_accumulation_steps=1
1168
- --max_train_steps=9
1169
- --checkpointing_steps=2
1170
- """.split()
1171
-
1172
- run_command(self._launch_args + test_args)
1173
-
1174
- self.assertEqual(
1175
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1176
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
1177
- )
1178
-
1179
- resume_run_args = f"""
1180
- examples/dreambooth/train_dreambooth.py
1181
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1182
- --instance_data_dir=docs/source/en/imgs
1183
- --output_dir={tmpdir}
1184
- --instance_prompt=prompt
1185
- --resolution=64
1186
- --train_batch_size=1
1187
- --gradient_accumulation_steps=1
1188
- --max_train_steps=11
1189
- --checkpointing_steps=2
1190
- --resume_from_checkpoint=checkpoint-8
1191
- --checkpoints_total_limit=3
1192
- """.split()
1193
-
1194
- run_command(self._launch_args + resume_run_args)
1195
-
1196
- self.assertEqual(
1197
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1198
- {"checkpoint-6", "checkpoint-8", "checkpoint-10"},
1199
- )
1200
-
1201
- def test_dreambooth_lora_checkpointing_checkpoints_total_limit(self):
1202
- with tempfile.TemporaryDirectory() as tmpdir:
1203
- test_args = f"""
1204
- examples/dreambooth/train_dreambooth_lora.py
1205
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1206
- --instance_data_dir=docs/source/en/imgs
1207
- --output_dir={tmpdir}
1208
- --instance_prompt=prompt
1209
- --resolution=64
1210
- --train_batch_size=1
1211
- --gradient_accumulation_steps=1
1212
- --max_train_steps=6
1213
- --checkpoints_total_limit=2
1214
- --checkpointing_steps=2
1215
- """.split()
1216
-
1217
- run_command(self._launch_args + test_args)
1218
-
1219
- self.assertEqual(
1220
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1221
- {"checkpoint-4", "checkpoint-6"},
1222
- )
1223
-
1224
- def test_dreambooth_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
1225
- with tempfile.TemporaryDirectory() as tmpdir:
1226
- test_args = f"""
1227
- examples/dreambooth/train_dreambooth_lora.py
1228
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1229
- --instance_data_dir=docs/source/en/imgs
1230
- --output_dir={tmpdir}
1231
- --instance_prompt=prompt
1232
- --resolution=64
1233
- --train_batch_size=1
1234
- --gradient_accumulation_steps=1
1235
- --max_train_steps=9
1236
- --checkpointing_steps=2
1237
- """.split()
1238
-
1239
- run_command(self._launch_args + test_args)
1240
-
1241
- self.assertEqual(
1242
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1243
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
1244
- )
1245
-
1246
- resume_run_args = f"""
1247
- examples/dreambooth/train_dreambooth_lora.py
1248
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1249
- --instance_data_dir=docs/source/en/imgs
1250
- --output_dir={tmpdir}
1251
- --instance_prompt=prompt
1252
- --resolution=64
1253
- --train_batch_size=1
1254
- --gradient_accumulation_steps=1
1255
- --max_train_steps=11
1256
- --checkpointing_steps=2
1257
- --resume_from_checkpoint=checkpoint-8
1258
- --checkpoints_total_limit=3
1259
- """.split()
1260
-
1261
- run_command(self._launch_args + resume_run_args)
1262
-
1263
- self.assertEqual(
1264
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1265
- {"checkpoint-6", "checkpoint-8", "checkpoint-10"},
1266
- )
1267
-
1268
- def test_controlnet_checkpointing_checkpoints_total_limit(self):
1269
- with tempfile.TemporaryDirectory() as tmpdir:
1270
- test_args = f"""
1271
- examples/controlnet/train_controlnet.py
1272
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1273
- --dataset_name=hf-internal-testing/fill10
1274
- --output_dir={tmpdir}
1275
- --resolution=64
1276
- --train_batch_size=1
1277
- --gradient_accumulation_steps=1
1278
- --max_train_steps=6
1279
- --checkpoints_total_limit=2
1280
- --checkpointing_steps=2
1281
- --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet
1282
- """.split()
1283
-
1284
- run_command(self._launch_args + test_args)
1285
-
1286
- self.assertEqual(
1287
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1288
- {"checkpoint-4", "checkpoint-6"},
1289
- )
1290
-
1291
- def test_controlnet_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
1292
- with tempfile.TemporaryDirectory() as tmpdir:
1293
- test_args = f"""
1294
- examples/controlnet/train_controlnet.py
1295
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1296
- --dataset_name=hf-internal-testing/fill10
1297
- --output_dir={tmpdir}
1298
- --resolution=64
1299
- --train_batch_size=1
1300
- --gradient_accumulation_steps=1
1301
- --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet
1302
- --max_train_steps=9
1303
- --checkpointing_steps=2
1304
- """.split()
1305
-
1306
- run_command(self._launch_args + test_args)
1307
-
1308
- self.assertEqual(
1309
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1310
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
1311
- )
1312
-
1313
- resume_run_args = f"""
1314
- examples/controlnet/train_controlnet.py
1315
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1316
- --dataset_name=hf-internal-testing/fill10
1317
- --output_dir={tmpdir}
1318
- --resolution=64
1319
- --train_batch_size=1
1320
- --gradient_accumulation_steps=1
1321
- --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet
1322
- --max_train_steps=11
1323
- --checkpointing_steps=2
1324
- --resume_from_checkpoint=checkpoint-8
1325
- --checkpoints_total_limit=3
1326
- """.split()
1327
-
1328
- run_command(self._launch_args + resume_run_args)
1329
-
1330
- self.assertEqual(
1331
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1332
- {"checkpoint-8", "checkpoint-10", "checkpoint-12"},
1333
- )
1334
-
1335
- def test_controlnet_sdxl(self):
1336
- with tempfile.TemporaryDirectory() as tmpdir:
1337
- test_args = f"""
1338
- examples/controlnet/train_controlnet_sdxl.py
1339
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-xl-pipe
1340
- --dataset_name=hf-internal-testing/fill10
1341
- --output_dir={tmpdir}
1342
- --resolution=64
1343
- --train_batch_size=1
1344
- --gradient_accumulation_steps=1
1345
- --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet-sdxl
1346
- --max_train_steps=9
1347
- --checkpointing_steps=2
1348
- """.split()
1349
-
1350
- run_command(self._launch_args + test_args)
1351
-
1352
- self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.bin")))
1353
-
1354
- def test_custom_diffusion_checkpointing_checkpoints_total_limit(self):
1355
- with tempfile.TemporaryDirectory() as tmpdir:
1356
- test_args = f"""
1357
- examples/custom_diffusion/train_custom_diffusion.py
1358
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1359
- --instance_data_dir=docs/source/en/imgs
1360
- --output_dir={tmpdir}
1361
- --instance_prompt=<new1>
1362
- --resolution=64
1363
- --train_batch_size=1
1364
- --modifier_token=<new1>
1365
- --dataloader_num_workers=0
1366
- --max_train_steps=6
1367
- --checkpoints_total_limit=2
1368
- --checkpointing_steps=2
1369
- """.split()
1370
-
1371
- run_command(self._launch_args + test_args)
1372
-
1373
- self.assertEqual(
1374
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1375
- {"checkpoint-4", "checkpoint-6"},
1376
- )
1377
-
1378
- def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
1379
- with tempfile.TemporaryDirectory() as tmpdir:
1380
- test_args = f"""
1381
- examples/custom_diffusion/train_custom_diffusion.py
1382
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1383
- --instance_data_dir=docs/source/en/imgs
1384
- --output_dir={tmpdir}
1385
- --instance_prompt=<new1>
1386
- --resolution=64
1387
- --train_batch_size=1
1388
- --modifier_token=<new1>
1389
- --dataloader_num_workers=0
1390
- --max_train_steps=9
1391
- --checkpointing_steps=2
1392
- """.split()
1393
-
1394
- run_command(self._launch_args + test_args)
1395
-
1396
- self.assertEqual(
1397
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1398
- {"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
1399
- )
1400
-
1401
- resume_run_args = f"""
1402
- examples/custom_diffusion/train_custom_diffusion.py
1403
- --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
1404
- --instance_data_dir=docs/source/en/imgs
1405
- --output_dir={tmpdir}
1406
- --instance_prompt=<new1>
1407
- --resolution=64
1408
- --train_batch_size=1
1409
- --modifier_token=<new1>
1410
- --dataloader_num_workers=0
1411
- --max_train_steps=11
1412
- --checkpointing_steps=2
1413
- --resume_from_checkpoint=checkpoint-8
1414
- --checkpoints_total_limit=3
1415
- """.split()
1416
-
1417
- run_command(self._launch_args + resume_run_args)
1418
-
1419
- self.assertEqual(
1420
- {x for x in os.listdir(tmpdir) if "checkpoint" in x},
1421
- {"checkpoint-6", "checkpoint-8", "checkpoint-10"},
1422
- )
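For orientation, a stripped-down sketch of the launch-and-smoke-check pattern these tests follow; the script name and flags below are placeholders rather than anything taken from the repository:

# Standalone version of the "accelerate launch + smoke check" pattern used above.
# my_train_script.py and its flags are hypothetical.
import os
import subprocess
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    cmd = ["accelerate", "launch", "my_train_script.py",
           "--max_train_steps", "2", "--output_dir", tmpdir]
    subprocess.check_output(cmd, stderr=subprocess.STDOUT)  # raises on a non-zero exit
    assert os.path.isdir(tmpdir)  # then assert on whatever artifacts the script should save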
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/README.md DELETED
@@ -1,3 +0,0 @@
- # Schedulers
-
- For more information on the schedulers, please refer to the [docs](https://huggingface.co/docs/diffusers/api/schedulers/overview).
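As a quick illustration of where these schedulers plug in, the sketch below swaps the scheduler on a loaded pipeline; the model id is a placeholder and the particular scheduler class is an assumption, not something this README prescribes:

# Sketch: replacing the sampling scheduler of a diffusers pipeline.
# "runwayml/stable-diffusion-v1-5" is only a placeholder model id.
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild the new scheduler from the pipeline's existing scheduler config.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images[0]
image.save("sample.png")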
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/trident_faster_rcnn.py DELETED
@@ -1,66 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .faster_rcnn import FasterRCNN
3
-
4
-
5
- @DETECTORS.register_module()
6
- class TridentFasterRCNN(FasterRCNN):
7
- """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
8
-
9
- def __init__(self,
10
- backbone,
11
- rpn_head,
12
- roi_head,
13
- train_cfg,
14
- test_cfg,
15
- neck=None,
16
- pretrained=None):
17
-
18
- super(TridentFasterRCNN, self).__init__(
19
- backbone=backbone,
20
- neck=neck,
21
- rpn_head=rpn_head,
22
- roi_head=roi_head,
23
- train_cfg=train_cfg,
24
- test_cfg=test_cfg,
25
- pretrained=pretrained)
26
- assert self.backbone.num_branch == self.roi_head.num_branch
27
- assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
28
- self.num_branch = self.backbone.num_branch
29
- self.test_branch_idx = self.backbone.test_branch_idx
30
-
31
- def simple_test(self, img, img_metas, proposals=None, rescale=False):
32
- """Test without augmentation."""
33
- assert self.with_bbox, 'Bbox head must be implemented.'
34
- x = self.extract_feat(img)
35
- if proposals is None:
36
- num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
37
- trident_img_metas = img_metas * num_branch
38
- proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
39
- else:
40
- proposal_list = proposals
41
-
42
- return self.roi_head.simple_test(
43
- x, proposal_list, trident_img_metas, rescale=rescale)
44
-
45
- def aug_test(self, imgs, img_metas, rescale=False):
46
- """Test with augmentations.
47
-
48
- If rescale is False, then returned bboxes and masks will fit the scale
49
- of imgs[0].
50
- """
51
- x = self.extract_feats(imgs)
52
- num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
53
- trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
54
- proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
55
- return self.roi_head.aug_test(
56
- x, proposal_list, img_metas, rescale=rescale)
57
-
58
- def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
59
- """make copies of img and gts to fit multi-branch."""
60
- trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
61
- trident_gt_labels = tuple(gt_labels * self.num_branch)
62
- trident_img_metas = tuple(img_metas * self.num_branch)
63
-
64
- return super(TridentFasterRCNN,
65
- self).forward_train(img, trident_img_metas,
66
- trident_gt_bboxes, trident_gt_labels)
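
A quick way to see what the forward_train override above actually does with its inputs is to replay the list duplication outside of mmdet. The sketch below is a standalone stand-in: the plain dicts and lists are illustrative assumptions, not the real img_metas / gt_bboxes structures.

# Standalone sketch: how `list * num_branch` replicates the per-image inputs.
num_branch = 3  # TridentNet typically uses three branches

img_metas = [{"filename": "a.jpg"}, {"filename": "b.jpg"}]   # stand-in metas
gt_bboxes = [[0, 0, 10, 10], [5, 5, 20, 20]]                 # stand-in boxes

# `list * n` concatenates n copies, so every branch sees the same two images.
trident_img_metas = tuple(img_metas * num_branch)
trident_gt_bboxes = tuple(gt_bboxes * num_branch)

assert len(trident_img_metas) == len(img_metas) * num_branch   # 6
assert len(trident_gt_bboxes) == len(gt_bboxes) * num_branch   # 6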
 
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/css/chat_style-cai-chat.css DELETED
@@ -1,59 +0,0 @@
1
- .message {
2
- display: grid;
3
- grid-template-columns: 60px minmax(0, 1fr);
4
- padding-bottom: 25px;
5
- font-size: 15px;
6
- font-family: 'Noto Sans', Helvetica, Arial, sans-serif;
7
- line-height: 23px !important;
8
- }
9
-
10
- .circle-you {
11
- width: 50px;
12
- height: 50px;
13
- background-color: rgb(238, 78, 59);
14
- border-radius: 50%;
15
- }
16
-
17
- .circle-bot {
18
- width: 50px;
19
- height: 50px;
20
- background-color: rgb(59, 78, 244);
21
- border-radius: 50%;
22
- }
23
-
24
- .circle-bot img,
25
- .circle-you img {
26
- border-radius: 50%;
27
- width: 100%;
28
- height: 100%;
29
- object-fit: cover;
30
- }
31
-
32
- .text p {
33
- margin-top: 5px;
34
- }
35
-
36
- .username {
37
- font-weight: bold;
38
- }
39
-
40
- .message-body img {
41
- max-width: 300px;
42
- max-height: 300px;
43
- border-radius: 20px;
44
- }
45
-
46
- .message-body p {
47
- margin-bottom: 0 !important;
48
- font-size: 15px !important;
49
- line-height: 23px !important;
50
- }
51
-
52
- .dark .message-body p em {
53
- color: rgb(138, 138, 138) !important;
54
- }
55
-
56
- .message-body p em {
57
- color: rgb(110, 110, 110) !important;
58
- font-weight: 500;
59
- }
 
 
 
 
spaces/Anustup/NS_AI_LABS/tests/vad_test.py DELETED
@@ -1,66 +0,0 @@
1
- import pprint
2
- import unittest
3
- import numpy as np
4
- import sys
5
-
6
- sys.path.append('../NS_AI_LABS')
7
-
8
- from src.vad import AbstractTranscription, VadSileroTranscription
9
-
10
- class TestVad(unittest.TestCase):
11
- def __init__(self, *args, **kwargs):
12
- super(TestVad, self).__init__(*args, **kwargs)
13
- self.transcribe_calls = []
14
-
15
- def test_transcript(self):
16
- mock = MockVadTranscription()
17
-
18
- self.transcribe_calls.clear()
19
- result = mock.transcribe("mock", lambda segment : self.transcribe_segments(segment))
20
-
21
- self.assertListEqual(self.transcribe_calls, [
22
- [30, 30],
23
- [100, 100]
24
- ])
25
-
26
- self.assertListEqual(result['segments'],
27
- [{'end': 50.0, 'start': 40.0, 'text': 'Hello world '},
28
- {'end': 120.0, 'start': 110.0, 'text': 'Hello world '}]
29
- )
30
-
31
- def transcribe_segments(self, segment):
32
- self.transcribe_calls.append(segment.tolist())
33
-
34
- # Dummy text
35
- return {
36
- 'text': "Hello world ",
37
- 'segments': [
38
- {
39
- "start": 10.0,
40
- "end": 20.0,
41
- "text": "Hello world "
42
- }
43
- ],
44
- 'language': ""
45
- }
46
-
47
- class MockVadTranscription(AbstractTranscription):
48
- def __init__(self):
49
- super().__init__()
50
-
51
- def get_audio_segment(self, str, start_time: str = None, duration: str = None):
52
- start_time_seconds = float(start_time.removesuffix("s"))
53
- duration_seconds = float(duration.removesuffix("s"))
54
-
55
- # For mocking, this just returns a simple numppy array
56
- return np.array([start_time_seconds, duration_seconds], dtype=np.float64)
57
-
58
- def get_transcribe_timestamps(self, audio: str):
59
- result = []
60
-
61
- result.append( { 'start': 30, 'end': 60 } )
62
- result.append( { 'start': 100, 'end': 200 } )
63
- return result
64
-
65
- if __name__ == '__main__':
66
- unittest.main()
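
For readers skimming the mock above: get_audio_segment receives its start and duration as strings that presumably look like "30s" (hence the removesuffix calls). The snippet below just replays that parsing on its own; it assumes Python 3.9+ for str.removesuffix and a local numpy install.

import numpy as np

start_time, duration = "30s", "30s"                        # assumed caller format
start_time_seconds = float(start_time.removesuffix("s"))   # 30.0
duration_seconds = float(duration.removesuffix("s"))       # 30.0

segment = np.array([start_time_seconds, duration_seconds], dtype=np.float64)
print(segment.tolist())  # [30.0, 30.0] -- matches the first call the test expects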
 
 
 
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/image_generation.py DELETED
@@ -1,363 +0,0 @@
1
- import json
2
- import math
3
- import random
4
- import time
5
- from pathlib import Path
6
- from uuid import uuid4
7
-
8
- import torch
9
- from diffusers import __version__ as diffusers_version
10
- from huggingface_hub import CommitOperationAdd, create_commit, create_repo
11
-
12
- from .upsampling import RealESRGANModel
13
- from .utils import pad_along_axis
14
-
15
-
16
- def get_all_files(root: Path):
17
- dirs = [root]
18
- while len(dirs) > 0:
19
- dir = dirs.pop()
20
- for candidate in dir.iterdir():
21
- if candidate.is_file():
22
- yield candidate
23
- if candidate.is_dir():
24
- dirs.append(candidate)
25
-
26
-
27
- def get_groups_of_n(n: int, iterator):
28
- assert n > 1
29
- buffer = []
30
- for elt in iterator:
31
- if len(buffer) == n:
32
- yield buffer
33
- buffer = []
34
- buffer.append(elt)
35
- if len(buffer) != 0:
36
- yield buffer
37
-
38
-
39
- def upload_folder_chunked(
40
- repo_id: str,
41
- upload_dir: Path,
42
- n: int = 100,
43
- private: bool = False,
44
- create_pr: bool = False,
45
- ):
46
- """Upload a folder to the Hugging Face Hub in chunks of n files at a time.
47
- Args:
48
- repo_id (str): The repo id to upload to.
49
- upload_dir (Path): The directory to upload.
50
- n (int, *optional*, defaults to 100): The number of files to upload at a time.
51
- private (bool, *optional*): Whether to upload the repo as private.
52
- create_pr (bool, *optional*): Whether to create a PR after uploading instead of committing directly.
53
- """
54
-
55
- url = create_repo(repo_id, exist_ok=True, private=private, repo_type="dataset")
56
- print(f"Uploading files to: {url}")
57
-
58
- root = Path(upload_dir)
59
- if not root.exists():
60
- raise ValueError(f"Upload directory {root} does not exist.")
61
-
62
- for i, file_paths in enumerate(get_groups_of_n(n, get_all_files(root))):
63
- print(f"Committing {file_paths}")
64
- operations = [
65
- CommitOperationAdd(
66
- path_in_repo=f"{file_path.parent.name}/{file_path.name}",
67
- path_or_fileobj=str(file_path),
68
- )
69
- for file_path in file_paths
70
- ]
71
- create_commit(
72
- repo_id=repo_id,
73
- operations=operations,
74
- commit_message=f"Upload part {i}",
75
- repo_type="dataset",
76
- create_pr=create_pr,
77
- )
78
-
79
-
80
- def generate_input_batches(pipeline, prompts, seeds, batch_size, height, width):
81
- if len(prompts) != len(seeds):
82
- raise ValueError("Number of prompts and seeds must be equal.")
83
-
84
- embeds_batch, noise_batch = None, None
85
- batch_idx = 0
86
- for i, (prompt, seed) in enumerate(zip(prompts, seeds)):
87
- embeds = pipeline.embed_text(prompt)
88
- noise = torch.randn(
89
- (1, pipeline.unet.in_channels, height // 8, width // 8),
90
- device=pipeline.device,
91
- generator=torch.Generator(device="cpu" if pipeline.device.type == "mps" else pipeline.device).manual_seed(
92
- seed
93
- ),
94
- )
95
- embeds_batch = embeds if embeds_batch is None else torch.cat([embeds_batch, embeds])
96
- noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise])
97
- batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == len(prompts)
98
- if not batch_is_ready:
99
- continue
100
- yield batch_idx, embeds_batch.type(torch.cuda.HalfTensor), noise_batch.type(torch.cuda.HalfTensor)
101
- batch_idx += 1
102
- del embeds_batch, noise_batch
103
- torch.cuda.empty_cache()
104
- embeds_batch, noise_batch = None, None
105
-
106
-
107
- def generate_images(
108
- pipeline,
109
- prompt,
110
- batch_size=1,
111
- num_batches=1,
112
- seeds=None,
113
- num_inference_steps=50,
114
- guidance_scale=7.5,
115
- output_dir="./images",
116
- image_file_ext=".jpg",
117
- upsample=False,
118
- height=512,
119
- width=512,
120
- eta=0.0,
121
- push_to_hub=False,
122
- repo_id=None,
123
- private=False,
124
- create_pr=False,
125
- name=None,
126
- ):
127
- """Generate images using the StableDiffusion pipeline.
128
- Args:
129
- pipeline (StableDiffusionWalkPipeline): The StableDiffusion pipeline instance.
130
- prompt (str): The prompt to use for the image generation.
131
- batch_size (int, *optional*, defaults to 1): The batch size to use for image generation.
132
- num_batches (int, *optional*, defaults to 1): The number of batches to generate.
133
- seeds (list[int], *optional*): The seeds to use for the image generation.
134
- num_inference_steps (int, *optional*, defaults to 50): The number of inference steps to take.
135
- guidance_scale (float, *optional*, defaults to 7.5): The guidance scale to use for image generation.
136
- output_dir (str, *optional*, defaults to "./images"): The output directory to save the images to.
137
- image_file_ext (str, *optional*, defaults to '.jpg'): The image file extension to use.
138
- upsample (bool, *optional*, defaults to False): Whether to upsample the images.
139
- height (int, *optional*, defaults to 512): The height of the images to generate.
140
- width (int, *optional*, defaults to 512): The width of the images to generate.
141
- eta (float, *optional*, defaults to 0.0): The eta parameter to use for image generation.
142
- push_to_hub (bool, *optional*, defaults to False): Whether to push the generated images to the Hugging Face Hub.
143
- repo_id (str, *optional*): The repo id to push the images to.
144
- private (bool, *optional*): Whether to push the repo as private.
145
- create_pr (bool, *optional*): Whether to create a PR after pushing instead of committing directly.
146
- name (str, *optional*, defaults to current timestamp str): The name of the sub-directory of
147
- output_dir to save the images to.
148
- """
149
- if push_to_hub:
150
- if repo_id is None:
151
- raise ValueError("Must provide repo_id if push_to_hub is True.")
152
-
153
- name = name or time.strftime("%Y%m%d-%H%M%S")
154
- save_path = Path(output_dir) / name
155
- save_path.mkdir(exist_ok=False, parents=True)
156
- prompt_config_path = save_path / "prompt_config.json"
157
-
158
- num_images = batch_size * num_batches
159
- seeds = seeds or [random.choice(list(range(0, 9999999))) for _ in range(num_images)]
160
- if len(seeds) != num_images:
161
- raise ValueError("Number of seeds must be equal to batch_size * num_batches.")
162
-
163
- if upsample:
164
- if getattr(pipeline, "upsampler", None) is None:
165
- pipeline.upsampler = RealESRGANModel.from_pretrained("nateraw/real-esrgan")
166
- pipeline.upsampler.to(pipeline.device)
167
-
168
- cfg = dict(
169
- prompt=prompt,
170
- guidance_scale=guidance_scale,
171
- eta=eta,
172
- num_inference_steps=num_inference_steps,
173
- upsample=upsample,
174
- height=height,
175
- width=width,
176
- scheduler=dict(pipeline.scheduler.config),
177
- tiled=pipeline.tiled,
178
- diffusers_version=diffusers_version,
179
- device_name=torch.cuda.get_device_name(0) if torch.cuda.is_available() else "unknown",
180
- )
181
- prompt_config_path.write_text(json.dumps(cfg, indent=2, sort_keys=False))
182
-
183
- frame_index = 0
184
- frame_filepaths = []
185
- for batch_idx, embeds, noise in generate_input_batches(
186
- pipeline, [prompt] * num_images, seeds, batch_size, height, width
187
- ):
188
- print(f"Generating batch {batch_idx}")
189
-
190
- outputs = pipeline(
191
- text_embeddings=embeds,
192
- latents=noise,
193
- num_inference_steps=num_inference_steps,
194
- guidance_scale=guidance_scale,
195
- eta=eta,
196
- height=height,
197
- width=width,
198
- output_type="pil" if not upsample else "numpy",
199
- )["images"]
200
- if upsample:
201
- images = []
202
- for output in outputs:
203
- images.append(pipeline.upsampler(output))
204
- else:
205
- images = outputs
206
-
207
- for image in images:
208
- frame_filepath = save_path / f"{seeds[frame_index]}{image_file_ext}"
209
- image.save(frame_filepath)
210
- frame_filepaths.append(str(frame_filepath))
211
- frame_index += 1
212
-
213
- if push_to_hub:
214
- upload_folder_chunked(repo_id, save_path, private=private, create_pr=create_pr)
215
-
216
- return frame_filepaths
217
-
218
-
219
- def generate_images_flax(
220
- pipeline,
221
- params,
222
- prompt,
223
- batch_size=1,
224
- num_batches=1,
225
- seeds=None,
226
- num_inference_steps=50,
227
- guidance_scale=7.5,
228
- output_dir="./images",
229
- image_file_ext=".jpg",
230
- upsample=False,
231
- height=512,
232
- width=512,
233
- push_to_hub=False,
234
- repo_id=None,
235
- private=False,
236
- create_pr=False,
237
- name=None,
238
- ):
239
- import jax
240
- from flax.training.common_utils import shard
241
-
242
- """Generate images using the StableDiffusion pipeline.
243
- Args:
244
- pipeline (StableDiffusionWalkPipeline): The StableDiffusion pipeline instance.
245
- params (`Union[Dict, FrozenDict]`): The model parameters.
246
- prompt (str): The prompt to use for the image generation.
247
- batch_size (int, *optional*, defaults to 1): The batch size to use for image generation.
248
- num_batches (int, *optional*, defaults to 1): The number of batches to generate.
249
- seeds (int, *optional*): The seed to use for the image generation.
250
- num_inference_steps (int, *optional*, defaults to 50): The number of inference steps to take.
251
- guidance_scale (float, *optional*, defaults to 7.5): The guidance scale to use for image generation.
252
- output_dir (str, *optional*, defaults to "./images"): The output directory to save the images to.
253
- image_file_ext (str, *optional*, defaults to '.jpg'): The image file extension to use.
254
- upsample (bool, *optional*, defaults to False): Whether to upsample the images.
255
- height (int, *optional*, defaults to 512): The height of the images to generate.
256
- width (int, *optional*, defaults to 512): The width of the images to generate.
257
- push_to_hub (bool, *optional*, defaults to False): Whether to push the generated images to the Hugging Face Hub.
258
- repo_id (str, *optional*): The repo id to push the images to.
259
- private (bool, *optional*): Whether to push the repo as private.
260
- create_pr (bool, *optional*): Whether to create a PR after pushing instead of committing directly.
261
- name (str, *optional*, defaults to current timestamp str): The name of the sub-directory of
262
- output_dir to save the images to.
263
- """
264
- if push_to_hub:
265
- if repo_id is None:
266
- raise ValueError("Must provide repo_id if push_to_hub is True.")
267
-
268
- name = name or time.strftime("%Y%m%d-%H%M%S")
269
- save_path = Path(output_dir) / name
270
- save_path.mkdir(exist_ok=False, parents=True)
271
- prompt_config_path = save_path / "prompt_config.json"
272
-
273
- num_images = batch_size * num_batches
274
- seeds = seeds or random.choice(list(range(0, 9999999)))
275
- prng_seed = jax.random.PRNGKey(seeds)
276
-
277
- if upsample:
278
- if getattr(pipeline, "upsampler", None) is None:
279
- pipeline.upsampler = RealESRGANModel.from_pretrained("nateraw/real-esrgan")
280
- if not torch.cuda.is_available():
281
- print("Upsampling is recommended to be done on a GPU, as it is very slow on CPU")
282
- else:
283
- pipeline.upsampler = pipeline.upsampler.cuda()
284
-
285
- cfg = dict(
286
- prompt=prompt,
287
- guidance_scale=guidance_scale,
288
- num_inference_steps=num_inference_steps,
289
- upsample=upsample,
290
- height=height,
291
- width=width,
292
- scheduler=dict(pipeline.scheduler.config),
293
- # tiled=pipeline.tiled,
294
- diffusers_version=diffusers_version,
295
- device_name=torch.cuda.get_device_name(0) if torch.cuda.is_available() else "unknown",
296
- )
297
- prompt_config_path.write_text(json.dumps(cfg, indent=2, sort_keys=False))
298
-
299
- NUM_TPU_CORES = jax.device_count()
300
- jit = True # force jit, assume params are already sharded
301
- batch_size_total = NUM_TPU_CORES * batch_size if jit else batch_size
302
-
303
- def generate_input_batches(prompts, batch_size):
304
- prompt_batch = None
305
- for batch_idx in range(math.ceil(len(prompts) / batch_size)):
306
- prompt_batch = prompts[batch_idx * batch_size : (batch_idx + 1) * batch_size]
307
- yield batch_idx, prompt_batch
308
-
309
- frame_index = 0
310
- frame_filepaths = []
311
- for batch_idx, prompt_batch in generate_input_batches([prompt] * num_images, batch_size_total):
312
- # This batch size corresponds to each TPU core, so we are generating batch_size * NUM_TPU_CORES images
313
- print(f"Generating batches: {batch_idx*NUM_TPU_CORES} - {min((batch_idx+1)*NUM_TPU_CORES, num_batches)}")
314
- prompt_ids_batch = pipeline.prepare_inputs(prompt_batch)
315
- prng_seed_batch = prng_seed
316
-
317
- if jit:
318
- padded = False
319
- # Check if len of prompt_batch is multiple of NUM_TPU_CORES, if not pad its ids
320
- if len(prompt_batch) % NUM_TPU_CORES != 0:
321
- padded = True
322
- pad_size = NUM_TPU_CORES - (len(prompt_batch) % NUM_TPU_CORES)
323
- # Pad prompt_ids_batch with zeros in the batch dimension
324
- prompt_ids_batch = pad_along_axis(prompt_ids_batch, pad_size, axis=0)
325
-
326
- prompt_ids_batch = shard(prompt_ids_batch)
327
- prng_seed_batch = jax.random.split(prng_seed, jax.device_count())
328
-
329
- outputs = pipeline(
330
- params,
331
- prng_seed=prng_seed_batch,
332
- prompt_ids=prompt_ids_batch,
333
- height=height,
334
- width=width,
335
- guidance_scale=guidance_scale,
336
- num_inference_steps=num_inference_steps,
337
- output_type="pil" if not upsample else "numpy",
338
- jit=jit,
339
- )["images"]
340
-
341
- if jit:
342
- # check if we padded and remove that padding from outputs
343
- if padded:
344
- outputs = outputs[:-pad_size]
345
-
346
- if upsample:
347
- images = []
348
- for output in outputs:
349
- images.append(pipeline.upsampler(output))
350
- else:
351
- images = outputs
352
-
353
- for image in images:
354
- uuid = str(uuid4())
355
- frame_filepath = save_path / f"{uuid}{image_file_ext}"
356
- image.save(frame_filepath)
357
- frame_filepaths.append(str(frame_filepath))
358
- frame_index += 1
359
-
360
- if push_to_hub:
361
- upload_folder_chunked(repo_id, save_path, private=private, create_pr=create_pr)
362
-
363
- return frame_filepaths
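
For orientation, a hypothetical call to generate_images is sketched below. The import paths and the checkpoint id are assumptions (this module is a vendored copy of the stable-diffusion-videos code, so the pipeline class may live elsewhere in this repo), not something stated in the file above.

import torch
# Assumed import locations -- adjust to the actual package layout.
from video_diffusion.stable_diffusion_video.image_generation import generate_images
from stable_diffusion_videos import StableDiffusionWalkPipeline

pipeline = StableDiffusionWalkPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",   # assumed checkpoint; any SD 1.x model id should work
    torch_dtype=torch.float16,
).to("cuda")

image_paths = generate_images(
    pipeline,
    prompt="a watercolor painting of a lighthouse",
    batch_size=2,
    num_batches=3,               # 2 * 3 = 6 images, one random seed each
    num_inference_steps=30,
    guidance_scale=7.5,
    output_dir="./images",
    upsample=False,              # True lazily loads the RealESRGAN upsampler
)
print(image_paths)               # paths of the saved .jpg files, named after their seeds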
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/isatty_test.py DELETED
@@ -1,57 +0,0 @@
1
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
2
- import sys
3
- from unittest import TestCase, main
4
-
5
- from ..ansitowin32 import StreamWrapper, AnsiToWin32
6
- from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY
7
-
8
-
9
- def is_a_tty(stream):
10
- return StreamWrapper(stream, None).isatty()
11
-
12
- class IsattyTest(TestCase):
13
-
14
- def test_TTY(self):
15
- tty = StreamTTY()
16
- self.assertTrue(is_a_tty(tty))
17
- with pycharm():
18
- self.assertTrue(is_a_tty(tty))
19
-
20
- def test_nonTTY(self):
21
- non_tty = StreamNonTTY()
22
- self.assertFalse(is_a_tty(non_tty))
23
- with pycharm():
24
- self.assertFalse(is_a_tty(non_tty))
25
-
26
- def test_withPycharm(self):
27
- with pycharm():
28
- self.assertTrue(is_a_tty(sys.stderr))
29
- self.assertTrue(is_a_tty(sys.stdout))
30
-
31
- def test_withPycharmTTYOverride(self):
32
- tty = StreamTTY()
33
- with pycharm(), replace_by(tty):
34
- self.assertTrue(is_a_tty(tty))
35
-
36
- def test_withPycharmNonTTYOverride(self):
37
- non_tty = StreamNonTTY()
38
- with pycharm(), replace_by(non_tty):
39
- self.assertFalse(is_a_tty(non_tty))
40
-
41
- def test_withPycharmNoneOverride(self):
42
- with pycharm():
43
- with replace_by(None), replace_original_by(None):
44
- self.assertFalse(is_a_tty(None))
45
- self.assertFalse(is_a_tty(StreamNonTTY()))
46
- self.assertTrue(is_a_tty(StreamTTY()))
47
-
48
- def test_withPycharmStreamWrapped(self):
49
- with pycharm():
50
- self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty())
51
- self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty())
52
- self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty())
53
- self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty())
54
-
55
-
56
- if __name__ == '__main__':
57
- main()
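
The helper under test is small enough to exercise by hand. A minimal sketch, assuming colorama is installed and leaving aside the PyCharm-override cases the suite covers:

import io
import sys
from colorama.ansitowin32 import StreamWrapper

def is_a_tty(stream):
    # Same helper as in the test: StreamWrapper delegates isatty() to the wrapped stream.
    return StreamWrapper(stream, None).isatty()

print(is_a_tty(sys.stdout))      # True in an interactive terminal, False when piped
print(is_a_tty(io.StringIO()))   # False: StringIO.isatty() is always False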
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/dist.py DELETED
@@ -1,1286 +0,0 @@
1
- """distutils.dist
2
-
3
- Provides the Distribution class, which represents the module distribution
4
- being built/installed/distributed.
5
- """
6
-
7
- import sys
8
- import os
9
- import re
10
- import pathlib
11
- import contextlib
12
- from email import message_from_file
13
-
14
- try:
15
- import warnings
16
- except ImportError:
17
- warnings = None
18
-
19
- from distutils.errors import (
20
- DistutilsOptionError,
21
- DistutilsModuleError,
22
- DistutilsArgError,
23
- DistutilsClassError,
24
- )
25
- from distutils.fancy_getopt import FancyGetopt, translate_longopt
26
- from distutils.util import check_environ, strtobool, rfc822_escape
27
- from distutils import log
28
- from distutils.debug import DEBUG
29
-
30
- # Regex to define acceptable Distutils command names. This is not *quite*
31
- # the same as a Python NAME -- I don't allow leading underscores. The fact
32
- # that they're very similar is no coincidence; the default naming scheme is
33
- # to look for a Python module named after the command.
34
- command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
35
-
36
-
37
- def _ensure_list(value, fieldname):
38
- if isinstance(value, str):
39
- # a string containing comma separated values is okay. It will
40
- # be converted to a list by Distribution.finalize_options().
41
- pass
42
- elif not isinstance(value, list):
43
- # passing a tuple or an iterator perhaps, warn and convert
44
- typename = type(value).__name__
45
- msg = "Warning: '{fieldname}' should be a list, got type '{typename}'"
46
- msg = msg.format(**locals())
47
- log.log(log.WARN, msg)
48
- value = list(value)
49
- return value
50
-
51
-
52
- class Distribution:
53
- """The core of the Distutils. Most of the work hiding behind 'setup'
54
- is really done within a Distribution instance, which farms the work out
55
- to the Distutils commands specified on the command line.
56
-
57
- Setup scripts will almost never instantiate Distribution directly,
58
- unless the 'setup()' function is totally inadequate to their needs.
59
- However, it is conceivable that a setup script might wish to subclass
60
- Distribution for some specialized purpose, and then pass the subclass
61
- to 'setup()' as the 'distclass' keyword argument. If so, it is
62
- necessary to respect the expectations that 'setup' has of Distribution.
63
- See the code for 'setup()', in core.py, for details.
64
- """
65
-
66
- # 'global_options' describes the command-line options that may be
67
- # supplied to the setup script prior to any actual commands.
68
- # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
69
- # these global options. This list should be kept to a bare minimum,
70
- # since every global option is also valid as a command option -- and we
71
- # don't want to pollute the commands with too many options that they
72
- # have minimal control over.
73
- # The fourth entry for verbose means that it can be repeated.
74
- global_options = [
75
- ('verbose', 'v', "run verbosely (default)", 1),
76
- ('quiet', 'q', "run quietly (turns verbosity off)"),
77
- ('dry-run', 'n', "don't actually do anything"),
78
- ('help', 'h', "show detailed help message"),
79
- ('no-user-cfg', None, 'ignore pydistutils.cfg in your home directory'),
80
- ]
81
-
82
- # 'common_usage' is a short (2-3 line) string describing the common
83
- # usage of the setup script.
84
- common_usage = """\
85
- Common commands: (see '--help-commands' for more)
86
-
87
- setup.py build will build the package underneath 'build/'
88
- setup.py install will install the package
89
- """
90
-
91
- # options that are not propagated to the commands
92
- display_options = [
93
- ('help-commands', None, "list all available commands"),
94
- ('name', None, "print package name"),
95
- ('version', 'V', "print package version"),
96
- ('fullname', None, "print <package name>-<version>"),
97
- ('author', None, "print the author's name"),
98
- ('author-email', None, "print the author's email address"),
99
- ('maintainer', None, "print the maintainer's name"),
100
- ('maintainer-email', None, "print the maintainer's email address"),
101
- ('contact', None, "print the maintainer's name if known, else the author's"),
102
- (
103
- 'contact-email',
104
- None,
105
- "print the maintainer's email address if known, else the author's",
106
- ),
107
- ('url', None, "print the URL for this package"),
108
- ('license', None, "print the license of the package"),
109
- ('licence', None, "alias for --license"),
110
- ('description', None, "print the package description"),
111
- ('long-description', None, "print the long package description"),
112
- ('platforms', None, "print the list of platforms"),
113
- ('classifiers', None, "print the list of classifiers"),
114
- ('keywords', None, "print the list of keywords"),
115
- ('provides', None, "print the list of packages/modules provided"),
116
- ('requires', None, "print the list of packages/modules required"),
117
- ('obsoletes', None, "print the list of packages/modules made obsolete"),
118
- ]
119
- display_option_names = [translate_longopt(x[0]) for x in display_options]
120
-
121
- # negative options are options that exclude other options
122
- negative_opt = {'quiet': 'verbose'}
123
-
124
- # -- Creation/initialization methods -------------------------------
125
-
126
- def __init__(self, attrs=None): # noqa: C901
127
- """Construct a new Distribution instance: initialize all the
128
- attributes of a Distribution, and then use 'attrs' (a dictionary
129
- mapping attribute names to values) to assign some of those
130
- attributes their "real" values. (Any attributes not mentioned in
131
- 'attrs' will be assigned to some null value: 0, None, an empty list
132
- or dictionary, etc.) Most importantly, initialize the
133
- 'command_obj' attribute to the empty dictionary; this will be
134
- filled in with real command objects by 'parse_command_line()'.
135
- """
136
-
137
- # Default values for our command-line options
138
- self.verbose = 1
139
- self.dry_run = 0
140
- self.help = 0
141
- for attr in self.display_option_names:
142
- setattr(self, attr, 0)
143
-
144
- # Store the distribution meta-data (name, version, author, and so
145
- # forth) in a separate object -- we're getting to have enough
146
- # information here (and enough command-line options) that it's
147
- # worth it. Also delegate 'get_XXX()' methods to the 'metadata'
148
- # object in a sneaky and underhanded (but efficient!) way.
149
- self.metadata = DistributionMetadata()
150
- for basename in self.metadata._METHOD_BASENAMES:
151
- method_name = "get_" + basename
152
- setattr(self, method_name, getattr(self.metadata, method_name))
153
-
154
- # 'cmdclass' maps command names to class objects, so we
155
- # can 1) quickly figure out which class to instantiate when
156
- # we need to create a new command object, and 2) have a way
157
- # for the setup script to override command classes
158
- self.cmdclass = {}
159
-
160
- # 'command_packages' is a list of packages in which commands
161
- # are searched for. The factory for command 'foo' is expected
162
- # to be named 'foo' in the module 'foo' in one of the packages
163
- # named here. This list is searched from the left; an error
164
- # is raised if no named package provides the command being
165
- # searched for. (Always access using get_command_packages().)
166
- self.command_packages = None
167
-
168
- # 'script_name' and 'script_args' are usually set to sys.argv[0]
169
- # and sys.argv[1:], but they can be overridden when the caller is
170
- # not necessarily a setup script run from the command-line.
171
- self.script_name = None
172
- self.script_args = None
173
-
174
- # 'command_options' is where we store command options between
175
- # parsing them (from config files, the command-line, etc.) and when
176
- # they are actually needed -- ie. when the command in question is
177
- # instantiated. It is a dictionary of dictionaries of 2-tuples:
178
- # command_options = { command_name : { option : (source, value) } }
179
- self.command_options = {}
180
-
181
- # 'dist_files' is the list of (command, pyversion, file) that
182
- # have been created by any dist commands run so far. This is
183
- # filled regardless of whether the run is dry or not. pyversion
184
- # gives sysconfig.get_python_version() if the dist file is
185
- # specific to a Python version, 'any' if it is good for all
186
- # Python versions on the target platform, and '' for a source
187
- # file. pyversion should not be used to specify minimum or
188
- # maximum required Python versions; use the metainfo for that
189
- # instead.
190
- self.dist_files = []
191
-
192
- # These options are really the business of various commands, rather
193
- # than of the Distribution itself. We provide aliases for them in
194
- # Distribution as a convenience to the developer.
195
- self.packages = None
196
- self.package_data = {}
197
- self.package_dir = None
198
- self.py_modules = None
199
- self.libraries = None
200
- self.headers = None
201
- self.ext_modules = None
202
- self.ext_package = None
203
- self.include_dirs = None
204
- self.extra_path = None
205
- self.scripts = None
206
- self.data_files = None
207
- self.password = ''
208
-
209
- # And now initialize bookkeeping stuff that can't be supplied by
210
- # the caller at all. 'command_obj' maps command names to
211
- # Command instances -- that's how we enforce that every command
212
- # class is a singleton.
213
- self.command_obj = {}
214
-
215
- # 'have_run' maps command names to boolean values; it keeps track
216
- # of whether we have actually run a particular command, to make it
217
- # cheap to "run" a command whenever we think we might need to -- if
218
- # it's already been done, no need for expensive filesystem
219
- # operations, we just check the 'have_run' dictionary and carry on.
220
- # It's only safe to query 'have_run' for a command class that has
221
- # been instantiated -- a false value will be inserted when the
222
- # command object is created, and replaced with a true value when
223
- # the command is successfully run. Thus it's probably best to use
224
- # '.get()' rather than a straight lookup.
225
- self.have_run = {}
226
-
227
- # Now we'll use the attrs dictionary (ultimately, keyword args from
228
- # the setup script) to possibly override any or all of these
229
- # distribution options.
230
-
231
- if attrs:
232
- # Pull out the set of command options and work on them
233
- # specifically. Note that this order guarantees that aliased
234
- # command options will override any supplied redundantly
235
- # through the general options dictionary.
236
- options = attrs.get('options')
237
- if options is not None:
238
- del attrs['options']
239
- for (command, cmd_options) in options.items():
240
- opt_dict = self.get_option_dict(command)
241
- for (opt, val) in cmd_options.items():
242
- opt_dict[opt] = ("setup script", val)
243
-
244
- if 'licence' in attrs:
245
- attrs['license'] = attrs['licence']
246
- del attrs['licence']
247
- msg = "'licence' distribution option is deprecated; use 'license'"
248
- if warnings is not None:
249
- warnings.warn(msg)
250
- else:
251
- sys.stderr.write(msg + "\n")
252
-
253
- # Now work on the rest of the attributes. Any attribute that's
254
- # not already defined is invalid!
255
- for (key, val) in attrs.items():
256
- if hasattr(self.metadata, "set_" + key):
257
- getattr(self.metadata, "set_" + key)(val)
258
- elif hasattr(self.metadata, key):
259
- setattr(self.metadata, key, val)
260
- elif hasattr(self, key):
261
- setattr(self, key, val)
262
- else:
263
- msg = "Unknown distribution option: %s" % repr(key)
264
- warnings.warn(msg)
265
-
266
- # no-user-cfg is handled before other command line args
267
- # because other args override the config files, and this
268
- # one is needed before we can load the config files.
269
- # If attrs['script_args'] wasn't passed, assume false.
270
- #
271
- # This also makes sure we just look at the global options
272
- self.want_user_cfg = True
273
-
274
- if self.script_args is not None:
275
- for arg in self.script_args:
276
- if not arg.startswith('-'):
277
- break
278
- if arg == '--no-user-cfg':
279
- self.want_user_cfg = False
280
- break
281
-
282
- self.finalize_options()
283
-
284
- def get_option_dict(self, command):
285
- """Get the option dictionary for a given command. If that
286
- command's option dictionary hasn't been created yet, then create it
287
- and return the new dictionary; otherwise, return the existing
288
- option dictionary.
289
- """
290
- dict = self.command_options.get(command)
291
- if dict is None:
292
- dict = self.command_options[command] = {}
293
- return dict
294
-
295
- def dump_option_dicts(self, header=None, commands=None, indent=""):
296
- from pprint import pformat
297
-
298
- if commands is None: # dump all command option dicts
299
- commands = sorted(self.command_options.keys())
300
-
301
- if header is not None:
302
- self.announce(indent + header)
303
- indent = indent + " "
304
-
305
- if not commands:
306
- self.announce(indent + "no commands known yet")
307
- return
308
-
309
- for cmd_name in commands:
310
- opt_dict = self.command_options.get(cmd_name)
311
- if opt_dict is None:
312
- self.announce(indent + "no option dict for '%s' command" % cmd_name)
313
- else:
314
- self.announce(indent + "option dict for '%s' command:" % cmd_name)
315
- out = pformat(opt_dict)
316
- for line in out.split('\n'):
317
- self.announce(indent + " " + line)
318
-
319
- # -- Config file finding/parsing methods ---------------------------
320
-
321
- def find_config_files(self):
322
- """Find as many configuration files as should be processed for this
323
- platform, and return a list of filenames in the order in which they
324
- should be parsed. The filenames returned are guaranteed to exist
325
- (modulo nasty race conditions).
326
-
327
- There are multiple possible config files:
328
- - distutils.cfg in the Distutils installation directory (i.e.
329
- where the top-level Distutils __inst__.py file lives)
330
- - a file in the user's home directory named .pydistutils.cfg
331
- on Unix and pydistutils.cfg on Windows/Mac; may be disabled
332
- with the ``--no-user-cfg`` option
333
- - setup.cfg in the current directory
334
- - a file named by an environment variable
335
- """
336
- check_environ()
337
- files = [str(path) for path in self._gen_paths() if os.path.isfile(path)]
338
-
339
- if DEBUG:
340
- self.announce("using config files: %s" % ', '.join(files))
341
-
342
- return files
343
-
344
- def _gen_paths(self):
345
- # The system-wide Distutils config file
346
- sys_dir = pathlib.Path(sys.modules['distutils'].__file__).parent
347
- yield sys_dir / "distutils.cfg"
348
-
349
- # The per-user config file
350
- prefix = '.' * (os.name == 'posix')
351
- filename = prefix + 'pydistutils.cfg'
352
- if self.want_user_cfg:
353
- yield pathlib.Path('~').expanduser() / filename
354
-
355
- # All platforms support local setup.cfg
356
- yield pathlib.Path('setup.cfg')
357
-
358
- # Additional config indicated in the environment
359
- with contextlib.suppress(TypeError):
360
- yield pathlib.Path(os.getenv("DIST_EXTRA_CONFIG"))
361
-
362
- def parse_config_files(self, filenames=None): # noqa: C901
363
- from configparser import ConfigParser
364
-
365
- # Ignore install directory options if we have a venv
366
- if sys.prefix != sys.base_prefix:
367
- ignore_options = [
368
- 'install-base',
369
- 'install-platbase',
370
- 'install-lib',
371
- 'install-platlib',
372
- 'install-purelib',
373
- 'install-headers',
374
- 'install-scripts',
375
- 'install-data',
376
- 'prefix',
377
- 'exec-prefix',
378
- 'home',
379
- 'user',
380
- 'root',
381
- ]
382
- else:
383
- ignore_options = []
384
-
385
- ignore_options = frozenset(ignore_options)
386
-
387
- if filenames is None:
388
- filenames = self.find_config_files()
389
-
390
- if DEBUG:
391
- self.announce("Distribution.parse_config_files():")
392
-
393
- parser = ConfigParser()
394
- for filename in filenames:
395
- if DEBUG:
396
- self.announce(" reading %s" % filename)
397
- parser.read(filename)
398
- for section in parser.sections():
399
- options = parser.options(section)
400
- opt_dict = self.get_option_dict(section)
401
-
402
- for opt in options:
403
- if opt != '__name__' and opt not in ignore_options:
404
- val = parser.get(section, opt)
405
- opt = opt.replace('-', '_')
406
- opt_dict[opt] = (filename, val)
407
-
408
- # Make the ConfigParser forget everything (so we retain
409
- # the original filenames that options come from)
410
- parser.__init__()
411
-
412
- # If there was a "global" section in the config file, use it
413
- # to set Distribution options.
414
-
415
- if 'global' in self.command_options:
416
- for (opt, (src, val)) in self.command_options['global'].items():
417
- alias = self.negative_opt.get(opt)
418
- try:
419
- if alias:
420
- setattr(self, alias, not strtobool(val))
421
- elif opt in ('verbose', 'dry_run'): # ugh!
422
- setattr(self, opt, strtobool(val))
423
- else:
424
- setattr(self, opt, val)
425
- except ValueError as msg:
426
- raise DistutilsOptionError(msg)
427
-
428
- # -- Command-line parsing methods ----------------------------------
429
-
430
- def parse_command_line(self):
431
- """Parse the setup script's command line, taken from the
432
- 'script_args' instance attribute (which defaults to 'sys.argv[1:]'
433
- -- see 'setup()' in core.py). This list is first processed for
434
- "global options" -- options that set attributes of the Distribution
435
- instance. Then, it is alternately scanned for Distutils commands
436
- and options for that command. Each new command terminates the
437
- options for the previous command. The allowed options for a
438
- command are determined by the 'user_options' attribute of the
439
- command class -- thus, we have to be able to load command classes
440
- in order to parse the command line. Any error in that 'options'
441
- attribute raises DistutilsGetoptError; any error on the
442
- command-line raises DistutilsArgError. If no Distutils commands
443
- were found on the command line, raises DistutilsArgError. Return
444
- true if command-line was successfully parsed and we should carry
445
- on with executing commands; false if no errors but we shouldn't
446
- execute commands (currently, this only happens if user asks for
447
- help).
448
- """
449
- #
450
- # We now have enough information to show the Macintosh dialog
451
- # that allows the user to interactively specify the "command line".
452
- #
453
- toplevel_options = self._get_toplevel_options()
454
-
455
- # We have to parse the command line a bit at a time -- global
456
- # options, then the first command, then its options, and so on --
457
- # because each command will be handled by a different class, and
458
- # the options that are valid for a particular class aren't known
459
- # until we have loaded the command class, which doesn't happen
460
- # until we know what the command is.
461
-
462
- self.commands = []
463
- parser = FancyGetopt(toplevel_options + self.display_options)
464
- parser.set_negative_aliases(self.negative_opt)
465
- parser.set_aliases({'licence': 'license'})
466
- args = parser.getopt(args=self.script_args, object=self)
467
- option_order = parser.get_option_order()
468
- log.set_verbosity(self.verbose)
469
-
470
- # for display options we return immediately
471
- if self.handle_display_options(option_order):
472
- return
473
- while args:
474
- args = self._parse_command_opts(parser, args)
475
- if args is None: # user asked for help (and got it)
476
- return
477
-
478
- # Handle the cases of --help as a "global" option, ie.
479
- # "setup.py --help" and "setup.py --help command ...". For the
480
- # former, we show global options (--verbose, --dry-run, etc.)
481
- # and display-only options (--name, --version, etc.); for the
482
- # latter, we omit the display-only options and show help for
483
- # each command listed on the command line.
484
- if self.help:
485
- self._show_help(
486
- parser, display_options=len(self.commands) == 0, commands=self.commands
487
- )
488
- return
489
-
490
- # Oops, no commands found -- an end-user error
491
- if not self.commands:
492
- raise DistutilsArgError("no commands supplied")
493
-
494
- # All is well: return true
495
- return True
496
-
497
- def _get_toplevel_options(self):
498
- """Return the non-display options recognized at the top level.
499
-
500
- This includes options that are recognized *only* at the top
501
- level as well as options recognized for commands.
502
- """
503
- return self.global_options + [
504
- (
505
- "command-packages=",
506
- None,
507
- "list of packages that provide distutils commands",
508
- ),
509
- ]
510
-
511
- def _parse_command_opts(self, parser, args): # noqa: C901
512
- """Parse the command-line options for a single command.
513
- 'parser' must be a FancyGetopt instance; 'args' must be the list
514
- of arguments, starting with the current command (whose options
515
- we are about to parse). Returns a new version of 'args' with
516
- the next command at the front of the list; will be the empty
517
- list if there are no more commands on the command line. Returns
518
- None if the user asked for help on this command.
519
- """
520
- # late import because of mutual dependence between these modules
521
- from distutils.cmd import Command
522
-
523
- # Pull the current command from the head of the command line
524
- command = args[0]
525
- if not command_re.match(command):
526
- raise SystemExit("invalid command name '%s'" % command)
527
- self.commands.append(command)
528
-
529
- # Dig up the command class that implements this command, so we
530
- # 1) know that it's a valid command, and 2) know which options
531
- # it takes.
532
- try:
533
- cmd_class = self.get_command_class(command)
534
- except DistutilsModuleError as msg:
535
- raise DistutilsArgError(msg)
536
-
537
- # Require that the command class be derived from Command -- want
538
- # to be sure that the basic "command" interface is implemented.
539
- if not issubclass(cmd_class, Command):
540
- raise DistutilsClassError(
541
- "command class %s must subclass Command" % cmd_class
542
- )
543
-
544
- # Also make sure that the command object provides a list of its
545
- # known options.
546
- if not (
547
- hasattr(cmd_class, 'user_options')
548
- and isinstance(cmd_class.user_options, list)
549
- ):
550
- msg = (
551
- "command class %s must provide "
552
- "'user_options' attribute (a list of tuples)"
553
- )
554
- raise DistutilsClassError(msg % cmd_class)
555
-
556
- # If the command class has a list of negative alias options,
557
- # merge it in with the global negative aliases.
558
- negative_opt = self.negative_opt
559
- if hasattr(cmd_class, 'negative_opt'):
560
- negative_opt = negative_opt.copy()
561
- negative_opt.update(cmd_class.negative_opt)
562
-
563
- # Check for help_options in command class. They have a different
564
- # format (tuple of four) so we need to preprocess them here.
565
- if hasattr(cmd_class, 'help_options') and isinstance(
566
- cmd_class.help_options, list
567
- ):
568
- help_options = fix_help_options(cmd_class.help_options)
569
- else:
570
- help_options = []
571
-
572
- # All commands support the global options too, just by adding
573
- # in 'global_options'.
574
- parser.set_option_table(
575
- self.global_options + cmd_class.user_options + help_options
576
- )
577
- parser.set_negative_aliases(negative_opt)
578
- (args, opts) = parser.getopt(args[1:])
579
- if hasattr(opts, 'help') and opts.help:
580
- self._show_help(parser, display_options=0, commands=[cmd_class])
581
- return
582
-
583
- if hasattr(cmd_class, 'help_options') and isinstance(
584
- cmd_class.help_options, list
585
- ):
586
- help_option_found = 0
587
- for (help_option, short, desc, func) in cmd_class.help_options:
588
- if hasattr(opts, parser.get_attr_name(help_option)):
589
- help_option_found = 1
590
- if callable(func):
591
- func()
592
- else:
593
- raise DistutilsClassError(
594
- "invalid help function %r for help option '%s': "
595
- "must be a callable object (function, etc.)"
596
- % (func, help_option)
597
- )
598
-
599
- if help_option_found:
600
- return
601
-
602
- # Put the options from the command-line into their official
603
- # holding pen, the 'command_options' dictionary.
604
- opt_dict = self.get_option_dict(command)
605
- for (name, value) in vars(opts).items():
606
- opt_dict[name] = ("command line", value)
607
-
608
- return args
609
-
610
- def finalize_options(self):
611
- """Set final values for all the options on the Distribution
612
- instance, analogous to the .finalize_options() method of Command
613
- objects.
614
- """
615
- for attr in ('keywords', 'platforms'):
616
- value = getattr(self.metadata, attr)
617
- if value is None:
618
- continue
619
- if isinstance(value, str):
620
- value = [elm.strip() for elm in value.split(',')]
621
- setattr(self.metadata, attr, value)
622
-
623
- def _show_help(self, parser, global_options=1, display_options=1, commands=[]):
624
- """Show help for the setup script command-line in the form of
625
- several lists of command-line options. 'parser' should be a
626
- FancyGetopt instance; do not expect it to be returned in the
627
- same state, as its option table will be reset to make it
628
- generate the correct help text.
629
-
630
- If 'global_options' is true, lists the global options:
631
- --verbose, --dry-run, etc. If 'display_options' is true, lists
632
- the "display-only" options: --name, --version, etc. Finally,
633
- lists per-command help for every command name or command class
634
- in 'commands'.
635
- """
636
- # late import because of mutual dependence between these modules
637
- from distutils.core import gen_usage
638
- from distutils.cmd import Command
639
-
640
- if global_options:
641
- if display_options:
642
- options = self._get_toplevel_options()
643
- else:
644
- options = self.global_options
645
- parser.set_option_table(options)
646
- parser.print_help(self.common_usage + "\nGlobal options:")
647
- print('')
648
-
649
- if display_options:
650
- parser.set_option_table(self.display_options)
651
- parser.print_help(
652
- "Information display options (just display "
653
- + "information, ignore any commands)"
654
- )
655
- print('')
656
-
657
- for command in self.commands:
658
- if isinstance(command, type) and issubclass(command, Command):
659
- klass = command
660
- else:
661
- klass = self.get_command_class(command)
662
- if hasattr(klass, 'help_options') and isinstance(klass.help_options, list):
663
- parser.set_option_table(
664
- klass.user_options + fix_help_options(klass.help_options)
665
- )
666
- else:
667
- parser.set_option_table(klass.user_options)
668
- parser.print_help("Options for '%s' command:" % klass.__name__)
669
- print('')
670
-
671
- print(gen_usage(self.script_name))
672
-
673
- def handle_display_options(self, option_order):
674
- """If there were any non-global "display-only" options
675
- (--help-commands or the metadata display options) on the command
676
- line, display the requested info and return true; else return
677
- false.
678
- """
679
- from distutils.core import gen_usage
680
-
681
- # User just wants a list of commands -- we'll print it out and stop
682
- # processing now (ie. if they ran "setup --help-commands foo bar",
683
- # we ignore "foo bar").
684
- if self.help_commands:
685
- self.print_commands()
686
- print('')
687
- print(gen_usage(self.script_name))
688
- return 1
689
-
690
- # If user supplied any of the "display metadata" options, then
691
- # display that metadata in the order in which the user supplied the
692
- # metadata options.
693
- any_display_options = 0
694
- is_display_option = {}
695
- for option in self.display_options:
696
- is_display_option[option[0]] = 1
697
-
698
- for (opt, val) in option_order:
699
- if val and is_display_option.get(opt):
700
- opt = translate_longopt(opt)
701
- value = getattr(self.metadata, "get_" + opt)()
702
- if opt in ['keywords', 'platforms']:
703
- print(','.join(value))
704
- elif opt in ('classifiers', 'provides', 'requires', 'obsoletes'):
705
- print('\n'.join(value))
706
- else:
707
- print(value)
708
- any_display_options = 1
709
-
710
- return any_display_options
711
-
712
- def print_command_list(self, commands, header, max_length):
713
- """Print a subset of the list of all commands -- used by
714
- 'print_commands()'.
715
- """
716
- print(header + ":")
717
-
718
- for cmd in commands:
719
- klass = self.cmdclass.get(cmd)
720
- if not klass:
721
- klass = self.get_command_class(cmd)
722
- try:
723
- description = klass.description
724
- except AttributeError:
725
- description = "(no description available)"
726
-
727
- print(" %-*s %s" % (max_length, cmd, description))
728
-
729
- def print_commands(self):
730
- """Print out a help message listing all available commands with a
731
- description of each. The list is divided into "standard commands"
732
- (listed in distutils.command.__all__) and "extra commands"
733
- (mentioned in self.cmdclass, but not a standard command). The
734
- descriptions come from the command class attribute
735
- 'description'.
736
- """
737
- import distutils.command
738
-
739
- std_commands = distutils.command.__all__
740
- is_std = {}
741
- for cmd in std_commands:
742
- is_std[cmd] = 1
743
-
744
- extra_commands = []
745
- for cmd in self.cmdclass.keys():
746
- if not is_std.get(cmd):
747
- extra_commands.append(cmd)
748
-
749
- max_length = 0
750
- for cmd in std_commands + extra_commands:
751
- if len(cmd) > max_length:
752
- max_length = len(cmd)
753
-
754
- self.print_command_list(std_commands, "Standard commands", max_length)
755
- if extra_commands:
756
- print()
757
- self.print_command_list(extra_commands, "Extra commands", max_length)
758
-
759
- def get_command_list(self):
760
- """Get a list of (command, description) tuples.
761
- The list is divided into "standard commands" (listed in
762
- distutils.command.__all__) and "extra commands" (mentioned in
763
- self.cmdclass, but not a standard command). The descriptions come
764
- from the command class attribute 'description'.
765
- """
766
- # Currently this is only used on Mac OS, for the Mac-only GUI
767
- # Distutils interface (by Jack Jansen)
768
- import distutils.command
769
-
770
- std_commands = distutils.command.__all__
771
- is_std = {}
772
- for cmd in std_commands:
773
- is_std[cmd] = 1
774
-
775
- extra_commands = []
776
- for cmd in self.cmdclass.keys():
777
- if not is_std.get(cmd):
778
- extra_commands.append(cmd)
779
-
780
- rv = []
781
- for cmd in std_commands + extra_commands:
782
- klass = self.cmdclass.get(cmd)
783
- if not klass:
784
- klass = self.get_command_class(cmd)
785
- try:
786
- description = klass.description
787
- except AttributeError:
788
- description = "(no description available)"
789
- rv.append((cmd, description))
790
- return rv
791
-
792
- # -- Command class/object methods ----------------------------------
793
-
794
- def get_command_packages(self):
795
- """Return a list of packages from which commands are loaded."""
796
- pkgs = self.command_packages
797
- if not isinstance(pkgs, list):
798
- if pkgs is None:
799
- pkgs = ''
800
- pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
801
- if "distutils.command" not in pkgs:
802
- pkgs.insert(0, "distutils.command")
803
- self.command_packages = pkgs
804
- return pkgs
805
-
806
- def get_command_class(self, command):
807
- """Return the class that implements the Distutils command named by
808
- 'command'. First we check the 'cmdclass' dictionary; if the
809
- command is mentioned there, we fetch the class object from the
810
- dictionary and return it. Otherwise we load the command module
811
- ("distutils.command." + command) and fetch the command class from
812
- the module. The loaded class is also stored in 'cmdclass'
813
- to speed future calls to 'get_command_class()'.
814
-
815
- Raises DistutilsModuleError if the expected module could not be
816
- found, or if that module does not define the expected class.
817
- """
818
- klass = self.cmdclass.get(command)
819
- if klass:
820
- return klass
821
-
822
- for pkgname in self.get_command_packages():
823
- module_name = "{}.{}".format(pkgname, command)
824
- klass_name = command
825
-
826
- try:
827
- __import__(module_name)
828
- module = sys.modules[module_name]
829
- except ImportError:
830
- continue
831
-
832
- try:
833
- klass = getattr(module, klass_name)
834
- except AttributeError:
835
- raise DistutilsModuleError(
836
- "invalid command '%s' (no class '%s' in module '%s')"
837
- % (command, klass_name, module_name)
838
- )
839
-
840
- self.cmdclass[command] = klass
841
- return klass
842
-
843
- raise DistutilsModuleError("invalid command '%s'" % command)
844
-
845
- def get_command_obj(self, command, create=1):
846
- """Return the command object for 'command'. Normally this object
847
- is cached on a previous call to 'get_command_obj()'; if no command
848
- object for 'command' is in the cache, then we either create and
849
- return it (if 'create' is true) or return None.
850
- """
851
- cmd_obj = self.command_obj.get(command)
852
- if not cmd_obj and create:
853
- if DEBUG:
854
- self.announce(
855
- "Distribution.get_command_obj(): "
856
- "creating '%s' command object" % command
857
- )
858
-
859
- klass = self.get_command_class(command)
860
- cmd_obj = self.command_obj[command] = klass(self)
861
- self.have_run[command] = 0
862
-
863
- # Set any options that were supplied in config files
864
- # or on the command line. (NB. support for error
865
- # reporting is lame here: any errors aren't reported
866
- # until 'finalize_options()' is called, which means
867
- # we won't report the source of the error.)
868
- options = self.command_options.get(command)
869
- if options:
870
- self._set_command_options(cmd_obj, options)
871
-
872
- return cmd_obj
873
-
874
- def _set_command_options(self, command_obj, option_dict=None): # noqa: C901
875
- """Set the options for 'command_obj' from 'option_dict'. Basically
876
- this means copying elements of a dictionary ('option_dict') to
877
- attributes of an instance ('command').
878
-
879
- 'command_obj' must be a Command instance. If 'option_dict' is not
880
- supplied, uses the standard option dictionary for this command
881
- (from 'self.command_options').
882
- """
883
- command_name = command_obj.get_command_name()
884
- if option_dict is None:
885
- option_dict = self.get_option_dict(command_name)
886
-
887
- if DEBUG:
888
- self.announce(" setting options for '%s' command:" % command_name)
889
- for (option, (source, value)) in option_dict.items():
890
- if DEBUG:
891
- self.announce(" {} = {} (from {})".format(option, value, source))
892
- try:
893
- bool_opts = [translate_longopt(o) for o in command_obj.boolean_options]
894
- except AttributeError:
895
- bool_opts = []
896
- try:
897
- neg_opt = command_obj.negative_opt
898
- except AttributeError:
899
- neg_opt = {}
900
-
901
- try:
902
- is_string = isinstance(value, str)
903
- if option in neg_opt and is_string:
904
- setattr(command_obj, neg_opt[option], not strtobool(value))
905
- elif option in bool_opts and is_string:
906
- setattr(command_obj, option, strtobool(value))
907
- elif hasattr(command_obj, option):
908
- setattr(command_obj, option, value)
909
- else:
910
- raise DistutilsOptionError(
911
- "error in %s: command '%s' has no such option '%s'"
912
- % (source, command_name, option)
913
- )
914
- except ValueError as msg:
915
- raise DistutilsOptionError(msg)
916
-
917
- def reinitialize_command(self, command, reinit_subcommands=0):
918
- """Reinitializes a command to the state it was in when first
919
- returned by 'get_command_obj()': ie., initialized but not yet
920
- finalized. This provides the opportunity to sneak option
921
- values in programmatically, overriding or supplementing
922
- user-supplied values from the config files and command line.
923
- You'll have to re-finalize the command object (by calling
924
- 'finalize_options()' or 'ensure_finalized()') before using it for
925
- real.
926
-
927
- 'command' should be a command name (string) or command object. If
928
- 'reinit_subcommands' is true, also reinitializes the command's
929
- sub-commands, as declared by the 'sub_commands' class attribute (if
930
- it has one). See the "install" command for an example. Only
931
- reinitializes the sub-commands that actually matter, ie. those
932
- whose test predicates return true.
933
-
934
- Returns the reinitialized command object.
935
- """
936
- from distutils.cmd import Command
937
-
938
- if not isinstance(command, Command):
939
- command_name = command
940
- command = self.get_command_obj(command_name)
941
- else:
942
- command_name = command.get_command_name()
943
-
944
- if not command.finalized:
945
- return command
946
- command.initialize_options()
947
- command.finalized = 0
948
- self.have_run[command_name] = 0
949
- self._set_command_options(command)
950
-
951
- if reinit_subcommands:
952
- for sub in command.get_sub_commands():
953
- self.reinitialize_command(sub, reinit_subcommands)
954
-
955
- return command
956
-
957
- # -- Methods that operate on the Distribution ----------------------
958
-
959
- def announce(self, msg, level=log.INFO):
960
- log.log(level, msg)
961
-
962
- def run_commands(self):
963
- """Run each command that was seen on the setup script command line.
964
- Uses the list of commands found and cache of command objects
965
- created by 'get_command_obj()'.
966
- """
967
- for cmd in self.commands:
968
- self.run_command(cmd)
969
-
970
- # -- Methods that operate on its Commands --------------------------
971
-
972
- def run_command(self, command):
973
- """Do whatever it takes to run a command (including nothing at all,
974
- if the command has already been run). Specifically: if we have
975
- already created and run the command named by 'command', return
976
- silently without doing anything. If the command named by 'command'
977
- doesn't even have a command object yet, create one. Then invoke
978
- 'run()' on that command object (or an existing one).
979
- """
980
- # Already been here, done that? then return silently.
981
- if self.have_run.get(command):
982
- return
983
-
984
- log.info("running %s", command)
985
- cmd_obj = self.get_command_obj(command)
986
- cmd_obj.ensure_finalized()
987
- cmd_obj.run()
988
- self.have_run[command] = 1
989
-
990
- # -- Distribution query methods ------------------------------------
991
-
992
- def has_pure_modules(self):
993
- return len(self.packages or self.py_modules or []) > 0
994
-
995
- def has_ext_modules(self):
996
- return self.ext_modules and len(self.ext_modules) > 0
997
-
998
- def has_c_libraries(self):
999
- return self.libraries and len(self.libraries) > 0
1000
-
1001
- def has_modules(self):
1002
- return self.has_pure_modules() or self.has_ext_modules()
1003
-
1004
- def has_headers(self):
1005
- return self.headers and len(self.headers) > 0
1006
-
1007
- def has_scripts(self):
1008
- return self.scripts and len(self.scripts) > 0
1009
-
1010
- def has_data_files(self):
1011
- return self.data_files and len(self.data_files) > 0
1012
-
1013
- def is_pure(self):
1014
- return (
1015
- self.has_pure_modules()
1016
- and not self.has_ext_modules()
1017
- and not self.has_c_libraries()
1018
- )
1019
-
1020
- # -- Metadata query methods ----------------------------------------
1021
-
1022
- # If you're looking for 'get_name()', 'get_version()', and so forth,
1023
- # they are defined in a sneaky way: the constructor binds self.get_XXX
1024
- # to self.metadata.get_XXX. The actual code is in the
1025
- # DistributionMetadata class, below.
1026
-
1027
-
1028
- class DistributionMetadata:
1029
- """Dummy class to hold the distribution meta-data: name, version,
1030
- author, and so forth.
1031
- """
1032
-
1033
- _METHOD_BASENAMES = (
1034
- "name",
1035
- "version",
1036
- "author",
1037
- "author_email",
1038
- "maintainer",
1039
- "maintainer_email",
1040
- "url",
1041
- "license",
1042
- "description",
1043
- "long_description",
1044
- "keywords",
1045
- "platforms",
1046
- "fullname",
1047
- "contact",
1048
- "contact_email",
1049
- "classifiers",
1050
- "download_url",
1051
- # PEP 314
1052
- "provides",
1053
- "requires",
1054
- "obsoletes",
1055
- )
1056
-
1057
- def __init__(self, path=None):
1058
- if path is not None:
1059
- self.read_pkg_file(open(path))
1060
- else:
1061
- self.name = None
1062
- self.version = None
1063
- self.author = None
1064
- self.author_email = None
1065
- self.maintainer = None
1066
- self.maintainer_email = None
1067
- self.url = None
1068
- self.license = None
1069
- self.description = None
1070
- self.long_description = None
1071
- self.keywords = None
1072
- self.platforms = None
1073
- self.classifiers = None
1074
- self.download_url = None
1075
- # PEP 314
1076
- self.provides = None
1077
- self.requires = None
1078
- self.obsoletes = None
1079
-
1080
- def read_pkg_file(self, file):
1081
- """Reads the metadata values from a file object."""
1082
- msg = message_from_file(file)
1083
-
1084
- def _read_field(name):
1085
- value = msg[name]
1086
- if value and value != "UNKNOWN":
1087
- return value
1088
-
1089
- def _read_list(name):
1090
- values = msg.get_all(name, None)
1091
- if values == []:
1092
- return None
1093
- return values
1094
-
1095
- metadata_version = msg['metadata-version']
1096
- self.name = _read_field('name')
1097
- self.version = _read_field('version')
1098
- self.description = _read_field('summary')
1099
- # we are filling author only.
1100
- self.author = _read_field('author')
1101
- self.maintainer = None
1102
- self.author_email = _read_field('author-email')
1103
- self.maintainer_email = None
1104
- self.url = _read_field('home-page')
1105
- self.license = _read_field('license')
1106
-
1107
- if 'download-url' in msg:
1108
- self.download_url = _read_field('download-url')
1109
- else:
1110
- self.download_url = None
1111
-
1112
- self.long_description = _read_field('description')
1113
- self.description = _read_field('summary')
1114
-
1115
- if 'keywords' in msg:
1116
- self.keywords = _read_field('keywords').split(',')
1117
-
1118
- self.platforms = _read_list('platform')
1119
- self.classifiers = _read_list('classifier')
1120
-
1121
- # PEP 314 - these fields only exist in 1.1
1122
- if metadata_version == '1.1':
1123
- self.requires = _read_list('requires')
1124
- self.provides = _read_list('provides')
1125
- self.obsoletes = _read_list('obsoletes')
1126
- else:
1127
- self.requires = None
1128
- self.provides = None
1129
- self.obsoletes = None
1130
-
1131
- def write_pkg_info(self, base_dir):
1132
- """Write the PKG-INFO file into the release tree."""
1133
- with open(
1134
- os.path.join(base_dir, 'PKG-INFO'), 'w', encoding='UTF-8'
1135
- ) as pkg_info:
1136
- self.write_pkg_file(pkg_info)
1137
-
1138
- def write_pkg_file(self, file):
1139
- """Write the PKG-INFO format data to a file object."""
1140
- version = '1.0'
1141
- if (
1142
- self.provides
1143
- or self.requires
1144
- or self.obsoletes
1145
- or self.classifiers
1146
- or self.download_url
1147
- ):
1148
- version = '1.1'
1149
-
1150
- # required fields
1151
- file.write('Metadata-Version: %s\n' % version)
1152
- file.write('Name: %s\n' % self.get_name())
1153
- file.write('Version: %s\n' % self.get_version())
1154
-
1155
- def maybe_write(header, val):
1156
- if val:
1157
- file.write(f"{header}: {val}\n")
1158
-
1159
- # optional fields
1160
- maybe_write("Summary", self.get_description())
1161
- maybe_write("Home-page", self.get_url())
1162
- maybe_write("Author", self.get_contact())
1163
- maybe_write("Author-email", self.get_contact_email())
1164
- maybe_write("License", self.get_license())
1165
- maybe_write("Download-URL", self.download_url)
1166
- maybe_write("Description", rfc822_escape(self.get_long_description() or ""))
1167
- maybe_write("Keywords", ",".join(self.get_keywords()))
1168
-
1169
- self._write_list(file, 'Platform', self.get_platforms())
1170
- self._write_list(file, 'Classifier', self.get_classifiers())
1171
-
1172
- # PEP 314
1173
- self._write_list(file, 'Requires', self.get_requires())
1174
- self._write_list(file, 'Provides', self.get_provides())
1175
- self._write_list(file, 'Obsoletes', self.get_obsoletes())
1176
-
1177
- def _write_list(self, file, name, values):
1178
- values = values or []
1179
- for value in values:
1180
- file.write('{}: {}\n'.format(name, value))
1181
-
1182
- # -- Metadata query methods ----------------------------------------
1183
-
1184
- def get_name(self):
1185
- return self.name or "UNKNOWN"
1186
-
1187
- def get_version(self):
1188
- return self.version or "0.0.0"
1189
-
1190
- def get_fullname(self):
1191
- return "{}-{}".format(self.get_name(), self.get_version())
1192
-
1193
- def get_author(self):
1194
- return self.author
1195
-
1196
- def get_author_email(self):
1197
- return self.author_email
1198
-
1199
- def get_maintainer(self):
1200
- return self.maintainer
1201
-
1202
- def get_maintainer_email(self):
1203
- return self.maintainer_email
1204
-
1205
- def get_contact(self):
1206
- return self.maintainer or self.author
1207
-
1208
- def get_contact_email(self):
1209
- return self.maintainer_email or self.author_email
1210
-
1211
- def get_url(self):
1212
- return self.url
1213
-
1214
- def get_license(self):
1215
- return self.license
1216
-
1217
- get_licence = get_license
1218
-
1219
- def get_description(self):
1220
- return self.description
1221
-
1222
- def get_long_description(self):
1223
- return self.long_description
1224
-
1225
- def get_keywords(self):
1226
- return self.keywords or []
1227
-
1228
- def set_keywords(self, value):
1229
- self.keywords = _ensure_list(value, 'keywords')
1230
-
1231
- def get_platforms(self):
1232
- return self.platforms
1233
-
1234
- def set_platforms(self, value):
1235
- self.platforms = _ensure_list(value, 'platforms')
1236
-
1237
- def get_classifiers(self):
1238
- return self.classifiers or []
1239
-
1240
- def set_classifiers(self, value):
1241
- self.classifiers = _ensure_list(value, 'classifiers')
1242
-
1243
- def get_download_url(self):
1244
- return self.download_url
1245
-
1246
- # PEP 314
1247
- def get_requires(self):
1248
- return self.requires or []
1249
-
1250
- def set_requires(self, value):
1251
- import distutils.versionpredicate
1252
-
1253
- for v in value:
1254
- distutils.versionpredicate.VersionPredicate(v)
1255
- self.requires = list(value)
1256
-
1257
- def get_provides(self):
1258
- return self.provides or []
1259
-
1260
- def set_provides(self, value):
1261
- value = [v.strip() for v in value]
1262
- for v in value:
1263
- import distutils.versionpredicate
1264
-
1265
- distutils.versionpredicate.split_provision(v)
1266
- self.provides = value
1267
-
1268
- def get_obsoletes(self):
1269
- return self.obsoletes or []
1270
-
1271
- def set_obsoletes(self, value):
1272
- import distutils.versionpredicate
1273
-
1274
- for v in value:
1275
- distutils.versionpredicate.VersionPredicate(v)
1276
- self.obsoletes = list(value)
1277
-
1278
-
1279
- def fix_help_options(options):
1280
- """Convert a 4-tuple 'help_options' list as found in various command
1281
- classes to the 3-tuple form required by FancyGetopt.
1282
- """
1283
- new_options = []
1284
- for help_tuple in options:
1285
- new_options.append(help_tuple[0:3])
1286
- return new_options
 
spaces/Awesimo/jojogan/op/fused_bias_act.cpp DELETED
@@ -1,21 +0,0 @@
- #include <torch/extension.h>
-
-
- torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-     int act, int grad, float alpha, float scale);
-
- #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
- #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
- #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
- torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-     int act, int grad, float alpha, float scale) {
-     CHECK_CUDA(input);
-     CHECK_CUDA(bias);
-
-     return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
- }
-
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-     m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
- }
 
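The deleted file above only defines the C++/pybind11 binding; the actual kernel lives in a separate CUDA source not shown in this diff. As a hedged illustration of how such a binding is typically JIT-compiled and called from Python, a minimal sketch follows; the `fused_bias_act_kernel.cu` filename and the `act`/`alpha`/`scale` values are assumptions, not facts from this commit.

```python
# Illustrative sketch only: JIT-build the extension and call the bound op.
# The companion kernel file name and the activation arguments are assumptions.
import torch
from torch.utils.cpp_extension import load

fused = load(
    name="fused",
    sources=["fused_bias_act.cpp", "fused_bias_act_kernel.cu"],  # assumed layout
)

x = torch.randn(4, 512, device="cuda")
bias = torch.zeros(512, device="cuda")
empty = x.new_empty(0)  # the 'refer' tensor is only needed for backward passes

# Signature mirrors the binding above: (input, bias, refer, act, grad, alpha, scale).
out = fused.fused_bias_act(x, bias, empty, 3, 0, 0.2, 2 ** 0.5)
```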
spaces/Banbri/zcvzcv/src/components/ui/alert.tsx DELETED
@@ -1,59 +0,0 @@
- import * as React from "react"
- import { cva, type VariantProps } from "class-variance-authority"
-
- import { cn } from "@/lib/utils"
-
- const alertVariants = cva(
-   "relative w-full rounded-lg border border-stone-200 p-4 [&:has(svg)]:pl-11 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-stone-950 dark:border-stone-800 dark:[&>svg]:text-stone-50",
-   {
-     variants: {
-       variant: {
-         default: "bg-white text-stone-950 dark:bg-stone-950 dark:text-stone-50",
-         destructive:
-           "border-red-500/50 text-red-500 dark:border-red-500 [&>svg]:text-red-500 dark:border-red-900/50 dark:text-red-900 dark:dark:border-red-900 dark:[&>svg]:text-red-900",
-       },
-     },
-     defaultVariants: {
-       variant: "default",
-     },
-   }
- )
-
- const Alert = React.forwardRef<
-   HTMLDivElement,
-   React.HTMLAttributes<HTMLDivElement> & VariantProps<typeof alertVariants>
- >(({ className, variant, ...props }, ref) => (
-   <div
-     ref={ref}
-     role="alert"
-     className={cn(alertVariants({ variant }), className)}
-     {...props}
-   />
- ))
- Alert.displayName = "Alert"
-
- const AlertTitle = React.forwardRef<
-   HTMLParagraphElement,
-   React.HTMLAttributes<HTMLHeadingElement>
- >(({ className, ...props }, ref) => (
-   <h5
-     ref={ref}
-     className={cn("mb-1 font-medium leading-none tracking-tight", className)}
-     {...props}
-   />
- ))
- AlertTitle.displayName = "AlertTitle"
-
- const AlertDescription = React.forwardRef<
-   HTMLParagraphElement,
-   React.HTMLAttributes<HTMLParagraphElement>
- >(({ className, ...props }, ref) => (
-   <div
-     ref={ref}
-     className={cn("text-sm [&_p]:leading-relaxed", className)}
-     {...props}
-   />
- ))
- AlertDescription.displayName = "AlertDescription"
-
- export { Alert, AlertTitle, AlertDescription }
 
spaces/Benson/text-generation/Examples/Android Mini Block Craft Apk.md DELETED
@@ -1,90 +0,0 @@
1
- <br />
2
- <h1>Mini bloque de arte APK: Un juego de caja de arena para Android</h1>
3
- <p>Si usted está buscando un juego de sandbox divertido y creativo para su dispositivo Android, es posible que desee echa un vistazo a Mini Block Craft APK. Este juego te permite crear, explorar y sobrevivir en un mundo abierto estilo pixel. Puedes construir tu propia casa, luchar contra monstruos o simplemente disfrutar del paisaje. En este artículo, le diremos todo lo que necesita saber sobre Mini Block Craft APK, incluyendo sus características, cómo descargarlo e instalarlo, sus pros y contras, y algunas alternativas. </p>
4
- <h2>¿Qué es Mini Block Craft APK? </h2>
5
- <p>Mini Block Craft APK es un juego para Android desarrollado por Build Block Studio. Está inspirado en el popular juego Minecraft, pero tiene sus propias características únicas y estilo. El juego es gratis para descargar y jugar, pero contiene anuncios y compras en la aplicación. Puedes jugar el juego sin conexión o en línea con otros jugadores. </p>
6
- <h2>android mini block craft apk</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://bltlly.com/2v6LtV">https://bltlly.com/2v6LtV</a></b></p><br /><br />
7
- <h3>Características de Mini Block Craft APK</h3>
8
- <p>Mini Block Craft APK tiene muchas características que lo hacen un juego divertido y adictivo. Aquí están algunos de ellos:</p>
9
- <h4>Gráficos de estilo de píxeles</h4>
10
- <p>El juego tiene unos gráficos estilo pixel que le dan una sensación retro y nostálgica. El juego también tiene un ciclo de día y noche, efectos meteorológicos y sombras realistas. Los gráficos son simples pero coloridos y encantadores. </p>
11
- <h4>Modos creativos y de supervivencia</h4>
12
- <p>El juego tiene dos modos: creativo y supervivencia. En el modo creativo, tienes recursos ilimitados y puedes construir lo que quieras sin restricciones. También puede volar alrededor del mapa y explorar diferentes biomas. En el modo de supervivencia, tienes que reunir recursos, crear herramientas y armas, y luchar contra los enemigos. También tienes que controlar tu hambre, salud y resistencia. </p>
13
- <h4>Construye la casa de tus sueños o explora el mapa</h4>
14
-
15
- <h4>Lucha contra monstruos y zombies</h4>
16
- <p>El juego no se trata solo de construir y explorar. También tiene un sistema de combate que te permite luchar contra varios enemigos, como monstruos, zombis, arañas, esqueletos y más. Puedes usar diferentes armas, como espadas, arcos, hachas y armas. También puedes crear armaduras y pociones para protegerte. </p>
17
- <h3> ¿Cómo descargar e instalar Mini Block Craft APK? </h3>
18
- <p>Si desea jugar Mini bloque arte APK en su dispositivo Android, es necesario descargar e instalarlo primero. Estos son los pasos para hacer eso:</p>
19
- <h4> Requisitos para Mini bloque de arte APK</h4>
20
- <p>Antes de descargar e instalar el juego, asegúrese de que su dispositivo cumple con los siguientes requisitos:</p>
21
- <p></p>
22
- <ul>
23
- <li>Versión de Android 4.1 o superior</li>
24
- <li>Al menos 28 MB de espacio de almacenamiento libre</li>
25
- <li>Una conexión a Internet estable (opcional)</li>
26
- </ul>
27
- <h4> Pasos para descargar e instalar Mini Block Craft APK</h4>
28
- <p>Siga estos pasos para descargar e instalar el juego:</p>
29
- <ol>
30
- <li>Ir a [este enlace]( 1 ) para descargar la última versión de Mini Block Craft APK.</li>
31
- <li>Una vez completada la descarga, abra la aplicación de administrador de archivos en su dispositivo y localice el archivo descargado. </li>
32
- <li>Toque en el archivo y permita la instalación desde fuentes desconocidas si se le solicita. </li>
33
- <li> <li>Siga las instrucciones en pantalla para completar el proceso de instalación. </li>
34
- <li>Una vez que se hace la instalación, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. </li>
35
- </ol>
36
- <h3>Pros y contras de Mini Block Craft APK</h3>
37
- <p>Mini Block Craft APK es un juego divertido y creativo, pero también tiene algunos inconvenientes. Estos son algunos de los pros y los contras del juego:</p>
38
- <h4> Pros de Mini bloque de arte APK</h4>
39
- <ul>
40
- <li>El juego es gratis para descargar y jugar. </li>
41
- <li>El juego tiene unos gráficos estilo píxel que son atractivos y nostálgicos. </li>
42
- <li>El juego tiene modos creativos y de supervivencia que ofrecen diferentes desafíos y experiencias. </li>
43
-
44
- <li>El juego tiene un modo multijugador en línea que te permite jugar con otros jugadores de todo el mundo. </li>
45
- </ul>
46
- <h4> Contras de Mini Block Craft APK</h4>
47
- <ul>
48
- <li>El juego contiene anuncios y compras en la aplicación que pueden ser molestos y caros. </li>
49
- <li>El juego puede ser lento y con errores en algunos dispositivos. </li>
50
- <li> El juego puede ser repetitivo y aburrido después de un tiempo. </li>
51
- <li>El juego tiene un sonido y música de baja calidad. </li>
52
- <li> El juego tiene una interfaz de usuario y controles pobres. </li>
53
- </ul>
54
- <h3>Alternativas a Mini bloque de arte APK</h3>
55
- <p>Si usted está buscando algunas alternativas a Mini Block Craft APK, puede probar estos juegos:</p>
56
- <h4>Edición de bolsillo de Minecraft</h4>
57
- <p>Minecraft Pocket Edition es la versión móvil oficial del famoso juego de sandbox Minecraft. Tiene las mismas características y jugabilidad que el juego original, pero está optimizado para dispositivos móviles. Puede crear, explorar y sobrevivir en un mundo generado al azar, o unirse a servidores en línea y jugar con otros jugadores. El juego cuesta $6.99 para descargar, pero ofrece actualizaciones regulares y nuevo contenido. </p>
58
- <h4>Roblox</h4>
59
- <p>Roblox es una plataforma de juego online multijugador masivo que te permite crear y jugar varios juegos. Puedes elegir entre millones de juegos creados por otros usuarios, o hacer los tuyos usando Roblox Studio. También puedes personalizar tu avatar, chatear con otros jugadores y unirte a grupos. El juego es gratis para descargar y jugar, pero tiene una moneda virtual llamada Robux que se puede usar para comprar artículos y acceder a funciones premium. </p>
60
- <h4>Terraria</h4>
61
- <p>Terraria es un juego de sandbox en 2D que combina elementos de acción, aventura y juegos de rol. Puedes explorar, construir, crear, luchar y sobrevivir en un mundo generado por procedimientos. También puedes jugar con hasta 7 amigos online o localmente. El juego cuesta $4.99 para descargar, pero tiene mucho contenido y actualizaciones. </p>
62
- <h2>Conclusión</h2>
63
-
64
- <p>Esperamos que este artículo le ayudó a aprender más acerca de Mini Block Craft APK. Si usted tiene alguna pregunta o retroalimentación, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
65
- <h3>Preguntas frecuentes</h3>
66
- <ul>
67
- <li><b> ¿Es seguro descargar Mini Block Craft APK? </b></li>
68
- <p>Sí, Mini Block Craft APK es seguro de descargar desde [este enlace]. Sin embargo, siempre debe tener cuidado al descargar archivos de fuentes desconocidas y escanearlos en busca de virus antes de instalarlos. </p>
69
- <li><b> ¿Puedo jugar Mini Block Craft APK en PC? </b></li>
70
- <p>No, Mini Block Craft APK solo está disponible para dispositivos Android. Sin embargo, puede utilizar un emulador de Android en su PC para ejecutar el juego. Algunos emuladores de Android populares son BlueStacks, NoxPlayer y LDPlayer.</p>
71
- <li><b> ¿Cómo actualizo Mini Block Craft APK? </b></li>
72
- <p>Para actualizar Mini Block Craft APK, es necesario descargar la última versión del juego desde [este enlace] e instalarlo sobre el existente. Alternativamente, puede habilitar actualizaciones automáticas en la configuración de su dispositivo o usar una herramienta de actualización de aplicaciones. </p>
73
- <li><b> ¿Cómo puedo desinstalar Mini Block Craft APK? </b></li>
74
- <p>Para desinstalar Mini Block Craft APK, es necesario ir a la configuración de su dispositivo > aplicaciones > Mini Block Craft > desinstalar. Alternativamente, puedes arrastrar <p>el icono de la aplicación a la papelera en la pantalla de inicio o en el cajón de la aplicación. También es posible que tenga que eliminar el archivo APK de su aplicación de administrador de archivos. </p>
75
- <li><b> ¿Cuáles son algunos consejos y trucos para jugar Mini Block Craft APK? </b></li>
76
- <p>Aquí hay algunos consejos y trucos para jugar Mini Block Craft APK:</p>
77
- <ul>
78
- <li>Utilice el mapa y la brújula para navegar por el mundo y encontrar su hogar. </li>
79
- <li>Recoge tantos recursos como puedas y guárdalos en cofres. </li>
80
- <li>Crea una cama y duerme por la noche para evitar monstruos y zombis. </li>
81
- <li>Utilice antorchas y linternas para iluminar su casa y sus alrededores. </li>
82
- <li>Utiliza cercas y puertas para proteger tu casa de los enemigos. </li>
83
- <li>Usa diferentes herramientas y armas para diferentes tareas y enemigos. </li>
84
-
85
- <li>Explora el mapa y encuentra tesoros y secretos ocultos. </li>
86
- <li>Juega en línea con otros jugadores y chatea con ellos usando la función de chat. </li>
87
- ¡Diviértete y sé creativo! </li>
88
- </ul></p> 64aa2da5cf<br />
89
- <br />
90
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/docstring.py DELETED
@@ -1,77 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # https://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- from botocore.docs.docstring import LazyLoadedDocstring
-
- from boto3.docs.action import document_action, document_load_reload_action
- from boto3.docs.attr import (
-     document_attribute,
-     document_identifier,
-     document_reference,
- )
- from boto3.docs.collection import (
-     document_batch_action,
-     document_collection_method,
-     document_collection_object,
- )
- from boto3.docs.subresource import document_sub_resource
- from boto3.docs.waiter import document_resource_waiter
-
-
- class ActionDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_action(*args, **kwargs)
-
-
- class LoadReloadDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_load_reload_action(*args, **kwargs)
-
-
- class SubResourceDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_sub_resource(*args, **kwargs)
-
-
- class AttributeDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_attribute(*args, **kwargs)
-
-
- class IdentifierDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_identifier(*args, **kwargs)
-
-
- class ReferenceDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_reference(*args, **kwargs)
-
-
- class CollectionDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_collection_object(*args, **kwargs)
-
-
- class CollectionMethodDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_collection_method(*args, **kwargs)
-
-
- class BatchActionDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_batch_action(*args, **kwargs)
-
-
- class ResourceWaiterDocstring(LazyLoadedDocstring):
-     def _write_docstring(self, *args, **kwargs):
-         document_resource_waiter(*args, **kwargs)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/exceptions.py DELETED
@@ -1,126 +0,0 @@
1
- # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # https://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
-
14
- # All exceptions in this class should subclass from Boto3Error.
15
- import botocore.exceptions
16
-
17
-
18
- # All exceptions should subclass from Boto3Error in this module.
19
- class Boto3Error(Exception):
20
- """Base class for all Boto3 errors."""
21
-
22
-
23
- class ResourceLoadException(Boto3Error):
24
- pass
25
-
26
-
27
- # NOTE: This doesn't appear to be used anywhere.
28
- # It's probably safe to remove this.
29
- class NoVersionFound(Boto3Error):
30
- pass
31
-
32
-
33
- # We're subclassing from botocore.exceptions.DataNotFoundError
34
- # to keep backwards compatibility with anyone that was catching
35
- # this low level Botocore error before this exception was
36
- # introduced in boto3.
37
- # Same thing for ResourceNotExistsError below.
38
- class UnknownAPIVersionError(
39
- Boto3Error, botocore.exceptions.DataNotFoundError
40
- ):
41
- def __init__(self, service_name, bad_api_version, available_api_versions):
42
- msg = (
43
- f"The '{service_name}' resource does not an API version of: {bad_api_version}\n"
44
- f"Valid API versions are: {available_api_versions}"
45
- )
46
- # Not using super because we don't want the DataNotFoundError
47
- # to be called, it has a different __init__ signature.
48
- Boto3Error.__init__(self, msg)
49
-
50
-
51
- class ResourceNotExistsError(
52
- Boto3Error, botocore.exceptions.DataNotFoundError
53
- ):
54
- """Raised when you attempt to create a resource that does not exist."""
55
-
56
- def __init__(self, service_name, available_services, has_low_level_client):
57
- msg = (
58
- "The '{}' resource does not exist.\n"
59
- "The available resources are:\n"
60
- " - {}\n".format(
61
- service_name, '\n - '.join(available_services)
62
- )
63
- )
64
- if has_low_level_client:
65
- msg = (
66
- f"{msg}\nConsider using a boto3.client('{service_name}') "
67
- f"instead of a resource for '{service_name}'"
68
- )
69
- # Not using super because we don't want the DataNotFoundError
70
- # to be called, it has a different __init__ signature.
71
- Boto3Error.__init__(self, msg)
72
-
73
-
74
- class RetriesExceededError(Boto3Error):
75
- def __init__(self, last_exception, msg='Max Retries Exceeded'):
76
- super().__init__(msg)
77
- self.last_exception = last_exception
78
-
79
-
80
- class S3TransferFailedError(Boto3Error):
81
- pass
82
-
83
-
84
- class S3UploadFailedError(Boto3Error):
85
- pass
86
-
87
-
88
- class DynamoDBOperationNotSupportedError(Boto3Error):
89
- """Raised for operations that are not supported for an operand."""
90
-
91
- def __init__(self, operation, value):
92
- msg = (
93
- f'{operation} operation cannot be applied to value {value} of type '
94
- f'{type(value)} directly. Must use AttributeBase object methods '
95
- f'(i.e. Attr().eq()). to generate ConditionBase instances first.'
96
- )
97
- Exception.__init__(self, msg)
98
-
99
-
100
- # FIXME: Backward compatibility
101
- DynanmoDBOperationNotSupportedError = DynamoDBOperationNotSupportedError
102
-
103
-
104
- class DynamoDBNeedsConditionError(Boto3Error):
105
- """Raised when input is not a condition"""
106
-
107
- def __init__(self, value):
108
- msg = (
109
- f'Expecting a ConditionBase object. Got {value} of type {type(value)}. '
110
- f'Use AttributeBase object methods (i.e. Attr().eq()). to '
111
- f'generate ConditionBase instances.'
112
- )
113
- Exception.__init__(self, msg)
114
-
115
-
116
- class DynamoDBNeedsKeyConditionError(Boto3Error):
117
- pass
118
-
119
-
120
- class PythonDeprecationWarning(Warning):
121
- """
122
- Python version being used is scheduled to become unsupported
123
- in an future release. See warning for specifics.
124
- """
125
-
126
- pass
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/structures.py DELETED
@@ -1,99 +0,0 @@
- """
- requests.structures
- ~~~~~~~~~~~~~~~~~~~
-
- Data structures that power Requests.
- """
-
- from collections import OrderedDict
-
- from .compat import Mapping, MutableMapping
-
-
- class CaseInsensitiveDict(MutableMapping):
-     """A case-insensitive ``dict``-like object.
-
-     Implements all methods and operations of
-     ``MutableMapping`` as well as dict's ``copy``. Also
-     provides ``lower_items``.
-
-     All keys are expected to be strings. The structure remembers the
-     case of the last key to be set, and ``iter(instance)``,
-     ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
-     will contain case-sensitive keys. However, querying and contains
-     testing is case insensitive::
-
-         cid = CaseInsensitiveDict()
-         cid['Accept'] = 'application/json'
-         cid['aCCEPT'] == 'application/json'  # True
-         list(cid) == ['Accept']  # True
-
-     For example, ``headers['content-encoding']`` will return the
-     value of a ``'Content-Encoding'`` response header, regardless
-     of how the header name was originally stored.
-
-     If the constructor, ``.update``, or equality comparison
-     operations are given keys that have equal ``.lower()``s, the
-     behavior is undefined.
-     """
-
-     def __init__(self, data=None, **kwargs):
-         self._store = OrderedDict()
-         if data is None:
-             data = {}
-         self.update(data, **kwargs)
-
-     def __setitem__(self, key, value):
-         # Use the lowercased key for lookups, but store the actual
-         # key alongside the value.
-         self._store[key.lower()] = (key, value)
-
-     def __getitem__(self, key):
-         return self._store[key.lower()][1]
-
-     def __delitem__(self, key):
-         del self._store[key.lower()]
-
-     def __iter__(self):
-         return (casedkey for casedkey, mappedvalue in self._store.values())
-
-     def __len__(self):
-         return len(self._store)
-
-     def lower_items(self):
-         """Like iteritems(), but with all lowercase keys."""
-         return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
-
-     def __eq__(self, other):
-         if isinstance(other, Mapping):
-             other = CaseInsensitiveDict(other)
-         else:
-             return NotImplemented
-         # Compare insensitively
-         return dict(self.lower_items()) == dict(other.lower_items())
-
-     # Copy is required
-     def copy(self):
-         return CaseInsensitiveDict(self._store.values())
-
-     def __repr__(self):
-         return str(dict(self.items()))
-
-
- class LookupDict(dict):
-     """Dictionary lookup object."""
-
-     def __init__(self, name=None):
-         self.name = name
-         super().__init__()
-
-     def __repr__(self):
-         return f"<lookup '{self.name}'>"
-
-     def __getitem__(self, key):
-         # We allow fall-through here, so values default to None
-
-         return self.__dict__.get(key, None)
-
-     def get(self, key, default=None):
-         return self.__dict__.get(key, default)
 
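As a quick illustration of the `CaseInsensitiveDict` semantics documented in the deleted module above, a short sketch follows. The behaviour is taken directly from the file's docstring and implementation; the import path shown is the upstream `requests` package rather than the vendored copy removed here.

```python
# Sketch of the documented behaviour: lookups ignore case, while iteration
# reports the casing of the key that was set last.
from requests.structures import CaseInsensitiveDict

headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"

assert headers["aCCEPT"] == "application/json"  # case-insensitive lookup
assert list(headers) == ["Accept"]              # original casing preserved
assert "accept" in headers                      # containment is case-insensitive

headers["ACCEPT"] = "text/html"                 # re-set with different casing
assert list(headers) == ["ACCEPT"]              # remembers the last-set casing
```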
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/request.py DELETED
@@ -1,170 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- from .filepost import encode_multipart_formdata
4
- from .packages.six.moves.urllib.parse import urlencode
5
-
6
- __all__ = ["RequestMethods"]
7
-
8
-
9
- class RequestMethods(object):
10
- """
11
- Convenience mixin for classes who implement a :meth:`urlopen` method, such
12
- as :class:`urllib3.HTTPConnectionPool` and
13
- :class:`urllib3.PoolManager`.
14
-
15
- Provides behavior for making common types of HTTP request methods and
16
- decides which type of request field encoding to use.
17
-
18
- Specifically,
19
-
20
- :meth:`.request_encode_url` is for sending requests whose fields are
21
- encoded in the URL (such as GET, HEAD, DELETE).
22
-
23
- :meth:`.request_encode_body` is for sending requests whose fields are
24
- encoded in the *body* of the request using multipart or www-form-urlencoded
25
- (such as for POST, PUT, PATCH).
26
-
27
- :meth:`.request` is for making any kind of request, it will look up the
28
- appropriate encoding format and use one of the above two methods to make
29
- the request.
30
-
31
- Initializer parameters:
32
-
33
- :param headers:
34
- Headers to include with all requests, unless other headers are given
35
- explicitly.
36
- """
37
-
38
- _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
39
-
40
- def __init__(self, headers=None):
41
- self.headers = headers or {}
42
-
43
- def urlopen(
44
- self,
45
- method,
46
- url,
47
- body=None,
48
- headers=None,
49
- encode_multipart=True,
50
- multipart_boundary=None,
51
- **kw
52
- ): # Abstract
53
- raise NotImplementedError(
54
- "Classes extending RequestMethods must implement "
55
- "their own ``urlopen`` method."
56
- )
57
-
58
- def request(self, method, url, fields=None, headers=None, **urlopen_kw):
59
- """
60
- Make a request using :meth:`urlopen` with the appropriate encoding of
61
- ``fields`` based on the ``method`` used.
62
-
63
- This is a convenience method that requires the least amount of manual
64
- effort. It can be used in most situations, while still having the
65
- option to drop down to more specific methods when necessary, such as
66
- :meth:`request_encode_url`, :meth:`request_encode_body`,
67
- or even the lowest level :meth:`urlopen`.
68
- """
69
- method = method.upper()
70
-
71
- urlopen_kw["request_url"] = url
72
-
73
- if method in self._encode_url_methods:
74
- return self.request_encode_url(
75
- method, url, fields=fields, headers=headers, **urlopen_kw
76
- )
77
- else:
78
- return self.request_encode_body(
79
- method, url, fields=fields, headers=headers, **urlopen_kw
80
- )
81
-
82
- def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
83
- """
84
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
85
- the url. This is useful for request methods like GET, HEAD, DELETE, etc.
86
- """
87
- if headers is None:
88
- headers = self.headers
89
-
90
- extra_kw = {"headers": headers}
91
- extra_kw.update(urlopen_kw)
92
-
93
- if fields:
94
- url += "?" + urlencode(fields)
95
-
96
- return self.urlopen(method, url, **extra_kw)
97
-
98
- def request_encode_body(
99
- self,
100
- method,
101
- url,
102
- fields=None,
103
- headers=None,
104
- encode_multipart=True,
105
- multipart_boundary=None,
106
- **urlopen_kw
107
- ):
108
- """
109
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
110
- the body. This is useful for request methods like POST, PUT, PATCH, etc.
111
-
112
- When ``encode_multipart=True`` (default), then
113
- :func:`urllib3.encode_multipart_formdata` is used to encode
114
- the payload with the appropriate content type. Otherwise
115
- :func:`urllib.parse.urlencode` is used with the
116
- 'application/x-www-form-urlencoded' content type.
117
-
118
- Multipart encoding must be used when posting files, and it's reasonably
119
- safe to use it in other times too. However, it may break request
120
- signing, such as with OAuth.
121
-
122
- Supports an optional ``fields`` parameter of key/value strings AND
123
- key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
124
- the MIME type is optional. For example::
125
-
126
- fields = {
127
- 'foo': 'bar',
128
- 'fakefile': ('foofile.txt', 'contents of foofile'),
129
- 'realfile': ('barfile.txt', open('realfile').read()),
130
- 'typedfile': ('bazfile.bin', open('bazfile').read(),
131
- 'image/jpeg'),
132
- 'nonamefile': 'contents of nonamefile field',
133
- }
134
-
135
- When uploading a file, providing a filename (the first parameter of the
136
- tuple) is optional but recommended to best mimic behavior of browsers.
137
-
138
- Note that if ``headers`` are supplied, the 'Content-Type' header will
139
- be overwritten because it depends on the dynamic random boundary string
140
- which is used to compose the body of the request. The random boundary
141
- string can be explicitly set with the ``multipart_boundary`` parameter.
142
- """
143
- if headers is None:
144
- headers = self.headers
145
-
146
- extra_kw = {"headers": {}}
147
-
148
- if fields:
149
- if "body" in urlopen_kw:
150
- raise TypeError(
151
- "request got values for both 'fields' and 'body', can only specify one."
152
- )
153
-
154
- if encode_multipart:
155
- body, content_type = encode_multipart_formdata(
156
- fields, boundary=multipart_boundary
157
- )
158
- else:
159
- body, content_type = (
160
- urlencode(fields),
161
- "application/x-www-form-urlencoded",
162
- )
163
-
164
- extra_kw["body"] = body
165
- extra_kw["headers"] = {"Content-Type": content_type}
166
-
167
- extra_kw["headers"].update(headers)
168
- extra_kw.update(urlopen_kw)
169
-
170
- return self.urlopen(method, url, **extra_kw)
 
spaces/CVH-vn1210/make_hair/minigpt4/common/logger.py DELETED
@@ -1,195 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import datetime
9
- import logging
10
- import time
11
- from collections import defaultdict, deque
12
-
13
- import torch
14
- import torch.distributed as dist
15
-
16
- from minigpt4.common import dist_utils
17
-
18
-
19
- class SmoothedValue(object):
20
- """Track a series of values and provide access to smoothed values over a
21
- window or the global series average.
22
- """
23
-
24
- def __init__(self, window_size=20, fmt=None):
25
- if fmt is None:
26
- fmt = "{median:.4f} ({global_avg:.4f})"
27
- self.deque = deque(maxlen=window_size)
28
- self.total = 0.0
29
- self.count = 0
30
- self.fmt = fmt
31
-
32
- def update(self, value, n=1):
33
- self.deque.append(value)
34
- self.count += n
35
- self.total += value * n
36
-
37
- def synchronize_between_processes(self):
38
- """
39
- Warning: does not synchronize the deque!
40
- """
41
- if not dist_utils.is_dist_avail_and_initialized():
42
- return
43
- t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
44
- dist.barrier()
45
- dist.all_reduce(t)
46
- t = t.tolist()
47
- self.count = int(t[0])
48
- self.total = t[1]
49
-
50
- @property
51
- def median(self):
52
- d = torch.tensor(list(self.deque))
53
- return d.median().item()
54
-
55
- @property
56
- def avg(self):
57
- d = torch.tensor(list(self.deque), dtype=torch.float32)
58
- return d.mean().item()
59
-
60
- @property
61
- def global_avg(self):
62
- return self.total / self.count
63
-
64
- @property
65
- def max(self):
66
- return max(self.deque)
67
-
68
- @property
69
- def value(self):
70
- return self.deque[-1]
71
-
72
- def __str__(self):
73
- return self.fmt.format(
74
- median=self.median,
75
- avg=self.avg,
76
- global_avg=self.global_avg,
77
- max=self.max,
78
- value=self.value,
79
- )
80
-
81
-
82
- class MetricLogger(object):
83
- def __init__(self, delimiter="\t"):
84
- self.meters = defaultdict(SmoothedValue)
85
- self.delimiter = delimiter
86
-
87
- def update(self, **kwargs):
88
- for k, v in kwargs.items():
89
- if isinstance(v, torch.Tensor):
90
- v = v.item()
91
- assert isinstance(v, (float, int))
92
- self.meters[k].update(v)
93
-
94
- def __getattr__(self, attr):
95
- if attr in self.meters:
96
- return self.meters[attr]
97
- if attr in self.__dict__:
98
- return self.__dict__[attr]
99
- raise AttributeError(
100
- "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
101
- )
102
-
103
- def __str__(self):
104
- loss_str = []
105
- for name, meter in self.meters.items():
106
- loss_str.append("{}: {}".format(name, str(meter)))
107
- return self.delimiter.join(loss_str)
108
-
109
- def global_avg(self):
110
- loss_str = []
111
- for name, meter in self.meters.items():
112
- loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
113
- return self.delimiter.join(loss_str)
114
-
115
- def synchronize_between_processes(self):
116
- for meter in self.meters.values():
117
- meter.synchronize_between_processes()
118
-
119
- def add_meter(self, name, meter):
120
- self.meters[name] = meter
121
-
122
- def log_every(self, iterable, print_freq, header=None):
123
- i = 0
124
- if not header:
125
- header = ""
126
- start_time = time.time()
127
- end = time.time()
128
- iter_time = SmoothedValue(fmt="{avg:.4f}")
129
- data_time = SmoothedValue(fmt="{avg:.4f}")
130
- space_fmt = ":" + str(len(str(len(iterable)))) + "d"
131
- log_msg = [
132
- header,
133
- "[{0" + space_fmt + "}/{1}]",
134
- "eta: {eta}",
135
- "{meters}",
136
- "time: {time}",
137
- "data: {data}",
138
- ]
139
- if torch.cuda.is_available():
140
- log_msg.append("max mem: {memory:.0f}")
141
- log_msg = self.delimiter.join(log_msg)
142
- MB = 1024.0 * 1024.0
143
- for obj in iterable:
144
- data_time.update(time.time() - end)
145
- yield obj
146
- iter_time.update(time.time() - end)
147
- if i % print_freq == 0 or i == len(iterable) - 1:
148
- eta_seconds = iter_time.global_avg * (len(iterable) - i)
149
- eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
150
- if torch.cuda.is_available():
151
- print(
152
- log_msg.format(
153
- i,
154
- len(iterable),
155
- eta=eta_string,
156
- meters=str(self),
157
- time=str(iter_time),
158
- data=str(data_time),
159
- memory=torch.cuda.max_memory_allocated() / MB,
160
- )
161
- )
162
- else:
163
- print(
164
- log_msg.format(
165
- i,
166
- len(iterable),
167
- eta=eta_string,
168
- meters=str(self),
169
- time=str(iter_time),
170
- data=str(data_time),
171
- )
172
- )
173
- i += 1
174
- end = time.time()
175
- total_time = time.time() - start_time
176
- total_time_str = str(datetime.timedelta(seconds=int(total_time)))
177
- print(
178
- "{} Total time: {} ({:.4f} s / it)".format(
179
- header, total_time_str, total_time / len(iterable)
180
- )
181
- )
182
-
183
-
184
- class AttrDict(dict):
185
- def __init__(self, *args, **kwargs):
186
- super(AttrDict, self).__init__(*args, **kwargs)
187
- self.__dict__ = self
188
-
189
-
190
- def setup_logger():
191
- logging.basicConfig(
192
- level=logging.INFO if dist_utils.is_main_process() else logging.WARN,
193
- format="%(asctime)s [%(levelname)s] %(message)s",
194
- handlers=[logging.StreamHandler()],
195
- )
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/tools/process.py DELETED
@@ -1,18 +0,0 @@
- # Process data
- import argparse
- from compute_softscore import compute_softscore
- from create_dictionary import create_dictionary
- from detection_features_converter import detection_features_converter
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--dataroot', type=str, default='../data/')
-     parser.add_argument('--ver', type=str, default='clean', help='version of the VQAv2 dataset to process. "clean" for the original data. default: clean')
-     parser.add_argument('--detector', type=str, default='R-50')
-     parser.add_argument('--feat', type=int, default=1024, help='feature size')
-     parser.add_argument('--nb', type=int, default=36)
-     parser.add_argument('--emb_dim', type=int, default=300)
-     args = parser.parse_args()
-     create_dictionary(args.dataroot, args.emb_dim)
-     compute_softscore(args.dataroot, args.ver)
-     detection_features_converter(args.dataroot, args.ver, args.detector, args.feat, args.nb)
 
spaces/CVPR/GFPGAN-example/gfpgan/utils.py DELETED
@@ -1,130 +0,0 @@
1
- import cv2
2
- import os
3
- import torch
4
- from basicsr.utils import img2tensor, tensor2img
5
- from basicsr.utils.download_util import load_file_from_url
6
- from facexlib.utils.face_restoration_helper import FaceRestoreHelper
7
- from torchvision.transforms.functional import normalize
8
-
9
- from gfpgan.archs.gfpganv1_arch import GFPGANv1
10
- from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean
11
-
12
- ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
-
14
-
15
- class GFPGANer():
16
- """Helper for restoration with GFPGAN.
17
-
18
- It will detect and crop faces, and then resize the faces to 512x512.
19
- GFPGAN is used to restored the resized faces.
20
- The background is upsampled with the bg_upsampler.
21
- Finally, the faces will be pasted back to the upsample background image.
22
-
23
- Args:
24
- model_path (str): The path to the GFPGAN model. It can be urls (will first download it automatically).
25
- upscale (float): The upscale of the final output. Default: 2.
26
- arch (str): The GFPGAN architecture. Option: clean | original. Default: clean.
27
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
28
- bg_upsampler (nn.Module): The upsampler for the background. Default: None.
29
- """
30
-
31
- def __init__(self, model_path, upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=None):
32
- self.upscale = upscale
33
- self.bg_upsampler = bg_upsampler
34
-
35
- # initialize model
36
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
37
- # initialize the GFP-GAN
38
- if arch == 'clean':
39
- self.gfpgan = GFPGANv1Clean(
40
- out_size=512,
41
- num_style_feat=512,
42
- channel_multiplier=channel_multiplier,
43
- decoder_load_path=None,
44
- fix_decoder=False,
45
- num_mlp=8,
46
- input_is_latent=True,
47
- different_w=True,
48
- narrow=1,
49
- sft_half=True)
50
- else:
51
- self.gfpgan = GFPGANv1(
52
- out_size=512,
53
- num_style_feat=512,
54
- channel_multiplier=channel_multiplier,
55
- decoder_load_path=None,
56
- fix_decoder=True,
57
- num_mlp=8,
58
- input_is_latent=True,
59
- different_w=True,
60
- narrow=1,
61
- sft_half=True)
62
- # initialize face helper
63
- self.face_helper = FaceRestoreHelper(
64
- upscale,
65
- face_size=512,
66
- crop_ratio=(1, 1),
67
- det_model='retinaface_resnet50',
68
- save_ext='png',
69
- device=self.device)
70
-
71
- if model_path.startswith('https://'):
72
- model_path = load_file_from_url(
73
- url=model_path, model_dir=os.path.join(ROOT_DIR, 'gfpgan/weights'), progress=True, file_name=None)
74
- loadnet = torch.load(model_path)
75
- if 'params_ema' in loadnet:
76
- keyname = 'params_ema'
77
- else:
78
- keyname = 'params'
79
- self.gfpgan.load_state_dict(loadnet[keyname], strict=True)
80
- self.gfpgan.eval()
81
- self.gfpgan = self.gfpgan.to(self.device)
82
-
83
- @torch.no_grad()
84
- def enhance(self, img, has_aligned=False, only_center_face=False, paste_back=True):
85
- self.face_helper.clean_all()
86
-
87
- if has_aligned: # the inputs are already aligned
88
- img = cv2.resize(img, (512, 512))
89
- self.face_helper.cropped_faces = [img]
90
- else:
91
- self.face_helper.read_image(img)
92
- # get face landmarks for each face
93
- self.face_helper.get_face_landmarks_5(only_center_face=only_center_face, eye_dist_threshold=5)
94
- # eye_dist_threshold=5: skip faces whose eye distance is smaller than 5 pixels
95
- # TODO: even with eye_dist_threshold, it will still introduce wrong detections and restorations.
96
- # align and warp each face
97
- self.face_helper.align_warp_face()
98
-
99
- # face restoration
100
- for cropped_face in self.face_helper.cropped_faces:
101
- # prepare data
102
- cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
103
- normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
104
- cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)
105
-
106
- try:
107
- output = self.gfpgan(cropped_face_t, return_rgb=False)[0]
108
- # convert to image
109
- restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1))
110
- except RuntimeError as error:
111
- print(f'\tFailed inference for GFPGAN: {error}.')
112
- restored_face = cropped_face
113
-
114
- restored_face = restored_face.astype('uint8')
115
- self.face_helper.add_restored_face(restored_face)
116
-
117
- if not has_aligned and paste_back:
118
- # upsample the background
119
- if self.bg_upsampler is not None:
120
- # Now only support RealESRGAN for upsampling background
121
- bg_img = self.bg_upsampler.enhance(img, outscale=self.upscale)[0]
122
- else:
123
- bg_img = None
124
-
125
- self.face_helper.get_inverse_affine(None)
126
- # paste each restored face to the input image
127
- restored_img = self.face_helper.paste_faces_to_input_image(upsample_img=bg_img)
128
- return self.face_helper.cropped_faces, self.face_helper.restored_faces, restored_img
129
- else:
130
- return self.face_helper.cropped_faces, self.face_helper.restored_faces, None
 
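For reference, a minimal, hedged sketch of how the `GFPGANer` helper deleted above is typically driven follows. The checkpoint path and input/output file names are placeholder assumptions; the keyword arguments simply mirror the constructor and `enhance()` signatures shown in the file.

```python
# Illustrative sketch only: restore faces in a BGR image with GFPGANer.
# MODEL_PATH is a placeholder assumption; supply a real checkpoint path or URL,
# and pick arch='clean' or 'original' to match that checkpoint.
import cv2
from gfpgan import GFPGANer

MODEL_PATH = "GFPGAN_checkpoint.pth"  # placeholder

restorer = GFPGANer(
    model_path=MODEL_PATH,
    upscale=2,
    arch="clean",
    channel_multiplier=2,
    bg_upsampler=None,
)

img = cv2.imread("input.jpg", cv2.IMREAD_COLOR)
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, only_center_face=False, paste_back=True
)

if restored_img is not None:
    cv2.imwrite("restored.jpg", restored_img)
```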
spaces/CVPR/LIVE/thrust/README.md DELETED
@@ -1,161 +0,0 @@
1
- Thrust: Code at the speed of light
2
- ==================================
3
-
4
- Thrust is a C++ parallel programming library which resembles the C++ Standard
5
- Library. Thrust's **high-level** interface greatly enhances
6
- programmer **productivity** while enabling performance portability between
7
- GPUs and multicore CPUs. **Interoperability** with established technologies
8
- (such as CUDA, TBB, and OpenMP) facilitates integration with existing
9
- software. Develop **high-performance** applications rapidly with Thrust!
10
-
11
- Thrust is included in the NVIDIA HPC SDK and the CUDA Toolkit.
12
-
13
- Refer to the [Quick Start Guide](http://github.com/thrust/thrust/wiki/Quick-Start-Guide) page for further information and examples.
14
-
15
- Examples
16
- --------
17
-
18
- Thrust is best explained through examples. The following source code
19
- generates random numbers serially and then transfers them to a parallel
20
- device where they are sorted.
21
-
22
- ```c++
23
- #include <thrust/host_vector.h>
24
- #include <thrust/device_vector.h>
25
- #include <thrust/generate.h>
26
- #include <thrust/sort.h>
27
- #include <thrust/copy.h>
28
- #include <algorithm>
29
- #include <cstdlib>
30
-
31
- int main(void)
32
- {
33
- // generate 32M random numbers serially
34
- thrust::host_vector<int> h_vec(32 << 20);
35
- std::generate(h_vec.begin(), h_vec.end(), rand);
36
-
37
- // transfer data to the device
38
- thrust::device_vector<int> d_vec = h_vec;
39
-
40
- // sort data on the device (846M keys per second on GeForce GTX 480)
41
- thrust::sort(d_vec.begin(), d_vec.end());
42
-
43
- // transfer data back to host
44
- thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
45
-
46
- return 0;
47
- }
48
- ```
49
-
50
- This code sample computes the sum of 100 random numbers in parallel:
51
-
52
- ```c++
53
- #include <thrust/host_vector.h>
54
- #include <thrust/device_vector.h>
55
- #include <thrust/generate.h>
56
- #include <thrust/reduce.h>
57
- #include <thrust/functional.h>
58
- #include <algorithm>
59
- #include <cstdlib>
60
-
61
- int main(void)
62
- {
63
- // generate random data serially
64
- thrust::host_vector<int> h_vec(100);
65
- std::generate(h_vec.begin(), h_vec.end(), rand);
66
-
67
- // transfer to device and compute sum
68
- thrust::device_vector<int> d_vec = h_vec;
69
- int x = thrust::reduce(d_vec.begin(), d_vec.end(), 0, thrust::plus<int>());
70
- return 0;
71
- }
72
- ```
73
-
74
- Releases
75
- --------
76
-
77
- Thrust is distributed with the NVIDIA HPC SDK and the CUDA Toolkit in addition
78
- to GitHub.
79
-
80
- See the [changelog](CHANGELOG.md) for details about specific releases.
81
-
82
- | Thrust Release | Included In |
83
- | ----------------- | --------------------------------------- |
84
- | 1.9.10-1 | NVIDIA HPC SDK 20.7 & CUDA Toolkit 11.1 |
85
- | 1.9.10 | NVIDIA HPC SDK 20.5 |
86
- | 1.9.9 | CUDA Toolkit 11.0 |
87
- | 1.9.8-1 | NVIDIA HPC SDK 20.3 |
88
- | 1.9.8 | CUDA Toolkit 11.0 Early Access |
89
- | 1.9.7-1 | CUDA Toolkit 10.2 for Tegra |
90
- | 1.9.7 | CUDA Toolkit 10.2 |
91
- | 1.9.6-1 | NVIDIA HPC SDK 20.3 |
92
- | 1.9.6 | CUDA Toolkit 10.1 Update 2 |
93
- | 1.9.5 | CUDA Toolkit 10.1 Update 1 |
94
- | 1.9.4 | CUDA Toolkit 10.1 |
95
- | 1.9.3 | CUDA Toolkit 10.0 |
96
- | 1.9.2 | CUDA Toolkit 9.2 |
97
- | 1.9.1-2 | CUDA Toolkit 9.1 |
98
- | 1.9.0-5 | CUDA Toolkit 9.0 |
99
- | 1.8.3 | CUDA Toolkit 8.0 |
100
- | 1.8.2 | CUDA Toolkit 7.5 |
101
- | 1.8.1 | CUDA Toolkit 7.0 |
102
- | 1.8.0 | |
103
- | 1.7.2 | CUDA Toolkit 6.5 |
104
- | 1.7.1 | CUDA Toolkit 6.0 |
105
- | 1.7.0 | CUDA Toolkit 5.5 |
106
- | 1.6.0 | |
107
- | 1.5.3 | CUDA Toolkit 5.0 |
108
- | 1.5.2 | CUDA Toolkit 4.2 |
109
- | 1.5.1 | CUDA Toolkit 4.1 |
110
- | 1.5.0 | |
111
- | 1.4.0 | CUDA Toolkit 4.0 |
112
- | 1.3.0 | |
113
- | 1.2.1 | |
114
- | 1.2.0 | |
115
- | 1.1.1 | |
116
- | 1.1.0 | |
117
- | 1.0.0 | |
118
-
119
- Adding Thrust To A CMake Project
120
- --------------------------------
121
-
122
- Since Thrust is a header library, there is no need to build or install Thrust
123
- to use it. The `thrust` directory contains a complete, ready-to-use Thrust
124
- package upon checkout.
125
-
126
- We provide CMake configuration files that make it easy to include Thrust
127
- from other CMake projects. See the [CMake README](thrust/cmake/README.md)
128
- for details.
129
-
130
- Development Process
131
- -------------------
132
-
133
- Thrust uses the [CMake build system](https://cmake.org/) to build unit tests,
134
- examples, and header tests. To build Thrust as a developer, the following
135
- recipe should be followed:
136
-
137
- ```
138
- # Clone Thrust and CUB repos recursively:
139
- git clone --recursive https://github.com/thrust/thrust.git
140
- cd thrust
141
-
142
- # Create build directory:
143
- mkdir build
144
- cd build
145
-
146
- # Configure -- use one of the following:
147
- cmake .. # Command line interface.
148
- ccmake .. # ncurses GUI (Linux only)
149
- cmake-gui # Graphical UI, set source/build directories in the app
150
-
151
- # Build:
152
- cmake --build . -j <num jobs> # invokes make (or ninja, etc)
153
-
154
- # Run tests and examples:
155
- ctest
156
- ```
157
-
158
- By default, a serial `CPP` host system, `CUDA` accelerated device system, and
159
- C++14 standard are used. This can be changed in CMake. More information on
160
- configuring your Thrust build and creating a pull request can be found in
161
- [CONTRIBUTING.md](CONTRIBUTING.md).
 
spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/operator_adaptors.h DELETED
@@ -1,137 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/functional/argument.h>
21
- #include <thrust/detail/type_deduction.h>
22
- #include <thrust/tuple.h>
23
- #include <thrust/detail/type_traits.h>
24
- #include <thrust/type_traits/void_t.h>
25
-
26
- #include <type_traits>
27
-
28
- namespace thrust
29
- {
30
- namespace detail
31
- {
32
- namespace functional
33
- {
34
-
35
- // Adapts a transparent unary functor from functional.h (e.g. thrust::negate<>)
36
- // into the Eval interface.
37
- template <typename UnaryFunctor>
38
- struct transparent_unary_operator
39
- {
40
- template <typename>
41
- using operator_type = UnaryFunctor;
42
-
43
- template <typename Env>
44
- using argument =
45
- typename thrust::detail::eval_if<
46
- thrust::tuple_size<Env>::value != 1,
47
- thrust::detail::identity_<thrust::null_type>,
48
- thrust::detail::functional::argument_helper<0, Env>
49
- >::type;
50
-
51
- template <typename Env>
52
- struct result_type_impl
53
- {
54
- using type = decltype(
55
- std::declval<UnaryFunctor>()(std::declval<argument<Env>>()));
56
- };
57
-
58
- template <typename Env>
59
- using result_type =
60
- typename thrust::detail::eval_if<
61
- std::is_same<thrust::null_type, argument<Env>>::value,
62
- thrust::detail::identity_<thrust::null_type>,
63
- result_type_impl<Env>
64
- >::type;
65
-
66
- template <typename Env>
67
- struct result
68
- {
69
- using op_type = UnaryFunctor;
70
- using type = result_type<Env>;
71
- };
72
-
73
- template <typename Env>
74
- __host__ __device__
75
- result_type<Env> eval(Env&& e) const
76
- THRUST_RETURNS(UnaryFunctor{}(thrust::get<0>(THRUST_FWD(e))))
77
- };
78
-
79
-
80
- // Adapts a transparent binary functor from functional.h (e.g. thrust::less<>)
81
- // into the Eval interface.
82
- template <typename BinaryFunctor>
83
- struct transparent_binary_operator
84
- {
85
- template <typename>
86
- using operator_type = BinaryFunctor;
87
-
88
- template <typename Env>
89
- using first_argument =
90
- typename thrust::detail::eval_if<
91
- thrust::tuple_size<Env>::value != 2,
92
- thrust::detail::identity_<thrust::null_type>,
93
- thrust::detail::functional::argument_helper<0, Env>
94
- >::type;
95
-
96
- template <typename Env>
97
- using second_argument =
98
- typename thrust::detail::eval_if<
99
- thrust::tuple_size<Env>::value != 2,
100
- thrust::detail::identity_<thrust::null_type>,
101
- thrust::detail::functional::argument_helper<1, Env>
102
- >::type;
103
-
104
- template <typename Env>
105
- struct result_type_impl
106
- {
107
- using type = decltype(
108
- std::declval<BinaryFunctor>()(std::declval<first_argument<Env>>(),
109
- std::declval<second_argument<Env>>()));
110
- };
111
-
112
- template <typename Env>
113
- using result_type =
114
- typename thrust::detail::eval_if<
115
- (std::is_same<thrust::null_type, first_argument<Env>>::value ||
116
- std::is_same<thrust::null_type, second_argument<Env>>::value),
117
- thrust::detail::identity_<thrust::null_type>,
118
- result_type_impl<Env>
119
- >::type;
120
-
121
- template <typename Env>
122
- struct result
123
- {
124
- using op_type = BinaryFunctor;
125
- using type = result_type<Env>;
126
- };
127
-
128
- template <typename Env>
129
- __host__ __device__
130
- result_type<Env> eval(Env&& e) const
131
- THRUST_RETURNS(BinaryFunctor{}(thrust::get<0>(e), thrust::get<1>(e)))
132
- };
133
-
134
- } // end functional
135
- } // end detail
136
- } // end thrust
137
-
 
spaces/CVPR/LIVE/thrust/thrust/reverse.h DELETED
@@ -1,215 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file reverse.h
19
- * \brief Reverses the order of a range
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/detail/execution_policy.h>
26
-
27
- namespace thrust
28
- {
29
-
30
-
31
- /*! \addtogroup reordering
32
- * \ingroup algorithms
33
- */
34
-
35
-
36
- /*! \p reverse reverses a range. That is: for every <tt>i</tt> such that
37
- * <tt>0 <= i <= (last - first) / 2</tt>, it exchanges <tt>*(first + i)</tt>
38
- * and <tt>*(last - (i + 1))</tt>.
39
- *
40
- * The algorithm's execution is parallelized as determined by \p exec.
41
- *
42
- * \param exec The execution policy to use for parallelization.
43
- * \param first The beginning of the range to reverse.
44
- * \param last The end of the range to reverse.
45
- *
46
- * \tparam DerivedPolicy The name of the derived execution policy.
47
- * \tparam BidirectionalIterator is a model of <a href="http://www.sgi.com/tech/stl/BidirectionalIterator.html">Bidirectional Iterator</a> and
48
- * \p BidirectionalIterator is mutable.
49
- *
50
- * The following code snippet demonstrates how to use \p reverse to reverse a
51
- * \p device_vector of integers using the \p thrust::device execution policy for
52
- * parallelization:
53
- *
54
- * \code
55
- * #include <thrust/reverse.h>
56
- * #include <thrust/execution_policy.h>
57
- * ...
58
- * const int N = 6;
59
- * int data[N] = {0, 1, 2, 3, 4, 5};
60
- * thrust::device_vector<int> v(data, data + N);
61
- * thrust::reverse(thrust::device, v.begin(), v.end());
62
- * // v is now {5, 4, 3, 2, 1, 0}
63
- * \endcode
64
- *
65
- * \see http://www.sgi.com/tech/stl/reverse.html
66
- * \see \p reverse_copy
67
- * \see \p reverse_iterator
68
- */
69
- template<typename DerivedPolicy, typename BidirectionalIterator>
70
- __host__ __device__
71
- void reverse(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
72
- BidirectionalIterator first,
73
- BidirectionalIterator last);
74
-
75
-
76
- /*! \p reverse reverses a range. That is: for every <tt>i</tt> such that
77
- * <tt>0 <= i <= (last - first) / 2</tt>, it exchanges <tt>*(first + i)</tt>
78
- * and <tt>*(last - (i + 1))</tt>.
79
- *
80
- * \param first The beginning of the range to reverse.
81
- * \param last The end of the range to reverse.
82
- *
83
- * \tparam BidirectionalIterator is a model of <a href="http://www.sgi.com/tech/stl/BidirectionalIterator.html">Bidirectional Iterator</a> and
84
- * \p BidirectionalIterator is mutable.
85
- *
86
- * The following code snippet demonstrates how to use \p reverse to reverse a
87
- * \p device_vector of integers.
88
- *
89
- * \code
90
- * #include <thrust/reverse.h>
91
- * ...
92
- * const int N = 6;
93
- * int data[N] = {0, 1, 2, 3, 4, 5};
94
- * thrust::device_vector<int> v(data, data + N);
95
- * thrust::reverse(v.begin(), v.end());
96
- * // v is now {5, 4, 3, 2, 1, 0}
97
- * \endcode
98
- *
99
- * \see http://www.sgi.com/tech/stl/reverse.html
100
- * \see \p reverse_copy
101
- * \see \p reverse_iterator
102
- */
103
- template<typename BidirectionalIterator>
104
- void reverse(BidirectionalIterator first,
105
- BidirectionalIterator last);
106
-
107
-
108
- /*! \p reverse_copy differs from \p reverse only in that the reversed range
109
- * is written to a different output range, rather than inplace.
110
- *
111
- * \p reverse_copy copies elements from the range <tt>[first, last)</tt> to the
112
- * range <tt>[result, result + (last - first))</tt> such that the copy is a
113
- * reverse of the original range. Specifically: for every <tt>i</tt> such that
114
- * <tt>0 <= i < (last - first)</tt>, \p reverse_copy performs the assignment
115
- * <tt>*(result + (last - first) - i) = *(first + i)</tt>.
116
- *
117
-  * The return value is <tt>result + (last - first)</tt>.
118
- *
119
- * The algorithm's execution is parallelized as determined by \p exec.
120
- *
121
- * \param exec The execution policy to use for parallelization.
122
- * \param first The beginning of the range to reverse.
123
- * \param last The end of the range to reverse.
124
- * \param result The beginning of the output range.
125
- *
126
- * \tparam DerivedPolicy The name of the derived execution policy.
127
- * \tparam BidirectionalIterator is a model of <a href="http://www.sgi.com/tech/stl/BidirectionalIterator.html">Bidirectional Iterator</a>,
128
- * and \p BidirectionalIterator's \p value_type is convertible to \p OutputIterator's \p value_type.
129
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
130
- *
131
- * \pre The range <tt>[first, last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap.
132
- *
133
- * The following code snippet demonstrates how to use \p reverse_copy to reverse
134
- * an input \p device_vector of integers to an output \p device_vector using the \p thrust::device
135
- * execution policy for parallelization:
136
- *
137
- * \code
138
- * #include <thrust/reverse.h>
139
- * #include <thrust/execution_policy.h>
140
- * ...
141
- * const int N = 6;
142
- * int data[N] = {0, 1, 2, 3, 4, 5};
143
- * thrust::device_vector<int> input(data, data + N);
144
- * thrust::device_vector<int> output(N);
145
-  * thrust::reverse_copy(thrust::device, input.begin(), input.end(), output.begin());
146
- * // input is still {0, 1, 2, 3, 4, 5}
147
- * // output is now {5, 4, 3, 2, 1, 0}
148
- * \endcode
149
- *
150
- * \see http://www.sgi.com/tech/stl/reverse_copy.html
151
- * \see \p reverse
152
- * \see \p reverse_iterator
153
- */
154
- template<typename DerivedPolicy, typename BidirectionalIterator, typename OutputIterator>
155
- __host__ __device__
156
- OutputIterator reverse_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
157
- BidirectionalIterator first,
158
- BidirectionalIterator last,
159
- OutputIterator result);
160
-
161
-
162
- /*! \p reverse_copy differs from \p reverse only in that the reversed range
163
- * is written to a different output range, rather than inplace.
164
- *
165
- * \p reverse_copy copies elements from the range <tt>[first, last)</tt> to the
166
- * range <tt>[result, result + (last - first))</tt> such that the copy is a
167
- * reverse of the original range. Specifically: for every <tt>i</tt> such that
168
- * <tt>0 <= i < (last - first)</tt>, \p reverse_copy performs the assignment
169
- * <tt>*(result + (last - first) - i) = *(first + i)</tt>.
170
- *
171
-  * The return value is <tt>result + (last - first)</tt>.
172
- *
173
- * \param first The beginning of the range to reverse.
174
- * \param last The end of the range to reverse.
175
- * \param result The beginning of the output range.
176
- *
177
- * \tparam BidirectionalIterator is a model of <a href="http://www.sgi.com/tech/stl/BidirectionalIterator.html">Bidirectional Iterator</a>,
178
- * and \p BidirectionalIterator's \p value_type is convertible to \p OutputIterator's \p value_type.
179
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
180
- *
181
- * \pre The range <tt>[first, last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap.
182
- *
183
- * The following code snippet demonstrates how to use \p reverse_copy to reverse
184
- * an input \p device_vector of integers to an output \p device_vector.
185
- *
186
- * \code
187
- * #include <thrust/reverse.h>
188
- * ...
189
- * const int N = 6;
190
- * int data[N] = {0, 1, 2, 3, 4, 5};
191
- * thrust::device_vector<int> input(data, data + N);
192
- * thrust::device_vector<int> output(N);
193
-  * thrust::reverse_copy(input.begin(), input.end(), output.begin());
194
- * // input is still {0, 1, 2, 3, 4, 5}
195
- * // output is now {5, 4, 3, 2, 1, 0}
196
- * \endcode
197
- *
198
- * \see http://www.sgi.com/tech/stl/reverse_copy.html
199
- * \see \p reverse
200
- * \see \p reverse_iterator
201
- */
202
- template<typename BidirectionalIterator, typename OutputIterator>
203
- OutputIterator reverse_copy(BidirectionalIterator first,
204
- BidirectionalIterator last,
205
- OutputIterator result);
206
-
207
-
208
- /*! \} // end reordering
209
- */
210
-
211
-
212
- } // end thrust
213
-
214
- #include <thrust/detail/reverse.inl>
215
-
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/per_device_resource.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special per device resource functions
22
-
 
spaces/CVPR/LIVE/thrust/thrust/system/system_error.h DELETED
@@ -1,179 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file system/system_error.h
19
- * \brief An exception object used to report error conditions that have an
20
- * associated error code
21
- */
22
-
23
- #pragma once
24
-
25
- #include <thrust/detail/config.h>
26
- #include <stdexcept>
27
- #include <string>
28
-
29
- #include <thrust/system/error_code.h>
30
-
31
- namespace thrust
32
- {
33
-
34
- namespace system
35
- {
36
-
37
- // [19.5.5] Class system_error
38
-
39
- // [19.5.5.1] Class system_error overview
40
-
41
- /*! \addtogroup system_diagnostics System Diagnostics
42
- * \ingroup system
43
- * \{
44
- */
45
-
46
- /*! \brief The class \p system_error describes an exception object used to report error
47
- * conditions that have an associated \p error_code. Such error conditions typically
48
- * originate from the operating system or other low-level application program interfaces.
49
- *
50
- * Thrust uses \p system_error to report the error codes returned from device backends
51
- * such as the CUDA runtime.
52
- *
53
- * The following code listing demonstrates how to catch a \p system_error to recover
54
- * from an error.
55
- *
56
- * \code
57
- *
58
- * #include <thrust/device_vector.h>
59
- * #include <thrust/system.h>
60
- * #include <thrust/sort.h>
61
- *
62
- * void terminate_gracefully(void)
63
- * {
64
- * // application-specific termination code here
65
- * ...
66
- * }
67
- *
68
- * int main(void)
69
- * {
70
- * try
71
- * {
72
- * thrust::device_vector<float> vec;
73
- * thrust::sort(vec.begin(), vec.end());
74
- * }
75
- * catch(thrust::system_error e)
76
- * {
77
- * std::cerr << "Error inside sort: " << e.what() << std::endl;
78
- * terminate_gracefully();
79
- * }
80
- *
81
- * return 0;
82
- * }
83
- *
84
- * \endcode
85
- *
86
- * \note If an error represents an out-of-memory condition, implementations are encouraged
87
- * to throw an exception object of type \p std::bad_alloc rather than \p system_error.
88
- */
89
- class system_error
90
- : public std::runtime_error
91
- {
92
- public:
93
- // [19.5.5.2] Class system_error members
94
-
95
- /*! Constructs an object of class \p system_error.
96
- * \param ec The value returned by \p code().
97
- * \param what_arg A string to include in the result returned by \p what().
98
- * \post <tt>code() == ec</tt>.
99
- * \post <tt>std::string(what()).find(what_arg) != string::npos</tt>.
100
- */
101
- inline system_error(error_code ec, const std::string &what_arg);
102
-
103
- /*! Constructs an object of class \p system_error.
104
- * \param ec The value returned by \p code().
105
- * \param what_arg A string to include in the result returned by \p what().
106
- * \post <tt>code() == ec</tt>.
107
- * \post <tt>std::string(what()).find(what_arg) != string::npos</tt>.
108
- */
109
- inline system_error(error_code ec, const char *what_arg);
110
-
111
- /*! Constructs an object of class \p system_error.
112
- * \param ec The value returned by \p code().
113
- * \post <tt>code() == ec</tt>.
114
- */
115
- inline system_error(error_code ec);
116
-
117
- /*! Constructs an object of class \p system_error.
118
- * \param ev The error value used to create an \p error_code.
119
- * \param ecat The \p error_category used to create an \p error_code.
120
- * \param what_arg A string to include in the result returned by \p what().
121
- * \post <tt>code() == error_code(ev, ecat)</tt>.
122
- * \post <tt>std::string(what()).find(what_arg) != string::npos</tt>.
123
- */
124
- inline system_error(int ev, const error_category &ecat, const std::string &what_arg);
125
-
126
- /*! Constructs an object of class \p system_error.
127
- * \param ev The error value used to create an \p error_code.
128
- * \param ecat The \p error_category used to create an \p error_code.
129
- * \param what_arg A string to include in the result returned by \p what().
130
- * \post <tt>code() == error_code(ev, ecat)</tt>.
131
- * \post <tt>std::string(what()).find(what_arg) != string::npos</tt>.
132
- */
133
- inline system_error(int ev, const error_category &ecat, const char *what_arg);
134
-
135
- /*! Constructs an object of class \p system_error.
136
- * \param ev The error value used to create an \p error_code.
137
- * \param ecat The \p error_category used to create an \p error_code.
138
- * \post <tt>code() == error_code(ev, ecat)</tt>.
139
- */
140
- inline system_error(int ev, const error_category &ecat);
141
-
142
- /*! Destructor does not throw.
143
- */
144
- inline virtual ~system_error(void) throw () {};
145
-
146
- /*! Returns an object encoding the error.
147
- * \return <tt>ec</tt> or <tt>error_code(ev, ecat)</tt>, from the
148
- * constructor, as appropriate.
149
- */
150
- inline const error_code &code(void) const throw();
151
-
152
- /*! Returns a human-readable string indicating the nature of the error.
153
- * \return a string incorporating <tt>code().message()</tt> and the
154
- * arguments supplied in the constructor.
155
- */
156
- inline const char *what(void) const throw();
157
-
158
- /*! \cond
159
- */
160
- private:
161
- error_code m_error_code;
162
- mutable std::string m_what;
163
-
164
- /*! \endcond
165
- */
166
- }; // end system_error
167
-
168
- } // end system
169
-
170
- /*! \} // end system_diagnostics
171
- */
172
-
173
- // import names into thrust::
174
- using system::system_error;
175
-
176
- } // end thrust
177
-
178
- #include <thrust/system/detail/system_error.inl>
179
-
 
spaces/CVPR/unicl-zero-shot-img-recog/app.py DELETED
@@ -1,163 +0,0 @@
1
- import argparse
2
- import requests
3
- import gradio as gr
4
- import numpy as np
5
- import cv2
6
- import torch
7
- import torch.nn as nn
8
- from PIL import Image
9
- from pathlib import Path
10
- from torchvision import transforms
11
- from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
12
- from timm.data import create_transform
13
- from config import get_config
14
- from model import build_model
15
-
16
- # Download human-readable labels for ImageNet.
17
- response = requests.get("https://git.io/JJkYN")
18
- labels = response.text.split("\n")
19
-
20
- def parse_option():
21
- parser = argparse.ArgumentParser('UniCL demo script', add_help=False)
22
- parser.add_argument('--cfg', type=str, default="configs/unicl_swin_base.yaml", metavar="FILE", help='path to config file', )
23
- args, unparsed = parser.parse_known_args()
24
-
25
- config = get_config(args)
26
-
27
- return args, config
28
-
29
- def build_transforms(img_size, center_crop=True):
30
- t = [transforms.ToPILImage()]
31
- if center_crop:
32
- size = int((256 / 224) * img_size)
33
- t.append(
34
- transforms.Resize(size)
35
- )
36
- t.append(
37
- transforms.CenterCrop(img_size)
38
- )
39
- else:
40
- t.append(
41
- transforms.Resize(img_size)
42
- )
43
- t.append(transforms.ToTensor())
44
- t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
45
- return transforms.Compose(t)
46
-
47
- def build_transforms4display(img_size, center_crop=True):
48
- t = [transforms.ToPILImage()]
49
- if center_crop:
50
- size = int((256 / 224) * img_size)
51
- t.append(
52
- transforms.Resize(size)
53
- )
54
- t.append(
55
- transforms.CenterCrop(img_size)
56
- )
57
- else:
58
- t.append(
59
- transforms.Resize(img_size)
60
- )
61
- t.append(transforms.ToTensor())
62
- return transforms.Compose(t)
63
-
64
- args, config = parse_option()
65
-
66
- '''
67
- build model
68
- '''
69
- model = build_model(config)
70
-
71
- url = './in21k_yfcc14m_gcc15m_swin_base.pth'
72
- checkpoint = torch.load(url, map_location="cpu")
73
- model.load_state_dict(checkpoint["model"])
74
- model.eval()
75
-
76
- '''
77
- build data transform
78
- '''
79
- eval_transforms = build_transforms(224, center_crop=True)
80
- display_transforms = build_transforms4display(224, center_crop=True)
81
-
82
- '''
83
- build upsampler
84
- '''
85
- # upsampler = nn.Upsample(scale_factor=16, mode='bilinear')
86
-
87
- '''
88
- borrow code from here: https://github.com/jacobgil/pytorch-grad-cam/blob/master/pytorch_grad_cam/utils/image.py
89
- '''
90
- def show_cam_on_image(img: np.ndarray,
91
- mask: np.ndarray,
92
- use_rgb: bool = False,
93
- colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
94
-     """ This function overlays the cam mask on the image as a heatmap.
95
- By default the heatmap is in BGR format.
96
- :param img: The base image in RGB or BGR format.
97
- :param mask: The cam mask.
98
- :param use_rgb: Whether to use an RGB or BGR heatmap, this should be set to True if 'img' is in RGB format.
99
- :param colormap: The OpenCV colormap to be used.
100
- :returns: The default image with the cam overlay.
101
- """
102
- heatmap = cv2.applyColorMap(np.uint8(255 * mask), colormap)
103
- if use_rgb:
104
- heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
105
- heatmap = np.float32(heatmap) / 255
106
-
107
- if np.max(img) > 1:
108
- raise Exception(
109
-             "The input image should be np.float32 in the range [0, 1]")
110
-
111
- cam = 0.7*heatmap + 0.3*img
112
- # cam = cam / np.max(cam)
113
- return np.uint8(255 * cam)
114
-
115
- def recognize_image(image, texts):
116
- img_t = eval_transforms(image)
117
- img_d = display_transforms(image).permute(1, 2, 0).numpy()
118
-
119
- text_embeddings = model.get_text_embeddings(texts.split(';'))
120
-
121
- # compute output
122
- feat_img, feat_map, H, W = model.encode_image(img_t.unsqueeze(0), output_map=True)
123
- output = model.logit_scale.exp() * feat_img @ text_embeddings.t()
124
- prediction = output.softmax(-1).flatten()
125
-
126
- # generate feat map given the top matched texts
127
- output_map = (feat_map * text_embeddings[prediction.argmax()].unsqueeze(-1)).sum(1).softmax(-1)
128
- output_map = output_map.view(1, 1, H, W)
129
-
130
- output_map = nn.Upsample(size=img_t.shape[1:], mode='bilinear')(output_map)
131
- output_map = output_map.squeeze(1).detach().permute(1, 2, 0).numpy()
132
- output_map = (output_map - output_map.min()) / (output_map.max() - output_map.min())
133
- heatmap = show_cam_on_image(img_d, output_map, use_rgb=True)
134
-
135
- show_img = np.concatenate((np.uint8(255 * img_d), heatmap), 1)
136
- return {texts.split(';')[i]: float(prediction[i]) for i in range(len(texts.split(';')))}, Image.fromarray(show_img)
137
-
138
-
139
- image = gr.inputs.Image()
140
- label = gr.outputs.Label(num_top_classes=100)
141
-
142
- description = "UniCL for Zero-shot Image Recognition. Given an image, our model maps it to an arbitrary text in a candidate pool."
143
- gr.Interface(
144
- description=description,
145
- fn=recognize_image,
146
- inputs=["image", "text"],
147
- outputs=[
148
- label,
149
- gr.outputs.Image(
150
- type="pil",
151
- label="crop input/heat map"),
152
- ],
153
- examples=[
154
- ["./elephants.png", "an elephant; an elephant walking in the river; four elephants walking in the river"],
155
- ["./apple_with_ipod.jpg", "an ipod; an apple with a write note 'ipod'; an apple with a write note 'iphone'"],
156
- ["./apple_with_ipod.jpg", "a write note 'ipod'; a write note 'ipad'; a write note 'iphone'"],
157
- ["./crowd2.jpg", "a street; a street with a woman walking in the middle; a street with a man walking in the middle"],
158
- ["./donut.png", "a bread; a donut; some donuts"],
159
- ["./horse.png", "an image of horse; an image of cow; an image of dog"],
160
- ["./dog_and_cat.jfif", "a dog; a cat; dog and cat"],
161
- ],
162
- article=Path("docs/intro.md").read_text()
163
- ).launch()
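The heart of `recognize_image()` above is a scaled image-text similarity followed by a softmax over the candidate prompts. The sketch below isolates just that step with dummy tensors so the scoring logic is easy to follow; the 512-dimensional shapes and the fixed `100.0` scale are illustrative assumptions, whereas the app uses `model.logit_scale.exp()` and embeddings produced by UniCL.

```python
# Zero-shot scoring step from recognize_image(), shown on dummy tensors.
# Shapes and the 100.0 scale are illustrative; the app uses UniCL embeddings
# and model.logit_scale.exp().
import torch

feat_img = torch.randn(1, 512)          # image embedding (1 x D)
text_embeddings = torch.randn(3, 512)   # one row per candidate prompt (N x D)

logits = 100.0 * feat_img @ text_embeddings.t()  # (1 x N) similarity scores
probs = logits.softmax(-1).flatten()             # one probability per prompt

prompts = ['an elephant', 'a dog', 'a cat']
print({prompts[i]: float(probs[i]) for i in range(len(prompts))})
```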
 
spaces/Chintan-Donda/KKMS-KSSW-HF/src/web_crawler.py DELETED
@@ -1,58 +0,0 @@
1
- import requests
2
- from bs4 import BeautifulSoup as bs
3
-
4
-
5
- class LOAD_ONLINE_PDF_IPM_PACKAGES:
6
- def __init__(self):
7
- self.base_url = 'https://ppqs.gov.in/ipm-packages'
8
-
9
- self.ipm_packages = []
10
- self.pdfs_urls = []
11
-
12
- self.headers = {
13
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
14
- }
15
-
16
-
17
- def _get_ipm_packages_name_list(self):
18
- """
19
- Parse HTML page to get the names of each IPM Package
20
- """
21
-
22
- response = requests.get(
23
- self.base_url,
24
- headers=self.headers,
25
- )
26
-
27
- soup = bs(response.text, 'html.parser')
28
- packages = soup.findAll('span', {'class': 'field-content region-name'}, limit=None)
29
- for package in packages:
30
- self.ipm_packages.append(package.a['href'].split('/')[-1])
31
-
32
-
33
- def get_ipm_packages_pdfs_list(self):
34
- """
35
- Parse HTML page to get the PDF URLs of each IPM Package
36
- """
37
- self._get_ipm_packages_name_list()
38
-
39
- for ip in self.ipm_packages:
40
- source_url = f'{self.base_url}/{ip}'
41
- print(f'Loading PDFs from: {source_url}')
42
-
43
- response = requests.get(
44
- source_url,
45
- headers=self.headers,
46
- )
47
-
48
- soup = bs(response.text, 'html.parser')
49
- urls = soup.findAll('td', {'class': 'views-field views-field-php'}, limit=None)
50
- for url in urls:
51
- self.pdfs_urls.append(url.a['href'])
52
-
53
-
54
- def get_ipm_packages_pdfs_urls():
55
- pdf = LOAD_ONLINE_PDF_IPM_PACKAGES()
56
- pdf.get_ipm_packages_pdfs_list()
57
- print('Total pdfs:', len(pdf.pdfs_urls))
58
- return pdf.pdfs_urls
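The two `findAll()` calls above are the entire scraping logic: package names come from `<span class="field-content region-name">` anchors and PDF links from `<td class="views-field views-field-php">` cells. The offline sketch below exercises the same pattern on a hand-written HTML fragment (the markup is made up for illustration and is not the real ppqs.gov.in page), which is handy for checking the parsing without network access.

```python
# Offline sketch of the findAll() pattern used above, on made-up HTML.
from bs4 import BeautifulSoup as bs

html = """
<span class="field-content region-name"><a href="/ipm-packages/rice">Rice</a></span>
<td class="views-field views-field-php"><a href="/files/rice_ipm.pdf">PDF</a></td>
"""
soup = bs(html, 'html.parser')

packages = [tag.a['href'].split('/')[-1]
            for tag in soup.findAll('span', {'class': 'field-content region-name'})]
pdf_urls = [tag.a['href']
            for tag in soup.findAll('td', {'class': 'views-field views-field-php'})]

print(packages)  # ['rice']
print(pdf_urls)  # ['/files/rice_ipm.pdf']
```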
 
spaces/CikeyQI/meme-api/meme_generator/manager.py DELETED
@@ -1,104 +0,0 @@
1
- import importlib
2
- import importlib.util
3
- import pkgutil
4
- from pathlib import Path
5
- from typing import Dict, List, Optional, Union
6
-
7
- from .config import meme_config
8
- from .exception import NoSuchMeme
9
- from .log import logger
10
- from .meme import Meme, MemeArgsType, MemeFunction, MemeParamsType
11
-
12
- _memes: Dict[str, Meme] = {}
13
-
14
-
15
- def path_to_module_name(path: Path) -> str:
16
- rel_path = path.resolve().relative_to(Path.cwd().resolve())
17
- if rel_path.stem == "__init__":
18
- return ".".join(rel_path.parts[:-1])
19
- else:
20
- return ".".join(rel_path.parts[:-1] + (rel_path.stem,))
21
-
22
-
23
- def load_meme(module_path: Union[str, Path]):
24
- module_name = (
25
- path_to_module_name(module_path)
26
- if isinstance(module_path, Path)
27
- else module_path
28
- )
29
- try:
30
- importlib.import_module(module_name)
31
- except Exception as e:
32
- logger.opt(colors=True, exception=e).error(f"Failed to import {module_path}!")
33
-
34
-
35
- def load_memes(dir_path: Union[str, Path]):
36
- if isinstance(dir_path, Path):
37
- dir_path = str(dir_path.resolve())
38
-
39
- for module_info in pkgutil.iter_modules([dir_path]):
40
- if module_info.name.startswith("_"):
41
- continue
42
- if not (
43
- module_spec := module_info.module_finder.find_spec(module_info.name, None)
44
- ):
45
- continue
46
- if not (module_path := module_spec.origin):
47
- continue
48
- if not (module_loader := module_spec.loader):
49
- continue
50
- try:
51
- module = importlib.util.module_from_spec(module_spec)
52
- module_loader.exec_module(module)
53
- except Exception as e:
54
- logger.opt(colors=True, exception=e).error(
55
- f"Failed to import {module_path}!"
56
- )
57
-
58
-
59
- def add_meme(
60
- key: str,
61
- function: MemeFunction,
62
- *,
63
- min_images: int = 0,
64
- max_images: int = 0,
65
- min_texts: int = 0,
66
- max_texts: int = 0,
67
- default_texts: List[str] = [],
68
- args_type: Optional[MemeArgsType] = None,
69
- keywords: List[str] = [],
70
- patterns: List[str] = [],
71
- ):
72
- if key in _memes:
73
- logger.warning(f'Meme with key "{key}" already exists!')
74
- return
75
-
76
- if key in meme_config.meme.meme_disabled_list:
77
- logger.warning(f'The key "{key}" is in the disabled list!')
78
- return
79
-
80
- meme = Meme(
81
- key,
82
- function,
83
- MemeParamsType(
84
- min_images, max_images, min_texts, max_texts, default_texts, args_type
85
- ),
86
- keywords=keywords,
87
- patterns=patterns,
88
- )
89
-
90
- _memes[key] = meme
91
-
92
-
93
- def get_meme(key: str) -> Meme:
94
- if key not in _memes:
95
- raise NoSuchMeme(key)
96
- return _memes[key]
97
-
98
-
99
- def get_memes() -> List[Meme]:
100
- return list(_memes.values())
101
-
102
-
103
- def get_meme_keys() -> List[str]:
104
- return list(_memes.keys())
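A minimal registration-and-lookup sketch using only the functions defined above; the dummy generator function, its signature, and the import path are placeholders, since the real `MemeFunction` contract lives in `meme_generator.meme` and is not part of this file.

```python
# Registration/lookup sketch using only functions from this module.
# The dummy generator and its signature are placeholders (the real contract
# is MemeFunction from meme_generator.meme, not shown here).
from meme_generator.manager import add_meme, get_meme, get_meme_keys

def dummy_meme(images, texts, args):  # placeholder signature
    raise NotImplementedError

add_meme(
    "dummy",
    dummy_meme,
    min_texts=1,
    max_texts=1,
    default_texts=["hello"],
    keywords=["dummy"],
)

print(get_meme_keys())    # includes 'dummy'
meme = get_meme("dummy")  # unknown keys raise NoSuchMeme
```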
 
spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/jpge.cpp DELETED
@@ -1,1049 +0,0 @@
1
- // jpge.cpp - C++ class for JPEG compression.
2
- // Public domain, Rich Geldreich <[email protected]>
3
- // v1.01, Dec. 18, 2010 - Initial release
4
- // v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
5
- // v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
6
- // Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
7
- // v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
8
- // Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
9
- // Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.
10
-
11
- #include "jpge.h"
12
-
13
- #include <stdlib.h>
14
- #include <string.h>
15
- #if PLATFORM_WINDOWS
16
- #include <malloc.h>
17
- #endif
18
-
19
- #define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
20
- #define JPGE_MIN(a,b) (((a)<(b))?(a):(b))
21
-
22
- namespace jpge {
23
-
24
- static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
25
- static inline void jpge_free(void *p) { FMemory::Free(p); }
26
-
27
- // Various JPEG enums and tables.
28
- enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 };
29
- enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 };
30
-
31
- static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
32
- static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 };
33
- static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 };
34
- static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
35
- static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
36
- static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d };
37
- static uint8 s_ac_lum_val[AC_LUM_CODES] =
38
- {
39
- 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
40
- 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
41
- 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
42
- 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
43
- 0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
44
- 0xf9,0xfa
45
- };
46
- static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 };
47
- static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
48
- static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 };
49
- static uint8 s_ac_chroma_val[AC_CHROMA_CODES] =
50
- {
51
- 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
52
- 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
53
- 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
54
- 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
55
- 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
56
- 0xf9,0xfa
57
- };
58
-
59
- // Low-level helper functions.
60
- template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }
61
-
62
- const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
63
- static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); }
64
-
65
- static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
66
- {
67
- for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--)
68
- {
69
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
70
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
71
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
72
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
73
- }
74
- }
75
-
76
- static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
77
- {
78
- for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--)
79
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
80
- }
81
-
82
- static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
83
- {
84
- for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--)
85
- {
86
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
87
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
88
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
89
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
90
- }
91
- }
92
-
93
- static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
94
- {
95
- for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--)
96
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
97
- }
98
-
99
- static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
100
- {
101
- for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; }
102
- }
103
-
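The integer constants a few lines up (`YR`, `YG`, `YB`, `CB_R`, ..., `CR_B`) are the usual JFIF RGB-to-YCbCr coefficients in 16.16 fixed point, which is why the conversion helpers add 32768 (0.5 in that representation) and shift right by 16. A small Python sanity check, not part of jpge.cpp and added here only for illustration, reproduces them:

```python
# Reproduce jpge.cpp's fixed-point YCbCr constants from the JFIF coefficients.
coeffs = {
    'YR': 0.299, 'YG': 0.587, 'YB': 0.114,
    'CB_R': -0.16874, 'CB_G': -0.33126, 'CB_B': 0.5,
    'CR_R': 0.5, 'CR_G': -0.41869, 'CR_B': -0.08131,
}
fixed = {name: round(value * 65536) for name, value in coeffs.items()}
print(fixed)  # matches the constants in jpge.cpp, e.g. YR=19595, CB_R=-11059
```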
104
- // Forward DCT - DCT derived from jfdctint.
105
- #define CONST_BITS 13
106
- #define ROW_BITS 2
107
- #define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
108
- #define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
109
- #define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
110
- int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
111
- int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
112
- int32 u1 = DCT_MUL(t12 + t13, 4433); \
113
- s2 = u1 + DCT_MUL(t13, 6270); \
114
- s6 = u1 + DCT_MUL(t12, -15137); \
115
- u1 = t4 + t7; \
116
- int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
117
- int32 z5 = DCT_MUL(u3 + u4, 9633); \
118
- t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
119
- t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
120
- u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
121
- u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
122
- u3 += z5; u4 += z5; \
123
- s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;
124
-
125
- static void DCT2D(int32 *p)
126
- {
127
- int32 c, *q = p;
128
- for (c = 7; c >= 0; c--, q += 8)
129
- {
130
- int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
131
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
132
- q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
133
- q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
134
- }
135
- for (q = p, c = 7; c >= 0; c--, q++)
136
- {
137
- int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
138
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
139
- q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
140
- q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
141
- }
142
- }
143
-
144
- struct sym_freq { uint m_key, m_sym_index; };
145
-
146
- // Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
147
- static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
148
- {
149
- const uint cMaxPasses = 4;
150
- uint32 hist[256 * cMaxPasses]; clear_obj(hist);
151
- for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
152
- sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
153
- uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
154
- for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
155
- {
156
- const uint32* pHist = &hist[pass << 8];
157
- uint offsets[256], cur_ofs = 0;
158
- for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
159
- for (uint i = 0; i < num_syms; i++)
160
- pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
161
- sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
162
- }
163
- return pCur_syms;
164
- }
165
-
166
- // calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996.
167
- static void calculate_minimum_redundancy(sym_freq *A, int n)
168
- {
169
- int root, leaf, next, avbl, used, dpth;
170
- if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
171
- A[0].m_key += A[1].m_key; root = 0; leaf = 2;
172
- for (next=1; next < n-1; next++)
173
- {
174
- if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
175
- if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
176
- }
177
- A[n-2].m_key = 0;
178
- for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
179
- avbl = 1; used = dpth = 0; root = n-2; next = n-1;
180
- while (avbl>0)
181
- {
182
- while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
183
- while (avbl>used) { A[next--].m_key = dpth; avbl--; }
184
- avbl = 2*used; dpth++; used = 0;
185
- }
186
- }
187
-
188
- // Limits canonical Huffman code table's max code size to max_code_size.
189
- static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
190
- {
191
- if (code_list_len <= 1) return;
192
-
193
- for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
194
-
195
- uint32 total = 0;
196
- for (int i = max_code_size; i > 0; i--)
197
- total += (((uint32)pNum_codes[i]) << (max_code_size - i));
198
-
199
- while (total != (1UL << max_code_size))
200
- {
201
- pNum_codes[max_code_size]--;
202
- for (int i = max_code_size - 1; i > 0; i--)
203
- {
204
- if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
205
- }
206
- total--;
207
- }
208
- }
209
-
210
- // Generates an optimized Huffman table.
211
- void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
212
- {
213
- sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
214
- syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
215
- int num_used_syms = 1;
216
- const uint32 *pSym_count = &m_huff_count[table_num][0];
217
- for (int i = 0; i < table_len; i++)
218
- if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
219
- sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
220
- calculate_minimum_redundancy(pSyms, num_used_syms);
221
-
222
- // Count the # of symbols of each code size.
223
- int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
224
- for (int i = 0; i < num_used_syms; i++)
225
- num_codes[pSyms[i].m_key]++;
226
-
227
- const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
228
- huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);
229
-
230
- // Compute m_huff_bits array, which contains the # of symbols per code size.
231
- clear_obj(m_huff_bits[table_num]);
232
- for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
233
- m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);
234
-
235
- // Remove the dummy symbol added above, which must be in largest bucket.
236
- for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
237
- {
238
- if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
239
- }
240
-
241
- // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
242
- for (int i = num_used_syms - 1; i >= 1; i--)
243
- m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
244
- }
245
-
246
- // JPEG marker generation.
247
- void jpeg_encoder::emit_byte(uint8 i)
248
- {
249
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
250
- }
251
-
252
- void jpeg_encoder::emit_word(uint i)
253
- {
254
- emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF));
255
- }
256
-
257
- void jpeg_encoder::emit_marker(int marker)
258
- {
259
- emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
260
- }
261
-
262
- // Emit JFIF marker
263
- void jpeg_encoder::emit_jfif_app0()
264
- {
265
- emit_marker(M_APP0);
266
- emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1);
267
- emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
268
- emit_byte(0);
269
- emit_byte(1); /* Major version */
270
- emit_byte(1); /* Minor version */
271
- emit_byte(0); /* Density unit */
272
- emit_word(1);
273
- emit_word(1);
274
- emit_byte(0); /* No thumbnail image */
275
- emit_byte(0);
276
- }
277
-
278
- // Emit quantization tables
279
- void jpeg_encoder::emit_dqt()
280
- {
281
- for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
282
- {
283
- emit_marker(M_DQT);
284
- emit_word(64 + 1 + 2);
285
- emit_byte(static_cast<uint8>(i));
286
- for (int j = 0; j < 64; j++)
287
- emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
288
- }
289
- }
290
-
291
- // Emit start of frame marker
292
- void jpeg_encoder::emit_sof()
293
- {
294
- emit_marker(M_SOF0); /* baseline */
295
- emit_word(3 * m_num_components + 2 + 5 + 1);
296
- emit_byte(8); /* precision */
297
- emit_word(m_image_y);
298
- emit_word(m_image_x);
299
- emit_byte(m_num_components);
300
- for (int i = 0; i < m_num_components; i++)
301
- {
302
- emit_byte(static_cast<uint8>(i + 1)); /* component ID */
303
- emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
304
- emit_byte(i > 0); /* quant. table num */
305
- }
306
- }
307
-
308
- // Emit Huffman table.
309
- void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
310
- {
311
- emit_marker(M_DHT);
312
-
313
- int length = 0;
314
- for (int i = 1; i <= 16; i++)
315
- length += bits[i];
316
-
317
- emit_word(length + 2 + 1 + 16);
318
- emit_byte(static_cast<uint8>(index + (ac_flag << 4)));
319
-
320
- for (int i = 1; i <= 16; i++)
321
- emit_byte(bits[i]);
322
-
323
- for (int i = 0; i < length; i++)
324
- emit_byte(val[i]);
325
- }
326
-
327
- // Emit all Huffman tables.
328
- void jpeg_encoder::emit_dhts()
329
- {
330
- emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
331
- emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
332
- if (m_num_components == 3)
333
- {
334
- emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
335
- emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
336
- }
337
- }
338
-
339
- // emit start of scan
340
- void jpeg_encoder::emit_sos()
341
- {
342
- emit_marker(M_SOS);
343
- emit_word(2 * m_num_components + 2 + 1 + 3);
344
- emit_byte(m_num_components);
345
- for (int i = 0; i < m_num_components; i++)
346
- {
347
- emit_byte(static_cast<uint8>(i + 1));
348
- if (i == 0)
349
- emit_byte((0 << 4) + 0);
350
- else
351
- emit_byte((1 << 4) + 1);
352
- }
353
- emit_byte(0); /* spectral selection */
354
- emit_byte(63);
355
- emit_byte(0);
356
- }
357
-
358
- // Emit all markers at beginning of image file.
359
- void jpeg_encoder::emit_markers()
360
- {
361
- emit_marker(M_SOI);
362
- emit_jfif_app0();
363
- emit_dqt();
364
- emit_sof();
365
- emit_dhts();
366
- emit_sos();
367
- }
368
-
369
- // Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
370
- void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
371
- {
372
- int i, l, last_p, si;
373
- uint8 huff_size[257];
374
- uint huff_code[257];
375
- uint code;
376
-
377
- int p = 0;
378
- for (l = 1; l <= 16; l++)
379
- for (i = 1; i <= bits[l]; i++)
380
- huff_size[p++] = (char)l;
381
-
382
- huff_size[p] = 0; last_p = p; // write sentinel
383
-
384
- code = 0; si = huff_size[0]; p = 0;
385
-
386
- while (huff_size[p])
387
- {
388
- while (huff_size[p] == si)
389
- huff_code[p++] = code++;
390
- code <<= 1;
391
- si++;
392
- }
393
-
394
- memset(codes, 0, sizeof(codes[0])*256);
395
- memset(code_sizes, 0, sizeof(code_sizes[0])*256);
396
- for (p = 0; p < last_p; p++)
397
- {
398
- codes[val[p]] = huff_code[p];
399
- code_sizes[val[p]] = huff_size[p];
400
- }
401
- }
402
-
403
- // Quantization table generation.
404
- void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
405
- {
406
- int32 q;
407
- if (m_params.m_quality < 50)
408
- q = 5000 / m_params.m_quality;
409
- else
410
- q = 200 - m_params.m_quality * 2;
411
- for (int i = 0; i < 64; i++)
412
- {
413
- int32 j = *pSrc++; j = (j * q + 50L) / 100L;
414
- *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
415
- }
416
- }
417
-
418
- // Higher-level methods.
419
- void jpeg_encoder::first_pass_init()
420
- {
421
- m_bit_buffer = 0; m_bits_in = 0;
422
- memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
423
- m_mcu_y_ofs = 0;
424
- m_pass_num = 1;
425
- }
426
-
427
- bool jpeg_encoder::second_pass_init()
428
- {
429
- compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
430
- compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
431
- if (m_num_components > 1)
432
- {
433
- compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
434
- compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
435
- }
436
- first_pass_init();
437
- emit_markers();
438
- m_pass_num = 2;
439
- return true;
440
- }
441
-
442
- bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
443
- {
444
- m_num_components = 3;
445
- switch (m_params.m_subsampling)
446
- {
447
- case Y_ONLY:
448
- {
449
- m_num_components = 1;
450
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
451
- m_mcu_x = 8; m_mcu_y = 8;
452
- break;
453
- }
454
- case H1V1:
455
- {
456
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
457
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
458
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
459
- m_mcu_x = 8; m_mcu_y = 8;
460
- break;
461
- }
462
- case H2V1:
463
- {
464
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
465
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
466
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
467
- m_mcu_x = 16; m_mcu_y = 8;
468
- break;
469
- }
470
- case H2V2:
471
- {
472
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
473
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
474
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
475
- m_mcu_x = 16; m_mcu_y = 16;
476
- }
477
- }
478
-
479
- m_image_x = p_x_res; m_image_y = p_y_res;
480
- m_image_bpp = src_channels;
481
- m_image_bpl = m_image_x * src_channels;
482
- m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
483
- m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
484
- m_image_bpl_xlt = m_image_x * m_num_components;
485
- m_image_bpl_mcu = m_image_x_mcu * m_num_components;
486
- m_mcus_per_row = m_image_x_mcu / m_mcu_x;
487
-
488
- if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
489
- for (int i = 1; i < m_mcu_y; i++)
490
- m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;
491
-
492
- compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
493
- compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? s_std_lum_quant : s_std_croma_quant);
494
-
495
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
496
- m_pOut_buf = m_out_buf;
497
-
498
- if (m_params.m_two_pass_flag)
499
- {
500
- clear_obj(m_huff_count);
501
- first_pass_init();
502
- }
503
- else
504
- {
505
- memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES);
506
- memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES);
507
- memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES);
508
- memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES);
509
- if (!second_pass_init()) return false; // in effect, skip over the first pass
510
- }
511
- return m_all_stream_writes_succeeded;
512
- }
513
-
514
- void jpeg_encoder::load_block_8_8_grey(int x)
515
- {
516
- uint8 *pSrc;
517
- sample_array_t *pDst = m_sample_array;
518
- x <<= 3;
519
- for (int i = 0; i < 8; i++, pDst += 8)
520
- {
521
- pSrc = m_mcu_lines[i] + x;
522
- pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128;
523
- pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128;
524
- }
525
- }
526
-
527
- void jpeg_encoder::load_block_8_8(int x, int y, int c)
528
- {
529
- uint8 *pSrc;
530
- sample_array_t *pDst = m_sample_array;
531
- x = (x * (8 * 3)) + c;
532
- y <<= 3;
533
- for (int i = 0; i < 8; i++, pDst += 8)
534
- {
535
- pSrc = m_mcu_lines[y + i] + x;
536
- pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128;
537
- pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128;
538
- }
539
- }
540
-
541
- void jpeg_encoder::load_block_16_8(int x, int c)
542
- {
543
- uint8 *pSrc1, *pSrc2;
544
- sample_array_t *pDst = m_sample_array;
545
- x = (x * (16 * 3)) + c;
546
- int a = 0, b = 2;
547
- for (int i = 0; i < 16; i += 2, pDst += 8)
548
- {
549
- pSrc1 = m_mcu_lines[i + 0] + x;
550
- pSrc2 = m_mcu_lines[i + 1] + x;
551
- pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128;
552
- pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128;
553
- pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128;
554
- pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128;
555
- int temp = a; a = b; b = temp;
556
- }
557
- }
558
-
559
- void jpeg_encoder::load_block_16_8_8(int x, int c)
560
- {
561
- uint8 *pSrc1;
562
- sample_array_t *pDst = m_sample_array;
563
- x = (x * (16 * 3)) + c;
564
- for (int i = 0; i < 8; i++, pDst += 8)
565
- {
566
- pSrc1 = m_mcu_lines[i + 0] + x;
567
- pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128;
568
- pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128;
569
- pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128;
570
- pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128;
571
- }
572
- }
573
-
574
- void jpeg_encoder::load_quantized_coefficients(int component_num)
575
- {
576
- int32 *q = m_quantization_tables[component_num > 0];
577
- int16 *pDst = m_coefficient_array;
578
- for (int i = 0; i < 64; i++)
579
- {
580
- sample_array_t j = m_sample_array[s_zag[i]];
581
- if (j < 0)
582
- {
583
- if ((j = -j + (*q >> 1)) < *q)
584
- *pDst++ = 0;
585
- else
586
- *pDst++ = static_cast<int16>(-(j / *q));
587
- }
588
- else
589
- {
590
- if ((j = j + (*q >> 1)) < *q)
591
- *pDst++ = 0;
592
- else
593
- *pDst++ = static_cast<int16>((j / *q));
594
- }
595
- q++;
596
- }
597
- }
598
-
599
- void jpeg_encoder::flush_output_buffer()
600
- {
601
- if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
602
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
603
- m_pOut_buf = m_out_buf;
604
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
605
- }
606
-
607
- void jpeg_encoder::put_bits(uint bits, uint len)
608
- {
609
- m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
610
- while (m_bits_in >= 8)
611
- {
612
- uint8 c;
613
- #define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
614
- JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
615
- if (c == 0xFF) JPGE_PUT_BYTE(0);
616
- m_bit_buffer <<= 8;
617
- m_bits_in -= 8;
618
- }
619
- }
620
-
621
- void jpeg_encoder::code_coefficients_pass_one(int component_num)
622
- {
623
- if (component_num >= 3) return; // just to shut up static analysis
624
- int i, run_len, nbits, temp1;
625
- int16 *src = m_coefficient_array;
626
- uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];
627
-
628
- temp1 = src[0] - m_last_dc_val[component_num];
629
- m_last_dc_val[component_num] = src[0];
630
- if (temp1 < 0) temp1 = -temp1;
631
-
632
- nbits = 0;
633
- while (temp1)
634
- {
635
- nbits++; temp1 >>= 1;
636
- }
637
-
638
- dc_count[nbits]++;
639
- for (run_len = 0, i = 1; i < 64; i++)
640
- {
641
- if ((temp1 = m_coefficient_array[i]) == 0)
642
- run_len++;
643
- else
644
- {
645
- while (run_len >= 16)
646
- {
647
- ac_count[0xF0]++;
648
- run_len -= 16;
649
- }
650
- if (temp1 < 0) temp1 = -temp1;
651
- nbits = 1;
652
- while (temp1 >>= 1) nbits++;
653
- ac_count[(run_len << 4) + nbits]++;
654
- run_len = 0;
655
- }
656
- }
657
- if (run_len) ac_count[0]++;
658
- }
659
-
660
- void jpeg_encoder::code_coefficients_pass_two(int component_num)
661
- {
662
- int i, j, run_len, nbits, temp1, temp2;
663
- int16 *pSrc = m_coefficient_array;
664
- uint *codes[2];
665
- uint8 *code_sizes[2];
666
-
667
- if (component_num == 0)
668
- {
669
- codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
670
- code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
671
- }
672
- else
673
- {
674
- codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
675
- code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
676
- }
677
-
678
- temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
679
- m_last_dc_val[component_num] = pSrc[0];
680
-
681
- if (temp1 < 0)
682
- {
683
- temp1 = -temp1; temp2--;
684
- }
685
-
686
- nbits = 0;
687
- while (temp1)
688
- {
689
- nbits++; temp1 >>= 1;
690
- }
691
-
692
- put_bits(codes[0][nbits], code_sizes[0][nbits]);
693
- if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);
694
-
695
- for (run_len = 0, i = 1; i < 64; i++)
696
- {
697
- if ((temp1 = m_coefficient_array[i]) == 0)
698
- run_len++;
699
- else
700
- {
701
- while (run_len >= 16)
702
- {
703
- put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
704
- run_len -= 16;
705
- }
706
- if ((temp2 = temp1) < 0)
707
- {
708
- temp1 = -temp1;
709
- temp2--;
710
- }
711
- nbits = 1;
712
- while (temp1 >>= 1)
713
- nbits++;
714
- j = (run_len << 4) + nbits;
715
- put_bits(codes[1][j], code_sizes[1][j]);
716
- put_bits(temp2 & ((1 << nbits) - 1), nbits);
717
- run_len = 0;
718
- }
719
- }
720
- if (run_len)
721
- put_bits(codes[1][0], code_sizes[1][0]);
722
- }
723
-
724
- void jpeg_encoder::code_block(int component_num)
725
- {
726
- DCT2D(m_sample_array);
727
- load_quantized_coefficients(component_num);
728
- if (m_pass_num == 1)
729
- code_coefficients_pass_one(component_num);
730
- else
731
- code_coefficients_pass_two(component_num);
732
- }
733
-
734
- void jpeg_encoder::process_mcu_row()
735
- {
736
- if (m_num_components == 1)
737
- {
738
- for (int i = 0; i < m_mcus_per_row; i++)
739
- {
740
- load_block_8_8_grey(i); code_block(0);
741
- }
742
- }
743
- else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
744
- {
745
- for (int i = 0; i < m_mcus_per_row; i++)
746
- {
747
- load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
748
- }
749
- }
750
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
751
- {
752
- for (int i = 0; i < m_mcus_per_row; i++)
753
- {
754
- load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
755
- load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
756
- }
757
- }
758
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
759
- {
760
- for (int i = 0; i < m_mcus_per_row; i++)
761
- {
762
- load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
763
- load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
764
- load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
765
- }
766
- }
767
- }
768
-
769
- bool jpeg_encoder::terminate_pass_one()
770
- {
771
- optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
772
- if (m_num_components > 1)
773
- {
774
- optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
775
- }
776
- return second_pass_init();
777
- }
778
-
779
- bool jpeg_encoder::terminate_pass_two()
780
- {
781
- put_bits(0x7F, 7);
782
- flush_output_buffer();
783
- emit_marker(M_EOI);
784
- m_pass_num++; // purposely bump up m_pass_num, for debugging
785
- return true;
786
- }
787
-
788
- bool jpeg_encoder::process_end_of_image()
789
- {
790
- if (m_mcu_y_ofs)
791
- {
792
- if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
793
- {
794
- for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
795
- memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
796
- }
797
-
798
- process_mcu_row();
799
- }
800
-
801
- if (m_pass_num == 1)
802
- return terminate_pass_one();
803
- else
804
- return terminate_pass_two();
805
- }
806
-
807
- void jpeg_encoder::load_mcu(const void *pSrc)
808
- {
809
- const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);
810
-
811
- uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst
812
-
813
- if (m_num_components == 1)
814
- {
815
- if (m_image_bpp == 4)
816
- RGBA_to_Y(pDst, Psrc, m_image_x);
817
- else if (m_image_bpp == 3)
818
- RGB_to_Y(pDst, Psrc, m_image_x);
819
- else
820
- memcpy(pDst, Psrc, m_image_x);
821
- }
822
- else
823
- {
824
- if (m_image_bpp == 4)
825
- RGBA_to_YCC(pDst, Psrc, m_image_x);
826
- else if (m_image_bpp == 3)
827
- RGB_to_YCC(pDst, Psrc, m_image_x);
828
- else
829
- Y_to_YCC(pDst, Psrc, m_image_x);
830
- }
831
-
832
- // Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
833
- if (m_num_components == 1)
834
- memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
835
- else
836
- {
837
- const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
838
- uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
839
- for (int i = m_image_x; i < m_image_x_mcu; i++)
840
- {
841
- *q++ = y; *q++ = cb; *q++ = cr;
842
- }
843
- }
844
-
845
- if (++m_mcu_y_ofs == m_mcu_y)
846
- {
847
- process_mcu_row();
848
- m_mcu_y_ofs = 0;
849
- }
850
- }
851
-
852
- void jpeg_encoder::clear()
853
- {
854
- m_mcu_lines[0] = NULL;
855
- m_pass_num = 0;
856
- m_all_stream_writes_succeeded = true;
857
- }
858
-
859
- jpeg_encoder::jpeg_encoder()
860
- {
861
- clear();
862
- }
863
-
864
- jpeg_encoder::~jpeg_encoder()
865
- {
866
- deinit();
867
- }
868
-
869
- bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
870
- {
871
- deinit();
872
- if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
873
- m_pStream = pStream;
874
- m_params = comp_params;
875
- return jpg_open(width, height, src_channels);
876
- }
877
-
878
- void jpeg_encoder::deinit()
879
- {
880
- jpge_free(m_mcu_lines[0]);
881
- clear();
882
- }
883
-
884
- bool jpeg_encoder::process_scanline(const void* pScanline)
885
- {
886
- if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
887
- if (m_all_stream_writes_succeeded)
888
- {
889
- if (!pScanline)
890
- {
891
- if (!process_end_of_image()) return false;
892
- }
893
- else
894
- {
895
- load_mcu(pScanline);
896
- }
897
- }
898
- return m_all_stream_writes_succeeded;
899
- }
900
-
901
- // Higher level wrappers/examples (optional).
902
- #include <stdio.h>
903
-
904
- class cfile_stream : public output_stream
905
- {
906
- cfile_stream(const cfile_stream &);
907
- cfile_stream &operator= (const cfile_stream &);
908
-
909
- FILE* m_pFile;
910
- bool m_bStatus;
911
-
912
- public:
913
- cfile_stream() : m_pFile(NULL), m_bStatus(false) { }
914
-
915
- virtual ~cfile_stream()
916
- {
917
- close();
918
- }
919
-
920
- bool open(const char *pFilename)
921
- {
922
- close();
923
- #if defined(_MSC_VER)
924
- if (fopen_s(&m_pFile, pFilename, "wb") != 0)
925
- {
926
- return false;
927
- }
928
- #else
929
- m_pFile = fopen(pFilename, "wb");
930
- #endif
931
- m_bStatus = (m_pFile != NULL);
932
- return m_bStatus;
933
- }
934
-
935
- bool close()
936
- {
937
- if (m_pFile)
938
- {
939
- if (fclose(m_pFile) == EOF)
940
- {
941
- m_bStatus = false;
942
- }
943
- m_pFile = NULL;
944
- }
945
- return m_bStatus;
946
- }
947
-
948
- virtual bool put_buf(const void* pBuf, int64_t len)
949
- {
950
- m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
951
- return m_bStatus;
952
- }
953
-
954
- uint get_size() const
955
- {
956
- return m_pFile ? ftell(m_pFile) : 0;
957
- }
958
- };
959
-
960
- // Writes JPEG image to file.
961
- bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
962
- {
963
- cfile_stream dst_stream;
964
- if (!dst_stream.open(pFilename))
965
- return false;
966
-
967
- jpge::jpeg_encoder dst_image;
968
- if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
969
- return false;
970
-
971
- for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
972
- {
973
- for (int64_t i = 0; i < height; i++)
974
- {
975
- // i, width, and num_channels are all 64bit
976
- const uint8* pBuf = pImage_data + i * width * num_channels;
977
- if (!dst_image.process_scanline(pBuf))
978
- return false;
979
- }
980
- if (!dst_image.process_scanline(NULL))
981
- return false;
982
- }
983
-
984
- dst_image.deinit();
985
-
986
- return dst_stream.close();
987
- }
988
-
989
- class memory_stream : public output_stream
990
- {
991
- memory_stream(const memory_stream &);
992
- memory_stream &operator= (const memory_stream &);
993
-
994
- uint8 *m_pBuf;
995
- uint64_t m_buf_size, m_buf_ofs;
996
-
997
- public:
998
- memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }
999
-
1000
- virtual ~memory_stream() { }
1001
-
1002
- virtual bool put_buf(const void* pBuf, int64_t len)
1003
- {
1004
- uint64_t buf_remaining = m_buf_size - m_buf_ofs;
1005
- if ((uint64_t)len > buf_remaining)
1006
- return false;
1007
- memcpy(m_pBuf + m_buf_ofs, pBuf, len);
1008
- m_buf_ofs += len;
1009
- return true;
1010
- }
1011
-
1012
- uint64_t get_size() const
1013
- {
1014
- return m_buf_ofs;
1015
- }
1016
- };
1017
-
1018
- bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
1019
- {
1020
- if ((!pDstBuf) || (!buf_size))
1021
- return false;
1022
-
1023
- memory_stream dst_stream(pDstBuf, buf_size);
1024
-
1025
- buf_size = 0;
1026
-
1027
- jpge::jpeg_encoder dst_image;
1028
- if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
1029
- return false;
1030
-
1031
- for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
1032
- {
1033
- for (int64_t i = 0; i < height; i++)
1034
- {
1035
- const uint8* pScanline = pImage_data + i * width * num_channels;
1036
- if (!dst_image.process_scanline(pScanline))
1037
- return false;
1038
- }
1039
- if (!dst_image.process_scanline(NULL))
1040
- return false;
1041
- }
1042
-
1043
- dst_image.deinit();
1044
-
1045
- buf_size = dst_stream.get_size();
1046
- return true;
1047
- }
1048
-
1049
- } // namespace jpge
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/decorators.py DELETED
@@ -1,565 +0,0 @@
1
- import inspect
2
- import types
3
- import typing as t
4
- from functools import update_wrapper
5
- from gettext import gettext as _
6
-
7
- from .core import Argument
8
- from .core import Command
9
- from .core import Context
10
- from .core import Group
11
- from .core import Option
12
- from .core import Parameter
13
- from .globals import get_current_context
14
- from .utils import echo
15
-
16
- if t.TYPE_CHECKING:
17
- import typing_extensions as te
18
-
19
- P = te.ParamSpec("P")
20
-
21
- R = t.TypeVar("R")
22
- T = t.TypeVar("T")
23
- _AnyCallable = t.Callable[..., t.Any]
24
- FC = t.TypeVar("FC", bound=t.Union[_AnyCallable, Command])
25
-
26
-
27
- def pass_context(f: "t.Callable[te.Concatenate[Context, P], R]") -> "t.Callable[P, R]":
28
- """Marks a callback as wanting to receive the current context
29
- object as first argument.
30
- """
31
-
32
- def new_func(*args: "P.args", **kwargs: "P.kwargs") -> "R":
33
- return f(get_current_context(), *args, **kwargs)
34
-
35
- return update_wrapper(new_func, f)
36
-
37
-
38
- def pass_obj(f: "t.Callable[te.Concatenate[t.Any, P], R]") -> "t.Callable[P, R]":
39
- """Similar to :func:`pass_context`, but only pass the object on the
40
- context onwards (:attr:`Context.obj`). This is useful if that object
41
- represents the state of a nested system.
42
- """
43
-
44
- def new_func(*args: "P.args", **kwargs: "P.kwargs") -> "R":
45
- return f(get_current_context().obj, *args, **kwargs)
46
-
47
- return update_wrapper(new_func, f)
48
-
49
-
50
- def make_pass_decorator(
51
- object_type: t.Type[T], ensure: bool = False
52
- ) -> t.Callable[["t.Callable[te.Concatenate[T, P], R]"], "t.Callable[P, R]"]:
53
- """Given an object type this creates a decorator that will work
54
- similar to :func:`pass_obj` but instead of passing the object of the
55
- current context, it will find the innermost context of type
56
- :func:`object_type`.
57
-
58
- This generates a decorator that works roughly like this::
59
-
60
- from functools import update_wrapper
61
-
62
- def decorator(f):
63
- @pass_context
64
- def new_func(ctx, *args, **kwargs):
65
- obj = ctx.find_object(object_type)
66
- return ctx.invoke(f, obj, *args, **kwargs)
67
- return update_wrapper(new_func, f)
68
- return decorator
69
-
70
- :param object_type: the type of the object to pass.
71
- :param ensure: if set to `True`, a new object will be created and
72
- remembered on the context if it's not there yet.
73
- """
74
-
75
- def decorator(f: "t.Callable[te.Concatenate[T, P], R]") -> "t.Callable[P, R]":
76
- def new_func(*args: "P.args", **kwargs: "P.kwargs") -> "R":
77
- ctx = get_current_context()
78
-
79
- obj: t.Optional[T]
80
- if ensure:
81
- obj = ctx.ensure_object(object_type)
82
- else:
83
- obj = ctx.find_object(object_type)
84
-
85
- if obj is None:
86
- raise RuntimeError(
87
- "Managed to invoke callback without a context"
88
- f" object of type {object_type.__name__!r}"
89
- " existing."
90
- )
91
-
92
- return ctx.invoke(f, obj, *args, **kwargs)
93
-
94
- return update_wrapper(new_func, f)
95
-
96
- return decorator # type: ignore[return-value]
97
-
98
-
99
- def pass_meta_key(
100
- key: str, *, doc_description: t.Optional[str] = None
101
- ) -> "t.Callable[[t.Callable[te.Concatenate[t.Any, P], R]], t.Callable[P, R]]":
102
- """Create a decorator that passes a key from
103
- :attr:`click.Context.meta` as the first argument to the decorated
104
- function.
105
-
106
- :param key: Key in ``Context.meta`` to pass.
107
- :param doc_description: Description of the object being passed,
108
- inserted into the decorator's docstring. Defaults to "the 'key'
109
- key from Context.meta".
110
-
111
- .. versionadded:: 8.0
112
- """
113
-
114
- def decorator(f: "t.Callable[te.Concatenate[t.Any, P], R]") -> "t.Callable[P, R]":
115
- def new_func(*args: "P.args", **kwargs: "P.kwargs") -> R:
116
- ctx = get_current_context()
117
- obj = ctx.meta[key]
118
- return ctx.invoke(f, obj, *args, **kwargs)
119
-
120
- return update_wrapper(new_func, f)
121
-
122
- if doc_description is None:
123
- doc_description = f"the {key!r} key from :attr:`click.Context.meta`"
124
-
125
- decorator.__doc__ = (
126
- f"Decorator that passes {doc_description} as the first argument"
127
- " to the decorated function."
128
- )
129
- return decorator # type: ignore[return-value]
130
-
131
-
132
- CmdType = t.TypeVar("CmdType", bound=Command)
133
-
134
-
135
- # variant: no call, directly as decorator for a function.
136
- @t.overload
137
- def command(name: _AnyCallable) -> Command:
138
- ...
139
-
140
-
141
- # variant: with positional name and with positional or keyword cls argument:
142
- # @command(namearg, CommandCls, ...) or @command(namearg, cls=CommandCls, ...)
143
- @t.overload
144
- def command(
145
- name: t.Optional[str],
146
- cls: t.Type[CmdType],
147
- **attrs: t.Any,
148
- ) -> t.Callable[[_AnyCallable], CmdType]:
149
- ...
150
-
151
-
152
- # variant: name omitted, cls _must_ be a keyword argument, @command(cls=CommandCls, ...)
153
- @t.overload
154
- def command(
155
- name: None = None,
156
- *,
157
- cls: t.Type[CmdType],
158
- **attrs: t.Any,
159
- ) -> t.Callable[[_AnyCallable], CmdType]:
160
- ...
161
-
162
-
163
- # variant: with optional string name, no cls argument provided.
164
- @t.overload
165
- def command(
166
- name: t.Optional[str] = ..., cls: None = None, **attrs: t.Any
167
- ) -> t.Callable[[_AnyCallable], Command]:
168
- ...
169
-
170
-
171
- def command(
172
- name: t.Union[t.Optional[str], _AnyCallable] = None,
173
- cls: t.Optional[t.Type[CmdType]] = None,
174
- **attrs: t.Any,
175
- ) -> t.Union[Command, t.Callable[[_AnyCallable], t.Union[Command, CmdType]]]:
176
- r"""Creates a new :class:`Command` and uses the decorated function as
177
- callback. This will also automatically attach all decorated
178
- :func:`option`\s and :func:`argument`\s as parameters to the command.
179
-
180
- The name of the command defaults to the name of the function with
181
- underscores replaced by dashes. If you want to change that, you can
182
- pass the intended name as the first argument.
183
-
184
- All keyword arguments are forwarded to the underlying command class.
185
- For the ``params`` argument, any decorated params are appended to
186
- the end of the list.
187
-
188
- Once decorated the function turns into a :class:`Command` instance
189
- that can be invoked as a command line utility or be attached to a
190
- command :class:`Group`.
191
-
192
- :param name: the name of the command. This defaults to the function
193
- name with underscores replaced by dashes.
194
- :param cls: the command class to instantiate. This defaults to
195
- :class:`Command`.
196
-
197
- .. versionchanged:: 8.1
198
- This decorator can be applied without parentheses.
199
-
200
- .. versionchanged:: 8.1
201
- The ``params`` argument can be used. Decorated params are
202
- appended to the end of the list.
203
- """
204
-
205
- func: t.Optional[t.Callable[[_AnyCallable], t.Any]] = None
206
-
207
- if callable(name):
208
- func = name
209
- name = None
210
- assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class."
211
- assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments."
212
-
213
- if cls is None:
214
- cls = t.cast(t.Type[CmdType], Command)
215
-
216
- def decorator(f: _AnyCallable) -> CmdType:
217
- if isinstance(f, Command):
218
- raise TypeError("Attempted to convert a callback into a command twice.")
219
-
220
- attr_params = attrs.pop("params", None)
221
- params = attr_params if attr_params is not None else []
222
-
223
- try:
224
- decorator_params = f.__click_params__ # type: ignore
225
- except AttributeError:
226
- pass
227
- else:
228
- del f.__click_params__ # type: ignore
229
- params.extend(reversed(decorator_params))
230
-
231
- if attrs.get("help") is None:
232
- attrs["help"] = f.__doc__
233
-
234
- if t.TYPE_CHECKING:
235
- assert cls is not None
236
- assert not callable(name)
237
-
238
- cmd = cls(
239
- name=name or f.__name__.lower().replace("_", "-"),
240
- callback=f,
241
- params=params,
242
- **attrs,
243
- )
244
- cmd.__doc__ = f.__doc__
245
- return cmd
246
-
247
- if func is not None:
248
- return decorator(func)
249
-
250
- return decorator
251
-
252
-
253
- GrpType = t.TypeVar("GrpType", bound=Group)
254
-
255
-
256
- # variant: no call, directly as decorator for a function.
257
- @t.overload
258
- def group(name: _AnyCallable) -> Group:
259
- ...
260
-
261
-
262
- # variant: with positional name and with positional or keyword cls argument:
263
- # @group(namearg, GroupCls, ...) or @group(namearg, cls=GroupCls, ...)
264
- @t.overload
265
- def group(
266
- name: t.Optional[str],
267
- cls: t.Type[GrpType],
268
- **attrs: t.Any,
269
- ) -> t.Callable[[_AnyCallable], GrpType]:
270
- ...
271
-
272
-
273
- # variant: name omitted, cls _must_ be a keyword argument, @group(cmd=GroupCls, ...)
274
- # The _correct_ way to spell this overload is to use keyword-only argument syntax:
275
- # def group(*, cls: t.Type[GrpType], **attrs: t.Any) -> ...
276
- # However, mypy thinks this doesn't fit the overloaded function. Pyright does
277
- # accept that spelling, and the following work-around makes pyright issue a
278
- # warning that GrpType could be left unsolved, but mypy sees it as fine. *shrug*
279
- @t.overload
280
- def group(
281
- name: None = None,
282
- cls: t.Type[GrpType] = ...,
283
- **attrs: t.Any,
284
- ) -> t.Callable[[_AnyCallable], GrpType]:
285
- ...
286
-
287
-
288
- # variant: with optional string name, no cls argument provided.
289
- @t.overload
290
- def group(
291
- name: t.Optional[str] = ..., cls: None = None, **attrs: t.Any
292
- ) -> t.Callable[[_AnyCallable], Group]:
293
- ...
294
-
295
-
296
- def group(
297
- name: t.Union[str, _AnyCallable, None] = None,
298
- cls: t.Optional[t.Type[GrpType]] = None,
299
- **attrs: t.Any,
300
- ) -> t.Union[Group, t.Callable[[_AnyCallable], t.Union[Group, GrpType]]]:
301
- """Creates a new :class:`Group` with a function as callback. This
302
- works otherwise the same as :func:`command` just that the `cls`
303
- parameter is set to :class:`Group`.
304
-
305
- .. versionchanged:: 8.1
306
- This decorator can be applied without parentheses.
307
- """
308
- if cls is None:
309
- cls = t.cast(t.Type[GrpType], Group)
310
-
311
- if callable(name):
312
- return command(cls=cls, **attrs)(name)
313
-
314
- return command(name, cls, **attrs)
315
-
316
-
317
- def _param_memo(f: t.Callable[..., t.Any], param: Parameter) -> None:
318
- if isinstance(f, Command):
319
- f.params.append(param)
320
- else:
321
- if not hasattr(f, "__click_params__"):
322
- f.__click_params__ = [] # type: ignore
323
-
324
- f.__click_params__.append(param) # type: ignore
325
-
326
-
327
- def argument(
328
- *param_decls: str, cls: t.Optional[t.Type[Argument]] = None, **attrs: t.Any
329
- ) -> t.Callable[[FC], FC]:
330
- """Attaches an argument to the command. All positional arguments are
331
- passed as parameter declarations to :class:`Argument`; all keyword
332
- arguments are forwarded unchanged (except ``cls``).
333
- This is equivalent to creating an :class:`Argument` instance manually
334
- and attaching it to the :attr:`Command.params` list.
335
-
336
- For the default argument class, refer to :class:`Argument` and
337
- :class:`Parameter` for descriptions of parameters.
338
-
339
- :param cls: the argument class to instantiate. This defaults to
340
- :class:`Argument`.
341
- :param param_decls: Passed as positional arguments to the constructor of
342
- ``cls``.
343
- :param attrs: Passed as keyword arguments to the constructor of ``cls``.
344
- """
345
- if cls is None:
346
- cls = Argument
347
-
348
- def decorator(f: FC) -> FC:
349
- _param_memo(f, cls(param_decls, **attrs))
350
- return f
351
-
352
- return decorator
353
-
354
-
355
- def option(
356
- *param_decls: str, cls: t.Optional[t.Type[Option]] = None, **attrs: t.Any
357
- ) -> t.Callable[[FC], FC]:
358
- """Attaches an option to the command. All positional arguments are
359
- passed as parameter declarations to :class:`Option`; all keyword
360
- arguments are forwarded unchanged (except ``cls``).
361
- This is equivalent to creating an :class:`Option` instance manually
362
- and attaching it to the :attr:`Command.params` list.
363
-
364
- For the default option class, refer to :class:`Option` and
365
- :class:`Parameter` for descriptions of parameters.
366
-
367
- :param cls: the option class to instantiate. This defaults to
368
- :class:`Option`.
369
- :param param_decls: Passed as positional arguments to the constructor of
370
- ``cls``.
371
- :param attrs: Passed as keyword arguments to the constructor of ``cls``.
372
- """
373
- if cls is None:
374
- cls = Option
375
-
376
- def decorator(f: FC) -> FC:
377
- _param_memo(f, cls(param_decls, **attrs))
378
- return f
379
-
380
- return decorator
381
-
382
-
383
- def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
384
- """Add a ``--yes`` option which shows a prompt before continuing if
385
- not passed. If the prompt is declined, the program will exit.
386
-
387
- :param param_decls: One or more option names. Defaults to the single
388
- value ``"--yes"``.
389
- :param kwargs: Extra arguments are passed to :func:`option`.
390
- """
391
-
392
- def callback(ctx: Context, param: Parameter, value: bool) -> None:
393
- if not value:
394
- ctx.abort()
395
-
396
- if not param_decls:
397
- param_decls = ("--yes",)
398
-
399
- kwargs.setdefault("is_flag", True)
400
- kwargs.setdefault("callback", callback)
401
- kwargs.setdefault("expose_value", False)
402
- kwargs.setdefault("prompt", "Do you want to continue?")
403
- kwargs.setdefault("help", "Confirm the action without prompting.")
404
- return option(*param_decls, **kwargs)
405
-
406
-
407
- def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
408
- """Add a ``--password`` option which prompts for a password, hiding
409
- input and asking to enter the value again for confirmation.
410
-
411
- :param param_decls: One or more option names. Defaults to the single
412
- value ``"--password"``.
413
- :param kwargs: Extra arguments are passed to :func:`option`.
414
- """
415
- if not param_decls:
416
- param_decls = ("--password",)
417
-
418
- kwargs.setdefault("prompt", True)
419
- kwargs.setdefault("confirmation_prompt", True)
420
- kwargs.setdefault("hide_input", True)
421
- return option(*param_decls, **kwargs)
422
-
423
-
424
- def version_option(
425
- version: t.Optional[str] = None,
426
- *param_decls: str,
427
- package_name: t.Optional[str] = None,
428
- prog_name: t.Optional[str] = None,
429
- message: t.Optional[str] = None,
430
- **kwargs: t.Any,
431
- ) -> t.Callable[[FC], FC]:
432
- """Add a ``--version`` option which immediately prints the version
433
- number and exits the program.
434
-
435
- If ``version`` is not provided, Click will try to detect it using
436
- :func:`importlib.metadata.version` to get the version for the
437
- ``package_name``. On Python < 3.8, the ``importlib_metadata``
438
- backport must be installed.
439
-
440
- If ``package_name`` is not provided, Click will try to detect it by
441
- inspecting the stack frames. This will be used to detect the
442
- version, so it must match the name of the installed package.
443
-
444
- :param version: The version number to show. If not provided, Click
445
- will try to detect it.
446
- :param param_decls: One or more option names. Defaults to the single
447
- value ``"--version"``.
448
- :param package_name: The package name to detect the version from. If
449
- not provided, Click will try to detect it.
450
- :param prog_name: The name of the CLI to show in the message. If not
451
- provided, it will be detected from the command.
452
- :param message: The message to show. The values ``%(prog)s``,
453
- ``%(package)s``, and ``%(version)s`` are available. Defaults to
454
- ``"%(prog)s, version %(version)s"``.
455
- :param kwargs: Extra arguments are passed to :func:`option`.
456
- :raise RuntimeError: ``version`` could not be detected.
457
-
458
- .. versionchanged:: 8.0
459
- Add the ``package_name`` parameter, and the ``%(package)s``
460
- value for messages.
461
-
462
- .. versionchanged:: 8.0
463
- Use :mod:`importlib.metadata` instead of ``pkg_resources``. The
464
- version is detected based on the package name, not the entry
465
- point name. The Python package name must match the installed
466
- package name, or be passed with ``package_name=``.
467
- """
468
- if message is None:
469
- message = _("%(prog)s, version %(version)s")
470
-
471
- if version is None and package_name is None:
472
- frame = inspect.currentframe()
473
- f_back = frame.f_back if frame is not None else None
474
- f_globals = f_back.f_globals if f_back is not None else None
475
- # break reference cycle
476
- # https://docs.python.org/3/library/inspect.html#the-interpreter-stack
477
- del frame
478
-
479
- if f_globals is not None:
480
- package_name = f_globals.get("__name__")
481
-
482
- if package_name == "__main__":
483
- package_name = f_globals.get("__package__")
484
-
485
- if package_name:
486
- package_name = package_name.partition(".")[0]
487
-
488
- def callback(ctx: Context, param: Parameter, value: bool) -> None:
489
- if not value or ctx.resilient_parsing:
490
- return
491
-
492
- nonlocal prog_name
493
- nonlocal version
494
-
495
- if prog_name is None:
496
- prog_name = ctx.find_root().info_name
497
-
498
- if version is None and package_name is not None:
499
- metadata: t.Optional[types.ModuleType]
500
-
501
- try:
502
- from importlib import metadata # type: ignore
503
- except ImportError:
504
- # Python < 3.8
505
- import importlib_metadata as metadata # type: ignore
506
-
507
- try:
508
- version = metadata.version(package_name) # type: ignore
509
- except metadata.PackageNotFoundError: # type: ignore
510
- raise RuntimeError(
511
- f"{package_name!r} is not installed. Try passing"
512
- " 'package_name' instead."
513
- ) from None
514
-
515
- if version is None:
516
- raise RuntimeError(
517
- f"Could not determine the version for {package_name!r} automatically."
518
- )
519
-
520
- echo(
521
- message % {"prog": prog_name, "package": package_name, "version": version},
522
- color=ctx.color,
523
- )
524
- ctx.exit()
525
-
526
- if not param_decls:
527
- param_decls = ("--version",)
528
-
529
- kwargs.setdefault("is_flag", True)
530
- kwargs.setdefault("expose_value", False)
531
- kwargs.setdefault("is_eager", True)
532
- kwargs.setdefault("help", _("Show the version and exit."))
533
- kwargs["callback"] = callback
534
- return option(*param_decls, **kwargs)
535
-
536
-
537
- def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
538
- """Add a ``--help`` option which immediately prints the help page
539
- and exits the program.
540
-
541
- This is usually unnecessary, as the ``--help`` option is added to
542
- each command automatically unless ``add_help_option=False`` is
543
- passed.
544
-
545
- :param param_decls: One or more option names. Defaults to the single
546
- value ``"--help"``.
547
- :param kwargs: Extra arguments are passed to :func:`option`.
548
- """
549
-
550
- def callback(ctx: Context, param: Parameter, value: bool) -> None:
551
- if not value or ctx.resilient_parsing:
552
- return
553
-
554
- echo(ctx.get_help(), color=ctx.color)
555
- ctx.exit()
556
-
557
- if not param_decls:
558
- param_decls = ("--help",)
559
-
560
- kwargs.setdefault("is_flag", True)
561
- kwargs.setdefault("expose_value", False)
562
- kwargs.setdefault("is_eager", True)
563
- kwargs.setdefault("help", _("Show this message and exit."))
564
- kwargs["callback"] = callback
565
- return option(*param_decls, **kwargs)
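
The module deleted above is Click's decorator layer. As a reference point, here is a minimal sketch of how those decorators are typically combined in application code; the command name, option names, and messages below are illustrative assumptions, not values taken from the deleted file.

import click

@click.command()                                   # turns the function into a Command instance
@click.option("--count", default=1, help="Number of greetings.")
@click.argument("name")
@click.version_option("1.0.0", prog_name="hello")  # eager flag: prints the version and exits
def hello(count, name):
    """Greet NAME a number of times."""
    for _ in range(count):
        click.echo(f"Hello, {name}!")

if __name__ == "__main__":
    hello()

Running the script with --help shows the attached option and argument, and --version short-circuits execution before the callback runs, matching the eager-callback pattern used throughout the deleted module.
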
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/memory.py DELETED
@@ -1,293 +0,0 @@
1
- from __future__ import absolute_import, annotations, division, print_function
2
-
3
- import logging
4
- from datetime import datetime
5
- from errno import ENOTEMPTY
6
- from io import BytesIO
7
- from typing import Any, ClassVar
8
-
9
- from fsspec import AbstractFileSystem
10
-
11
- logger = logging.Logger("fsspec.memoryfs")
12
-
13
-
14
- class MemoryFileSystem(AbstractFileSystem):
15
- """A filesystem based on a dict of BytesIO objects
16
-
17
- This is a global filesystem so instances of this class all point to the same
18
- in memory filesystem.
19
- """
20
-
21
- store: ClassVar[dict[str, Any]] = {} # global, do not overwrite!
22
- pseudo_dirs = [""] # global, do not overwrite!
23
- protocol = "memory"
24
- root_marker = "/"
25
-
26
- @classmethod
27
- def _strip_protocol(cls, path):
28
- if path.startswith("memory://"):
29
- path = path[len("memory://") :]
30
- if "::" in path or "://" in path:
31
- return path.rstrip("/")
32
- path = path.lstrip("/").rstrip("/")
33
- return "/" + path if path else ""
34
-
35
- def ls(self, path, detail=True, **kwargs):
36
- path = self._strip_protocol(path)
37
- if path in self.store:
38
- # there is a key with this exact name
39
- if not detail:
40
- return [path]
41
- return [
42
- {
43
- "name": path,
44
- "size": self.store[path].size,
45
- "type": "file",
46
- "created": self.store[path].created.timestamp(),
47
- }
48
- ]
49
- paths = set()
50
- starter = path + "/"
51
- out = []
52
- for p2 in tuple(self.store):
53
- if p2.startswith(starter):
54
- if "/" not in p2[len(starter) :]:
55
- # exact child
56
- out.append(
57
- {
58
- "name": p2,
59
- "size": self.store[p2].size,
60
- "type": "file",
61
- "created": self.store[p2].created.timestamp(),
62
- }
63
- )
64
- elif len(p2) > len(starter):
65
- # implied child directory
66
- ppath = starter + p2[len(starter) :].split("/", 1)[0]
67
- if ppath not in paths:
68
- out = out or []
69
- out.append(
70
- {
71
- "name": ppath,
72
- "size": 0,
73
- "type": "directory",
74
- }
75
- )
76
- paths.add(ppath)
77
- for p2 in self.pseudo_dirs:
78
- if p2.startswith(starter):
79
- if "/" not in p2[len(starter) :]:
80
- # exact child pdir
81
- if p2 not in paths:
82
- out.append({"name": p2, "size": 0, "type": "directory"})
83
- paths.add(p2)
84
- else:
85
- # directory implied by deeper pdir
86
- ppath = starter + p2[len(starter) :].split("/", 1)[0]
87
- if ppath not in paths:
88
- out.append({"name": ppath, "size": 0, "type": "directory"})
89
- paths.add(ppath)
90
- if not out:
91
- if path in self.pseudo_dirs:
92
- # empty dir
93
- return []
94
- raise FileNotFoundError(path)
95
- if detail:
96
- return out
97
- return sorted([f["name"] for f in out])
98
-
99
- def mkdir(self, path, create_parents=True, **kwargs):
100
- path = self._strip_protocol(path)
101
- if path in self.store or path in self.pseudo_dirs:
102
- raise FileExistsError(path)
103
- if self._parent(path).strip("/") and self.isfile(self._parent(path)):
104
- raise NotADirectoryError(self._parent(path))
105
- if create_parents and self._parent(path).strip("/"):
106
- try:
107
- self.mkdir(self._parent(path), create_parents, **kwargs)
108
- except FileExistsError:
109
- pass
110
- if path and path not in self.pseudo_dirs:
111
- self.pseudo_dirs.append(path)
112
-
113
- def makedirs(self, path, exist_ok=False):
114
- try:
115
- self.mkdir(path, create_parents=True)
116
- except FileExistsError:
117
- if not exist_ok:
118
- raise
119
-
120
- def pipe_file(self, path, value, **kwargs):
121
- """Set the bytes of given file
122
-
123
- Avoids copies of the data if possible
124
- """
125
- self.open(path, "wb", data=value)
126
-
127
- def rmdir(self, path):
128
- path = self._strip_protocol(path)
129
- if path == "":
130
- # silently avoid deleting FS root
131
- return
132
- if path in self.pseudo_dirs:
133
- if not self.ls(path):
134
- self.pseudo_dirs.remove(path)
135
- else:
136
- raise OSError(ENOTEMPTY, "Directory not empty", path)
137
- else:
138
- raise FileNotFoundError(path)
139
-
140
- def exists(self, path, **kwargs):
141
- path = self._strip_protocol(path)
142
- return path in self.store or path in self.pseudo_dirs
143
-
144
- def info(self, path, **kwargs):
145
- path = self._strip_protocol(path)
146
- if path in self.pseudo_dirs or any(
147
- p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs
148
- ):
149
- return {
150
- "name": path,
151
- "size": 0,
152
- "type": "directory",
153
- }
154
- elif path in self.store:
155
- filelike = self.store[path]
156
- return {
157
- "name": path,
158
- "size": filelike.size,
159
- "type": "file",
160
- "created": getattr(filelike, "created", None),
161
- }
162
- else:
163
- raise FileNotFoundError(path)
164
-
165
- def _open(
166
- self,
167
- path,
168
- mode="rb",
169
- block_size=None,
170
- autocommit=True,
171
- cache_options=None,
172
- **kwargs,
173
- ):
174
- path = self._strip_protocol(path)
175
- if path in self.pseudo_dirs:
176
- raise IsADirectoryError(path)
177
- parent = path
178
- while len(parent) > 1:
179
- parent = self._parent(parent)
180
- if self.isfile(parent):
181
- raise FileExistsError(parent)
182
- if mode in ["rb", "ab", "rb+"]:
183
- if path in self.store:
184
- f = self.store[path]
185
- if mode == "ab":
186
- # position at the end of file
187
- f.seek(0, 2)
188
- else:
189
- # position at the beginning of file
190
- f.seek(0)
191
- return f
192
- else:
193
- raise FileNotFoundError(path)
194
- if mode == "wb":
195
- m = MemoryFile(self, path, kwargs.get("data"))
196
- if not self._intrans:
197
- m.commit()
198
- return m
199
-
200
- def cp_file(self, path1, path2, **kwargs):
201
- path1 = self._strip_protocol(path1)
202
- path2 = self._strip_protocol(path2)
203
- if self.isfile(path1):
204
- self.store[path2] = MemoryFile(
205
- self, path2, self.store[path1].getvalue()
206
- ) # implicit copy
207
- elif self.isdir(path1):
208
- if path2 not in self.pseudo_dirs:
209
- self.pseudo_dirs.append(path2)
210
- else:
211
- raise FileNotFoundError(path1)
212
-
213
- def cat_file(self, path, start=None, end=None, **kwargs):
214
- path = self._strip_protocol(path)
215
- try:
216
- return bytes(self.store[path].getbuffer()[start:end])
217
- except KeyError:
218
- raise FileNotFoundError(path)
219
-
220
- def _rm(self, path):
221
- path = self._strip_protocol(path)
222
- try:
223
- del self.store[path]
224
- except KeyError as e:
225
- raise FileNotFoundError(path) from e
226
-
227
- def modified(self, path):
228
- path = self._strip_protocol(path)
229
- try:
230
- return self.store[path].modified
231
- except KeyError:
232
- raise FileNotFoundError(path)
233
-
234
- def created(self, path):
235
- path = self._strip_protocol(path)
236
- try:
237
- return self.store[path].created
238
- except KeyError:
239
- raise FileNotFoundError(path)
240
-
241
- def rm(self, path, recursive=False, maxdepth=None):
242
- if isinstance(path, str):
243
- path = self._strip_protocol(path)
244
- else:
245
- path = [self._strip_protocol(p) for p in path]
246
- paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
247
- for p in reversed(paths):
248
- # If the expanded path doesn't exist, it is only because the expanded
249
- # path was a directory that does not exist in self.pseudo_dirs. This
250
- # is possible if you directly create files without making the
251
- # directories first.
252
- if not self.exists(p):
253
- continue
254
- if self.isfile(p):
255
- self.rm_file(p)
256
- else:
257
- self.rmdir(p)
258
-
259
-
260
- class MemoryFile(BytesIO):
261
- """A BytesIO which can't close and works as a context manager
262
-
263
- Can initialise with data. Each path should only be active once at any moment.
264
-
265
- No need to provide fs, path if auto-committing (default)
266
- """
267
-
268
- def __init__(self, fs=None, path=None, data=None):
269
- logger.debug("open file %s", path)
270
- self.fs = fs
271
- self.path = path
272
- self.created = datetime.utcnow()
273
- self.modified = datetime.utcnow()
274
- if data:
275
- super().__init__(data)
276
- self.seek(0)
277
-
278
- @property
279
- def size(self):
280
- return self.getbuffer().nbytes
281
-
282
- def __enter__(self):
283
- return self
284
-
285
- def close(self):
286
- pass
287
-
288
- def discard(self):
289
- pass
290
-
291
- def commit(self):
292
- self.fs.store[self.path] = self
293
- self.modified = datetime.utcnow()
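
The deleted module implements fsspec's in-memory filesystem. A minimal usage sketch follows, assuming the standard fsspec entry point; the paths and payload bytes are illustrative only.

import fsspec

# "memory" resolves to MemoryFileSystem; every instance shares the same global store.
fs = fsspec.filesystem("memory")
fs.mkdir("/demo")
fs.pipe_file("/demo/hello.txt", b"hello world")    # write bytes without an explicit open()

print(fs.ls("/demo", detail=False))                # ['/demo/hello.txt']
print(fs.cat_file("/demo/hello.txt"))              # b'hello world'

with fs.open("/demo/hello.txt", "rb") as f:        # hands back the stored MemoryFile (a BytesIO)
    print(f.read())

fs.rm("/demo", recursive=True)                     # removes the file, then the now-empty pseudo-directory

Because the store is a class-level dict, data written this way is visible to any other MemoryFileSystem instance in the same process until it is removed.
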
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/__init__.py DELETED
@@ -1,99 +0,0 @@
1
- # flake8: noqa
2
- #!/usr/bin/env python
3
- # coding=utf-8
4
- # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
5
- #
6
- # Licensed under the Apache License, Version 2.0 (the "License");
7
- # you may not use this file except in compliance with the License.
8
- # You may obtain a copy of the License at
9
- #
10
- # http://www.apache.org/licenses/LICENSE-2.0
11
- #
12
- # Unless required by applicable law or agreed to in writing, software
13
- # distributed under the License is distributed on an "AS IS" BASIS,
14
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
- # See the License for the specific language governing permissions and
16
- # limitations under the License
17
-
18
- from . import tqdm as _tqdm # _tqdm is the module
19
- from ._cache_assets import cached_assets_path
20
- from ._cache_manager import (
21
- CachedFileInfo,
22
- CachedRepoInfo,
23
- CachedRevisionInfo,
24
- CacheNotFound,
25
- CorruptedCacheException,
26
- DeleteCacheStrategy,
27
- HFCacheInfo,
28
- scan_cache_dir,
29
- )
30
- from ._chunk_utils import chunk_iterable
31
- from ._datetime import parse_datetime
32
- from ._errors import (
33
- BadRequestError,
34
- EntryNotFoundError,
35
- GatedRepoError,
36
- HfHubHTTPError,
37
- LocalEntryNotFoundError,
38
- RepositoryNotFoundError,
39
- RevisionNotFoundError,
40
- hf_raise_for_status,
41
- )
42
- from ._fixes import SoftTemporaryDirectory, yaml_dump
43
- from ._git_credential import list_credential_helpers, set_git_credential, unset_git_credential
44
- from ._headers import build_hf_headers, get_token_to_send, LocalTokenNotFoundError
45
- from ._hf_folder import HfFolder
46
- from ._http import configure_http_backend, get_session, http_backoff
47
- from ._pagination import paginate
48
- from ._paths import filter_repo_objects, IGNORE_GIT_FOLDER_PATTERNS
49
- from ._experimental import experimental
50
- from ._runtime import (
51
- dump_environment_info,
52
- get_aiohttp_version,
53
- get_fastai_version,
54
- get_fastcore_version,
55
- get_gradio_version,
56
- get_graphviz_version,
57
- get_hf_hub_version,
58
- get_hf_transfer_version,
59
- get_jinja_version,
60
- get_numpy_version,
61
- get_pillow_version,
62
- get_pydantic_version,
63
- get_pydot_version,
64
- get_python_version,
65
- get_tensorboard_version,
66
- get_tf_version,
67
- get_torch_version,
68
- is_aiohttp_available,
69
- is_fastai_available,
70
- is_fastcore_available,
71
- is_numpy_available,
72
- is_google_colab,
73
- is_gradio_available,
74
- is_graphviz_available,
75
- is_hf_transfer_available,
76
- is_jinja_available,
77
- is_notebook,
78
- is_pillow_available,
79
- is_pydantic_available,
80
- is_pydot_available,
81
- is_tensorboard_available,
82
- is_tf_available,
83
- is_torch_available,
84
- )
85
- from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess
86
- from ._validators import (
87
- HFValidationError,
88
- smoothly_deprecate_use_auth_token,
89
- validate_hf_hub_args,
90
- validate_repo_id,
91
- )
92
- from .tqdm import (
93
- are_progress_bars_disabled,
94
- disable_progress_bars,
95
- enable_progress_bars,
96
- tqdm,
97
- tqdm_stream_file,
98
- )
99
- from ._telemetry import send_telemetry
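
The deleted file only re-exports internal helpers under huggingface_hub.utils. As a small, hedged illustration of two of the listed names (the repo ids below are made up for the example):

from huggingface_hub.utils import HFValidationError, validate_repo_id

for repo_id in ("bert-base-uncased", "user//not a valid id"):
    try:
        validate_repo_id(repo_id)                  # raises HFValidationError on malformed ids
        print(repo_id, "-> ok")
    except HFValidationError as err:
        print(repo_id, "-> rejected:", err)
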
spaces/DorisB/streamlit-app/pages/02_Recommendation_system.py DELETED
@@ -1,433 +0,0 @@
1
- import streamlit as st
2
- import pandas as pd
3
- import numpy as np
4
- from PIL import Image
5
- import matplotlib.pyplot as plt
6
- import glob
7
- from scipy.sparse import csr_matrix
8
- from sklearn.neighbors import NearestNeighbors
9
- import pickle
10
- import io
11
- import streamlit.components.v1 as stc
12
-
13
-
14
-
15
-
16
-
17
-
18
- def main():
19
-
20
- st.set_page_config(layout="wide", initial_sidebar_state='expanded')
21
- st.image("images/logo-recom2.png", width=100)
22
- with open('style.css') as f:
23
- st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
24
-
25
- hide_menu = """
26
- <style>
27
- #MainMenu {
28
- visibility:visible;
29
- }
30
-
31
- footer{
32
- visibility:visible;
33
- }
34
- footer:after {
35
- content: 'Recom © 2022 - Doris BAILLARD';
36
- display: block;
37
- position: relative;
38
- color:blue;
39
-
40
- }
41
- </style>
42
- """
43
-
44
- st.markdown(hide_menu, unsafe_allow_html=True)
45
-
46
-
47
- sidebar_header = '''This is a demo of the Recom solution, version 1.0.0. It gathers the main options. Please give it a try:'''
48
-
49
- page_options = ["Recommendations based on reviews",
50
- "Recommendations based on product similarity",
51
- "Generate email"]
52
-
53
- st.sidebar.info(sidebar_header)
54
-
55
-
56
-
57
- page_selection = st.sidebar.radio("Try", page_options)
58
-
59
-
60
- #########################################################################################
61
- if page_selection == "Recommendations based on reviews":
62
- pid_to_idx = pd.read_pickle("data/pid_to_idx.pkl")
63
- idx_to_pid = pd.read_pickle("data/idx_to_pid.pkl")
64
-
65
-
66
- products = pd.read_pickle("data/products.pkl")
67
- lightfm_similarity = pd.read_pickle("data/lightfm-similarity.pkl")
68
- items_pivot=pd.read_pickle("data/items_pivot.pkl")
69
- items_sparse = csr_matrix(items_pivot)
70
- model = NearestNeighbors(algorithm="brute")
71
- model.fit(items_sparse)
72
-
73
-
74
- def get_product_name(pid, products):
75
- try:
76
- name = products.loc[products.product_ids == pid].titles.values[0]
77
- except:
78
- name = "Unknown"
79
- return name
80
-
81
- def get_product_id(name):
82
- try:
83
- product_id = products.loc[products.titles == name].product_ids.values[0]
84
- except:
85
- product_id = "Unknown"
86
- return product_id
87
-
88
-
89
- def get_sim_scores(pid):
90
- idx = pid_to_idx[pid]
91
- sims = lightfm_similarity[idx]
92
- return sims
93
-
94
-
95
- def get_ranked_recos(sims):
96
- recos = []
97
-
98
- for idx in np.argsort(-sims):
99
- pid = idx_to_pid[idx]
100
- name = get_product_name(pid, products)
101
- score = sims[idx]
102
- recos.append((name,pid))
103
- return recos
104
-
105
-
106
-
107
-
108
- st.markdown('<p style="color:darkblue;font-size:160%">Products Recommender System</p>', unsafe_allow_html=True)
109
- st.markdown('This option lets you publish "appreciated by other customers" product recommendations on your site. These recommendations are based on reviews left by other customers.')
110
- product_list = products['titles'].values
111
- selected_product = st.selectbox(
112
- "Type or select a product from the dropdown",
113
- product_list
114
- )
115
- product_id = get_product_id(selected_product)
116
- sims = get_sim_scores(product_id)
117
- result = get_ranked_recos(sims)[:5]
118
-
119
- recommendation_button = st.button('Show Recommendation')
120
- if recommendation_button:
121
- product_id = get_product_id(selected_product)
122
- sims = get_sim_scores(product_id)
123
- result = get_ranked_recos(sims)[:5]
124
-
125
-
126
- with st.form("reco1"):
127
- cols = st.columns((1, 3))
128
- cols[0].image('images/'+result[0][1]+'.jpg', width=200)
129
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID: </p>', unsafe_allow_html=True)
130
- cols[1].markdown(result[0])
131
- cols[1].markdown("Url : yoursiteurl")
132
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
133
- cols[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
134
- with cols[1]:
135
- submitted = st.form_submit_button('Deploy')
136
- submitted_all = st.form_submit_button('Deploy All')
137
-
138
-
139
-
140
-
141
- with st.form("reco2"):
142
- cols2 = st.columns((1, 3))
143
- cols2[0].image('images/'+result[1][1]+'.jpg', width=200)
144
- cols2[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
145
- cols2[1].text(result[1])
146
- cols2[1].markdown("Url : yoursiteurl")
147
- cols2[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
148
- cols2[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
149
- cols2[1].form_submit_button('Deploy')
150
- cols2[1].form_submit_button('Deploy All')
151
-
152
-
153
-
154
- with st.form("reco3"):
155
- cols3= st.columns((1, 3))
156
- cols3[0].image('images/'+result[2][1]+'.jpg', width=200)
157
- cols3[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
158
- cols3[1].text(result[2])
159
- cols3[1].markdown("Url : yoursiteurl")
160
- cols3[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
161
- cols3[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
162
- cols3[1].form_submit_button('Deploy')
163
- cols3[1].form_submit_button('Deploy All')
164
-
165
-
166
- with st.form("reco4"):
167
- cols4 = st.columns((1, 3))
168
- cols4[0].image('images/'+result[3][1]+'.jpg', width=200)
169
- cols4[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
170
- cols4[1].text(result[3])
171
- cols4[1].markdown("Url : yoursiteurl")
172
- cols4[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
173
- cols4[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
174
- cols4[1].form_submit_button('Deploy')
175
- cols4[1].form_submit_button('Deploy All')
176
-
177
-
178
-
179
- with st.form("reco5"):
180
- cols5 = st.columns((1, 3))
181
- cols5[0].image('images/'+result[4][1]+'.jpg', width=200)
182
- cols5[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
183
- cols5[1].text(result[4])
184
- cols5[1].markdown("Url : yoursiteurl")
185
- cols5[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
186
- cols5[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
187
- cols5[1].form_submit_button('Deploy')
188
- cols5[1].form_submit_button('Deploy All')
189
-
190
- st.markdown("<hr/>", unsafe_allow_html=True)
191
- st.success("Successfully deployed!")
192
- st.success("Output of 'Deploy':")
193
- st.write("Clients also liked : ")
194
- st.image('images/'+result[0][1]+'.jpg', width=150)
195
- st.markdown(result[0][0])
196
-
197
- st.markdown("<hr/>", unsafe_allow_html=True)
198
- st.success("Successfully deployed!")
199
- st.success("Output of 'Deploy All':")
200
-
201
- st.write("Clients also liked : ")
202
- cols_deploy = st.columns(5)
203
- cols_deploy[0].image('images/'+result[0][1]+'.jpg', width=150)
204
- cols_deploy[0].markdown(result[0][0])
205
- cols_deploy[1].image('images/'+result[1][1]+'.jpg', width=150)
206
- cols_deploy[1].markdown(result[1][0])
207
- cols_deploy[2].image('images/'+result[2][1]+'.jpg', width=150)
208
- cols_deploy[2].markdown(result[2][0])
209
- cols_deploy[3].image('images/'+result[3][1]+'.jpg', width=150)
210
- cols_deploy[3].markdown(result[3][0])
211
- cols_deploy[4].image('images/'+result[4][1]+'.jpg', width=150)
212
- cols_deploy[4].markdown(result[4][0])
213
-
214
- #########################################################################################
215
-
216
-
217
- if page_selection == "Recommendations based on product similarity":
218
- products = pd.read_pickle("data/products.pkl")
219
- feature_list = np.array(pickle.load(open('data/embeddings.pkl','rb')))
220
- filenames = pickle.load(open('data/filenamesdf.pkl','rb'))
221
-
222
-
223
- def recommend(features,feature_list):
224
- neighbors = NearestNeighbors(n_neighbors=6, algorithm='brute', metric='euclidean')
225
- neighbors.fit(feature_list)
226
-
227
- distances, indices = neighbors.kneighbors([features])
228
-
229
- return indices
230
-
231
- def reco2(indices):
232
- for i in range(len(indices)):
233
- cols = st.columns((1, 3))
234
- cols[1].markdown(filenames.index[indices[i]])
235
- cols[1].markdown(filenames.titles[indices[i]])
236
- cols[0].image(filenames.image_path[indices[i]].tolist())
237
-
238
-
239
- st.markdown("<p style='color:darkblue;font-size:160%'>Recommender System - image similarity</p>", unsafe_allow_html=True)
240
- st.markdown('This feature lets you publish "similar products" recommendations on your site. These recommendations are based on visual similarity between product images.')
241
- product_list = filenames['titles'].values
242
-
243
- selected_product = st.selectbox("Type or select a product from the dropdown",product_list)
244
-
245
- if st.button('Recommendation'):
246
-
247
- id = np.where(selected_product == product_list)
248
- id2 = int(id[0])
249
-
250
- result= recommend(feature_list[id2],feature_list)
251
- results = list(result)
252
-
253
- with st.form("reco1"):
254
- cols = st.columns((1, 3))
255
- cols[0].image(filenames.image_path[results[0]].tolist()[0], width=200)
256
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
257
- cols[1].markdown(filenames.titles[results[0]].tolist()[0])
258
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
259
- cols[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
260
- cols[1].form_submit_button('Deploy')
261
-
262
- with st.form("reco2"):
263
- cols = st.columns((1, 3))
264
- cols[0].image(filenames.image_path[results[0]].tolist()[1], width=200)
265
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
266
- cols[1].markdown(filenames.titles[results[0]].tolist()[1])
267
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
268
- cols[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
269
- cols[1].form_submit_button('Deploy')
270
-
271
-
272
- with st.form("reco3"):
273
- cols = st.columns((1, 3))
274
- cols[0].image(filenames.image_path[results[0]].tolist()[2], width=200)
275
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
276
- cols[1].markdown(filenames.titles[results[0]].tolist()[2])
277
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
278
- cols[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
279
- cols[1].form_submit_button('Deploy')
280
-
281
- with st.form("reco4"):
282
- cols = st.columns((1, 3))
283
- cols[0].image(filenames.image_path[results[0]].tolist()[3], width=200)
284
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
285
- cols[1].markdown(filenames.titles[results[0]].tolist()[3])
286
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
287
- cols[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
288
- cols[1].form_submit_button('Deploy')
289
-
290
- with st.form("reco5"):
291
- cols = st.columns((1, 3))
292
- cols[0].image(filenames.image_path[results[0]].tolist()[4], width=200)
293
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Product Name and ID:</p>', unsafe_allow_html=True)
294
- cols[1].markdown(filenames.titles[results[0]].tolist()[4])
295
- cols[1].markdown('<p style="color:#3498db;font-size:160%">Description :</p>', unsafe_allow_html=True)
296
- cols[1].text('Lorem ipsum dolor sit amet, consectetur adipiscing elit,\nsed do eiusmod tempor incididunt \nut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco.')
297
- cols[1].form_submit_button('Deploy')
298
- cols[1].form_submit_button('Deploy All')
299
-
300
- st.markdown("<hr/>", unsafe_allow_html=True)
301
- st.success("Successfully deployed!")
302
- st.success("Output of 'Deploy':")
303
- st.write("Similar products : ")
304
- st.image(filenames.image_path[results[0]].tolist()[0], width=150)
305
- st.markdown(filenames.titles[results[0]].tolist()[0])
306
-
307
- st.markdown("<hr/>", unsafe_allow_html=True)
308
- st.success("Successfully deployed!")
309
- st.success("Output of 'Deploy All':")
310
- st.write("Similar products : ")
311
- cols_deploy = st.columns(5)
312
- cols_deploy[0].image(filenames.image_path[results[0]].tolist()[0], width=150)
313
- cols_deploy[0].markdown(filenames.titles[results[0]].tolist()[0])
314
- cols_deploy[1].image(filenames.image_path[results[0]].tolist()[1], width=150)
315
- cols_deploy[1].markdown(filenames.titles[results[0]].tolist()[1])
316
- cols_deploy[2].image(filenames.image_path[results[0]].tolist()[2], width=150)
317
- cols_deploy[2].markdown(filenames.titles[results[0]].tolist()[2])
318
- cols_deploy[3].image(filenames.image_path[results[0]].tolist()[3], width=150)
319
- cols_deploy[3].markdown(filenames.titles[results[0]].tolist()[3])
320
- cols_deploy[4].image(filenames.image_path[results[0]].tolist()[4], width=150)
321
- cols_deploy[4].markdown(filenames.titles[results[0]].tolist()[4])
322
-
323
-
324
- #########################################################################################
325
-
326
-
327
- if page_selection == "Generate email":
328
- user_cart = pd.read_pickle('data/user_cart.pkl')
329
- name_list = user_cart['user_name'].values
330
- feature_list = np.array(pickle.load(open('data/embeddings.pkl','rb')))
331
- filenames = pickle.load(open('data/filenamesdf.pkl','rb'))
332
-
333
-
334
- def recommend(features,feature_list):
335
- neighbors = NearestNeighbors(n_neighbors=6, algorithm='brute', metric='euclidean')
336
- neighbors.fit(feature_list)
337
-
338
- distances, indices = neighbors.kneighbors([features])
339
-
340
- return indices
341
-
342
- def reco2(indices):
343
- for i in range(len(indices)):
344
- cols = st.columns((1, 3))
345
- cols[1].markdown(filenames.index[indices[i]])
346
- cols[1].markdown(filenames.titles[indices[i]])
347
- cols[0].image(filenames.image_path[indices[i]].tolist())
348
-
349
- st.markdown("<p style='color:darkblue;font-size:160%'>Mail Generator</p>", unsafe_allow_html=True)
350
- st.markdown('With this feature you will be able to generate automatic emails to ask for feedback from old and new customers.\nThis email will include a request for feedback on the last product purchased, \nas well as product recommendations.', unsafe_allow_html=True)
351
-
352
- selected_user = st.selectbox("Type or select a user from the dropdown",name_list)
353
-
354
-
355
- if st.button('generate mailing'):
356
-
357
- name = np.where(selected_user == user_cart['user_name'].values)
358
- name_id = int(name[0])
359
- reco_mailing2 = recommend(feature_list[name_id],feature_list)
360
- reco_mailing2 = list(reco_mailing2)
361
-
362
- st.markdown('>To: '+ selected_user+'', unsafe_allow_html=True)
363
- st.markdown("<hr/>", unsafe_allow_html=True)
364
-
365
- st.markdown("<p style='color:Black;font-size:160%'>Hello "+ selected_user+' :)!</p>', unsafe_allow_html=True)
366
-
367
-
368
- st.markdown('We are constantly striving to improve, and we’d love to hear your feedback on your latest order:')
369
-
370
- st.markdown('<p style="font-size:120%">Rate your purchased product:</p>', unsafe_allow_html=True)
371
- #[a scale from 1 to 5]
372
- cols= st.columns((3))
373
- cols[0].empty()
374
- cols[1].markdown(user_cart.titles[name_id])
375
- cols[1].image(user_cart.image_path[name_id], width=200)
376
- cols[1].markdown(":star::star::star::star::star:")
377
-
378
- cols[1].button('Rate this article on company.com')
379
- cols[2].empty()
380
-
381
- st.text('\n\n')
382
-
383
-
384
-
385
- st.markdown('Your feedback helps us improve and reach more great customers like you.')
386
-
387
-
388
- st.text('\n\n')
389
- st.markdown("<hr/>", unsafe_allow_html=True)
390
-
391
- st.markdown('<p style="font-size:120%">Discover products in the same category: </p>', unsafe_allow_html=True)
392
- st.text('\n\n')
393
- cols_mail = st.columns((7))
394
- cols_mail[0].empty()
395
- cols_mail[1].image(filenames.image_path[reco_mailing2[0]].tolist()[0], width=100)
396
- cols_mail[1].markdown(filenames.titles[reco_mailing2[0]].tolist()[0])
397
- cols_mail[1].markdown(":star::star::star::star::star:")
398
- cols_mail[2].image(filenames.image_path[reco_mailing2[0]].tolist()[1], width=100)
399
- cols_mail[2].markdown(filenames.titles[reco_mailing2[0]].tolist()[1])
400
- cols_mail[2].markdown(":star::star::star::star:")
401
- cols_mail[3].image(filenames.image_path[reco_mailing2[0]].tolist()[2], width=100)
402
- cols_mail[3].markdown(filenames.titles[reco_mailing2[0]].tolist()[2])
403
- cols_mail[3].markdown(":star::star::star::star::star:")
404
- cols_mail[4].image(filenames.image_path[reco_mailing2[0]].tolist()[3], width=100)
405
- cols_mail[4].markdown(filenames.titles[reco_mailing2[0]].tolist()[3])
406
- cols_mail[4].markdown(":star::star::star::star:")
407
- cols_mail[5].image(filenames.image_path[reco_mailing2[0]].tolist()[4], width=100)
408
- cols_mail[5].markdown(filenames.titles[reco_mailing2[0]].tolist()[4])
409
- cols_mail[5].markdown(":star::star::star::star::star:")
410
- cols_mail[6].empty()
411
-
412
- st.text('\n\n')
413
- st.markdown("<hr/>", unsafe_allow_html=True)
414
- st.markdown('<p style="font-size:120%">Discover our new products: </p>', unsafe_allow_html=True)
415
- st.text('\n\n')
416
- cols_mail2 = st.columns((4))
417
-
418
- cols_mail2[0].image('images/velo.jpg', width=200)
419
- cols_mail2[1].image('images/sac.jpg', width=200)
420
- cols_mail2[2].image('images/shoes.jpg', width=200)
421
- cols_mail2[3].image('images/survet.jpg', width=200)
422
-
423
- st.text('\n\n\n\n')
424
- st.markdown("<hr/>", unsafe_allow_html=True)
425
-
426
- st.markdown('Always yours,')
427
- st.markdown('[company] team')
428
-
429
-
430
-
431
-
432
- if __name__ == '__main__':
433
- main()
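Both recommendation pages in the file above reduce to the same nearest-neighbour lookup over a feature matrix (a review-based pivot table in one case, image embeddings in the other). Below is a minimal, self-contained sketch of that core step using the same scikit-learn API the app relies on; the toy data and function name are illustrative assumptions, not the app's own code.

# Minimal sketch of the item-to-item lookup used by both recommendation pages:
# fit NearestNeighbors on a (n_items, n_features) matrix and return the k
# closest items to a query item. Toy data below is illustrative only.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def similar_items(feature_matrix: np.ndarray, item_index: int, k: int = 5) -> list[int]:
    model = NearestNeighbors(n_neighbors=k + 1, algorithm="brute", metric="euclidean")
    model.fit(feature_matrix)
    # kneighbors returns the query item itself as the closest hit, so drop it.
    _, indices = model.kneighbors(feature_matrix[item_index].reshape(1, -1))
    return [i for i in indices[0] if i != item_index][:k]

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    embeddings = rng.normal(size=(20, 8))  # 20 items, 8-dim features
    print(similar_items(embeddings, item_index=3, k=5))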
 
spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/persistence.py DELETED
@@ -1,262 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4
- #
5
- # NVIDIA CORPORATION and its licensors retain all intellectual property
6
- # and proprietary rights in and to this software, related documentation
7
- # and any modifications thereto. Any use, reproduction, disclosure or
8
- # distribution of this software and related documentation without an express
9
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
-
11
- """Facilities for pickling Python code alongside other data.
12
-
13
- The pickled code is automatically imported into a separate Python module
14
- during unpickling. This way, any previously exported pickles will remain
15
- usable even if the original code is no longer available, or if the current
16
- version of the code is not consistent with what was originally pickled."""
17
-
18
- import sys
19
- import pickle
20
- import io
21
- import inspect
22
- import copy
23
- import uuid
24
- import types
25
- import dnnlib
26
-
27
- # ----------------------------------------------------------------------------
28
-
29
- _version = 6 # internal version number
30
- _decorators = set() # {decorator_class, ...}
31
- _import_hooks = [] # [hook_function, ...]
32
- _module_to_src_dict = dict() # {module: src, ...}
33
- _src_to_module_dict = dict() # {src: module, ...}
34
-
35
- # ----------------------------------------------------------------------------
36
-
37
-
38
- def persistent_class(orig_class):
39
- r"""Class decorator that extends a given class to save its source code
40
- when pickled.
41
-
42
- Example:
43
-
44
- from torch_utils import persistence
45
-
46
- @persistence.persistent_class
47
- class MyNetwork(torch.nn.Module):
48
- def __init__(self, num_inputs, num_outputs):
49
- super().__init__()
50
- self.fc = MyLayer(num_inputs, num_outputs)
51
- ...
52
-
53
- @persistence.persistent_class
54
- class MyLayer(torch.nn.Module):
55
- ...
56
-
57
- When pickled, any instance of `MyNetwork` and `MyLayer` will save its
58
- source code alongside other internal state (e.g., parameters, buffers,
59
- and submodules). This way, any previously exported pickle will remain
60
- usable even if the class definitions have been modified or are no
61
- longer available.
62
-
63
- The decorator saves the source code of the entire Python module
64
- containing the decorated class. It does *not* save the source code of
65
- any imported modules. Thus, the imported modules must be available
66
- during unpickling, also including `torch_utils.persistence` itself.
67
-
68
- It is ok to call functions defined in the same module from the
69
- decorated class. However, if the decorated class depends on other
70
- classes defined in the same module, they must be decorated as well.
71
- This is illustrated in the above example in the case of `MyLayer`.
72
-
73
- It is also possible to employ the decorator just-in-time before
74
- calling the constructor. For example:
75
-
76
- cls = MyLayer
77
- if want_to_make_it_persistent:
78
- cls = persistence.persistent_class(cls)
79
- layer = cls(num_inputs, num_outputs)
80
-
81
- As an additional feature, the decorator also keeps track of the
82
- arguments that were used to construct each instance of the decorated
83
- class. The arguments can be queried via `obj.init_args` and
84
- `obj.init_kwargs`, and they are automatically pickled alongside other
85
- object state. A typical use case is to first unpickle a previous
86
- instance of a persistent class, and then upgrade it to use the latest
87
- version of the source code:
88
-
89
- with open('old_pickle.pkl', 'rb') as f:
90
- old_net = pickle.load(f)
91
- new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
92
- misc.copy_params_and_buffers(old_net, new_net, require_all=True)
93
- """
94
- assert isinstance(orig_class, type)
95
- if is_persistent(orig_class):
96
- return orig_class
97
-
98
- assert orig_class.__module__ in sys.modules
99
- orig_module = sys.modules[orig_class.__module__]
100
- orig_module_src = _module_to_src(orig_module)
101
-
102
- class Decorator(orig_class):
103
- _orig_module_src = orig_module_src
104
- _orig_class_name = orig_class.__name__
105
-
106
- def __init__(self, *args, **kwargs):
107
- super().__init__(*args, **kwargs)
108
- self._init_args = copy.deepcopy(args)
109
- self._init_kwargs = copy.deepcopy(kwargs)
110
- assert orig_class.__name__ in orig_module.__dict__
111
- _check_pickleable(self.__reduce__())
112
-
113
- @property
114
- def init_args(self):
115
- return copy.deepcopy(self._init_args)
116
-
117
- @property
118
- def init_kwargs(self):
119
- return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
120
-
121
- def __reduce__(self):
122
- fields = list(super().__reduce__())
123
- fields += [None] * max(3 - len(fields), 0)
124
- if fields[0] is not _reconstruct_persistent_obj:
125
- meta = dict(type='class', version=_version, module_src=self._orig_module_src,
126
- class_name=self._orig_class_name, state=fields[2])
127
- fields[0] = _reconstruct_persistent_obj # reconstruct func
128
- fields[1] = (meta,) # reconstruct args
129
- fields[2] = None # state dict
130
- return tuple(fields)
131
-
132
- Decorator.__name__ = orig_class.__name__
133
- _decorators.add(Decorator)
134
- return Decorator
135
-
136
- # ----------------------------------------------------------------------------
137
-
138
-
139
- def is_persistent(obj):
140
- r"""Test whether the given object or class is persistent, i.e.,
141
- whether it will save its source code when pickled.
142
- """
143
- try:
144
- if obj in _decorators:
145
- return True
146
- except TypeError:
147
- pass
148
- return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
149
-
150
- # ----------------------------------------------------------------------------
151
-
152
-
153
- def import_hook(hook):
154
- r"""Register an import hook that is called whenever a persistent object
155
- is being unpickled. A typical use case is to patch the pickled source
156
- code to avoid errors and inconsistencies when the API of some imported
157
- module has changed.
158
-
159
- The hook should have the following signature:
160
-
161
- hook(meta) -> modified meta
162
-
163
- `meta` is an instance of `dnnlib.EasyDict` with the following fields:
164
-
165
- type: Type of the persistent object, e.g. `'class'`.
166
- version: Internal version number of `torch_utils.persistence`.
167
- module_src: Original source code of the Python module.
168
- class_name: Class name in the original Python module.
169
- state: Internal state of the object.
170
-
171
- Example:
172
-
173
- @persistence.import_hook
174
- def wreck_my_network(meta):
175
- if meta.class_name == 'MyNetwork':
176
- print('MyNetwork is being imported. I will wreck it!')
177
- meta.module_src = meta.module_src.replace("True", "False")
178
- return meta
179
- """
180
- assert callable(hook)
181
- _import_hooks.append(hook)
182
-
183
- # ----------------------------------------------------------------------------
184
-
185
-
186
- def _reconstruct_persistent_obj(meta):
187
- r"""Hook that is called internally by the `pickle` module to unpickle
188
- a persistent object.
189
- """
190
- meta = dnnlib.EasyDict(meta)
191
- meta.state = dnnlib.EasyDict(meta.state)
192
- for hook in _import_hooks:
193
- meta = hook(meta)
194
- assert meta is not None
195
-
196
- assert meta.version == _version
197
- module = _src_to_module(meta.module_src)
198
-
199
- assert meta.type == 'class'
200
- orig_class = module.__dict__[meta.class_name]
201
- decorator_class = persistent_class(orig_class)
202
- obj = decorator_class.__new__(decorator_class)
203
-
204
- setstate = getattr(obj, '__setstate__', None)
205
- if callable(setstate):
206
- setstate(meta.state) # pylint: disable=not-callable
207
- else:
208
- obj.__dict__.update(meta.state)
209
- return obj
210
-
211
- # ----------------------------------------------------------------------------
212
-
213
-
214
- def _module_to_src(module):
215
- r"""Query the source code of a given Python module.
216
- """
217
- src = _module_to_src_dict.get(module, None)
218
- if src is None:
219
- src = inspect.getsource(module)
220
- _module_to_src_dict[module] = src
221
- _src_to_module_dict[src] = module
222
- return src
223
-
224
-
225
- def _src_to_module(src):
226
- r"""Get or create a Python module for the given source code.
227
- """
228
- module = _src_to_module_dict.get(src, None)
229
- if module is None:
230
- module_name = "_imported_module_" + uuid.uuid4().hex
231
- module = types.ModuleType(module_name)
232
- sys.modules[module_name] = module
233
- _module_to_src_dict[module] = src
234
- _src_to_module_dict[src] = module
235
- exec(src, module.__dict__) # pylint: disable=exec-used
236
- return module
237
-
238
- # ----------------------------------------------------------------------------
239
-
240
-
241
- def _check_pickleable(obj):
242
- r"""Check that the given object is pickleable, raising an exception if
243
- it is not. This function is expected to be considerably more efficient
244
- than actually pickling the object.
245
- """
246
- def recurse(obj):
247
- if isinstance(obj, (list, tuple, set)):
248
- return [recurse(x) for x in obj]
249
- if isinstance(obj, dict):
250
- return [[recurse(x), recurse(y)] for x, y in obj.items()]
251
- if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
252
- return None # Python primitive types are pickleable.
253
- if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:
254
- return None # NumPy arrays and PyTorch tensors are pickleable.
255
- if is_persistent(obj):
256
- # Persistent objects are pickleable, by virtue of the constructor check.
257
- return None
258
- return obj
259
- with io.BytesIO() as f:
260
- pickle.dump(recurse(obj), f)
261
-
262
- # ----------------------------------------------------------------------------
 
spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/include/kalmanFilter.h DELETED
@@ -1,31 +0,0 @@
1
- #pragma once
2
-
3
- #include "dataType.h"
4
-
5
- namespace byte_kalman
6
- {
7
- class KalmanFilter
8
- {
9
- public:
10
- static const double chi2inv95[10];
11
- KalmanFilter();
12
- KAL_DATA initiate(const DETECTBOX& measurement);
13
- void predict(KAL_MEAN& mean, KAL_COVA& covariance);
14
- KAL_HDATA project(const KAL_MEAN& mean, const KAL_COVA& covariance);
15
- KAL_DATA update(const KAL_MEAN& mean,
16
- const KAL_COVA& covariance,
17
- const DETECTBOX& measurement);
18
-
19
- Eigen::Matrix<float, 1, -1> gating_distance(
20
- const KAL_MEAN& mean,
21
- const KAL_COVA& covariance,
22
- const std::vector<DETECTBOX>& measurements,
23
- bool only_position = false);
24
-
25
- private:
26
- Eigen::Matrix<float, 8, 8, Eigen::RowMajor> _motion_mat;
27
- Eigen::Matrix<float, 4, 8, Eigen::RowMajor> _update_mat;
28
- float _std_weight_position;
29
- float _std_weight_velocity;
30
- };
31
- }
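The class declared above follows the usual SORT-style formulation: an 8-dimensional constant-velocity state [x, y, a, h, vx, vy, va, vh] observed through a 4-dimensional box measurement. The following NumPy sketch shows the predict/update cycle those methods implement; the noise matrices and the exact formulation are assumptions for illustration, not the verbatim ByteTrack code.

# Hedged NumPy sketch of the constant-velocity Kalman cycle behind predict()/update().
# State: [x, y, a, h, vx, vy, va, vh]; measurement: [x, y, a, h]. Q and R are assumed inputs.
import numpy as np

NDIM, DT = 4, 1.0
F = np.eye(2 * NDIM)                         # motion matrix: position += DT * velocity
for i in range(NDIM):
    F[i, NDIM + i] = DT
H = np.eye(NDIM, 2 * NDIM)                   # projects the state onto the measurement space

def predict(mean, cov, Q):
    return F @ mean, F @ cov @ F.T + Q

def update(mean, cov, z, R):
    S = H @ cov @ H.T + R                    # innovation covariance
    K = cov @ H.T @ np.linalg.inv(S)         # Kalman gain
    mean = mean + K @ (z - H @ mean)         # correct with the innovation
    cov = (np.eye(2 * NDIM) - K @ H) @ cov
    return mean, cov

if __name__ == "__main__":
    mean, cov = np.zeros(8), np.eye(8)
    mean, cov = predict(mean, cov, Q=0.01 * np.eye(8))
    mean, cov = update(mean, cov, z=np.array([1.0, 2.0, 0.5, 4.0]), R=0.1 * np.eye(4))
    print(mean[:4])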
 
spaces/ECCV2022/bytetrack/tutorials/ctracker/generate_half_csv.py DELETED
@@ -1,37 +0,0 @@
1
- import os
2
- import numpy as np
3
- prefix_dir = 'MOT17/'
4
- root_dir = 'train/'
5
- result_csv = 'train_half_annots.csv'
6
- train_half_set = {2: 301, 4: 526, 5:419, 9:263, 10:328, 11:451, 13:376}
7
- fout = open(result_csv, 'w')
8
-
9
- for data_name in sorted(os.listdir(prefix_dir + root_dir)):
10
- print(data_name)
11
- gt_path = os.path.join(prefix_dir, root_dir, data_name, 'gt', 'gt.txt')
12
- # print(gt_path)
13
- data_raw = np.loadtxt(gt_path, delimiter=',', dtype='float', usecols=(0,1,2,3,4,5,6,7,8))
14
-
15
- data_sort = data_raw[np.lexsort(data_raw[:,::-1].T)]
16
- visible_raw = data_sort[:,8]
17
- # print(data_sort)
18
- # print(data_sort[-1, 0])
19
- img_num = data_sort[-1, 0]
20
-
21
- # print(data_sort.shape[0])
22
- box_num = data_sort.shape[0]
23
-
24
- person_box_num = np.sum(data_sort[:,6] == 1)
25
- # print(person_box_num)
26
- # import ipdb; ipdb.set_trace()
27
- for i in range(box_num):
28
- c = int(data_sort[i, 6])
29
- v = visible_raw[i]
30
- img_index = int(data_sort[i, 0])
31
- if c == 1 and v > 0.1 and img_index < train_half_set[int(data_name[-2:])]:
32
- img_index = int(data_sort[i, 0])
33
- img_name = data_name + '/img1/' + str(img_index).zfill(6) + '.jpg'
34
- print(root_dir + img_name + ', ' + str(int(data_sort[i, 1])) + ', ' + str(data_sort[i, 2]) + ', ' + str(data_sort[i, 3]) + ', ' + str(data_sort[i, 2] + data_sort[i, 4]) + ', ' + str(data_sort[i, 3] + data_sort[i, 5]) + ', person\n')
35
- fout.write(root_dir + img_name + ', ' + str(int(data_sort[i, 1])) + ', ' + str(data_sort[i, 2]) + ', ' + str(data_sort[i, 3]) + ', ' + str(data_sort[i, 2] + data_sort[i, 4]) + ', ' + str(data_sort[i, 3] + data_sort[i, 5]) + ', person\n')
36
-
37
- fout.close()
 
spaces/Eddevs/brian-challenge/app.py DELETED
@@ -1,98 +0,0 @@
1
- import streamlit as st
2
- from transformers import T5ForConditionalGeneration, T5Tokenizer
3
-
4
- from fill_in_summary import FillInSummary
5
- from paraphrase import PegasusParaphraser
6
- import question_gen as q
7
-
8
-
9
- default_text = "Apple was founded as Apple Computer Company on April 1, 1976, by Steve Jobs, Steve Wozniak and Ronald " \
10
- "Wayne to develop and sell Wozniak's Apple I personal computer. It was incorporated by Jobs and " \
11
- "Wozniak as Apple Computer, Inc. in 1977 and the company's next computer, the Apple II became a best " \
12
- "seller. Apple went public in 1980, to instant financial success. The company went onto develop new " \
13
- "computers featuring innovative graphical user interfaces, including the original Macintosh, " \
14
- "announced in a critically acclaimed advertisement, '1984', directed by Ridley Scott. By 1985, " \
15
- "the high cost of its products and power struggles between executives caused problems. Wozniak stepped " \
16
- "back from Apple amicably, while Jobs resigned to found NeXT, taking some Apple employees with him. "
17
-
18
- default_text2 = "The board of directors instructed Sculley to contain Jobs and his ability to launch expensive forays " \
19
- "into untested products "
20
-
21
-
22
-
23
- st.set_page_config(layout="centered")
24
-
25
- st.header('Question Gen/Paraph/Summarizer by Ed-Devs')
26
-
27
- st.write('The goal of this Space is to help educators lower the recognisability of assessment questions and, in doing so, enable students to achieve higher learning goals.')
28
-
29
- """
30
- * Generate questions by providing a context 🤩
31
- * Paraphrase the questions you already have 😮
32
- * Summarise sections of course material and use the summary as context for question generation 🤯
33
- * Remove entities from the context to generate fill-in-the-blank questions 🤓
34
- """
35
-
36
- st.write('Select one of the options below.')
37
-
38
-
39
- select = st.selectbox('Type', ['Question Generator', 'Paraphrasing', 'Summarization', 'Fill in the blank'])
40
-
41
- if select == "Question Generator":
42
- with st.form("question_gen"):
43
- left_column, right_column = st.columns(2)
44
- num_seq = left_column.slider('Question Count', 0, 10, 3)
45
- beams = right_column.slider('Beams', 0, 10, 5)
46
- max_length = st.slider('Max Length', 0, 1024, 300)
47
- text_input = st.text_area("Input Text", value=default_text)
48
-
49
- submitted = st.form_submit_button("Generate")
50
- if submitted:
51
- with st.spinner('Wait for it...'):
52
- question_model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_squad_v1')
53
- question_tokenizer = T5Tokenizer.from_pretrained('ramsrigouthamg/t5_squad_v1')
54
-
55
- result = q.get_question(text_input, "", question_model, question_tokenizer, num_seq, beams, max_length)
56
- st.write(result)
57
-
58
-
59
- elif select == "Summarization":
60
- with st.form("summarization"):
61
- text_input = st.text_area("Input Text", value=default_text)
62
-
63
- submitted = st.form_submit_button("Generate")
64
-
65
- if submitted:
66
- with st.spinner('Wait for it...'):
67
- result = FillInSummary().summarize(text_input)
68
- st.write(result)
69
-
70
-
71
- elif select == "Fill in the blank":
72
- with st.form("fill_in_the_blank"):
73
- text_input = st.text_area("Input Text", value=default_text)
74
-
75
- submitted = st.form_submit_button("Generate")
76
-
77
- if submitted:
78
- with st.spinner('Wait for it...'):
79
- fill = FillInSummary()
80
- result = fill.summarize(text_input)
81
- result = fill.blank_ne_out(result)
82
- st.write(result)
83
-
84
-
85
- elif select == "Paraphrasing":
86
- with st.form("paraphrasing"):
87
- left_column, right_column = st.columns(2)
88
- count = left_column.slider('Count', 0, 10, 3)
89
- temperature = right_column.slider('Temperature', 0.0, 10.0, 1.5)
90
- text_input = st.text_area("Input Text", value=default_text2)
91
-
92
- submitted = st.form_submit_button("Generate")
93
-
94
- if submitted:
95
- with st.spinner('Wait for it...'):
96
- paraphrase_model = PegasusParaphraser(num_return_sequences=count, temperature=temperature)
97
- result = paraphrase_model.paraphrase(text_input)
98
- st.write(result)
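The question_gen module imported as q above is not included in this diff, so its exact contents are unknown. Below is a hedged sketch of what a get_question-style helper around ramsrigouthamg/t5_squad_v1 typically looks like; the "context: ... answer: ..." prompt format and the function signature are assumptions, not the app's actual implementation.

# Hedged sketch of a get_question-style helper; the prompt format and the
# signature are assumptions about the missing question_gen module.
from transformers import T5ForConditionalGeneration, T5Tokenizer

def get_question(context: str, answer: str, model, tokenizer,
                 num_seq: int = 3, beams: int = 5, max_length: int = 300) -> list[str]:
    prompt = f"context: {context} answer: {answer}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    outputs = model.generate(
        **inputs,
        num_beams=beams,
        num_return_sequences=min(num_seq, beams),  # cannot return more sequences than beams
        max_length=max_length,
        early_stopping=True,
    )
    return [tokenizer.decode(ids, skip_special_tokens=True) for ids in outputs]

if __name__ == "__main__":
    model = T5ForConditionalGeneration.from_pretrained("ramsrigouthamg/t5_squad_v1")
    tokenizer = T5Tokenizer.from_pretrained("ramsrigouthamg/t5_squad_v1")
    print(get_question("Apple was founded in 1976 by Steve Jobs.", "Steve Jobs",
                       model, tokenizer))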