Commit 35262c2
Parent(s): 745742a
Update parquet files (step 69 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/3.7 V Batteries How to Choose Use and Maintain Them for Optimal Performance.md +0 -28
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Edge Animate Cc 2014 Serial 66 New Features and Enhancements for Animation Designers.md +0 -114
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Lightroom 4 Amtlib.dll UPD.md +0 -23
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chris Letchford Guitar Technique Book Pdf.md +0 -26
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Your Own Boot Screen with Gfx Boot Customizer 1.0.0.6 51.md +0 -122
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easiestsoft Video Converter 1 2 1 With [TOP] Keygen Onkelz Anhohren Tolle Welche.md +0 -123
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film indian online subtitrat cu Salman Khan Wanted cum s supravieuieti n lumea mafiei i s ctigi inima unei femei.md +0 -141
- spaces/1gistliPinn/ChatGPT4/Examples/Foundationsfluidmechanicsswyuanpdfdownloadstorrent NEW!.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bacardi 2.0 Mp3 Download Enjoy Thama Tees Latest Hit Song.md +0 -132
- spaces/1phancelerku/anime-remove-background/Atlantis Odyssey Mod APK Terbaru Tips and Tricks for Beginners.md +0 -117
- spaces/1phancelerku/anime-remove-background/Download Skin FR Legends and Enjoy the Thrill of Front-Engine Rear-Wheel Drive Racing.md +0 -114
- spaces/1toTree/lora_test/app.py +0 -1677
- spaces/AAYUSH27/Neuro/README.md +0 -54
- spaces/AIWaves/Debate/src/agents/LLM/base_LLM.py +0 -133
- spaces/Adapting/YouTube-Downloader/tube/download.py +0 -48
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/roundrectangle.d.ts +0 -2
- spaces/Ali36Ahmad/magic-diffusion/README.md +0 -14
- spaces/Amrrs/DragGan-Inversion/PTI/training/__init__.py +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py +0 -246
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipelines_flax.py +0 -260
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py +0 -9
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/embedding_rpn_head.py +0 -100
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/vfnet.py +0 -18
- spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/fpn.py +0 -221
- spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py +0 -7
- spaces/AnimalEquality/chatbot/_proc/_docs/index.html +0 -535
- spaces/Artificio/AdversarialArt/src/utils.py +0 -35
- spaces/Asahi402/Real-CUGAN/README.md +0 -14
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/temp_dir.py +0 -246
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/spawn.py +0 -109
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/setupcfg.py +0 -762
- spaces/Awesimo/jojogan/e4e/metrics/LEC.py +0 -134
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/README.md +0 -49
- spaces/Bala2-03-2003/BRAHMAMAI/README.md +0 -12
- spaces/Benson/text-generation/Examples/2023 Apk Fuego Libre.md +0 -68
- spaces/Benson/text-generation/Examples/Creality Cr Studio Descargar.md +0 -71
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/util.py +0 -513
- spaces/Biswa13/Examples-Of-AI-2023/app.py +0 -856
- spaces/Boilin/URetinex-Net/app.py +0 -8
- spaces/Bonosa2/movies/app.py +0 -76
- spaces/CVPR/LIVE/thrust/thrust/detail/mpl/math.h +0 -174
- spaces/CVPR/LIVE/thrust/thrust/mr/validator.h +0 -50
- spaces/CVPR/WALT/cwalt_generate.py +0 -14
- spaces/CVPR/WALT/mmdet/models/detectors/fast_rcnn.py +0 -52
- spaces/CVPR/WALT/mmdet/models/roi_heads/mask_scoring_roi_head.py +0 -122
- spaces/CVPR/regionclip-demo/detectron2/modeling/anchor_generator.py +0 -382
- spaces/CarlDennis/HYTTS/text/symbols.py +0 -23
- spaces/CikeyQI/Yunzai/Yunzai/plugins/other/sendLog.js +0 -78
spaces/1acneusushi/gradio-2dmoleculeeditor/data/3.7 V Batteries How to Choose Use and Maintain Them for Optimal Performance.md
DELETED
@@ -1,28 +0,0 @@
-<br />
-<h1>How Long Do 3.7 V Batteries Last and How to Make Them Last Longer?</h1>
-<p>If you use devices that run on lithium-ion batteries, such as smartphones, tablets, laptops, or electric vehicles, you may wonder how long do 3.7 V batteries last and how to extend their lifespan. In this article, we will answer these questions and provide some useful tips on how to care for your 3.7 V batteries.</p>
-<h2>What are 3.7 V Batteries?</h2>
-<p>3.7 V batteries are a type of lithium-ion batteries that have a nominal voltage of 3.7 volts. They are also known as ternary lithium batteries because they use a combination of three metals (nickel, cobalt, and manganese) as the cathode material. The anode material is usually graphite.</p>
-<h2>how long do 3.7 v batteries last</h2><br /><p><b><b>Download Zip</b> ->->->-> <a href="https://byltly.com/2uKyqG">https://byltly.com/2uKyqG</a></b></p><br /><br />
-<p>3.7 V batteries are widely used for powering various devices, such as smartphones, tablets, laptops, cameras, drones, electric bikes, and electric vehicles. They have many advantages over other types of batteries, such as:</p>
-<ul>
-<li>High energy density: They can store more energy per unit weight and volume than other batteries.</li>
-<li>Long cycle life: They can be recharged and discharged for hundreds or thousands of times without losing much capacity.</li>
-<li>Low self-discharge: They lose only about 3.5% of their charge per month when stored at room temperature.</li>
-<li>No memory effect: They do not need to be fully discharged before recharging to maintain their performance.</li>
-<li>Environmentally friendly: They do not contain toxic metals such as lead, mercury, or cadmium.</li>
-</ul>
-<h2>How Long Do 3.7 V Batteries Last?</h2>
-<p>The answer to this question depends on several factors, such as the capacity of the battery, the power consumption of the device, the charging and discharging habits of the user, and the environmental conditions.</p>
-<p>The capacity of the battery is measured in milliampere-hours (mAh), which indicates how much current the battery can provide for one hour. The higher the capacity, the longer the battery can last. For example, a 1000 mAh battery can provide 1000 mA of current for one hour, or 500 mA for two hours, or 250 mA for four hours, and so on.</p>
-<p>The power consumption of the device is measured in watts (W), which indicates how much energy the device uses per unit time. The higher the power consumption, the faster the battery drains. For example, a 100 W light bulb consumes 100 W of energy per hour, while a 10 W LED bulb consumes only 10 W of energy per hour.</p>
-<p>The charging and discharging habits of the user also affect the lifespan of the battery. Generally speaking, it is better to keep the battery between 20% and 80% of its full charge level and avoid overcharging or overdischarging it. Overcharging can cause overheating and damage the battery cells, while overdischarging can cause voltage drop and reduce the battery performance.</p>
-<p>The environmental conditions also play a role in the battery life. High temperatures can accelerate the chemical reactions inside the battery and degrade its capacity and performance. Low temperatures can slow down the chemical reactions and reduce the available capacity and power output. Ideally, the battery should be stored and used at room temperature (around 25°C).</p>
-<p>Given these factors, it is hard to give an exact number for how long a 3.7 V battery can last. However, we can give some rough estimates based on some common scenarios:</p>
-<p></p>
-<ul>
-<li>If you use a 3.7 V battery with a capacity of 1000 mAh to power a device that consumes 100 mA of current (such as a flashlight), it can last for about 10 hours before needing to be recharged.</li>
-<li>If you use a 3.7 V battery with a capacity of 3000 mAh to power a device that consumes 500 mA of current (such as a smartphone), it can last for about 6 hours before needing to be recharged.</li>
-<li>If you use a 3.7 V battery with a capacity of 5000 mAh to power a device that consumes 1000 mA of current (such as a tablet), it can last for about 5 hours before needing</p> ddb901b051<br />
-<br />
-<br />
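Editor's note: the runtime estimates in the deleted file above all follow one rule of thumb, hours ≈ capacity (mAh) ÷ current draw (mA). A minimal Python sketch of that calculation, reproducing the file's three scenarios (the values and device labels come from the file; everything else is illustrative):

```python
def runtime_hours(capacity_mah: float, draw_ma: float) -> float:
    """Rough runtime estimate: capacity (mAh) divided by current draw (mA).

    Ignores real-world losses (temperature, aging, discharge cutoff),
    so treat the result as an upper bound, as the article itself notes.
    """
    return capacity_mah / draw_ma

# The three scenarios from the deleted article:
for capacity, draw, device in [(1000, 100, "flashlight"),
                               (3000, 500, "smartphone"),
                               (5000, 1000, "tablet")]:
    print(f"{capacity} mAh at {draw} mA ({device}): ~{runtime_hours(capacity, draw):g} h")
```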
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Edge Animate Cc 2014 Serial 66 New Features and Enhancements for Animation Designers.md
DELETED
@@ -1,114 +0,0 @@
-
-<h1>Adobe Edge Animate CC 2014: A Powerful Tool for Creating Rich Animations</h1>
-<p>If you are looking for a web development tool that lets you create stunning animations using HTML5, JavaScript, and CSS3, you should check out Adobe Edge Animate CC 2014. This software is part of the Adobe Edge suite, which also includes Edge Code, Edge Reflow, Edge Inspect, and Edge Web Fonts. You can download a free 30-day trial version of Adobe Edge Animate CC 2014 from Adobe Creative Cloud and see for yourself what it can do.</p>
-<h2>Adobe Edge Animate Cc 2014 Serial 66</h2><br /><p><b><b>DOWNLOAD</b> 🆓 <a href="https://byltly.com/2uKzjf">https://byltly.com/2uKzjf</a></b></p><br /><br />
-<p>In this article, we will give you an overview of what Adobe Edge Animate CC 2014 is, what are its new features and enhancements, and how to get started with it. By the end of this article, you will have a better understanding of how Adobe Edge Animate CC 2014 can help you create rich animations for your web projects.</p>
-<h2>What is Adobe Edge Animate CC 2014?</h2>
-<p>Adobe Edge Animate CC 2014 is a web development tool that uses HTML5, JavaScript, and CSS3 functionality to create animations that run on any modern browser or device. You can use it to create interactive banners, infographics, slideshows, games, and more. You can also import assets from other Adobe tools such as Photoshop, Illustrator, or Flash Professional.</p>
-<p>Adobe Edge Animate CC 2014 is a part of the Adobe Edge suite, which is a collection of tools and services that help you design and develop responsive web content. The other tools in the suite are:</p>
-<ul>
-<li>Edge Code: A code editor that integrates with Edge Animate and other web tools.</li>
-<li>Edge Reflow: A responsive design tool that lets you create layouts that adapt to different screen sizes.</li>
-<li>Edge Inspect: A tool that lets you preview and debug your web content across multiple devices.</li>
-<li>Edge Web Fonts: A service that provides access to hundreds of free web fonts.</li>
-</ul>
-<p>You can download a free 30-day trial version of Adobe Edge Animate CC 2014 from Adobe Creative Cloud. You will need an Adobe ID to sign in and access the software. You can also purchase a subscription plan that gives you access to all the tools in the suite as well as other benefits such as cloud storage, online services, and updates.</p>
-<h2>What are the new features and enhancements of Adobe Edge Animate CC 2014?</h2>
-<p>The 2014 release of Adobe Edge Animate CC includes several new features and enhancements that make it easier and faster to create rich animations. Here are some of them:</p>
-<h3>Support for native HTML5 video</h3>
-<p>Adobe Edge Animate CC 2014 provides an intuitive user interface that lets you import HTML5 videos into your compositions. You can drag and drop a video file from your computer or browse for one online. The video then can be used as part of an overlay and can have other composition elements animate around it. You can also control the playback options such as autoplay, loop, mute, or volume.</p>
-<p>How to activate Adobe Edge Animate Cc 2014 with serial number<br />
-Adobe Edge Animate Cc 2014 crack download free<br />
-Adobe Edge Animate Cc 2014 license key generator online<br />
-Adobe Edge Animate Cc 2014 full version for windows 10<br />
-Adobe Edge Animate Cc 2014 tutorial pdf download<br />
-Adobe Edge Animate Cc 2014 serial key expired fix<br />
-Adobe Edge Animate Cc 2014 mac os x download<br />
-Adobe Edge Animate Cc 2014 alternative software<br />
-Adobe Edge Animate Cc 2014 system requirements<br />
-Adobe Edge Animate Cc 2014 discount coupon code<br />
-Adobe Edge Animate Cc 2014 review and features<br />
-Adobe Edge Animate Cc 2014 update patch download<br />
-Adobe Edge Animate Cc 2014 offline installer setup<br />
-Adobe Edge Animate Cc 2014 product key finder<br />
-Adobe Edge Animate Cc 2014 trial version download<br />
-Adobe Edge Animate Cc 2014 serial number not working<br />
-Adobe Edge Animate Cc 2014 keygen free download<br />
-Adobe Edge Animate Cc 2014 animation examples and templates<br />
-Adobe Edge Animate Cc 2014 vs flash professional<br />
-Adobe Edge Animate Cc 2014 support and help<br />
-Adobe Edge Animate Cc 2014 activation code free<br />
-Adobe Edge Animate Cc 2014 crack file download<br />
-Adobe Edge Animate Cc 2014 license key list<br />
-Adobe Edge Animate Cc 2014 full version for windows 7<br />
-Adobe Edge Animate Cc 2014 tutorial video download<br />
-Adobe Edge Animate Cc 2014 serial key generator online<br />
-Adobe Edge Animate Cc 2014 mac os x crack<br />
-Adobe Edge Animate Cc 2014 best practices and tips<br />
-Adobe Edge Animate Cc 2014 minimum requirements<br />
-Adobe Edge Animate Cc 2014 promo code and offer<br />
-Adobe Edge Animate Cc 2014 feedback and rating<br />
-Adobe Edge Animate Cc 2014 latest version download<br />
-Adobe Edge Animate Cc 2014 online installer setup<br />
-Adobe Edge Animate Cc 2014 product key checker<br />
-Adobe Edge Animate Cc 2014 free download with crack<br />
-Adobe Edge Animate Cc 2014 serial number invalid fix<br />
-Adobe Edge Animate Cc 2014 keygen download free<br />
-Adobe Edge Animate Cc 2014 animation tutorial for beginners<br />
-Adobe Edge Animate Cc 2014 comparison with other tools<br />
-Adobe Edge Animate Cc 2014 contact and support number<br />
-Adobe Edge Animate Cc 2014 registration code free<br />
-Adobe Edge Animate Cc 2014 crack only download<br />
-Adobe Edge Animate Cc 2014 license key free download<br />
-Adobe Edge Animate Cc 2014 full version for windows xp <br />
-Adobe Edge Animate Cc 2014 tutorial ebook download <br />
-Adobe Edge Animate Cc 2014 serial key crack online <br />
-Adobe Edge Animate Cc 2014 mac os x serial number <br />
-Adobe Edge Animate Cc 2014 learning resources and guides <br />
-Adobe Edge Animate Cc 2014 recommended requirements <br />
-Adobe Edge Animate Cc 2014 coupon code and deal</p>
-<p>One of the advantages of using native HTML5 video is that it plays on iOS and Android devices as well as in modern desktop browsers. You don't need to worry about converting your video into different formats or using plugins such as Flash Player.</p>
-<p>For more information on how to use video in your animations, see <a href="https://help.adobe.com/archive/en/edge-animate/cc/2014/edge_animate_reference.pdf">Add video to animations</a>.</p>
-<h3>Sprite sheet import</h3>
-<p>With Adobe Edge Animate CC 2014, you can import sprite sheets to add advanced, multi-frame animations to your compositions. Sprite sheets are images that contain multiple frames of an animation in a single file. They let your graphics download faster with fewer HTTP requests.</p>
-<p>You can import sprite sheets (File > Import Spritesheet) generated in Adobe Flash Professional CC 2014 or any other tool that lets you generate sprite sheets. You can then adjust the settings such as frame rate, frame size, number of rows and columns, etc. Automatic keyframing of sprites on import saves time by reducing effort spent with manual positioning of frames.</p>
-<p>For more information on how to import sprite sheets into Adobe Edge Animate CC 2014, see <a href="https://help.adobe.com/archive/en/edge-animate/cc/2014/edge_animate_reference.pdf">Import sprite sheets</a>.</p>
-<h3>Article linking for Adobe DPS</h3>
-<p>Adobe Edge Animate CC 2014 lets you link to your Adobe InDesign or DPS Folio articles using the options on the user interface without writing code. You can create interactive title pages, table of contents, and advanced navigation to target articles and article subsections of your digital publications more easily and quickly.</p>
-<p>You can use the Link To option in the Properties panel to select an article or a subsection from a list of available options. You can also use the Go To Folio Page option to jump to a specific page number in your folio.</p>
-<p>For more information on how to link elements to eBook articles using Adobe Edge Animate CC 2014, see <a href="https://help.adobe.com/archive/en/edge-animate/cc/2014/edge_animate_reference.pdf">Hyperlink elements to eBook articles</a>.</p>
-<h3>Enhanced Actions editor</h3>
-<p>The Actions pop-up window has been redesigned to be more designer-friendly and reduce the need to code. The enhanced Actions editor makes it easier to add interactivity and more approachable for designers.</p>
-<p>The new Actions editor visually guides you through the various steps in assigning actions to targets. You can follow these steps:</p>
-<ol>
-<li>Pick an Action - Actions are now logically grouped into categories such as Timeline Control, Element Control, Navigation Control, etc. If you know the name of the action, you can search for it using the search box. Otherwise, pick a category to view the actions in it and click the required action.</li>
-<li>Pick a Target - Targets are grouped under Stage. Click Stage to view the target elements. When you click Stage, you may find a subcategory for Symbols if your composition contains symbols. Double-click the target element.</li>
-<li>Modify the code snippet as required. You can use the code hints feature to autocomplete syntax or parameters.</li>
-</ol>
-<p>If you find portions of code that are reused often, you can save them as snippets and insert them with a single click when required. You can also access predefined snippets such as Stop All Timelines or Play All Timelines from the Snippets menu.</p>
-- Mac OS: Multicore Intel processor; Mac OS X v10.7 or v10.8; 1 GB of RAM; 200 MB of available hard-disk space for installation; Internet connection and registration are necessary for required software activation.</li>
-<li>Q: How can I update Adobe Edge Animate CC 2014?<br>
-A: You can update Adobe Edge Animate CC 2014 by using the Adobe Creative Cloud desktop app. You can also check for updates manually by clicking Help > Updates in the software.</li>
-<li>Q: How can I get help or support for Adobe Edge Animate CC 2014?<br>
-A: You can get help or support for Adobe Edge Animate CC 2014 by visiting the official website at <a href="https://www.adobe.com/products/edge-animate.html">https://www.adobe.com/products/edge-animate.html</a>. You can also find tutorials, videos, forums, blogs, and other resources at <a href="https://helpx.adobe.com/edge-animate.html">https://helpx.adobe.com/edge-animate.html</a>. You can also contact Adobe customer care or technical support by phone, chat, or email.</li>
-<li>Q: How can I learn more about Adobe Edge Animate CC 2014?<br>
-A: You can learn more about Adobe Edge Animate CC 2014 by taking online courses or reading books on the topic. Some of the online courses are:<br>
-- <a href="https://www.lynda.com/Edge-Animate-tutorials/Edge-Animate-CC-2014-New-Features/179055-2.html">Edge Animate CC 2014 New Features</a> by Paul Trani<br>
-- <a href="https://www.lynda.com/Edge-Animate-tutorials/Edge-Animate-Essential-Training/107420-2.html">Edge Animate Essential Training</a> by Chris Converse<br>
-- <a href="https://www.udemy.com/course/adobe-edge-animate-cc/">Adobe Edge Animate CC - Create Interactive HTML Animation</a> by Infinite Skills<br>
-Some of the books on the topic are:<br>
-- <a href="https://www.amazon.com/Adobe-Edge-Animate-Classroom-Book/dp/0133927921">Adobe Edge Animate CC Classroom in a Book</a> by Adobe Creative Team<br>
-- <a href="https://www.amazon.com/Adobe-Edge-Animate-Missing-Manual/dp/1449342000">Adobe Edge Animate: The Missing Manual</a> by Chris Grover<br>
-- <a href="https://www.amazon.com/Learning-Adobe-Edge-Animate-Course/dp/1849693549">Learning Adobe Edge Animate</a> by Joseph Labrecque</li>
-<li>Q: How can I share my Adobe Edge Animate CC 2014 projects with others?<br>
-A: You can share your Adobe Edge Animate CC 2014 projects with others by publishing them to the web or other platforms. You can also export them as OAM files and import them into other Adobe tools such as InDesign, Muse, Dreamweaver, or Flash Professional. You can also share your projects on social media platforms such as Facebook, Twitter, or Google+.</li>
-<li>Q: How can I get feedback or suggestions for my Adobe Edge Animate CC 2014 projects?<br>
-A: You can get feedback or suggestions for your Adobe Edge Animate CC 2014 projects by joining online communities of other users and experts. Some of the online communities are:<br>
-- <a href="https://forums.adobe.com/community/animate">Adobe Edge Animate Forum</a><br>
-- <a href="https://edgehero.com/">Edge Hero</a><br>
-- <a href="http://edgedocks.com/">EdgeDocks</a><br>
-You can also participate in contests or challenges that are hosted by Adobe or other organizations.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
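Editor's note: the sprite sheet section in the file above rests on simple grid arithmetic — a sheet split into rows and columns yields frames of size (width ÷ columns) × (height ÷ rows) at predictable offsets. A minimal Python sketch of that frame-slicing logic (a generic illustration of how sprite sheets are indexed, not Edge Animate's importer; the function name and numbers are hypothetical):

```python
def sprite_frames(sheet_w: int, sheet_h: int, rows: int, cols: int):
    """Yield (x, y, w, h) rectangles for each frame of a uniform sprite sheet,
    reading frames left-to-right, top-to-bottom."""
    frame_w, frame_h = sheet_w // cols, sheet_h // rows
    for row in range(rows):
        for col in range(cols):
            yield (col * frame_w, row * frame_h, frame_w, frame_h)

# Example: an 800x600 sheet holding 4 rows x 5 cols = 20 frames of 160x150 each.
for rect in sprite_frames(800, 600, rows=4, cols=5):
    print(rect)
```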
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Lightroom 4 Amtlib.dll UPD.md
DELETED
@@ -1,23 +0,0 @@
-<br />
-<h1>How to Fix Adobe Lightroom 4 amtlib.dll Missing Error</h1>
-<p>If you are trying to open Adobe Lightroom 4 and get an error message saying that amtlib.dll is missing, you are not alone. Many users have reported this problem and there are some possible solutions to fix it.</p>
-<p>amtlib.dll is a file that belongs to Adobe Systems, Incorporated AMT Licensing, which is a component of Adobe products that handles the activation and licensing of the software. If this file is corrupted, deleted, or misplaced, you may encounter errors when trying to run Adobe Lightroom 4 or other Adobe applications.</p>
-<h2>adobe lightroom 4 amtlib.dll</h2><br /><p><b><b>Download</b> ✒ ✒ ✒ <a href="https://byltly.com/2uKxk3">https://byltly.com/2uKxk3</a></b></p><br /><br />
-<p>Here are some steps you can try to fix the amtlib.dll missing error:</p>
-<ol>
-<li>Reinstall Adobe Lightroom 4. The easiest and most recommended way to fix the error is to uninstall and reinstall Adobe Lightroom 4 using the original installation media or the download link from Adobe's website[^1^]. This will ensure that you have the latest and correct version of amtlib.dll and other files needed for the software to run properly.</li>
-<li>Use the Adobe Cleaner Tool. If reinstalling Adobe Lightroom 4 does not work, you can try using the Adobe Cleaner Tool to remove any traces of the software from your system. The Adobe Cleaner Tool is a utility that can help you resolve installation problems by removing corrupted or incompatible files and registry entries related to Adobe products[^1^]. You can download and run the Adobe Cleaner Tool from this link: Use the Creative Cloud Cleaner Tool to solve installation problems</li>
-<li>Download amtlib.dll from a trusted source. If none of the above methods work, you can try downloading amtlib.dll from a reliable website that provides DLL files for free. However, this is not recommended as it may expose your system to malware or viruses, or cause compatibility issues with other Adobe products. If you decide to download amtlib.dll from a third-party source, make sure you scan it with an antivirus program before copying it to your system folder or your Adobe Lightroom 4 installation folder[^2^] [^3^].</li>
-</ol>
-<p>We hope this article has helped you fix the amtlib.dll missing error and enjoy using Adobe Lightroom 4. If you have any questions or feedback, please leave a comment below.</p><p>Here are some more tips and tricks to use Adobe Lightroom 4 effectively:</p>
-<ul>
-<li>Use presets to apply different effects and adjustments to your photos with one click. You can find presets in the Develop module, under the Presets panel. You can also create your own presets or download presets from other users online.</li>
-<li>Use the histogram to check the exposure and contrast of your photos. The histogram is a graphical representation of the distribution of tones in your image, from black to white. You can find the histogram in the top right corner of the Library and Develop modules. You can adjust the exposure and contrast of your photos by dragging the sliders below the histogram or by using the Basic panel.</li>
-<li>Use the crop and straighten tools to improve the composition and alignment of your photos. You can access these tools by clicking on the Crop Overlay icon in the toolbar below the photo or by pressing R on your keyboard. You can drag the corners or sides of the crop box to resize it, or rotate it by dragging outside the box. You can also use the Angle slider or the Straighten tool to level the horizon or vertical lines in your photo.</li>
-<li>Use keywords and collections to organize and find your photos easily. Keywords are descriptive words or phrases that you can assign to your photos to help you search for them later. You can add keywords to your photos in the Library module, under the Keywording panel. Collections are groups of photos that you can create based on any criteria you want. You can create collections in the Library module, under the Collections panel.</li>
-<li>Use the export function to save and share your photos in different formats and sizes. You can export your photos by selecting them in the Library module and clicking on File > Export or by pressing Ctrl+Shift+E on your keyboard. You can choose where to save your photos, what format and quality to use, how to rename them, and how to resize them. You can also export your photos directly to email, web, or other applications.</li>
-</ul>
-<p>We hope this article has helped you learn more about Adobe Lightroom 4 and how to use it for your photography needs. If you have any questions or feedback, please leave a comment below.</p>
-<p></p> 7b8c122e87<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chris Letchford Guitar Technique Book Pdf.md
DELETED
@@ -1,26 +0,0 @@
-
-<h1>Review: Chris Letchford's Guitar Technique Book</h1>
-<p>If you are a fan of progressive instrumental metal band Scale The Summit, you might be interested in learning from their guitarist and founder Chris Letchford. He has released a guitar technique book that contains 52 exercises for 6-string guitars, covering various aspects of modern guitar playing such as alternate picking, legato, sweeping, tapping, hybrid picking, string skipping, and more.</p>
-<p>The book is not your typical technique book with a bunch of non melodic, unusable ideas. They are all melodic, which can be applied for song writing, improv, and everyday day guitar use. It's a perfect book for building your chops and mastering the fingerboard! Great for all styles of music, from classical to metal, rock, latin, jazz, and country.</p>
-<h2>Chris Letchford Guitar Technique Book Pdf</h2><br /><p><b><b>DOWNLOAD</b> ★★★★★ <a href="https://byltly.com/2uKyCA">https://byltly.com/2uKyCA</a></b></p><br /><br />
-<p>The book is also spiral bound, which makes it stay open when laying flat or on a music stand. Why all music books aren't made this way is beyond me!</p>
-<p>Each exercise is notated with standard notation and tablature, as well as fingerings, pick directions, and tapping fingers. The exercises are designed to challenge your technique, accuracy, speed, and musicality. They are also fun to play and sound great.</p>
-<p>Chris Letchford is an accomplished guitarist who has studied at MIT, Berklee School of Music and the Houston Community College. He has also toured with Dream Theater and other renowned bands. He knows what he is talking about when it comes to guitar technique and music theory.</p>
-<p>If you want to learn from one of the best guitarists in the genre and improve your skills on the instrument, you should definitely check out Chris Letchford's Guitar Technique Book. You can order it from his website or from Amazon. You can also get the tab books for Scale The Summit's albums if you want to learn their songs.</p>
-<p>Chris Letchford's Guitar Technique Book is a valuable resource for any guitarist who wants to take their playing to the next level. It is well written, well presented, and well worth the money.</p>
-<p></p>
-
-<p>Here are some examples of the exercises from the book:</p>
-<ul>
-<li>Exercise 1: This exercise is a simple alternate picking exercise that uses the major scale in three octaves. It helps you develop your picking accuracy and speed across the strings. You can practice it in different keys and positions.</li>
-<li>Exercise 10: This exercise is a legato exercise that uses the harmonic minor scale in three octaves. It helps you develop your finger strength and coordination on the fretboard. You can practice it with different rhythms and articulations.</li>
-<li>Exercise 20: This exercise is a sweeping exercise that uses the diminished arpeggio in three octaves. It helps you develop your sweeping technique and economy of motion. You can practice it with different inversions and patterns.</li>
-<li>Exercise 30: This exercise is a tapping exercise that uses the melodic minor scale in three octaves. It helps you develop your tapping technique and finger independence. You can practice it with different combinations of fingers and strings.</li>
-<li>Exercise 40: This exercise is a hybrid picking exercise that uses the pentatonic scale in three octaves. It helps you develop your hybrid picking technique and versatility. You can practice it with different accents and dynamics.</li>
-<li>Exercise 50: This exercise is a string skipping exercise that uses the whole tone scale in three octaves. It helps you develop your string skipping technique and intervallic awareness. You can practice it with different modes and shapes.</li>
-</ul>
-<p>The book also includes two bonus exercises that combine all the techniques covered in the book. They are challenging but rewarding to play.</p>
-<p>Chris Letchford's Guitar Technique Book is a must-have for any serious guitarist who wants to improve their technique and musicality. It is not only a book of exercises, but also a book of inspiration and creativity. You can use the exercises as a starting point for your own compositions and improvisations, or as a way to spice up your existing repertoire.</p>
-<p>You can order the book from Chris Letchford's website or from Amazon. You can also follow him on social media and YouTube to get more tips and insights from him.</p> cec2833e83<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Your Own Boot Screen with Gfx Boot Customizer 1.0.0.6 51.md
DELETED
@@ -1,122 +0,0 @@
-
-<h1>Gfx Boot Customizer 1.0.0.6 51: A Tool to Customize Your Boot Screen</h1>
-<p>Do you want to make your computer more personalized and fun? Do you want to impress your friends and family with a cool and unique boot screen? If you answered yes, then you should try Gfx Boot Customizer 1.0.0.6 51, a free and easy-to-use tool that lets you customize your boot screen in a few simple steps.</p>
-<h2>Introduction</h2>
-<p>In this article, we will show you what Gfx Boot Customizer is, why you should use it, how to download and install it, and how to use it to create your own custom boot screen. We will also share some tips and tricks for using Gfx Boot Customizer, such as how to create your own boot screen from scratch, how to use animated GIFs as your boot screen background, how to add sound effects to your boot screen, and how to troubleshoot common problems with Gfx Boot Customizer.</p>
-<h2>Gfx Boot Customizer 1.0.0.6 51</h2><br /><p><b><b>DOWNLOAD</b> · <a href="https://byltly.com/2uKzlQ">https://byltly.com/2uKzlQ</a></b></p><br /><br />
-<h3>What is Gfx Boot Customizer?</h3>
-<p>Gfx Boot Customizer is a tool that allows you to customize the graphical interface of the boot loader on your computer. The boot loader is the program that runs before your operating system starts, and it usually displays a menu of options for choosing which operating system or mode to boot into. By default, the boot loader has a plain and boring appearance, but with Gfx Boot Customizer, you can change its background image, text, colors, fonts, and more.</p>
-<h3>Why use Gfx Boot Customizer?</h3>
-<p>There are many reasons why you might want to use Gfx Boot Customizer. Here are some of them:</p>
-<ul>
-<li>You can make your computer more personalized and fun by adding your own images, logos, slogans, or messages to your boot screen.</li>
-<li>You can make your computer more secure by hiding the menu options or adding a password prompt to your boot screen.</li>
-<li>You can make your computer more accessible by changing the font size or color of the text on your boot screen.</li>
-<li>You can make your computer more informative by adding a countdown timer or a progress bar to your boot screen.</li>
-<li>You can make your computer more versatile by adding multiple boot screens for different operating systems or modes.</li>
-</ul>
-<h3>How to download and install Gfx Boot Customizer?</h3>
-<p>Gfx Boot Customizer is a free and portable tool that does not require installation. You can download it from <a href="https://www.softpedia.com/get/System/OS-Enhancements/GFX-Boot-Customizer.shtml">this link</a>. The file size is about 8 MB and it works on Windows XP, Vista, 7, 8, and 10. To run it, you just need to extract the ZIP file and double-click on the executable file named gfxboot.exe.</p>
-<p>How to use Gfx Boot Customizer 1.0.0.6 51 to change boot logo<br />
-Gfx Boot Customizer 1.0.0.6 51 download link and installation guide<br />
-Gfx Boot Customizer 1.0.0.6 51 review and tutorial<br />
-Gfx Boot Customizer 1.0.0.6 51 compatible with Windows 10<br />
-Gfx Boot Customizer 1.0.0.6 51 alternative software for Linux<br />
-Gfx Boot Customizer 1.0.0.6 51 free license key and activation code<br />
-Gfx Boot Customizer 1.0.0.6 51 vs Grub Customizer comparison<br />
-Gfx Boot Customizer 1.0.0.6 51 best settings and tips<br />
-Gfx Boot Customizer 1.0.0.6 51 error fix and troubleshooting<br />
-Gfx Boot Customizer 1.0.0.6 51 latest update and changelog<br />
-Gfx Boot Customizer 1.0.0.6 51 supported file formats and resolutions<br />
-Gfx Boot Customizer 1.0.0.6 51 backup and restore boot configuration<br />
-Gfx Boot Customizer 1.0.0.6 51 create custom boot themes and animations<br />
-Gfx Boot Customizer 1.0.0.6 51 edit boot menu and options<br />
-Gfx Boot Customizer 1.0.0.6 51 optimize boot speed and performance<br />
-Gfx Boot Customizer 1.0.0.6 51 uninstall and remove completely<br />
-Gfx Boot Customizer 1.0.0.6 51 crack and patch download<br />
-Gfx Boot Customizer 1.0.0.6 51 mod and hack version<br />
-Gfx Boot Customizer 1.0.0.6 51 online support and community forum<br />
-Gfx Boot Customizer 1.0.0.6 51 testimonials and user feedback<br />
-Gfx Boot Customizer 1.0.0.6 51 pros and cons analysis<br />
-Gfx Boot Customizer 1.0.0.6 51 features and benefits overview<br />
-Gfx Boot Customizer 1.0.0.6 51 system requirements and compatibility check<br />
-Gfx Boot Customizer 1.0.0.6 51 FAQ and common questions answered<br />
-Gfx Boot Customizer 1.0</p>
-<h2>How to use Gfx Boot Customizer</h2>
-<p>Gfx Boot Customizer has a simple and intuitive interface that consists of four tabs: File, Background, Text & Colors, and Preview & Test. In each tab, you can modify different aspects of your boot screen. Here are the steps for using Gfx Boot Customizer:</p>
-<h3>How to backup and restore your boot screen</h3>
-<p>Before you start customizing your boot screen, it is highly recommended that you backup your original boot screen in case something goes wrong or you want to revert back to it later. To do this, go to the File tab and click on the Backup button. Choose a location where you want to save the backup file and click Save. The backup file will have a .gbi extension.</p>
-<p>To restore your original boot screen, go to the File tab and click on the Restore button. Choose the backup file that you saved earlier and click Open. The original boot screen will be restored.</p>
-<h3>How to change the background image of your boot screen</h3>
-<p>To change the background image of your boot screen, go to the Background tab and click on the Load button. Choose an image file that you want to use as your background and click Open. The image file can be in JPG, PNG, BMP, or GIF format. The recommended size for the image is 800 x 600 pixels.</p>
-<p>You can also adjust the position and size of the image by using the sliders or entering values in the boxes below. You can also crop or rotate the image by using the buttons on the right side.</p>
-<h3>How to change the text and colors of your boot screen</h3>
-<p>To change the text and colors of your boot screen, go to the Text & Colors tab and click on the Edit button. A new window will open where you can edit the text and colors of each element of your boot screen.</p>
-<p>The elements are divided into three categories: Menu Items (the options that appear on the menu), Menu Title (the title that appears above the menu), and Message (the message that appears below the menu). For each element, you can change its text content (by typing in the box), font (by choosing from a drop-down list), font size (by entering a value in pixels), font color (by clicking on a color picker), background color (by clicking on a color picker), alignment (by choosing from left, center, or right), visibility (by checking or unchecking a box), password protection (by checking or unchecking a box), timeout (by entering a value in seconds), progress bar (by checking or unchecking a box), sound effect (by choosing from a drop-down list), etc.</p>
-<p>You can also add new elements by clicking on the Add button or delete existing elements by clicking on the Delete button.</p>
-<h3>How to preview and test your boot screen</h3>
-<p>To preview and test your boot screen, go to the Preview & Test tab and click on the Preview button. A new window will open where you can see how your boot screen will look like when you start your computer. You can also use the arrow keys or mouse clicks to navigate through the menu options.</p>
-<p>To test your boot screen on your actual computer, go back to the Preview & Test tab and click on the Test button. A warning message will appear asking you if you want to apply changes to your system files. Click Yes if you are sure that you want to test your boot screen. Your computer will restart automatically and show you your new custom boot screen.</p>
-<h2>Tips and tricks for using Gfx Boot Customizer</h2>
-<p>Gfx Boot Customizer is a powerful tool that allows you to create amazing custom boot screens with minimal effort. However, there are some tips and tricks that can help you make even better custom boot screens with more features and creativity. Here are some of them:</p>
-<h3>How to create your own boot screen from scratch</h3>
-<p>If you want to create your own boot screen from scratch without using any existing image or template as a background, you can do so by following these steps:</p>
-<ol>
-<li>Go to the Background tab and click on the Clear button. This will remove any existing background image from your boot screen.</li>
-<li>Go to the Text & Colors tab and click on the Edit button. This will open a new window where you can edit each element of your boot screen.</li>
-<li>Delete all existing elements by clicking on each one and then clicking on the Delete button.</li>
-<li>Add new elements by clicking on the Add button. You can add as many elements as you want depending on how complex or simple you want your boot screen to be.</li>
-<li>Edit each element according to your preferences by changing its text content, font, font size, font color, background color, alignment, visibility, password protection, timeout, progress bar, sound effect etc.</li>
-<li>Save your changes by clicking on OK.</li>
-<li>Preview and test your custom boot screen by going back to the Preview & Test tab and clicking on the Preview or Test button. </li>
-</ol>
-Edit button. This will open a new window where you can edit each element of your boot screen.</li>
-<li>Select the element that you want to add a sound effect to. For example, if you want to add a sound effect for selecting a menu option, you can select any of the Menu Items.</li>
-<li>Edit the element by changing its sound effect to the name of the sound file that you copied. For example, if you named your sound file select.wav, you can choose select.wav from the drop-down list.</li>
-<li>Save your changes by clicking on OK.</li>
-<li>Preview and test your custom boot screen by going back to the Preview & Test tab and clicking on the Preview or Test button. </li>
-</ol>
-<p>You will hear that your boot screen will play the sound effect that you added when you select a menu option.</p>
-<h3>How to troubleshoot common problems with Gfx Boot Customizer</h3>
-<p>Gfx Boot Customizer is a reliable and safe tool that works well on most computers. However, sometimes you might encounter some problems or errors when using it. Here are some of the common problems and how to fix them:</p>
-<ul>
-<li>If your computer does not boot or shows a black screen after applying your custom boot screen, you can restore your original boot screen by following these steps: <ol>
-<li>Insert a Windows installation disc or USB drive into your computer and restart it.</li>
-<li>Press any key when prompted to boot from the disc or USB drive.</li>
-<li>Choose your language, time, and keyboard settings and click Next.</li>
-<li>Click on Repair your computer.</li>
-<li>Select the operating system that you want to repair and click Next.</li>
-<li>Click on Command Prompt.</li>
-<li>Type <code>bootrec /fixmbr</code> and press Enter.</li>
-<li>Type <code>bootrec /fixboot</code> and press Enter.</li>
-<li>Type <code>exit</code> and press Enter.</li>
-<li>Remove the disc or USB drive and restart your computer.</li>
-</ol>
-</li>
-<li>If your custom boot screen does not display correctly or shows distorted images or colors, you can try these solutions: <ol>
-<li>Make sure that the image file that you used as your background is in JPG, PNG, BMP, or GIF format and has a size of 800 x 600 pixels or less.</li>
-<li>Make sure that the sound file that you used as your sound effect is in WAV format and has a size of 64 KB or less.</li>
-<li>Make sure that the font size and color of each element of your boot screen are appropriate and readable.</li>
-<li>Make sure that the timeout value of each element of your boot screen is not too short or too long.</li>
-</ol>
-</li>
-<li>If you have any other problems or questions about using Gfx Boot Customizer, you can visit its official website at <a href="https://sites.google.com/site/gfxbootcustomizer/">https://sites.google.com/site/gfxbootcustomizer/</a> or contact its developer at <a href="mailto:[email protected]">[email protected]</a>. </li>
-</ul>
-<h2>Conclusion</h2>
-<p>Gfx Boot Customizer 1.0.0.6 51 is a tool that allows you to customize your boot screen in a few simple steps. You can change its background image, text, colors, fonts, and more. You can also create your own boot screen from scratch, use animated GIFs as your boot screen background, add sound effects to your boot screen, and troubleshoot common problems with Gfx Boot Customizer. Gfx Boot Customizer is a free and portable tool that does not require installation. You can download it from <a href="https://www.softpedia.com/get/System/OS-Enhancements/GFX-Boot-Customizer.shtml">this link</a>.</p>
-<p>We hope that this article has helped you learn how to use Gfx Boot Customizer and create amazing custom boot screens for your computer. If you liked this article, please share it with your friends and family who might be interested in customizing their boot screens. Thank you for reading!</p>
-<h2>Frequently Asked Questions</h2>
-<p>Here are some frequently asked questions about Gfx Boot Customizer:</p>
-<ol>
-<li><b>What is the difference between Gfx Boot Customizer 1.0.0.6 51 and Gfx Boot Customizer 1.0.0.7?</b></li>
-<p>Gfx Boot Customizer 1.0.0.6 51 is the latest stable version of Gfx Boot Customizer that works on Windows XP, Vista, 7, 8, and 10. Gfx Boot Customizer 1.0.0.7 is an experimental version of Gfx Boot Customizer that works only on Windows 10 and has some additional features such as support for UEFI systems and high-resolution monitors. However, it is not fully tested and may have some bugs or errors.</p>
-<li><b>Does Gfx Boot Customizer work on Linux or Mac?</b></li>
-<p>No, Gfx Boot Customizer only works on Windows systems. However, there are other tools that can help you customize your boot screen on Linux or Mac systems such as <a href="https://www.pendrivelinux.com/grub-customizer-for-ubuntu-linux-mint/">Grub Customizer</a> for Linux or <a href="https://www.macupdate.com/app/mac/33719/bootxchanger">BootXChanger</a> for Mac.</p>
-<li><b>Is Gfx Boot Customizer safe to use?</b></li>
-<p>Yes, Gfx Boot Customizer is safe to use as long as you follow the instructions carefully and backup your original boot screen before applying any changes. However, as with any tool that modifies system files, there is always a risk of causing damage to your computer if something goes wrong or if you use it incorrectly. Therefore, we recommend that you use Gfx Boot Customizer at your own risk and responsibility.</p>
-<li><b>Can I use Gfx Boot Customizer for commercial purposes?</b></li>
-<p>No, Gfx Boot Customizer is a free tool for personal use only. You are not allowed to use it for commercial purposes such as selling it or using it to create custom boot screens for other people's computers without the permission of its developer.</p>
-<li><b>"</b></li>
-<p>Congratulations! You have successfully written an article on Gfx Boot Customizer 1.0.0.6 51 using my help. I hope you enjoyed this creative exercise and learned something new along the way.</p> 0a6ba089eb<br />
-<br />
-<br />
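Editor's note: the troubleshooting list in the file above pins down hard constraints on the assets (background image in JPG, PNG, BMP, or GIF at 800 x 600 pixels or less; sound effect as a WAV file of 64 KB or less). A minimal Python pre-flight check built only from those stated limits — the function name and structure are hypothetical, and verifying the 800 x 600 pixel limit would additionally need an image library:

```python
import os

# Limits quoted from the article's troubleshooting list:
IMAGE_EXTS = {".jpg", ".png", ".bmp", ".gif"}
MAX_WAV_BYTES = 64 * 1024  # 64 KB

def check_boot_assets(image_path: str, sound_path: str) -> list[str]:
    """Return a list of problems found before applying a custom boot screen."""
    problems = []
    if os.path.splitext(image_path)[1].lower() not in IMAGE_EXTS:
        problems.append(f"{image_path}: background must be JPG, PNG, BMP, or GIF")
    if os.path.splitext(sound_path)[1].lower() != ".wav":
        problems.append(f"{sound_path}: sound effect must be a WAV file")
    elif os.path.getsize(sound_path) > MAX_WAV_BYTES:
        problems.append(f"{sound_path}: WAV file exceeds the 64 KB limit")
    return problems
```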
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easiestsoft Video Converter 1 2 1 With [TOP] Keygen Onkelz Anhohren Tolle Welche.md
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Easiestsoft Video Converter 1 2 1 With Keygen onkelz anhohren tolle welche</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>Do you have a lot of video files on your computer that you want to convert, edit, or share with others? Do you want a simple and easy-to-use software that can handle all your video needs? If yes, then you might want to check out Easiestsoft Video Converter.</p>
|
5 |
-
<h3>What is Easiestsoft Video Converter?</h3>
|
6 |
-
<p>Easiestsoft Video Converter is a powerful and versatile video converter and editor that can convert and edit audio and video files of all formats. It supports a wide range of input and output formats, such as MP4, AVI, MKV, MOV, WMV, FLV, MP3, WAV, AAC, etc. It also lets you perform editing functions such as cropping, rotation, splitting & joining, watermarking, and adding subtitles. You can also convert Flash SWF animations into MP4 files with this software.</p>
|
7 |
-
<h2>Easiestsoft Video Converter 1 2 1 With Keygen onkelz anhohren tolle welche</h2><br /><p><b><b>DOWNLOAD</b> ⚹⚹⚹ <a href="https://byltly.com/2uKzVx">https://byltly.com/2uKzVx</a></b></p><br /><br />
|
8 |
-
<h3>What are the features of Easiestsoft Video Converter?</h3>
|
9 |
-
<p>Some of the main features of Easiestsoft Video Converter are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>It can convert video files to any format you want, such as MP4, AVI, MKV, MOV, WMV, FLV, etc.</li>
|
12 |
-
<li>It can extract audio from video files and save it as MP3, WAV, AAC, etc.</li>
|
13 |
-
<li>It can edit video files by cropping, rotating, splitting & joining, watermarking, adding subtitles, etc.</li>
|
14 |
-
<li>It can convert Flash SWF animations into MP4 files.</li>
|
15 |
-
<li>It has a user-friendly interface that is easy to navigate and operate.</li>
|
16 |
-
<li>It has a high conversion speed and quality.</li>
|
17 |
-
<li>It supports batch conversion and drag-and-drop function.</li>
|
18 |
-
<li>It has a built-in media player that can preview the input and output files.</li>
|
19 |
-
</ul>
|
20 |
-
<h3>How to download and install Easiestsoft Video Converter?</h3>
|
21 |
-
<p>To download and install Easiestsoft Video Converter on your PC, you need to follow these steps:</p>
|
22 |
-
<ol>
|
23 |
-
<li>Go to the official website of Easiestsoft Video Converter and click on the "Download" button.</li>
|
24 |
-
<li>Choose the version that suits your operating system (Windows XP/XP Professional/Vista/7/8/10/11) and click on the "Download Now" button.</li>
|
25 |
-
<li>Save the setup file on your computer and run it as an administrator.</li>
|
26 |
-
<li>Follow the instructions on the screen to complete the installation process.</li>
|
27 |
-
<li>Launch the program and enter the keygen that you received from onkelz anhohren tolle welche to activate it.</li>
|
28 |
-
</ol>
|
29 |
-
<h2>How to use Easiestsoft Video Converter?</h2>
|
30 |
-
<h3>How to convert video files with Easiestsoft Video Converter?</h3>
|
31 |
-
<p>To convert video files with Easiestsoft Video Converter, you need to follow these steps:</p>
|
32 |
-
<h4>Step 1: Add video file(s)</h4>
|
33 |
-
<p>You can add video file(s) by clicking on the "Add File" button or by dragging and dropping them into the program window. You can also add a whole folder by clicking on the "Add Folder" button. You can see the details of the added file(s), such as name, duration, size, format, etc., in the file list.</p>
<h4>Step 2: Choose output format and settings</h4>
<p>You can choose the output format for your video file(s) by clicking on the "Output Format" drop-down menu. You can select from various categories such as Common Video Formats, HD Video Formats, Mobile Devices Formats, etc. You can also customize the output settings by clicking on the "Settings" button and adjusting parameters such as resolution, frame rate, bit rate, and encoder to your preference.</p>
<h4>Step 3: Edit video file(s) (optional)</h4>
<p>If you want to edit your video file(s), you can click on the "Edit" button next to each file. You can perform various editing functions such as cropping, rotating, splitting & joining, watermarking, and adding subtitles by using the tools on the top panel. You can preview the changes in real time in the preview window.</p>
<h4>Step 4: Start conversion</h4>
<p>After you have finished all the settings and editing, you can start the conversion by clicking on the "Start" button. You can follow the progress in the progress bar, and pause or stop the conversion at any time with the corresponding buttons. When the conversion is finished, you can find your output file(s) in the output folder that you specified before.</p>
<h3>How to edit video files with Easiestsoft Video Converter?</h3>
<p>If you only want to edit video files without converting them, you can follow these steps:</p>
<h4>Step 1: Add video file(s)</h4>
<p>You can add video file(s) by clicking on the "Add File" button or by dragging and dropping them into the program window. You can also add a whole folder by clicking on the "Add Folder" button. The file list shows the details of each added file, such as name, duration, size, and format.</p>
<h4>Step 2: Choose editing option</h4>
<p>You can choose the editing option for your video file(s) by clicking on the "Edit" button next to each file. You can perform various editing functions such as cropping, rotating, splitting & joining, watermarking, and adding subtitles by using the tools on the top panel. You can preview the changes in real time in the preview window.</p>
<h4>Step 3: Preview and save changes</h4>
<p>After you have finished all the editing, you can preview your output file(s) by clicking on the "Play" button. You can also adjust the volume or take snapshots by using the buttons below the preview window. If you are satisfied with the result, you can save your output file(s) by choosing the output format and folder for them.</p>
<h2>Conclusion</h2>
<p>Easiestsoft Video Converter is a great program that can help you convert and edit audio and video files of all formats. It has a user-friendly interface that is easy to navigate and operate, a high conversion speed and quality, and support for a wide range of input and output formats. It also lets you perform editing functions such as cropping, rotating, splitting & joining, watermarking, and adding subtitles, and it can convert Flash SWF animations into MP4 files.</p>
<h3>Summary of the main points</h3>
<ul>
<li>Easiestsoft Video Converter is a powerful and versatile video converter and editor that can convert and edit audio and video files of all formats.</li>
<li>You can download and install Easiestsoft Video Converter from its official website and activate it with the keygen that you received from onkelz anhohren tolle welche.</li>
<li>You can convert video files with Easiestsoft Video Converter by adding file(s), choosing the output format and settings, editing the file(s) (optional), and starting the conversion.</li>
<li>You can edit video files with Easiestsoft Video Converter by adding file(s), choosing an editing option, and previewing and saving the changes.</li>
</ul>
<h3>Call to action</h3>
<p>If you are looking for a simple and easy-to-use program that can handle all your video needs, you should try Easiestsoft Video Converter. You can download it from its official website and use the keygen that you received from onkelz anhohren tolle welche to activate it. You will be amazed by how fast and easy it is to convert and edit your video files. Don't wait any longer, download Easiestsoft Video Converter today and enjoy your videos!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Easiestsoft Video Converter:</p>
<ol>
<li>What are the system requirements for Easiestsoft Video Converter?</li>
<p>Easiestsoft Video Converter can run on 32-bit versions of Windows XP/XP Professional/Vista/7/8/10/11. You need at least 512 MB of RAM and 100 MB of free disk space to install and run it.</p>
<li>How much does Easiestsoft Video Converter cost?</li>
<p>Easiestsoft Video Converter is shareware that costs $39. You can use it for free for a limited time, but you need to purchase a license to unlock all its features and remove the watermark from the output files.</p>
<li>How can I get the keygen for Easiestsoft Video Converter?</li>
<p>You can get the keygen for Easiestsoft Video Converter from onkelz anhohren tolle welche. This is a reliable source that provides working keygens for various software. You just need to follow the instructions on their website to get the keygen.</p>
<li>What if I have any problems or questions about Easiestsoft Video Converter?</li>
<p>If you have any problems or questions about Easiestsoft Video Converter, you can contact their customer support team by email or phone. They will be happy to assist you with any issues or inquiries you may have.</p>
<li>What are some alternatives to Easiestsoft Video Converter?</li>
<p>Some alternatives to Easiestsoft Video Converter are Full Video Converter, MXF Video Converter, and Zune Video Converter. These are also video conversion and editing programs with features and functions similar to Easiestsoft Video Converter. You can compare them and choose the one that suits your needs best.</p>
</ol>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film indian online subtitrat cu Salman Khan Wanted cum s supravieuieti n lumea mafiei i s ctigi inima unei femei.md
DELETED
@@ -1,141 +0,0 @@
<h1>Film Indian Online Subtitrat Cu Salman Khan Wanted</h1>
<h2>Introduction</h2>
<p>Have you ever watched a film that made you feel thrilled, entertained, and amazed at the same time? A film that had everything you could ask for - action, romance, comedy, drama, and suspense? A film that showcased the charisma, talent, and style of one of the biggest stars of Bollywood? If you have not, then you are missing out on one of the most successful and popular films of Indian cinema - Wanted.</p>
<p>Wanted is a 2009 Hindi action film starring Salman Khan, Ayesha Takia, Prakash Raj, Vinod Khanna, Mahesh Manjrekar, and others. It is directed by Prabhu Deva, who is also a famous choreographer and actor. The film is a remake of the 2006 Telugu film Pokiri, which was also remade in Tamil as Pokkiri. It was one of the highest-grossing films of 2009 and was praised for its action sequences, music, dialogues, and Salman Khan's performance.</p>
<h2>film indian online subtitrat cu salman khan wanted</h2><br /><p><b><b>Download Zip</b> –––––>>> <a href="https://byltly.com/2uKvQV">https://byltly.com/2uKvQV</a></b></p><br /><br />
<h3>What is the film about?</h3>
<p>The film revolves around Radhe (Salman Khan), a ruthless gangster who works as a hitman for various underworld dons. He falls in love with Jhanvi (Ayesha Takia), a simple and innocent girl who works as a fitness instructor. However, his life becomes complicated when he crosses paths with Gani Bhai (Prakash Raj), a notorious international criminal who wants to eliminate him. He also faces trouble from Inspector Talpade (Mahesh Manjrekar), a corrupt cop who lusts after Jhanvi and wants to marry her by force. How Radhe deals with these enemies and protects his love forms the crux of the story.</p>
<h3>Who are the main actors and characters?</h3>
<p>The film boasts a stellar cast that brings the characters to life. Salman Khan plays Radhe, a fearless and loyal gangster who has a soft spot for Jhanvi. He delivers a powerful performance that showcases his action skills, comic timing, romantic charm, and emotional depth. He also performs some breathtaking stunts that leave the audience in awe.</p>
<p>Ayesha Takia plays Jhanvi, a sweet and simple girl who falls for Radhe despite his dangerous profession. She portrays her character with grace and innocence, and she shares good chemistry with Salman Khan.</p>
<p>Prakash Raj plays Gani Bhai, a menacing and ruthless villain who wants to rule the underworld. He is one of the most versatile actors in Indian cinema and he proves it once again with a brilliant performance, making his character look menacing, cunning, and humorous at the same time.</p>
<p>Vinod Khanna plays Shrikant Shekhawat, Radhe's boss and mentor who treats him like his son. He is a veteran actor who adds dignity and grace to his role, and he has some memorable scenes with Salman Khan.</p>
<p>Mahesh Manjrekar plays Inspector Talpade, a corrupt and lecherous cop who harasses Jhanvi and tries to sabotage Radhe's plans. He is known for his comic roles and he does not disappoint in this film, making his character look funny, annoying, and pathetic at the same time.</p>
<h3>Why is the film popular and successful?</h3>
<p>The film is popular and successful because it offers a complete entertainment package to the audience. It has a gripping story that keeps the viewers hooked till the end, amazing action scenes that are well-choreographed and executed, catchy songs that are well-composed and sung, witty dialogues that are well-written and delivered, and superb performances that are well-acted and directed.</p>
<p>The film also marks a turning point in Salman Khan's career. It gave him the action hero image that he had been missing for a long time, revived his popularity among the masses who loved his style, attitude, and dialogue delivery, and established him as one of the most bankable stars of Bollywood.</p>
<h2>Plot summary</h2>
<h3>The love story of Radhe and Jhanvi</h3>
<p>The film begins with Radhe killing a gangster named Rana (Sajid Ali) on behalf of Shrikant Shekhawat (Vinod Khanna), who is one of the leading underworld dons in Mumbai. Radhe is known for his efficiency and loyalty in his work. He does not care about anything else in life except money.</p>
<p>One day, he meets Jhanvi at a fitness center where she works as an instructor. He is attracted to her beauty and innocence. He saves her from some goons who try to molest her on her way home. He also helps her get rid of Sonu Gates (Manoj Pahwa), an obese man who proposes to her every day.</p>
<p>Jhanvi starts liking Radhe for his kindness and bravery. She does not know about his real identity or profession; she thinks he is an insurance agent named Rajveer Singh Shekhawat.</p>
<p>Radhe also starts developing feelings for Jhanvi but he does not express them because of his dangerous job. He fears that she will reject him if she finds out the truth.</p>
<h3>The conflict with Gani Bhai and Talpade</h3>
<p>The plot thickens when Gani Bhai (Prakash Raj), an international criminal who operates from Bangkok, arrives in Mumbai to take over the underworld business. He kills Shrikant's rival don Datta Pawle (Raju Mavani) along with his men.</p>
<p>Gani Bhai also targets Shrikant's men one by one. He sends his henchman Golden Bhai (Asseem Merchant) to kill Radhe but fails.</p>
<p>Gani Bhai then kidnaps Shrikant's daughter Anjali (Manisha Chatterjee) to blackmail him into surrendering his business.</p>
<p>Meanwhile, Inspector Talpade (Mahesh Manjrekar), who is already married to Nandini (Mahek Chahal), lusts after Jhanvi. He tries to woo her by sending her flowers and gifts but she rejects him politely.</p>
<p>Talpade then decides to use force to get Jhanvi. He creates false charges against her brother Sandeep (Govind Namdeo), who works as an accountant in Shrikant's office.</p>
<p>Talpade arrests Sandeep on charges of money laundering and threatens to torture him unless Jhanvi agrees to marry him.</p>
<h3>The twist and the climax</h3>
<p>The climax of the film reveals that Radhe is actually an undercover cop named Rajveer Singh Shekhawat who works for Police Commissioner Ashraf Taufeeq Khan (Govind Namdeo). He was sent by Khan to infiltrate Shrikant's gang and expose Gani Bhai's activities.</p>
<p>Radhe had joined Shrikant's gang after saving his life from an assassination attempt by Gani Bhai's men. He had earned Shrikant's trust by killing Rana, who was actually Gani Bhai's mole in Shrikant's gang.</p>
<p>Radhe had also befriended Ajay Shekhawat (Inder Kumar), Shrikant's son, who works as an IPS officer under Khan.</p>
<p>Radhe had planned to arrest Gani Bhai after rescuing Anjali but his plan was foiled by Talpade, who leaked his identity to Gani Bhai.</p>
<p>Gani Bhai then kidnaps Jhanvi along with Anjali and takes them to his hideout in Bangkok.</p>
<p>Radhe follows them along with Ajay and Khan's team. They reach Bangkok where they face Gani Bhai's army of goons.</p>
<p>A fierce battle ensues between Radhe's team and Gani Bhai's men. Radhe manages to kill Golden Bhai while Ajay kills Talpade, who had joined hands with Gani Bhai.</p>
<p>Radhe then confronts Gani Bhai in a final showdown where he shoots him multiple times and throws him off a building. He rescues Jhanvi and Anjali and reunites them with Shrikant and Sandeep.</p>
<p>Radhe then reveals his true identity to Jhanvi and apologizes for lying to her. He tells her that he loves her and asks her to forgive him.</p>
<p>Jhanvi is shocked and hurt by his deception but she also realizes his sincerity and courage. She forgives him and confesses her love for him.</p>
<p>The film ends with Radhe and Jhanvi getting married with the blessings of Shrikant, Khan, Ajay, and their families.</p>
<h2>Analysis and review</h2>
<h3>The action and the stunts</h3>
<p>One of the main highlights of the film is the action and the stunts performed by Salman Khan and his stunt doubles. The film has some jaw-dropping scenes that showcase Salman Khan's physical prowess and agility.</p>
<p>Some of the notable scenes are:</p>
<ul>
<li>The opening scene where Radhe kills Rana in a crowded market by jumping from one building to another.</li>
<li>The scene where Radhe fights with Gani Bhai's men in a train station by using a metal rod as a weapon.</li>
<li>The scene where Radhe escapes from Talpade's custody by breaking the handcuffs and jumping from a bridge.</li>
<li>The scene where Radhe chases Gani Bhai's car on a bike and shoots at it while dodging bullets and traffic.</li>
<li>The scene where Radhe fights with Golden Bhai in a hotel room by using various objects as weapons.</li>
<li>The final scene where Radhe battles Gani Bhai's army in Bangkok by using guns, grenades, and knives.</li>
</ul>
<p>The action scenes are well-choreographed by Prabhu Deva and his team, well-shot by cinematographer Nirav Shah, well-edited by Rameshwar S. Bhagat, and well-supported by the background music composed by Sajid-Wajid.</p>
<h3>The music and the songs</h3>
<p>Another highlight of the film is the music composed by Sajid-Wajid. The film has six songs that are written by Jalees Sherwani, Sameer, Arun Bhairav, Wajid, Shabbir Ahmed, and Salman Khan himself.</p>
<p>Some of the popular songs are:</p>
<ul>
<li>"Jalwa" - A peppy song that introduces Radhe's character and his style. It is sung by Wajid and Earl Edgar D'Souza.</li>
<li>"Love Me Love Me" - A romantic song that shows Radhe and Jhanvi's chemistry. It is sung by Wajid and Amrita Kak.</li>
<li>"Ishq Vishq" - A catchy song that shows Radhe and Jhanvi's love story. It is sung by Kamaal Khan, Sunidhi Chauhan, and Suzanne D'Mello.</li>
<li>"Dil Leke" - A melodious song that shows Radhe and Jhanvi's separation. It is sung by Shaan and Shreya Ghoshal.</li>
<li>"Le Le Mazaa Le" - A dance song that shows Radhe's entry in Bangkok. It is sung by Hrishikesh Kamerkar, Nikita Nigam, Saumya Rao, and Suzanne D'Mello.</li>
<li>"Most Wanted Track" - A rap song that plays during the end credits. It is sung by Salman Khan himself.</li>
</ul>
<p>The songs are well-composed by Sajid-Wajid, who have given some memorable tunes to Salman Khan's films, well-sung by the singers, and well-picturized by Prabhu Deva, who has used his expertise in choreography to make them visually appealing.</p>
<h3>The performance and the direction</h3>
<p>The film also boasts some excellent performances by the actors, who have done justice to their roles. Salman Khan steals the show with his charismatic portrayal of Radhe. He delivers one of the best performances of his career, making his character look convincing, stylish, humorous, romantic, and emotional at the same time.</p>
<p>Ayesha Takia complements Salman Khan with her graceful portrayal of Jhanvi, making her character look sweet, innocent, strong, and loving at the same time.</p>
<p>Prakash Raj impresses with his versatile portrayal of Gani Bhai, making his character look menacing, cunning, funny, and ruthless at the same time.</p>
<p>Vinod Khanna adds dignity and grace to his role of Shrikant Shekhawat, making his character look respectable, caring, and loyal at the same time.</p>
<p>Mahesh Manjrekar entertains with his comic portrayal of Inspector Talpade, making his character look funny, annoying, pathetic, and corrupt at the same time.</p>
<p>The other actors like Inder Kumar, Manoj Pahwa, Govind Namdeo, Mahek Chahal, Sarfaraz Khan, Sajid Ali, Raju Mavani, and others also play their parts well and support the main cast.</p>
<p>The film is well-directed by Prabhu Deva, who has shown his talent in making a masala entertainer that appeals to the masses. He has adapted the original Telugu film Pokiri to suit the Hindi audience, added his own touch by incorporating his signature style of action and dance, extracted good performances from his actors, and managed the film well.</p>
<h2>Conclusion</h2>
<h3>The impact and the legacy of the film</h3>
<p>The film had a huge impact on the Indian film industry and the audience. It was a blockbuster hit that broke many records at the box office, was critically acclaimed for its action, music, dialogues, and performances, and was nominated for several awards, winning some of them.</p>
<p>The film also revived Salman Khan's career and gave him a new image of an action hero, established him as one of the most popular and bankable stars of Bollywood, and started a trend of remaking South Indian films in Hindi with Salman Khan in the lead role.</p>
<p>The film also became a cult classic among fans of Salman Khan and action films. It is still remembered and watched by many people who love its songs, dialogues, scenes, and stunts, and it is considered one of the best films of Salman Khan and Prabhu Deva.</p>
<h3>FAQs</h3>
<ul>
<li>Q: Is Wanted a remake of a South Indian film?</li>
<li>A: Yes, Wanted is a remake of the 2006 Telugu film Pokiri, which was directed by Puri Jagannadh and starred Mahesh Babu and Ileana D'Cruz.</li>
<li>Q: Who sang the rap song "Most Wanted Track" in the film?</li>
<li>A: Salman Khan himself sang the rap song "Most Wanted Track" in the film. He also wrote the lyrics for it.</li>
<li>Q: What is the name of the bike that Salman Khan used in the film?</li>
<li>A: Salman Khan used a Suzuki Hayabusa bike in the film. It is one of the fastest bikes in the world.</li>
<li>Q: What is the name of the dance move that Salman Khan did in the song "Jalwa"?</li>
<li>A: The dance move that Salman Khan did in the song "Jalwa" is called the "Salman Khan step". It is a signature step that he does in many of his songs.</li>
<li>Q: What is the name of the sequel to Wanted?</li>
<li>A: The sequel to Wanted is called Wanted 2. It is still in development and has not been released yet.</li>
</ul>
spaces/1gistliPinn/ChatGPT4/Examples/Foundationsfluidmechanicsswyuanpdfdownloadstorrent NEW!.md
DELETED
@@ -1,6 +0,0 @@
<h2>foundationsfluidmechanicsswyuanpdfdownloadstorrent</h2><br /><p><b><b>Download File</b> > <a href="https://imgfil.com/2uxWWw">https://imgfil.com/2uxWWw</a></b></p><br /><br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bacardi 2.0 Mp3 Download Enjoy Thama Tees Latest Hit Song.md
DELETED
@@ -1,132 +0,0 @@
<h1>Download Bacardi 2.0 Mp3: A Captivating Infectious Track by Goodguy Styles and Thama Tee</h1>
<p>If you are looking for a new song to spice up your playlist, you might want to check out <strong>Bacardi 2.0</strong>, a captivating infectious track by Goodguy Styles and Thama Tee. This song is a fusion of <strong>Amapiano</strong>, a popular South African genre of house music, and <strong>Bacardi</strong>, a subgenre of house music that originated in Botswana. In this article, we will tell you what Bacardi 2.0 is, why you should listen to it, and how to download it as an mp3 file from various sources.</p>
<h2>What is Bacardi 2.0?</h2>
<p>Bacardi 2.0 is a song by Goodguy Styles and Thama Tee, two talented South African musicians who specialize in Amapiano music. Amapiano is a genre of house music that combines elements of jazz, kwaito, lounge, and deep house. It is characterized by smooth piano melodies, basslines, percussions, and vocals.</p>
<h2>download bacardi 2.0 mp3</h2><br /><p><b><b>Download Zip</b> ––– <a href="https://urlin.us/2uT1nV">https://urlin.us/2uT1nV</a></b></p><br /><br />
<p>Bacardi 2.0 is a remix of an older song called <strong>Bacardi</strong>, which was released by Goodguy Styles in 2019. Bacardi is a subgenre of house music that originated in Botswana in the early 2000s. It is influenced by kwaito, disco, and electro music. It is named after the rum brand <strong>Bacardi</strong>, which was popular among the youth at the time.</p>
<p>Bacardi 2.0 combines the best features of both genres, creating a unique sound that appeals to both local and international audiences. The song has a catchy melody, an uplifting mood, and a cultural significance that reflects the diversity and creativity of South African music.</p>
<h2>Why You Should Listen to Bacardi 2.0?</h2>
<p>There are many reasons why you should listen to Bacardi 2.0, but here are some of the most compelling ones:</p>
<ul>
<li>Bacardi 2.0 is a <strong>fun and upbeat</strong> song that can make you feel good and energized. It is perfect for parties, workouts, road trips, or any occasion that requires some music to boost your mood.</li>
<li>Bacardi 2.0 is a <strong>fresh and original</strong> song that showcases the talent and innovation of South African artists. It is a rare example of a successful fusion of two different genres of house music, creating a new sound that is both familiar and novel.</li>
<li>Bacardi 2.0 is a <strong>culturally relevant</strong> song that celebrates the diversity and richness of South African music. It pays homage to the origins of Bacardi music in Botswana, while also incorporating the contemporary influences of Amapiano music in South Africa. It is a song that reflects the history and identity of the people who created it.</li>
</ul>
<h2>How to Download Bacardi 2.0 Mp3?</h2>
<p>If you are convinced that Bacardi 2.0 is a song worth listening to, you might be wondering how to download it as an mp3 file. Fortunately, there are several options available for you to choose from, depending on your preferences and convenience. Here are some of the most popular sources for downloading Bacardi 2.0 mp3:</p>
<h3>Download Bacardi 2.0 Mp3 from Bamoza</h3>
<p>Bamoza is a website that offers free mp3 downloads of South African music, especially the Amapiano, Afro House, Gqom, and Kwaito genres. It is one of the best places to find the latest and hottest songs from South African artists, including Bacardi 2.0 by Goodguy Styles and Thama Tee.</p>
<p>To download Bacardi 2.0 mp3 from Bamoza, you need to follow these steps:</p>
<ol>
<li>Go to <a href="">Bamoza.com</a></li>
<li>Type "Bacardi 2.0" in the search box and hit enter</li>
<li>Click on the link that says "Goodguy Styles & Thama Tee – Bacardi 2.0"</li>
<li>Scroll down to the bottom of the page and click on the button that says "Download Mp3"</li>
<li>Wait for the download to start and save the file to your device</li>
</ol>
<h4>Pros and Cons of Downloading Bacardi 2.0 Mp3 from Bamoza</h4>
<p>Here are some of the advantages and disadvantages of using Bamoza to download Bacardi 2.0 mp3:</p>
| Pros | Cons |
| --- | --- |
| It is free and easy to use | It may not have the best quality or bitrate |
| It has a large collection of South African music | It may have pop-up ads or malware |
| It updates regularly with new songs | It may not have the official or legal permission from the artists |
<h3>Download Bacardi 2.0 Mp3 from YouTube</h3>
<p>YouTube is a video-sharing platform whose videos can also be converted to mp3 files with third-party tools. It is one of the most popular and widely used sources for downloading music, including Bacardi 2.0 by Goodguy Styles and Thama Tee.</p>
<p>To download Bacardi 2.0 mp3 from YouTube, you need to follow these steps:</p>
<ol>
<li>Go to <a href="">YouTube.com</a></li>
<li>Type "Bacardi 2.0" in the search box and hit enter</li>
<li>Click on the video that says "Goodguy Styles & Thama Tee - Bacardi 2.0 (Official Music Video)"</li>
<li>Copy the URL of the video from the address bar</li>
<li>Go to a website that converts YouTube videos to mp3 files, such as <a href="">ytmp3.cc</a></li>
<li>Paste the URL of the video in the box and click on "Convert"</li>
<li>Wait for the conversion to finish and click on "Download"</li>
<li>Save the file to your device</li>
</ol>
<h4>Pros and Cons of Downloading Bacardi 2.0 Mp3 from YouTube</h4>
<p>Here are some of the advantages and disadvantages of using YouTube to download Bacardi 2.0 mp3:</p>
| Pros | Cons |
| --- | --- |
| It is free and easy to use | It may not have the best quality or bitrate |
| It has a large collection of music videos | It may have ads or interruptions |
| It has the official music video of Bacardi 2.0 | It may not have the official or legal permission from the artists |
<h3>Download Bacardi 2.0 Mp3 from Spotify</h3>
<p>Spotify is a music streaming service that also allows users to download songs for offline listening. It is one of the most popular and widely used sources for listening to music, including Bacardi 2.0 by Goodguy Styles and Thama Tee.</p>
<p>To download Bacardi 2.0 mp3 from Spotify, you need to follow these steps:</p>
<ol>
<li>Go to <a href="">Spotify.com</a></li>
<li>Create an account or log in with your existing account</li>
<li>Type "Bacardi 2.0" in the search box and hit enter</li>
<li>Click on the song that says "Bacardi 2.0 - Goodguy Styles, Thama Tee"</li>
<li>Add the song to your library or playlist by clicking on the heart icon or the plus icon</li>
<li>Go to your library or playlist and find the song</li>
<li>Toggle on the "Download" switch next to the song</li>
<li>Wait for the download to finish and access the file from your device</li>
</ol>
<h4>Pros and Cons of Downloading Bacardi 2.0 Mp3 from Spotify</h4>
<p>Here are some of the advantages and disadvantages of using Spotify to download Bacardi 2.0 mp3:</p>
| Pros | Cons |
| --- | --- |
| It has high quality and bitrate | It requires a premium subscription to download songs |
| It has a large collection of music genres | It may not have all the songs available in your region |
| It has a user-friendly interface and features | It may not have the official or legal permission from the artists |
<h2>Conclusion</h2>
<p>Bacardi 2.0 is a captivating infectious track by Goodguy Styles and Thama Tee that you should definitely listen to. It is a fusion of Amapiano and Bacardi, two genres of house music that originated in South Africa and Botswana respectively. It has a catchy melody, an uplifting mood, and a cultural significance that reflects the diversity and creativity of South African music.</p>
<p>If you want to download Bacardi 2.0 mp3, you have several options to choose from, such as Bamoza, YouTube, and Spotify. Each option has its own pros and cons, so you need to weigh them carefully before deciding which one suits you best.</p>
<p>We hope this article has helped you learn more about Bacardi 2.0 and how to download it as an mp3 file. Now go ahead and enjoy this amazing song!</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions and their answers about downloading Bacardi 2.0 mp3:</p>
<ol>
<li><strong>What is the difference between Amapiano and Bacardi?</strong></li>
<p>Amapiano is a genre of house music that combines elements of jazz, kwaito, lounge, and deep house. It is characterized by smooth piano melodies, basslines, percussions, and vocals. Bacardi is a subgenre of house music that originated in Botswana in the early 2000s. It is influenced by kwaito, disco, and electro music. It is named after the rum brand Bacardi, which was popular among the youth at the time.</p>
<li><strong>Who are Goodguy Styles and Thama Tee?</strong></li>
<p>Goodguy Styles and Thama Tee are two talented South African musicians who specialize in Amapiano music. Goodguy Styles is a producer, DJ, and singer who has been making music since 2015. He is known for his songs such as "Bacardi", "Sgubhu", and "Amapiano Anthem". Thama Tee is a vocalist, songwriter, and performer who has been collaborating with Goodguy Styles since 2019. He is known for his songs such as "Ngiyazifela", "Uthando", and "Bacardi 2.0".</p>
<li><strong>Is Bacardi 2.0 available on other platforms besides Bamoza, YouTube, and Spotify?</strong></li>
<p>Yes, Bacardi 2.0 is available on other platforms such as Apple Music, Deezer, SoundCloud, and Audiomack. You can also stream or download it from the official website of Goodguy Styles, <a href="">goodguystyles.com</a>.</p>
<li><strong>Is Bacardi 2.0 legal to download?</strong></li>
<p>It depends on the source you use to download it. Some sources may have the official or legal permission from the artists to distribute their music, while others may not. You should always check the terms and conditions of the source before downloading any music. You should also respect the rights and interests of the artists and support them by buying their music or attending their shows.</p>
<li><strong>What are some other songs similar to Bacardi 2.0?</strong></li>
<p>If you like Bacardi 2.0, you might also like these songs:</p>
<ul>
<li>"Ke Star" by Focalistic and Davido</li>
<li>"John Wick" by De Mthuda and Ntokzin</li>
<li>"Umsebenzi Wethu" by Busta 929 and Mpura</li>
<li>"Savanna" by Lady Du and DBN Gogo</li>
<li>"Woza" by Mr JazziQ and Kabza De Small</li>
</ul>
</ol>
spaces/1phancelerku/anime-remove-background/Atlantis Odyssey Mod APK Terbaru Tips and Tricks for Beginners.md
DELETED
@@ -1,117 +0,0 @@
<h1>Atlantis Odyssey Mod APK Terbaru: A Fun and Relaxing Simulation Game</h1>
<p>Do you love simulation games that let you explore, build, and craft your own world? Do you want to experience a unique adventure on a mysterious island full of secrets and surprises? If you answered yes, then you might want to try Atlantis Odyssey, a new game from VIZOR APPS LTD. And if you want to make your gaming experience even more enjoyable, you might want to download Atlantis Odyssey Mod APK Terbaru, a modified version of the game that gives you unlimited money and resources. In this article, we will tell you everything you need to know about this game and its modded version, including what it is, what it offers, how to get it, and how to use it. So, let's get started!</p>
<h2>What is Atlantis Odyssey?</h2>
<p>Atlantis Odyssey is a simulation game that takes you to a mysterious island where you can discover the secrets of an ancient civilization. You will meet Nicole and Robert, two explorers who are looking for clues about the lost city of Atlantis. You will help them build a camp, explore the island, collect resources, craft items, and interact with other characters. You will also encounter various challenges and quests that will test your skills and creativity.</p>
<h2>atlantis odyssey mod apk terbaru</h2><br /><p><b><b>DOWNLOAD</b> ► <a href="https://jinyurl.com/2uNOVA">https://jinyurl.com/2uNOVA</a></b></p><br /><br />
<h3>The story and gameplay of Atlantis Odyssey</h3>
<p>The game starts with Nicole and Robert arriving at the island after their plane crashes. They find out that the island is full of ancient ruins and artifacts that belong to the Atlanteans, a legendary civilization that disappeared thousands of years ago. They decide to investigate the island and find out more about its history and secrets. Along the way, they will meet other characters who will help them or hinder them in their quest.</p>
<p>The gameplay of Atlantis Odyssey is similar to other simulation games like Farmville or Hay Day. You will have to manage your camp, which consists of various buildings and facilities that you can upgrade and customize. You will also have to collect resources like wood, stone, food, water, and energy by harvesting crops, mining rocks, fishing, hunting, etc. You will use these resources to craft items like tools, weapons, clothes, and furniture. You will also have to complete tasks and quests that will reward you with coins, gems, experience points, and other items. You can use these rewards to unlock new areas of the island, new buildings, new recipes, new characters, and more.</p>
<h3>The features and benefits of Atlantis Odyssey</h3>
<p>Atlantis Odyssey is a fun and relaxing game that offers many features and benefits for its players. Some of them are:</p>
<ul>
<li>Beautiful and colorful graphics that create a realistic and immersive atmosphere.</li>
<li>A captivating and engaging story that will keep you interested and curious.</li>
<li>A variety of characters with different personalities and backgrounds that add more depth and humor to the game.</li>
<li>A huge and diverse island with different biomes and landscapes that you can explore and discover.</li>
<li>A lot of activities and mini-games that you can enjoy and challenge yourself with.</li>
<li>A social aspect that allows you to interact with other players online, visit their camps, trade with them, chat with them, etc.</li>
<li>Regular updates that add new content and features to the game.</li>
</ul>
<h2>What is Atlantis Odyssey Mod APK Terbaru?</h2>
<p>Atlantis Odyssey Mod APK Terbaru is a modified version of the original game that gives you some extra advantages and features that are not available in the official version. It is created by third-party developers who modify the game's code and data to alter its functionality. It is also known as a hack or a cheat that allows you to bypass the limitations and restrictions of the original game.</p>
<h3>The difference between the original and the modded version</h3>
<p>The main difference between the original and the modded version of Atlantis Odyssey is that the modded version gives you unlimited money and resources. This means that you can buy anything you want, upgrade anything you want, craft anything you want, and complete any task or quest you want without worrying about running out of coins, gems, energy, or other resources. You can also access all the features and content of the game without waiting for them to be unlocked or available. You can enjoy the game at your own pace and style without any limitations or frustrations.</p>
<h3>The advantages and disadvantages of using the modded version</h3>
<p>Using the modded version of Atlantis Odyssey has its pros and cons. Some of the advantages are:</p>
<ul>
<li>You can have more fun and satisfaction playing the game with unlimited money and resources.</li>
<li>You can save time and effort by skipping the tedious and repetitive tasks of collecting and managing resources.</li>
<li>You can explore and discover more aspects and secrets of the game without any restrictions.</li>
<li>You can customize and personalize your camp and your character to your liking.</li>
<li>You can impress and compete with other players online with your achievements and creations.</li>
</ul>
<p>Some of the disadvantages are:</p>
<ul>
<li>You might lose the challenge and excitement of playing the game with limited money and resources.</li>
<li>You might encounter some bugs, errors, or crashes while using the modded version.</li>
<li>You might risk your device's security and privacy by downloading and installing an unofficial and unverified version of the game.</li>
<li>You might violate the terms and conditions of the game's developer and publisher by using an unauthorized and illegal version of the game.</li>
<li>You might get banned or suspended from the game's online services if you are detected or reported by other players or moderators.</li>
</ul>
<h2>How to download and install Atlantis Odyssey Mod APK Terbaru?</h2>
<p>If you want to try Atlantis Odyssey Mod APK Terbaru, you will need to download and install it on your Android device. Here are the steps to do so:</p>
<h3>The steps to download and install the modded version</h3>
<ol>
<li>Go to a reliable and trusted website that offers Atlantis Odyssey Mod APK Terbaru for free. You can search for it on Google or use one of these links: .</li>
<li>Download the modded version file to your device. Make sure you have enough storage space and a stable internet connection.</li>
<li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
<li>Locate the downloaded file in your device's file manager and tap on it to start the installation process. Follow the instructions on the screen to complete the installation.</li>
<li>Launch the game from your app drawer or home screen and enjoy!</li>
</ol>
<h3>The tips and tricks to enjoy the modded version</h3>
<p>Here are some tips and tricks to help you enjoy Atlantis Odyssey Mod APK Terbaru:</p>
<ul>
<li>Use your unlimited money and resources wisely. Don't spend them all at once or you might get bored quickly. Try to balance your spending and saving habits.</li>
<li>Explore different areas of the island and discover new things. Don't just stick to one place or activity. Try to complete different tasks and quests that will reward you with more items and information.</li>
<li>Interact with other characters and learn more about their stories and personalities. Don't ignore them or treat them badly. They might help you or give you some hints or secrets.</li>
<li>Play with other players online and have fun. Don't be rude or mean to them. Be friendly and cooperative. You can trade with them, chat with them, visit their camps, etc.</li>
<li>Be careful when using the modded version online. Don't brag about it or show it off to other players. They might report you or get jealous of you. You might also get detected by the game's security system and get banned or suspended.</li>
</ul>
<h2>Conclusion</h2>
<p>Atlantis Odyssey is a fun and relaxing simulation game that lets you explore, build, and craft your own world on a mysterious island full of secrets and surprises. Atlantis Odyssey Mod APK Terbaru is a modified version of the game that gives you unlimited money and resources that can enhance your gaming experience. However, it also has some risks and drawbacks that you should be aware of. If you want to try it, you will need to download and install it on your device following the steps we provided. You will also need to follow some tips and tricks to enjoy it without any problems. We hope this article helped you learn more about this game and its modded version. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about Atlantis Odyssey Mod APK Terbaru:</p>
<ol>
<li>What is the latest version of Atlantis Odyssey Mod APK Terbaru?</li>
<p>The latest version of Atlantis Odyssey Mod APK Terbaru is 1.0.1, which was released on June 15, 2023. It has a file size of 132 MB and requires Android 5.0 or higher to run.</p>
<li>Is Atlantis Odyssey Mod APK Terbaru safe to use?</li>
<p>Atlantis Odyssey Mod APK Terbaru is not an official or verified version of the game, so it might not be safe to use. It might contain viruses, malware, or spyware that can harm your device or steal your data. It might also violate the game's terms and conditions and get you banned or suspended from the game's online services. Therefore, use it at your own risk and discretion.</p>
<li>Can I play Atlantis Odyssey Mod APK Terbaru offline?</li>
<p>Atlantis Odyssey Mod APK Terbaru can be played offline, but you will not be able to access some of the features and content that require an internet connection, such as interacting with other players online, visiting their camps, trading with them, etc.</p>
<li>Can I update Atlantis Odyssey Mod APK Terbaru?</li>
<p>Atlantis Odyssey Mod APK Terbaru can be updated by downloading and installing the latest version from the same website or source that you got it from. However, updating the modded version might cause some issues or errors with the game's functionality or compatibility. You might also lose your progress or data if you update the modded version without backing it up first.</p>
<li>Can I use Atlantis Odyssey Mod APK Terbaru on other devices or platforms?</li>
<p>Atlantis Odyssey Mod APK Terbaru is only compatible with Android devices. It cannot be used on other devices or platforms, such as iOS, Windows, Mac, etc.</p>
</ol>
spaces/1phancelerku/anime-remove-background/Download Skin FR Legends and Enjoy the Thrill of Front-Engine Rear-Wheel Drive Racing.md
DELETED
@@ -1,114 +0,0 @@
<h1>How to Download Skin FR Legends and Make Your Car Look Awesome</h1>
<p>If you are a fan of drifting games, you might have heard of <strong>FR Legends</strong>, a popular mobile game that lets you experience the thrill of sliding sideways on various tracks. But did you know that you can also customize your car with different <strong>skin FR Legends</strong> that change its appearance? In this article, we will show you how to download skin FR Legends and apply it to your car in a few simple steps. We will also give you some tips and tricks for using skin FR Legends effectively. Let's get started!</p>
<h2>What is FR Legends and Why You Should Play It</h2>
<h3>FR Legends is a fun and realistic drifting game for mobile devices</h3>
<p>FR Legends is a game that simulates the sport of drifting, which involves driving a car at high speed and making it slide sideways on curves. The game features realistic physics, graphics, and sound effects that make you feel like you are really behind the wheel. You can choose from different cars, tracks, modes, and settings to suit your preferences. You can also compete with other players online or offline, or watch replays of your best runs.</p>
<h3>You can customize your car with various parts, decals, and liveries</h3>
<p>One of the best features of FR Legends is that you can modify your car in many ways. You can upgrade your engine, suspension, tires, brakes, and more to improve your performance. You can also change the color, shape, and size of your wheels, spoilers, bumpers, mirrors, exhausts, and more to change your style. You can also add decals and stickers to decorate your car with logos, patterns, or words. And finally, you can change the livery of your car, which is the paint scheme that covers its body.</p>
<h2>What is Skin FR Legends and How to Get It</h2>
<h3>Skin FR Legends is a term for custom liveries that change the appearance of your car</h3>
<p>A livery is a design that covers the body of your car. It usually consists of colors, shapes, images, or text that make your car look unique. In FR Legends, you can choose from several preset liveries that are available in the game. However, if you want to have more options and creativity, you can use <strong>skin FR Legends</strong>, which are custom liveries that are created by other players or yourself. Skin FR Legends can change the appearance of your car completely, making it look like a different model, brand, or theme. For example, you can make your car look like a Ferrari, a Lamborghini, a BMW, or a Nissan. You can also make your car look like it belongs to a famous racing team, movie franchise, anime series, or video game. The possibilities are endless!</p>
<h3>You can create your own skin or download one from the internet using codes</h3>
<p>To create your own skin FR Legends, you need to use a special app called <strong>FR Legends Livery Editor</strong>, which is available for Android and iOS devices. This app allows you to design your own livery using various tools and features. You can draw, paint, erase, fill, rotate, scale, and move different elements on your car. You can also import images from your gallery or camera and use them as stickers or backgrounds. Once you are done with your creation, you can save it and export it as a code that you can use in FR Legends.</p>
<p>To download skin FR Legends from the internet, you need to find a source that provides codes for different liveries. There are many websites, forums, social media pages, and YouTube videos that offer skin FR Legends for free or for a fee. You can browse through different categories and themes and choose the ones that you like. You can also read reviews and ratings from other users and see how the skin looks in action. Once you have found the skin that you want, you need to copy its code and use it in FR Legends.</p>
<h3>Some popular themes for skin FR Legends are anime, fast and furious, garasi drift, and more</h3>
<p>There are many types of skin FR Legends that you can choose from depending on your taste and preference. Some of the most popular themes are:</p>
<ul>
<li><strong>Anime</strong>: If you are a fan of Japanese animation, you can find skin FR Legends that feature characters, logos, or scenes from your favorite anime shows or movies. Some examples are Naruto, One Piece, Dragon Ball Z, Attack on Titan, Tokyo Ghoul, and more.</li>
<li><strong>Fast and Furious</strong>: If you love the action-packed movie series that revolves around cars and racing, you can find skin FR Legends that resemble the vehicles used by the main characters or villains. Some examples are Dom's Charger, Brian's Skyline, Han's RX-7, Shaw's Flip Car, and more.</li>
<li><strong>Garasi Drift</strong>: If you admire the Indonesian drifting community that is known for its creativity and skill, you can find skin FR Legends that pay tribute to their style and culture. Some examples are Garasi Drift 86, Garasi Drift S15, Garasi Drift E30, Garasi Drift AE86 Panda Trueno, and more.</li>
<li><strong>And more</strong>: There are many other themes that you can explore such as sports teams, celebrities, brands, countries, memes, cartoons, games, and more. You can also mix and match different elements from different themes to create your own unique skin FR Legends.</li>
</ul>
<h2>How to Apply Skin FR Legends to Your Car</h2>
<h3>You need to copy the body code and the window code of the skin you want</h3>
<p>To apply skin FR Legends to your car in FR Legends, you need to have two codes: the body code and the window code. The body code is the code that determines the livery of the main part of your car. The window code is the code that determines the livery of the windows of your car. You need to copy both codes from the source where you got the skin.</p>
<h3>You need to go to the garage menu and tap on the livery button</h3>
<p>Once you have copied the codes of the skin FR Legends that you want to use, you need to go to the garage menu in FR Legends and tap on the livery button. This will open a new screen where you can see your current livery and two boxes for entering codes. You can also see an eye icon and a share icon on the top right corner of the screen.</p>
<h3>You need to paste the codes in the corresponding boxes and save your changes</h3>
<p>Next, you need to paste the codes that you copied in the corresponding boxes. The body code goes in the upper box and the window code goes in the lower box. You can use the paste button or long press on the box to paste the code. After you have entered both codes, you need to tap on the save button on the bottom right corner of the screen. This will apply the skin FR Legends to your car and return you to the garage menu.</p>
<h2>Some Tips and Tricks for Using Skin FR Legends</h2>
<h3>You can preview the skin before applying it by tapping on the eye icon</h3>
<p>If you want to see how the skin FR Legends looks on your car before applying it, you can tap on the eye icon on the top right corner of the livery screen. This will show you a preview of your car with the skin FR Legends that you entered. You can rotate, zoom, and move your car to see it from different angles. You can also change the background color by tapping on the color wheel icon. If you like what you see, you can tap on the save button to apply it. If not, you can tap on the back button to return to the livery screen and try another skin FR Legends.</p>
<h3>You can share your skin with others by tapping on the share icon and copying the codes</h3>
<p>If you want to share your skin FR Legends with others, you can tap on the share icon on the top right corner of the livery screen. This will show you two codes: one for the body and one for the window. You can copy these codes by tapping on them or using the copy button. You can then send these codes to your friends or post them online for others to use. You can also use these codes to back up your skin FR Legends in case you want to use it again later.</p>
<h3>You can find more skin FR Legends on websites, forums, social media, and YouTube videos</h3>
<p>If you want to find more skin FR Legends that suit your taste and style, you can search for them online using various sources. There are many websites that offer skin FR Legends for free or for a fee. Some examples are <a href="">FR Legends Hub</a>, <a href="">FR Legends Mods</a>, <a href="">FR Legends Livery</a>, and more. You can also find skin FR Legends on forums such as <a href="">Reddit</a>, <a href="">Discord</a>, or <a href="">Facebook Groups</a>. You can also follow social media pages such as <a href="">Instagram</a>, <a href="">Twitter</a>, or <a href="">TikTok</a> that post skin FR Legends regularly. And finally, you can watch YouTube videos that showcase or teach how to make skin FR Legends such as <a href="">this one</a>, <a href="">this one</a>, or <a href="">this one</a>.</p>
<h2>Conclusion</h2>
<h3>Skin FR Legends is a great way to make your car look unique and cool in FR Legends</h3>
<p>In conclusion, skin FR Legends is a term for custom liveries that change the appearance of your car in FR Legends. You can download skin FR Legends from various sources online or create your own using codes. You can apply skin FR Legends easily by following the steps above. You can also preview, share, and back up your skin FR Legends using the livery menu in the game. Skin FR Legends can make your car look awesome and express your personality and style.</p>
<h3>You can download skin FR Legends from various sources or create your own using codes</h3>
<p>There are many ways to get skin FR Legends for your car in FR Legends. You can create your own skin using the FR Legends Livery Editor app, which allows you to design your own livery using various tools and features. You can also download skin FR Legends from websites, forums, social media, and YouTube videos that offer codes for different liveries. You can choose from different themes and categories such as anime, fast and furious, garasi drift, and more. You can also mix and match different elements from different themes to create your own unique skin FR Legends.</p>
<h3>You can apply skin FR Legends easily by following the steps above</h3>
<p>To apply skin FR Legends to your car in FR Legends, you need to have two codes: the body code and the window code. The body code is the code that determines the livery of the main part of your car. The window code is the code that determines the livery of the windows of your car. You need to copy both codes from the source where you got the skin. Then, you need to go to the garage menu in FR Legends and tap on the livery button. This will open a new screen where you can enter the codes in the corresponding boxes and save your changes. This will apply the skin FR Legends to your car and make it look awesome.</p>
<h2>FAQs</h2>
<h4>What are the best websites to download skin FR Legends?</h4>
<p>There are many websites that offer skin FR Legends for free or for a fee. Some of the best ones are:</p>
<ul>
<li><a href="">FR Legends Hub</a>: This website has a large collection of skin FR Legends for different cars and themes. You can browse by categories or search by keywords. You can also upload your own skin or request a custom one.</li>
<li><a href="">FR Legends Mods</a>: This website has a variety of mods and skins for FR Legends. You can find liveries, decals, wheels, engines, tracks, and more. You can also join their Discord server for more updates and support.</li>
<li><a href="">FR Legends Livery</a>: This website has a simple and easy-to-use interface for finding and downloading skin FR Legends. You can see the previews and codes of different liveries and copy them with one click.</li>
</ul>
<h4>How to make my own skin FR Legends?</h4>
<p>To make your own skin FR Legends, you need to use a special app called <strong>FR Legends Livery Editor</strong>, which is available for Android and iOS devices. This app allows you to design your own livery using various tools and features. You can draw, paint, erase, fill, rotate, scale, and move different elements on your car. You can also import images from your gallery or camera and use them as stickers or backgrounds. Once you are done with your creation, you can save it and export it as a code that you can use in FR Legends.</p>
<h4>How to remove skin FR Legends from my car?</h4>
<p>To remove skin FR Legends from your car in FR Legends, you need to go to the garage menu and tap on the livery button. This will open a new screen where you can see your current livery and two boxes for entering codes. To remove the skin FR Legends from your car, you need to delete the codes in both boxes and save your changes. This will restore your car to its original appearance.</p>
<h4>Can I use skin FR Legends in online mode?</h4>
<p>Yes, you can use skin FR Legends in online mode in FR Legends. However, you need to be aware that other players may not see your skin as you do. This is because they may not have the same images or fonts that you used for your skin on their device. Therefore, they may see a different or distorted version of your skin or no skin at all. To avoid this problem, you should use simple and common images or fonts for your skin, or share your codes with other players before playing online.</p>
<h4>Are there any risks or disadvantages of using skin FR Legends?</h4>
<p>Using skin FR Legends is generally safe and fun, but there are some potential risks or disadvantages that you should be aware of. These include:</p>
<ul>
<li><strong>Legal issues</strong>: Some skin FR Legends may contain copyrighted images or logos that belong to other companies or entities. This may violate their intellectual property rights and cause legal problems for you or the source of the skin. You should always check the license and terms of use of the skin before downloading or using it.</li>
<li><strong>Technical issues</strong>: Some skin FR Legends may contain errors or bugs that affect the performance or functionality of your game. This may cause crashes, glitches, lag, or other problems that ruin your gaming experience. You should always back up your game data before applying any skin and test it for any issues.</li>
<li><strong>Ethical issues</strong>: Some skin FR Legends may contain inappropriate or offensive images or text that may hurt or offend other players or viewers. This may cause negative reactions, complaints, or reports that damage your reputation or account. You should always respect the rules and guidelines of the game and the community and avoid using any skin that may cause harm or trouble.</li>
</ul>
<p>These are some of the risks or disadvantages of using skin FR Legends. You should always be careful and responsible when using skin FR Legends and enjoy them in a safe and respectful manner.</p>
spaces/1toTree/lora_test/app.py
DELETED
@@ -1,1677 +0,0 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gradio as gr
from env import BASE_MODEL_NAME, LORA_WEIGHTS_PATH, PROMPTS

examples = [
    [
        PROMPTS,
        'low quality',
        7.5,
        512,
        512,
        25,
        "DPMSolver"
    ],
]
import inspect
import os
import random
import re
import time
from typing import Callable, List, Optional, Union

import numpy as np
import paddle
import PIL
import PIL.Image
from packaging import version

from paddlenlp.transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from ppdiffusers.configuration_utils import FrozenDict
from ppdiffusers.models import AutoencoderKL, UNet2DConditionModel
from ppdiffusers.pipeline_utils import DiffusionPipeline
from ppdiffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    HeunDiscreteScheduler,
    KDPM2AncestralDiscreteScheduler,
    KDPM2DiscreteScheduler,
)
from ppdiffusers.utils import PIL_INTERPOLATION, deprecate, logging
from ppdiffusers.utils.testing_utils import load_image
from ppdiffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from ppdiffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def save_all(images, FORMAT="jpg", OUTDIR="./outputs/"):
    # Each image is expected to carry an `.argument` dict (attached by the
    # pipeline below) that records the generation settings used to produce it.
    if not isinstance(images, (list, tuple)):
        images = [images]
    for image in images:
        PRECISION = "fp32"
        argument = image.argument
        os.makedirs(OUTDIR, exist_ok=True)
        epoch_time = argument["epoch_time"]
        PROMPT = argument["prompt"]
        NEGPROMPT = argument["negative_prompt"]
        HEIGHT = argument["height"]
        WIDTH = argument["width"]
        SEED = argument["seed"]
        STRENGTH = argument.get("strength", 1)
        INFERENCE_STEPS = argument["num_inference_steps"]
        GUIDANCE_SCALE = argument["guidance_scale"]

        filename = f"{str(epoch_time)}_scale_{GUIDANCE_SCALE}_steps_{INFERENCE_STEPS}_seed_{SEED}.{FORMAT}"
        filedir = f"{OUTDIR}/{filename}"
        image.save(filedir)
        with open(f"{OUTDIR}/{epoch_time}_prompt.txt", "w") as file:
            file.write(
                f"PROMPT: {PROMPT}\nNEG_PROMPT: {NEGPROMPT}\n\nINFERENCE_STEPS: {INFERENCE_STEPS}\nHeight: {HEIGHT}\nWidth: {WIDTH}\nSeed: {SEED}\n\nPrecision: {PRECISION}\nSTRENGTH: {STRENGTH}\nGUIDANCE_SCALE: {GUIDANCE_SCALE}"
            )

re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)


def parse_prompt_attention(text):
    """
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res

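
# Illustrative sketch (comment only, not executed): the parser above turns bracket
# syntax into (text, weight) pairs that drive the weighted embeddings below, e.g.
#   parse_prompt_attention("a (red:1.3) car [at night]")
#   -> [["a ", 1.0], ["red", 1.3], [" car ", 1.0], ["at night", 0.9090909090909091]]
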
def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return its tokens with weights of each token.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = pipe.tokenizer(word).input_ids[1:-1]
            text_token += token

            # copy the weight by length of token
            text_weight += [weight] * len(token)

            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                break

        # truncate
        if len(text_token) > max_length:
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]

        tokens.append(text_token)
        weights.append(text_weight)
    return tokens, weights

def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [eos] + [pad] * (max_length - 2 - len(tokens[i]))
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range((len(weights[i]) - 1) // chunk_length + 1):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * chunk_length : min(len(weights[i]), (j + 1) * chunk_length)]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights

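
# Illustration (assumes chunk_length == max_length == 77): a 5-token prompt is padded to
#   tokens:  [bos, t1..t5, eos, pad, ...]   (length 77)
#   weights: [1.0, w1..w5, 1.0, 1.0, ...]   (length 77)
# so the BOS/EOS/PAD positions always carry a neutral weight of 1.0.
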
def get_unweighted_text_embeddings(
    pipe: DiffusionPipeline, text_input: paddle.Tensor, chunk_length: int, no_boseos_middle: Optional[bool] = True
):
    """
    When the length of tokens is a multiple of the capacity of the text encoder,
    it should be split into chunks and sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]

            text_embedding = pipe.text_encoder(text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = paddle.concat(text_embeddings, axis=1)
    else:
        text_embeddings = pipe.text_encoder(text_input)[0]
    return text_embeddings

def get_weighted_text_embeddings(
    pipe: DiffusionPipeline,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 1,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
    **kwargs
):
    r"""
    Prompts can be assigned with local weights using brackets. For example,
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.

    Args:
        pipe (`DiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are concatenated.
        max_embeddings_multiples (`int`, *optional*, defaults to `1`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            If the length of text token is multiples of the capacity of text encoder, whether to reserve the starting
            and ending token in each of the chunks in the middle.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When the parsing is skipped, it is forced True.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples, (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    # support bert tokenizer
    bos = pipe.tokenizer.bos_token_id if pipe.tokenizer.bos_token_id is not None else pipe.tokenizer.cls_token_id
    eos = pipe.tokenizer.eos_token_id if pipe.tokenizer.eos_token_id is not None else pipe.tokenizer.sep_token_id
    pad = pipe.tokenizer.pad_token_id
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        pad,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = paddle.to_tensor(prompt_tokens)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            pad,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = paddle.to_tensor(uncond_tokens)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle
    )
    prompt_weights = paddle.to_tensor(prompt_weights, dtype=text_embeddings.dtype)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe, uncond_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle
        )
        uncond_weights = paddle.to_tensor(uncond_weights, dtype=uncond_embeddings.dtype)

    # assign weights to the prompts and normalize in the sense of mean
    # TODO: should we normalize by chunk or in a whole (current implementation)?
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.mean(axis=[-2, -1])
        text_embeddings *= prompt_weights.unsqueeze(-1)
        text_embeddings *= previous_mean / text_embeddings.mean(axis=[-2, -1])
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.mean(axis=[-2, -1])
            uncond_embeddings *= uncond_weights.unsqueeze(-1)
            uncond_embeddings *= previous_mean / uncond_embeddings.mean(axis=[-2, -1])

    # For classifier free guidance, we need to do two forward passes.
    # Here we concatenate the unconditional and text embeddings into a single batch
    # to avoid doing two forward passes
    if uncond_prompt is not None:
        text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])

    return text_embeddings

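
# Usage sketch (hedged; assumes `pipe` is a constructed pipeline instance):
#   emb = get_weighted_text_embeddings(pipe, "a (very beautiful:1.2) forest", "low quality")
# When an unconditional prompt is given, the result is the concatenated
# [uncond, cond] batch expected by classifier-free guidance.
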
def preprocess_image(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = paddle.to_tensor(image)
    # map pixel values from [0, 1] to the VAE's expected [-1, 1] range
    return 2.0 * image - 1.0


def preprocess_mask(mask):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // 8, h // 8), resample=PIL_INTERPOLATION["nearest"])
    mask = np.array(mask).astype(np.float32) / 255.0
    # tile the single channel to the 4 latent channels: (4, h//8, w//8)
    mask = np.tile(mask, (4, 1, 1))
    mask = mask[None].transpose(0, 1, 2, 3)  # add the batch dimension (the transpose itself is an identity)
    mask = 1 - mask  # repaint white, keep black
    mask = paddle.to_tensor(mask)
    return mask

class StableDiffusionPipelineAllinOne(DiffusionPipeline):
    r"""
    Pipeline for text-to-image, image-to-image, and inpainting generation using Stable Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`]
            or [`DPMSolverMultistepScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/junnyu/stable-diffusion-v1-4-paddle) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = False,
    ):
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. The PaddleNLP team, diffusers team and"
                " Hugging Face strongly recommend keeping the safety filter enabled in all public-facing"
                " circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing"
                " its results. For more information, please have a look at"
                " https://github.com/huggingface/diffusers/pull/254 ."
            )
        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )
        is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
            version.parse(unet.config._ppdiffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def create_scheduler(self, name="DPMSolver"):
        config = self.scheduler.config
        if name == "DPMSolver":
            return DPMSolverMultistepScheduler.from_config(
                config,
                thresholding=False,
                algorithm_type="dpmsolver++",
                solver_type="midpoint",
                lower_order_final=True,
            )
        if name == "EulerDiscrete":
            return EulerDiscreteScheduler.from_config(config)
        elif name == "EulerAncestralDiscrete":
            return EulerAncestralDiscreteScheduler.from_config(config)
        elif name == "PNDM":
            return PNDMScheduler.from_config(config)
        elif name == "DDIM":
            return DDIMScheduler.from_config(config)
        elif name == "LMSDiscrete":
            return LMSDiscreteScheduler.from_config(config)
        elif name == "HeunDiscrete":
            return HeunDiscreteScheduler.from_config(config)
        elif name == "KDPM2AncestralDiscrete":
            return KDPM2AncestralDiscreteScheduler.from_config(config)
        elif name == "KDPM2Discrete":
            return KDPM2DiscreteScheduler.from_config(config)
        else:
            raise NotImplementedError

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        if slice_size == "auto":
            if isinstance(self.unet.config.attention_head_dim, int):
                # half the attention head size is usually a good trade-off between
                # speed and memory
                slice_size = self.unet.config.attention_head_dim // 2
            else:
                # if `attention_head_dim` is a list, take the smallest head size
                slice_size = min(self.unet.config.attention_head_dim)
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""
        Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
        back to computing attention in one step.
        """
        # set slice_size = `None` to disable `attention slicing`
        self.enable_attention_slicing(None)

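
    # Usage sketch (hypothetical call; the `examples` list above passes a sampler
    # name such as "DPMSolver" through the UI):
    #   scheduler = pipe.create_scheduler("EulerAncestralDiscrete")
    #   output = pipe.text2image("a photo of an astronaut", scheduler=scheduler)
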
    def __call__(self, *args, **kwargs):
        return self.text2image(*args, **kwargs)

    def text2img(self, *args, **kwargs):
        return self.text2image(*args, **kwargs)

    def _encode_prompt(
        self,
        prompt,
        negative_prompt,
        max_embeddings_multiples,
        no_boseos_middle,
        skip_parsing,
        skip_weighting,
        do_classifier_free_guidance,
        num_images_per_prompt,
    ):
        if do_classifier_free_guidance and negative_prompt is None:
            negative_prompt = ""
        text_embeddings = get_weighted_text_embeddings(
            self, prompt, negative_prompt, max_embeddings_multiples, no_boseos_middle, skip_parsing, skip_weighting
        )

        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
        text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
        return text_embeddings

    def run_safety_checker(self, image, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clip(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
        return image

    def prepare_extra_step_kwargs(self, eta, scheduler):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        return extra_step_kwargs

    def check_inputs_text2img(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def check_inputs_img2img_inpaint(self, prompt, strength, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents_text2img(self, batch_size, num_channels_latents, height, width, dtype, latents=None, scheduler=None):
        shape = [batch_size, num_channels_latents, height // 8, width // 8]
        if latents is None:
            latents = paddle.randn(shape, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * scheduler.init_noise_sigma
        return latents

    def prepare_latents_img2img(self, image, timestep, num_images_per_prompt, dtype, scheduler):
        image = image.cast(dtype=dtype)
        init_latent_dist = self.vae.encode(image).latent_dist
        init_latents = init_latent_dist.sample()
        init_latents = 0.18215 * init_latents

        b, c, h, w = init_latents.shape
        init_latents = init_latents.tile([1, num_images_per_prompt, 1, 1])
        init_latents = init_latents.reshape([b * num_images_per_prompt, c, h, w])

        # add noise to latents using the timesteps
        noise = paddle.randn(init_latents.shape, dtype=dtype)

        # get latents
        init_latents = scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_timesteps(self, num_inference_steps, strength, scheduler):
        # get the original timestep using init_timestep
        offset = scheduler.config.get("steps_offset", 0)
        init_timestep = int(num_inference_steps * strength) + offset
        init_timestep = min(init_timestep, num_inference_steps)

        t_start = max(num_inference_steps - init_timestep + offset, 0)
        timesteps = scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents_inpaint(self, image, timestep, num_images_per_prompt, dtype, scheduler):
        image = image.cast(dtype)
        init_latent_dist = self.vae.encode(image).latent_dist
        init_latents = init_latent_dist.sample()
        init_latents = 0.18215 * init_latents

        b, c, h, w = init_latents.shape
        init_latents = init_latents.tile([1, num_images_per_prompt, 1, 1])
        init_latents = init_latents.reshape([b * num_images_per_prompt, c, h, w])

        init_latents_orig = init_latents

        # add noise to latents using the timesteps
        noise = paddle.randn(init_latents.shape, dtype=dtype)
        init_latents = scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents, init_latents_orig, noise

    @paddle.no_grad()
    def text2image(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        seed: Optional[int] = None,
        latents: Optional[paddle.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        # newly added arguments
        max_embeddings_multiples: Optional[int] = 1,
        no_boseos_middle: Optional[bool] = False,
        skip_parsing: Optional[bool] = False,
        skip_weighting: Optional[bool] = False,
        scheduler=None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            seed (`int`, *optional*):
                Random number seed.
            latents (`paddle.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `seed`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        if scheduler is None:
            scheduler = self.scheduler
        seed = random.randint(0, 2**32 - 1) if seed is None else seed
        argument = dict(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            seed=seed,
            latents=latents,
            max_embeddings_multiples=max_embeddings_multiples,
            no_boseos_middle=no_boseos_middle,
            skip_parsing=skip_parsing,
            skip_weighting=skip_weighting,
            epoch_time=time.time(),
        )
        paddle.seed(seed)
        # 1. Check inputs. Raise error if not correct
        self.check_inputs_text2img(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            negative_prompt,
            max_embeddings_multiples,
            no_boseos_middle,
            skip_parsing,
            skip_weighting,
            do_classifier_free_guidance,
            num_images_per_prompt,
        )

        # 4. Prepare timesteps
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.in_channels
        latents = self.prepare_latents_text2img(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            latents,
            scheduler=scheduler,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta, scheduler)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided, with the documented (step, timestep, latents) arguments
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 8. Post-processing
        image = self.decode_latents(latents)

        # 9. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)

        # 10. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image, argument=argument)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

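The module-level `pipeline` object constructed further down in this file exposes this method directly; a minimal usage sketch (the prompt text and output path are illustrative only):

```python
# Sketch: drive the text2image path directly instead of through the Gradio UI.
output = pipeline.text2image(
    prompt="a photo of an astronaut riding a horse",
    negative_prompt="blurry, low quality",
    num_inference_steps=25,
    guidance_scale=7.5,
    seed=42,          # fixed seed -> reproducible latents
)
output.images[0].save("astronaut.png")
```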
    @paddle.no_grad()
    def img2img(
        self,
        prompt: Union[str, List[str]],
        image: Union[paddle.Tensor, PIL.Image.Image],
        strength: float = 0.8,
        height=None,
        width=None,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        seed: Optional[int] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        # newly added arguments
        max_embeddings_multiples: Optional[int] = 1,
        no_boseos_middle: Optional[bool] = False,
        skip_parsing: Optional[bool] = False,
        skip_weighting: Optional[bool] = False,
        scheduler=None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`paddle.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, the
                added noise is at its maximum and the denoising process runs for the full number of iterations
                specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter will be modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            seed (`int`, *optional*):
                A random seed.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        if scheduler is None:
            scheduler = self.scheduler
        seed = random.randint(0, 2**32 - 1) if seed is None else seed
        image_str = image
        if isinstance(image_str, str):
            image = load_image(image_str)

        # snap missing dimensions to the input image, rounded down to a multiple of 8
        if height is None and width is None:
            width = (image.size[0] // 8) * 8
            height = (image.size[1] // 8) * 8
        elif height is None and width is not None:
            height = (image.size[1] // 8) * 8
        elif width is None and height is not None:
            width = (image.size[0] // 8) * 8

        argument = dict(
            prompt=prompt,
            image=image_str,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            strength=strength,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            seed=seed,
            max_embeddings_multiples=max_embeddings_multiples,
            no_boseos_middle=no_boseos_middle,
            skip_parsing=skip_parsing,
            skip_weighting=skip_weighting,
            epoch_time=time.time(),
        )
        paddle.seed(seed)

        # 1. Check inputs
        self.check_inputs_img2img_inpaint(prompt, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            negative_prompt,
            max_embeddings_multiples,
            no_boseos_middle,
            skip_parsing,
            skip_weighting,
            do_classifier_free_guidance,
            num_images_per_prompt,
        )

        # 4. Preprocess image
        if isinstance(image, PIL.Image.Image):
            image = image.resize((width, height))
            image = preprocess_image(image)

        # 5. Set timesteps
        scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, scheduler)
        latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])

        # 6. Prepare latent variables
        latents = self.prepare_latents_img2img(image, latent_timestep, num_images_per_prompt, text_embeddings.dtype, scheduler)

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta, scheduler)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided, with the documented (step, timestep, latents) arguments
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 9. Post-processing
        image = self.decode_latents(latents)

        # 10. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)

        # 11. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image, argument=argument)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

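As with `text2image`, the img2img path can be driven through the module-level `pipeline` object defined below; a sketch (the file names are placeholders):

```python
# Sketch: img2img anchors part of the noise schedule to the input image.
init_image = load_image("sketch.png")   # load_image is the same helper the method uses
output = pipeline.img2img(
    prompt="a watercolor painting of a castle",
    image=init_image,
    strength=0.6,        # run only the last ~60% of the denoising schedule
    num_inference_steps=50,
    seed=7,
)
output.images[0].save("castle.png")
```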
    @paddle.no_grad()
    def inpaint(
        self,
        prompt: Union[str, List[str]],
        image: Union[paddle.Tensor, PIL.Image.Image],
        mask_image: Union[paddle.Tensor, PIL.Image.Image],
        height=None,
        width=None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        seed: Optional[int] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        # newly added arguments
        max_embeddings_multiples: Optional[int] = 1,
        no_boseos_middle: Optional[bool] = False,
        skip_parsing: Optional[bool] = False,
        skip_weighting: Optional[bool] = False,
        scheduler=None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`paddle.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`paddle.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
                is 1, the denoising process will be run on the masked area for the full number of iterations specified
                in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
                noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
                the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            seed (`int`, *optional*):
                A random seed.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        if scheduler is None:
            scheduler = self.scheduler
        seed = random.randint(0, 2**32 - 1) if seed is None else seed
        image_str = image
        mask_image_str = mask_image

        if isinstance(image_str, str):
            image = load_image(image_str)
        if isinstance(mask_image_str, str):
            mask_image = load_image(mask_image_str)

        # snap missing dimensions to the input image, rounded down to a multiple of 8
        if height is None and width is None:
            width = (image.size[0] // 8) * 8
            height = (image.size[1] // 8) * 8
        elif height is None and width is not None:
            height = (image.size[1] // 8) * 8
        elif width is None and height is not None:
            width = (image.size[0] // 8) * 8

        argument = dict(
            prompt=prompt,
            image=image_str,
            mask_image=mask_image_str,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            strength=strength,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            seed=seed,
            max_embeddings_multiples=max_embeddings_multiples,
            no_boseos_middle=no_boseos_middle,
            skip_parsing=skip_parsing,
            skip_weighting=skip_weighting,
            epoch_time=time.time(),
        )
        paddle.seed(seed)

        # 1. Check inputs
        self.check_inputs_img2img_inpaint(prompt, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            negative_prompt,
            max_embeddings_multiples,
            no_boseos_middle,
            skip_parsing,
            skip_weighting,
            do_classifier_free_guidance,
            num_images_per_prompt,
        )

        # 4. Preprocess image and mask
        if not isinstance(image, paddle.Tensor):
            image = image.resize((width, height))
            image = preprocess_image(image)

        if not isinstance(mask_image, paddle.Tensor):
            mask_image = mask_image.resize((width, height))
            mask_image = preprocess_mask(mask_image)

        # 5. Set timesteps
        scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, scheduler)
        latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])

        # 6. Prepare latent variables
        # encode the init image into latents and scale the latents
        latents, init_latents_orig, noise = self.prepare_latents_inpaint(
            image, latent_timestep, num_images_per_prompt, text_embeddings.dtype, scheduler
        )

        # 7. Prepare mask latent
        mask = mask_image.cast(latents.dtype)
        mask = paddle.concat([mask] * batch_size * num_images_per_prompt)

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta, scheduler)

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier-free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
                # masking: re-inject the original latents (noised to step t) outside the repainted region
                init_latents_proper = scheduler.add_noise(init_latents_orig, noise, t)

                latents = (init_latents_proper * mask) + (latents * (1 - mask))

                # call the callback, if provided, with the documented (step, timestep, latents) arguments
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 10. Post-processing
        image = self.decode_latents(latents)

        # 11. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)

        # 12. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image, argument=argument)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

    @staticmethod
    def numpy_to_pil(images, **kwargs):
        """
        Convert a numpy image or a batch of images to a PIL image.
        """
        if images.ndim == 3:
            images = images[None, ...]
        images = (images * 255).round().astype("uint8")
        pil_images = []
        argument = kwargs.pop("argument", None)
        for image in images:
            image = PIL.Image.fromarray(image)
            if argument is not None:
                image.argument = argument
            pil_images.append(image)

        return pil_images


pipeline = StableDiffusionPipelineAllinOne.from_pretrained(BASE_MODEL_NAME, safety_checker=None)

if LORA_WEIGHTS_PATH is not None:
    pipeline.unet.load_attn_procs(LORA_WEIGHTS_PATH, from_hf_hub=True)

support_scheduler = [
    "DPMSolver",
    "EulerDiscrete",
    "EulerAncestralDiscrete",
    "PNDM",
    "DDIM",
    "LMSDiscrete",
    "HeunDiscrete",
    "KDPM2AncestralDiscrete",
    "KDPM2Discrete",
]


# generate images
def infer(prompt, negative, scale, height, width, num_inference_steps, scheduler_name):
    scheduler = pipeline.create_scheduler(scheduler_name)

    images = pipeline(
        prompt=prompt,
        negative_prompt=negative,
        guidance_scale=scale,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        scheduler=scheduler,
    ).images
    return images

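Since `infer` is also what the Gradio events below call, it can be exercised directly; a sketch, with scheduler names taken from `support_scheduler` and the prompt text illustrative:

```python
# Direct call to infer, bypassing the UI; arguments mirror the Gradio inputs.
images = infer(
    prompt="portrait photo, studio lighting",
    negative="lowres, watermark",
    scale=7.5,
    height=512,
    width=512,
    num_inference_steps=25,
    scheduler_name="EulerAncestralDiscrete",
)
images[0].save("out.png")
```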
css = """
|
1420 |
-
.gradio-container {
|
1421 |
-
font-family: 'IBM Plex Sans', sans-serif;
|
1422 |
-
}
|
1423 |
-
.gr-button {
|
1424 |
-
color: white;
|
1425 |
-
border-color: black;
|
1426 |
-
background: black;
|
1427 |
-
}
|
1428 |
-
input[type='range'] {
|
1429 |
-
accent-color: black;
|
1430 |
-
}
|
1431 |
-
.dark input[type='range'] {
|
1432 |
-
accent-color: #dfdfdf;
|
1433 |
-
}
|
1434 |
-
.container {
|
1435 |
-
max-width: 730px;
|
1436 |
-
margin: auto;
|
1437 |
-
padding-top: 1.5rem;
|
1438 |
-
}
|
1439 |
-
#gallery {
|
1440 |
-
min-height: 22rem;
|
1441 |
-
margin-bottom: 15px;
|
1442 |
-
margin-left: auto;
|
1443 |
-
margin-right: auto;
|
1444 |
-
border-bottom-right-radius: .5rem !important;
|
1445 |
-
border-bottom-left-radius: .5rem !important;
|
1446 |
-
}
|
1447 |
-
#gallery>div>.h-full {
|
1448 |
-
min-height: 20rem;
|
1449 |
-
}
|
1450 |
-
.details:hover {
|
1451 |
-
text-decoration: underline;
|
1452 |
-
}
|
1453 |
-
.gr-button {
|
1454 |
-
white-space: nowrap;
|
1455 |
-
}
|
1456 |
-
.gr-button:focus {
|
1457 |
-
border-color: rgb(147 197 253 / var(--tw-border-opacity));
|
1458 |
-
outline: none;
|
1459 |
-
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
|
1460 |
-
--tw-border-opacity: 1;
|
1461 |
-
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
|
1462 |
-
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
|
1463 |
-
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
|
1464 |
-
--tw-ring-opacity: .5;
|
1465 |
-
}
|
1466 |
-
#advanced-btn {
|
1467 |
-
font-size: .7rem !important;
|
1468 |
-
line-height: 19px;
|
1469 |
-
margin-top: 12px;
|
1470 |
-
margin-bottom: 12px;
|
1471 |
-
padding: 2px 8px;
|
1472 |
-
border-radius: 14px !important;
|
1473 |
-
}
|
1474 |
-
#advanced-options {
|
1475 |
-
display: none;
|
1476 |
-
margin-bottom: 20px;
|
1477 |
-
}
|
1478 |
-
.footer {
|
1479 |
-
margin-bottom: 45px;
|
1480 |
-
margin-top: 35px;
|
1481 |
-
text-align: center;
|
1482 |
-
border-bottom: 1px solid #e5e5e5;
|
1483 |
-
}
|
1484 |
-
.footer>p {
|
1485 |
-
font-size: .8rem;
|
1486 |
-
display: inline-block;
|
1487 |
-
padding: 0 10px;
|
1488 |
-
transform: translateY(10px);
|
1489 |
-
background: white;
|
1490 |
-
}
|
1491 |
-
.dark .footer {
|
1492 |
-
border-color: #303030;
|
1493 |
-
}
|
1494 |
-
.dark .footer>p {
|
1495 |
-
background: #0b0f19;
|
1496 |
-
}
|
1497 |
-
.acknowledgments h4{
|
1498 |
-
margin: 1.25em 0 .25em 0;
|
1499 |
-
font-weight: bold;
|
1500 |
-
font-size: 115%;
|
1501 |
-
}
|
1502 |
-
.animate-spin {
|
1503 |
-
animation: spin 1s linear infinite;
|
1504 |
-
}
|
1505 |
-
@keyframes spin {
|
1506 |
-
from {
|
1507 |
-
transform: rotate(0deg);
|
1508 |
-
}
|
1509 |
-
to {
|
1510 |
-
transform: rotate(360deg);
|
1511 |
-
}
|
1512 |
-
}
|
1513 |
-
#share-btn-container {
|
1514 |
-
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
|
1515 |
-
margin-top: 10px;
|
1516 |
-
margin-left: auto;
|
1517 |
-
}
|
1518 |
-
#share-btn {
|
1519 |
-
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
|
1520 |
-
}
|
1521 |
-
#share-btn * {
|
1522 |
-
all: unset;
|
1523 |
-
}
|
1524 |
-
#share-btn-container div:nth-child(-n+2){
|
1525 |
-
width: auto !important;
|
1526 |
-
min-height: 0px !important;
|
1527 |
-
}
|
1528 |
-
#share-btn-container .wrap {
|
1529 |
-
display: none !important;
|
1530 |
-
}
|
1531 |
-
|
1532 |
-
.gr-form{
|
1533 |
-
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
|
1534 |
-
}
|
1535 |
-
#prompt-container{
|
1536 |
-
gap: 0;
|
1537 |
-
}
|
1538 |
-
#prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem}
|
1539 |
-
#component-16{border-top-width: 1px!important;margin-top: 1em}
|
1540 |
-
.image_duplication{position: absolute; width: 100px; left: 50px}
|
1541 |
-
"""
|
1542 |
-
|
1543 |
-
block = gr.Blocks(css=css)

with block:
    gr.HTML(
        """
            <div style="text-align: center; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <svg
                  width="0.65em"
                  height="0.65em"
                  viewBox="0 0 115 115"
                  fill="none"
                  xmlns="http://www.w3.org/2000/svg"
                >
                  <rect width="23" height="23" fill="white"></rect>
                  <rect y="69" width="23" height="23" fill="white"></rect>
                  <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="46" width="23" height="23" fill="white"></rect>
                  <rect x="46" y="69" width="23" height="23" fill="white"></rect>
                  <rect x="69" width="23" height="23" fill="black"></rect>
                  <rect x="69" y="69" width="23" height="23" fill="black"></rect>
                  <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="115" y="46" width="23" height="23" fill="white"></rect>
                  <rect x="115" y="115" width="23" height="23" fill="white"></rect>
                  <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="92" y="69" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="46" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="115" width="23" height="23" fill="white"></rect>
                  <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="46" y="46" width="23" height="23" fill="black"></rect>
                  <rect x="46" y="115" width="23" height="23" fill="black"></rect>
                  <rect x="46" y="69" width="23" height="23" fill="black"></rect>
                  <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
                  <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
                  <rect x="23" y="69" width="23" height="23" fill="black"></rect>
                </svg>
                <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
                  Dreambooth LoRa Demo
                </h1>
              </div>
            </div>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
                with gr.Column():
                    text = gr.Textbox(
                        label="Enter your prompt",
                        value=PROMPTS,
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        elem_id="prompt-text-input",
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                    negative = gr.Textbox(
                        label="Enter your negative prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter a negative prompt",
                        elem_id="negative-prompt-text-input",
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                    full_width=False,
                )

        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(grid=[1], height="auto")

    with gr.Accordion("Advanced settings", open=False):
        scheduler_name = gr.Dropdown(
            label="scheduler_name", choices=support_scheduler, value="DPMSolver"
        )
        guidance_scale = gr.Slider(
            label="Guidance Scale", minimum=1, maximum=30, value=7.5, step=0.1
        )
        height = gr.Slider(
            label="Height", minimum=256, maximum=1024, value=512, step=8
        )
        width = gr.Slider(
            label="Width", minimum=256, maximum=1024, value=512, step=8
        )
        num_inference_steps = gr.Slider(
            label="num_inference_steps", minimum=10, maximum=100, value=25, step=1
        )

    inputs = [text, negative, guidance_scale, height, width, num_inference_steps, scheduler_name]
    # ex = gr.Examples(examples=examples, fn=infer, inputs=inputs, outputs=gallery, cache_examples=False)
    # ex.dataset.headers = [""]
    negative.submit(infer, inputs=inputs, outputs=gallery)
    text.submit(infer, inputs=inputs, outputs=gallery)
    btn.click(infer, inputs=inputs, outputs=gallery)

    gr.HTML(
        """
            <div class="footer">
                <p>Model by <a href="https://www.paddlepaddle.org.cn/" style="text-decoration: underline;" target="_blank">PaddlePaddle</a> - Gradio Demo by 🤗 Hugging Face
                </p>
            </div>
            <div class="acknowledgments">
                <p><h4>LICENSE</h4>
                The model is licensed with a <a href="https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL" style="text-decoration: underline;" target="_blank">CreativeML OpenRAIL++</a> license. The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information meant for harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" style="text-decoration: underline;">read the license</a></p>
                <p><h4>Biases and content acknowledgment</h4>
                Despite how impressive turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" style="text-decoration: underline;" target="_blank">model card</a></p>
            </div>
        """
    )

block.launch(server_name="0.0.0.0", server_port=8221)
spaces/AAYUSH27/Neuro/README.md
DELETED
@@ -1,54 +0,0 @@
---
title: AnatomyBOT!
emoji: 🚀
colorFrom: yellow
colorTo: yellow
sdk: streamlit
sdk_version: 1.25.0
app_file: app.py
pinned: false
---

## Make sure you have git-lfs installed: [Git LFS](https://git-lfs.com) ✅
# 🧑🏻‍💻 Steps to download the code

**📌 NOTE-1: If the Llama 2 model is not downloaded, the code will not work properly.**

**📌 NOTE-2: If the Hugging Face API key is not in the ```.env``` file, generate your own API key from Hugging Face and use it.**

---

Step 0:
- Copy and paste the command below into the terminal.
- This command downloads the code to your local machine.
```shell
git clone https://huggingface.co/spaces/AAYUSH27/Neuro
```
- The download is approx. 5 GB.
- If you want to clone without large files (the Llama 2 model):
```shell
GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/spaces/AAYUSH27/Neuro
```

Step 1:
- Copy and paste the command below into the terminal.
- This command changes into the project directory.
```shell
cd Neuro
```

Step 2:
- Copy and paste the command below into the terminal.
- This command installs all the libraries in one go from ```requirements.txt```.
```shell
pip3 install -r requirements.txt
```

Step 3:
- Copy and paste the command below into the terminal.
- This command runs the app on localhost via ```streamlit```.
```shell
streamlit run app.py
```
spaces/AIWaves/Debate/src/agents/LLM/base_LLM.py
DELETED
@@ -1,133 +0,0 @@
from abc import abstractmethod
import openai
import os
import time
from Memory import Memory
from utils import save_logs


class LLM:
    def __init__(self) -> None:
        pass

    @abstractmethod
    def get_response(self, *args, **kwargs):
        pass


class OpenAILLM(LLM):
    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.MAX_CHAT_HISTORY = eval(
            os.environ["MAX_CHAT_HISTORY"]) if "MAX_CHAT_HISTORY" in os.environ else 10

        self.model = kwargs["model"] if "model" in kwargs else "gpt-3.5-turbo-16k-0613"
        self.temperature = kwargs["temperature"] if "temperature" in kwargs else 0.3
        self.log_path = kwargs["log_path"] if "log_path" in kwargs else "logs"

    def get_stream(self, response, log_path, messages):
        ans = ""
        for res in response:
            if res:
                r = (res.choices[0]["delta"].get("content")
                     if res.choices[0]["delta"].get("content") else "")
                ans += r
                yield r

        save_logs(log_path, messages, ans)

    def get_response(self,
                     chat_history,
                     system_prompt,
                     last_prompt=None,
                     stream=False,
                     functions=None,
                     function_call="auto",
                     WAIT_TIME=20,
                     **kwargs):
        """
        Return the LLM's response.
        """
        openai.api_key = os.environ["API_KEY"]
        # if "PROXY" in os.environ:
        #     assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"], "PROXY error: PROXY must be http or socks"
        #     openai.proxy = os.environ["PROXY"]
        if "API_BASE" in os.environ:
            openai.api_base = os.environ["API_BASE"]
        active_mode = True if ("ACTIVE_MODE" in os.environ and os.environ["ACTIVE_MODE"] == "0") else False
        model = self.model
        temperature = self.temperature

        if active_mode:
            system_prompt = system_prompt + "Please keep your reply as concise as possible. Within three sentences, the total word count should not exceed 30."

        messages = [{
            "role": "system",
            "content": system_prompt
        }] if system_prompt else []

        if chat_history:
            if len(chat_history) > self.MAX_CHAT_HISTORY:
                chat_history = chat_history[-self.MAX_CHAT_HISTORY:]
            if isinstance(chat_history[0], dict):
                messages += chat_history
            elif isinstance(chat_history[0], Memory):
                messages += [memory.get_gpt_message("user") for memory in chat_history]

        if last_prompt:
            if active_mode:
                last_prompt = last_prompt + "Please keep your reply as concise as possible. Within three sentences, the total word count should not exceed 30."
            # messages += [{"role": "system", "content": f"{last_prompt}"}]
            messages[-1]["content"] += last_prompt

        while True:
            try:
                if functions:
                    response = openai.ChatCompletion.create(
                        model=model,
                        messages=messages,
                        functions=functions,
                        function_call=function_call,
                        temperature=temperature,
                    )
                else:
                    response = openai.ChatCompletion.create(
                        model=model,
                        messages=messages,
                        temperature=temperature,
                        stream=stream)
                break
            except Exception as e:
                print(e)
                if "maximum context length is" in str(e):
                    assert False, "exceed max length"
                else:
                    print(f"Please wait {WAIT_TIME} seconds and resend later ...")
                    time.sleep(WAIT_TIME)

        if functions:
            save_logs(self.log_path, messages, response)
            return response.choices[0].message
        elif stream:
            return self.get_stream(response, self.log_path, messages)
        else:
            save_logs(self.log_path, messages, response)
            return response.choices[0].message["content"]


def init_LLM(default_log_path, **kwargs):
    LLM_type = kwargs["LLM_type"] if "LLM_type" in kwargs else "OpenAI"
    log_path = kwargs["log_path"] if "log_path" in kwargs else default_log_path
    if LLM_type == "OpenAI":
        LLM = (
            OpenAILLM(**kwargs["LLM"])
            if "LLM" in kwargs
            else OpenAILLM(model="gpt-3.5-turbo-16k-0613", temperature=0.3, log_path=log_path)
        )
        return LLM
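A minimal usage sketch of this wrapper, assuming the legacy `openai` 0.x SDK it targets and an `API_KEY` in the environment (all values below are placeholders):

```python
import os

os.environ["API_KEY"] = "sk-..."  # placeholder; read by get_response at call time

llm = init_LLM(default_log_path="logs", LLM_type="OpenAI")
reply = llm.get_response(
    chat_history=[{"role": "user", "content": "Summarize the debate so far."}],
    system_prompt="You are a neutral debate moderator.",
)
print(reply)
```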
spaces/Adapting/YouTube-Downloader/tube/download.py
DELETED
@@ -1,48 +0,0 @@
from pathlib import Path
from pytube import YouTube

import streamlit as st
from .utils import clear_cache


def download_yt(yt_url: str, output_dir: str = './downloads'):
    yt = YouTube(yt_url)

    prompt = st.markdown('''`downloading...`''')

    while True:
        try:
            yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download(
                output_path=output_dir,
                filename='download.mp4'
            )
            prompt.empty()
            break
        except Exception as e:
            print(e)

    download_file(folder_name=output_dir)


def download_file(folder_name):
    def tmp(*, folder_name: str):
        st.session_state["title"] = ""
        clear_cache(folder_name)

    with open(Path('downloads').joinpath('download.mp4'), "rb") as file:
        btn = st.download_button(
            label="Download",
            data=file,
            file_name='download.mp4',
            on_click=tmp, kwargs=dict(
                folder_name=folder_name
            )
        )
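A minimal sketch of wiring `download_yt` into the Streamlit page that imports it; the import path follows this file's location under `tube/`, and the URL handling is illustrative:

```python
# Hypothetical caller inside the Space's app.py
import streamlit as st
from tube.download import download_yt

url = st.text_input("YouTube URL")
if url:
    download_yt(url)   # fetches the highest-resolution progressive mp4,
                       # then renders a st.download_button for the file
```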
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/roundrectangle.d.ts
DELETED
@@ -1,2 +0,0 @@
import RoundRectangle from './gameobjects/shape/roundrectangle/RoundRectangle';
export default RoundRectangle;
spaces/Ali36Ahmad/magic-diffusion/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Magic Prompt
-emoji: 🎆
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: tommy24/magic-diffusion
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amrrs/DragGan-Inversion/PTI/training/__init__.py
DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_prior.py
DELETED
@@ -1,246 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-import torch
-from torch import nn
-from transformers import (
-    CLIPImageProcessor,
-    CLIPTextConfig,
-    CLIPTextModelWithProjection,
-    CLIPTokenizer,
-    CLIPVisionConfig,
-    CLIPVisionModelWithProjection,
-)
-
-from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
-from diffusers.utils import torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
-
-from ..test_pipelines_common import PipelineTesterMixin
-
-
-enable_full_determinism()
-
-
-class Dummies:
-    @property
-    def text_embedder_hidden_size(self):
-        return 32
-
-    @property
-    def time_input_dim(self):
-        return 32
-
-    @property
-    def block_out_channels_0(self):
-        return self.time_input_dim
-
-    @property
-    def time_embed_dim(self):
-        return self.time_input_dim * 4
-
-    @property
-    def cross_attention_dim(self):
-        return 100
-
-    @property
-    def dummy_tokenizer(self):
-        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-        return tokenizer
-
-    @property
-    def dummy_text_encoder(self):
-        torch.manual_seed(0)
-        config = CLIPTextConfig(
-            bos_token_id=0,
-            eos_token_id=2,
-            hidden_size=self.text_embedder_hidden_size,
-            projection_dim=self.text_embedder_hidden_size,
-            intermediate_size=37,
-            layer_norm_eps=1e-05,
-            num_attention_heads=4,
-            num_hidden_layers=5,
-            pad_token_id=1,
-            vocab_size=1000,
-        )
-        return CLIPTextModelWithProjection(config)
-
-    @property
-    def dummy_prior(self):
-        torch.manual_seed(0)
-
-        model_kwargs = {
-            "num_attention_heads": 2,
-            "attention_head_dim": 12,
-            "embedding_dim": self.text_embedder_hidden_size,
-            "num_layers": 1,
-        }
-
-        model = PriorTransformer(**model_kwargs)
-        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
-        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
-        return model
-
-    @property
-    def dummy_image_encoder(self):
-        torch.manual_seed(0)
-        config = CLIPVisionConfig(
-            hidden_size=self.text_embedder_hidden_size,
-            image_size=224,
-            projection_dim=self.text_embedder_hidden_size,
-            intermediate_size=37,
-            num_attention_heads=4,
-            num_channels=3,
-            num_hidden_layers=5,
-            patch_size=14,
-        )
-
-        model = CLIPVisionModelWithProjection(config)
-        return model
-
-    @property
-    def dummy_image_processor(self):
-        image_processor = CLIPImageProcessor(
-            crop_size=224,
-            do_center_crop=True,
-            do_normalize=True,
-            do_resize=True,
-            image_mean=[0.48145466, 0.4578275, 0.40821073],
-            image_std=[0.26862954, 0.26130258, 0.27577711],
-            resample=3,
-            size=224,
-        )
-
-        return image_processor
-
-    def get_dummy_components(self):
-        prior = self.dummy_prior
-        image_encoder = self.dummy_image_encoder
-        text_encoder = self.dummy_text_encoder
-        tokenizer = self.dummy_tokenizer
-        image_processor = self.dummy_image_processor
-
-        scheduler = UnCLIPScheduler(
-            variance_type="fixed_small_log",
-            prediction_type="sample",
-            num_train_timesteps=1000,
-            clip_sample=True,
-            clip_sample_range=10.0,
-        )
-
-        components = {
-            "prior": prior,
-            "image_encoder": image_encoder,
-            "text_encoder": text_encoder,
-            "tokenizer": tokenizer,
-            "scheduler": scheduler,
-            "image_processor": image_processor,
-        }
-
-        return components
-
-    def get_dummy_inputs(self, device, seed=0):
-        if str(device).startswith("mps"):
-            generator = torch.manual_seed(seed)
-        else:
-            generator = torch.Generator(device=device).manual_seed(seed)
-        inputs = {
-            "prompt": "horse",
-            "generator": generator,
-            "guidance_scale": 4.0,
-            "num_inference_steps": 2,
-            "output_type": "np",
-        }
-        return inputs
-
-
-class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
-    pipeline_class = KandinskyV22PriorPipeline
-    params = ["prompt"]
-    batch_params = ["prompt", "negative_prompt"]
-    required_optional_params = [
-        "num_images_per_prompt",
-        "generator",
-        "num_inference_steps",
-        "latents",
-        "negative_prompt",
-        "guidance_scale",
-        "output_type",
-        "return_dict",
-    ]
-    test_xformers_attention = False
-
-    def get_dummy_components(self):
-        dummies = Dummies()
-        return dummies.get_dummy_components()
-
-    def get_dummy_inputs(self, device, seed=0):
-        dummies = Dummies()
-        return dummies.get_dummy_inputs(device=device, seed=seed)
-
-    def test_kandinsky_prior(self):
-        device = "cpu"
-
-        components = self.get_dummy_components()
-
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(device)
-
-        pipe.set_progress_bar_config(disable=None)
-
-        output = pipe(**self.get_dummy_inputs(device))
-        image = output.image_embeds
-
-        image_from_tuple = pipe(
-            **self.get_dummy_inputs(device),
-            return_dict=False,
-        )[0]
-
-        image_slice = image[0, -10:]
-        image_from_tuple_slice = image_from_tuple[0, -10:]
-
-        assert image.shape == (1, 32)
-
-        expected_slice = np.array(
-            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
-        )
-
-        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
-        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-
-    @skip_mps
-    def test_inference_batch_single_identical(self):
-        test_max_difference = torch_device == "cpu"
-        relax_max_difference = True
-        test_mean_pixel_difference = False
-
-        self._test_inference_batch_single_identical(
-            test_max_difference=test_max_difference,
-            relax_max_difference=relax_max_difference,
-            test_mean_pixel_difference=test_mean_pixel_difference,
-        )
-
-    @skip_mps
-    def test_attention_slicing_forward_pass(self):
-        test_max_difference = torch_device == "cpu"
-        test_mean_pixel_difference = False
-
-        self._test_attention_slicing_forward_pass(
-            test_max_difference=test_max_difference,
-            test_mean_pixel_difference=test_mean_pixel_difference,
-        )
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/test_pipelines_flax.py
DELETED
@@ -1,260 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import tempfile
-import unittest
-
-import numpy as np
-
-from diffusers.utils import is_flax_available
-from diffusers.utils.testing_utils import require_flax, slow
-
-
-if is_flax_available():
-    import jax
-    import jax.numpy as jnp
-    from flax.jax_utils import replicate
-    from flax.training.common_utils import shard
-
-    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
-
-
-@require_flax
-class DownloadTests(unittest.TestCase):
-    def test_download_only_pytorch(self):
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            # pipeline has Flax weights
-            _ = FlaxDiffusionPipeline.from_pretrained(
-                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
-            )
-
-            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
-            files = [item for sublist in all_root_files for item in sublist]
-
-            # None of the downloaded files should be a PyTorch file even if we have some here:
-            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
-            assert not any(f.endswith(".bin") for f in files)
-
-
-@slow
-@require_flax
-class FlaxPipelineTests(unittest.TestCase):
-    def test_dummy_all_tpus(self):
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
-        )
-
-        prompt = (
-            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
-            " field, close up, split lighting, cinematic"
-        )
-
-        prng_seed = jax.random.PRNGKey(0)
-        num_inference_steps = 4
-
-        num_samples = jax.device_count()
-        prompt = num_samples * [prompt]
-        prompt_ids = pipeline.prepare_inputs(prompt)
-
-        # shard inputs and rng
-        params = replicate(params)
-        prng_seed = jax.random.split(prng_seed, num_samples)
-        prompt_ids = shard(prompt_ids)
-
-        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
-
-        assert images.shape == (num_samples, 1, 64, 64, 3)
-        if jax.device_count() == 8:
-            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
-            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
-
-        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
-        assert len(images_pil) == num_samples
-
-    def test_stable_diffusion_v1_4(self):
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
-        )
-
-        prompt = (
-            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
-            " field, close up, split lighting, cinematic"
-        )
-
-        prng_seed = jax.random.PRNGKey(0)
-        num_inference_steps = 50
-
-        num_samples = jax.device_count()
-        prompt = num_samples * [prompt]
-        prompt_ids = pipeline.prepare_inputs(prompt)
-
-        # shard inputs and rng
-        params = replicate(params)
-        prng_seed = jax.random.split(prng_seed, num_samples)
-        prompt_ids = shard(prompt_ids)
-
-        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
-
-        assert images.shape == (num_samples, 1, 512, 512, 3)
-        if jax.device_count() == 8:
-            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
-            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
-
-    def test_stable_diffusion_v1_4_bfloat_16(self):
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
-        )
-
-        prompt = (
-            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
-            " field, close up, split lighting, cinematic"
-        )
-
-        prng_seed = jax.random.PRNGKey(0)
-        num_inference_steps = 50
-
-        num_samples = jax.device_count()
-        prompt = num_samples * [prompt]
-        prompt_ids = pipeline.prepare_inputs(prompt)
-
-        # shard inputs and rng
-        params = replicate(params)
-        prng_seed = jax.random.split(prng_seed, num_samples)
-        prompt_ids = shard(prompt_ids)
-
-        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
-
-        assert images.shape == (num_samples, 1, 512, 512, 3)
-        if jax.device_count() == 8:
-            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
-            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
-
-    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
-        )
-
-        prompt = (
-            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
-            " field, close up, split lighting, cinematic"
-        )
-
-        prng_seed = jax.random.PRNGKey(0)
-        num_inference_steps = 50
-
-        num_samples = jax.device_count()
-        prompt = num_samples * [prompt]
-        prompt_ids = pipeline.prepare_inputs(prompt)
-
-        # shard inputs and rng
-        params = replicate(params)
-        prng_seed = jax.random.split(prng_seed, num_samples)
-        prompt_ids = shard(prompt_ids)
-
-        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
-
-        assert images.shape == (num_samples, 1, 512, 512, 3)
-        if jax.device_count() == 8:
-            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
-            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
-
-    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
-        scheduler = FlaxDDIMScheduler(
-            beta_start=0.00085,
-            beta_end=0.012,
-            beta_schedule="scaled_linear",
-            set_alpha_to_one=False,
-            steps_offset=1,
-        )
-
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4",
-            revision="bf16",
-            dtype=jnp.bfloat16,
-            scheduler=scheduler,
-            safety_checker=None,
-        )
-        scheduler_state = scheduler.create_state()
-
-        params["scheduler"] = scheduler_state
-
-        prompt = (
-            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
-            " field, close up, split lighting, cinematic"
-        )
-
-        prng_seed = jax.random.PRNGKey(0)
-        num_inference_steps = 50
-
-        num_samples = jax.device_count()
-        prompt = num_samples * [prompt]
-        prompt_ids = pipeline.prepare_inputs(prompt)
-
-        # shard inputs and rng
-        params = replicate(params)
-        prng_seed = jax.random.split(prng_seed, num_samples)
-        prompt_ids = shard(prompt_ids)
-
-        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
-
-        assert images.shape == (num_samples, 1, 512, 512, 3)
-        if jax.device_count() == 8:
-            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
-            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
-
-    def test_jax_memory_efficient_attention(self):
-        prompt = (
-            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
-            " field, close up, split lighting, cinematic"
-        )
-
-        num_samples = jax.device_count()
-        prompt = num_samples * [prompt]
-        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
-
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4",
-            revision="bf16",
-            dtype=jnp.bfloat16,
-            safety_checker=None,
-        )
-
-        params = replicate(params)
-        prompt_ids = pipeline.prepare_inputs(prompt)
-        prompt_ids = shard(prompt_ids)
-        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
-        assert images.shape == (num_samples, 1, 512, 512, 3)
-        slice = images[2, 0, 256, 10:17, 1]
-
-        # With memory efficient attention
-        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4",
-            revision="bf16",
-            dtype=jnp.bfloat16,
-            safety_checker=None,
-            use_memory_efficient_attention=True,
-        )
-
-        params = replicate(params)
-        prompt_ids = pipeline.prepare_inputs(prompt)
-        prompt_ids = shard(prompt_ids)
-        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
-        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
-        slice_eff = images[2, 0, 256, 10:17, 1]
-
-        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
-        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
-        assert abs(slice_eff - slice).max() < 1e-2
spaces/Andy1621/uniformer_image_detection/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
DELETED
@@ -1,9 +0,0 @@
-_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w18',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(18, 36)),
-            stage3=dict(num_channels=(18, 36, 72)),
-            stage4=dict(num_channels=(18, 36, 72, 144)))),
-    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/embedding_rpn_head.py
DELETED
@@ -1,100 +0,0 @@
-import torch
-import torch.nn as nn
-
-from mmdet.models.builder import HEADS
-from ...core import bbox_cxcywh_to_xyxy
-
-
-@HEADS.register_module()
-class EmbeddingRPNHead(nn.Module):
-    """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
-
-    Unlike traditional RPNHead, this module does not need FPN input, but just
-    decode `init_proposal_bboxes` and expand the first dimension of
-    `init_proposal_bboxes` and `init_proposal_features` to the batch_size.
-
-    Args:
-        num_proposals (int): Number of init_proposals. Default 100.
-        proposal_feature_channel (int): Channel number of
-            init_proposal_feature. Defaults to 256.
-    """
-
-    def __init__(self,
-                 num_proposals=100,
-                 proposal_feature_channel=256,
-                 **kwargs):
-        super(EmbeddingRPNHead, self).__init__()
-        self.num_proposals = num_proposals
-        self.proposal_feature_channel = proposal_feature_channel
-        self._init_layers()
-
-    def _init_layers(self):
-        """Initialize a sparse set of proposal boxes and proposal features."""
-        self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
-        self.init_proposal_features = nn.Embedding(
-            self.num_proposals, self.proposal_feature_channel)
-
-    def init_weights(self):
-        """Initialize the init_proposal_bboxes as normalized.
-
-        [c_x, c_y, w, h], and we initialize it to the size of the entire
-        image.
-        """
-        nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
-        nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
-
-    def _decode_init_proposals(self, imgs, img_metas):
-        """Decode init_proposal_bboxes according to the size of images and
-        expand dimension of init_proposal_features to batch_size.
-
-        Args:
-            imgs (list[Tensor]): List of FPN features.
-            img_metas (list[dict]): List of meta-information of
-                images. Need the img_shape to decode the init_proposals.
-
-        Returns:
-            Tuple(Tensor):
-
-                - proposals (Tensor): Decoded proposal bboxes,
-                  has shape (batch_size, num_proposals, 4).
-                - init_proposal_features (Tensor): Expanded proposal
-                  features, has shape
-                  (batch_size, num_proposals, proposal_feature_channel).
-                - imgs_whwh (Tensor): Tensor with shape
-                  (batch_size, 4), the dimension means
-                  [img_width, img_height, img_width, img_height].
-        """
-        proposals = self.init_proposal_bboxes.weight.clone()
-        proposals = bbox_cxcywh_to_xyxy(proposals)
-        num_imgs = len(imgs[0])
-        imgs_whwh = []
-        for meta in img_metas:
-            h, w, _ = meta['img_shape']
-            imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
-        imgs_whwh = torch.cat(imgs_whwh, dim=0)
-        imgs_whwh = imgs_whwh[:, None, :]
-
-        # imgs_whwh has shape (batch_size, 1, 4)
-        # The shape of proposals change from (num_proposals, 4)
-        # to (batch_size ,num_proposals, 4)
-        proposals = proposals * imgs_whwh
-
-        init_proposal_features = self.init_proposal_features.weight.clone()
-        init_proposal_features = init_proposal_features[None].expand(
-            num_imgs, *init_proposal_features.size())
-        return proposals, init_proposal_features, imgs_whwh
-
-    def forward_dummy(self, img, img_metas):
-        """Dummy forward function.
-
-        Used in flops calculation.
-        """
-        return self._decode_init_proposals(img, img_metas)
-
-    def forward_train(self, img, img_metas):
-        """Forward function in training stage."""
-        return self._decode_init_proposals(img, img_metas)
-
-    def simple_test_rpn(self, img, img_metas):
-        """Forward function in testing stage."""
-        return self._decode_init_proposals(img, img_metas)
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/vfnet.py
DELETED
@@ -1,18 +0,0 @@
-from ..builder import DETECTORS
-from .single_stage import SingleStageDetector
-
-
-@DETECTORS.register_module()
-class VFNet(SingleStageDetector):
-    """Implementation of `VarifocalNet
-    (VFNet).<https://arxiv.org/abs/2008.13367>`_"""
-
-    def __init__(self,
-                 backbone,
-                 neck,
-                 bbox_head,
-                 train_cfg=None,
-                 test_cfg=None,
-                 pretrained=None):
-        super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
-                                    test_cfg, pretrained)
spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/fpn.py
DELETED
@@ -1,221 +0,0 @@
-import warnings
-
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, xavier_init
-from mmcv.runner import auto_fp16
-
-from ..builder import NECKS
-
-
-@NECKS.register_module()
-class FPN(nn.Module):
-    r"""Feature Pyramid Network.
-
-    This is an implementation of paper `Feature Pyramid Networks for Object
-    Detection <https://arxiv.org/abs/1612.03144>`_.
-
-    Args:
-        in_channels (List[int]): Number of input channels per scale.
-        out_channels (int): Number of output channels (used at each scale)
-        num_outs (int): Number of output scales.
-        start_level (int): Index of the start input backbone level used to
-            build the feature pyramid. Default: 0.
-        end_level (int): Index of the end input backbone level (exclusive) to
-            build the feature pyramid. Default: -1, which means the last level.
-        add_extra_convs (bool | str): If bool, it decides whether to add conv
-            layers on top of the original feature maps. Default to False.
-            If True, its actual mode is specified by `extra_convs_on_inputs`.
-            If str, it specifies the source feature map of the extra convs.
-            Only the following options are allowed
-
-            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
-            - 'on_lateral': Last feature map after lateral convs.
-            - 'on_output': The last output feature map after fpn convs.
-        extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
-            on the original feature from the backbone. If True,
-            it is equivalent to `add_extra_convs='on_input'`. If False, it is
-            equivalent to set `add_extra_convs='on_output'`. Default to True.
-        relu_before_extra_convs (bool): Whether to apply relu before the extra
-            conv. Default: False.
-        no_norm_on_lateral (bool): Whether to apply norm on lateral.
-            Default: False.
-        conv_cfg (dict): Config dict for convolution layer. Default: None.
-        norm_cfg (dict): Config dict for normalization layer. Default: None.
-        act_cfg (str): Config dict for activation layer in ConvModule.
-            Default: None.
-        upsample_cfg (dict): Config dict for interpolate layer.
-            Default: `dict(mode='nearest')`
-
-    Example:
-        >>> import torch
-        >>> in_channels = [2, 3, 5, 7]
-        >>> scales = [340, 170, 84, 43]
-        >>> inputs = [torch.rand(1, c, s, s)
-        ...           for c, s in zip(in_channels, scales)]
-        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
-        >>> outputs = self.forward(inputs)
-        >>> for i in range(len(outputs)):
-        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
-        outputs[0].shape = torch.Size([1, 11, 340, 340])
-        outputs[1].shape = torch.Size([1, 11, 170, 170])
-        outputs[2].shape = torch.Size([1, 11, 84, 84])
-        outputs[3].shape = torch.Size([1, 11, 43, 43])
-    """
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 num_outs,
-                 start_level=0,
-                 end_level=-1,
-                 add_extra_convs=False,
-                 extra_convs_on_inputs=True,
-                 relu_before_extra_convs=False,
-                 no_norm_on_lateral=False,
-                 conv_cfg=None,
-                 norm_cfg=None,
-                 act_cfg=None,
-                 upsample_cfg=dict(mode='nearest')):
-        super(FPN, self).__init__()
-        assert isinstance(in_channels, list)
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.num_ins = len(in_channels)
-        self.num_outs = num_outs
-        self.relu_before_extra_convs = relu_before_extra_convs
-        self.no_norm_on_lateral = no_norm_on_lateral
-        self.fp16_enabled = False
-        self.upsample_cfg = upsample_cfg.copy()
-
-        if end_level == -1:
-            self.backbone_end_level = self.num_ins
-            assert num_outs >= self.num_ins - start_level
-        else:
-            # if end_level < inputs, no extra level is allowed
-            self.backbone_end_level = end_level
-            assert end_level <= len(in_channels)
-            assert num_outs == end_level - start_level
-        self.start_level = start_level
-        self.end_level = end_level
-        self.add_extra_convs = add_extra_convs
-        assert isinstance(add_extra_convs, (str, bool))
-        if isinstance(add_extra_convs, str):
-            # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
-            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
-        elif add_extra_convs:  # True
-            if extra_convs_on_inputs:
-                # TODO: deprecate `extra_convs_on_inputs`
-                warnings.simplefilter('once')
-                warnings.warn(
-                    '"extra_convs_on_inputs" will be deprecated in v2.9.0,'
-                    'Please use "add_extra_convs"', DeprecationWarning)
-                self.add_extra_convs = 'on_input'
-            else:
-                self.add_extra_convs = 'on_output'
-
-        self.lateral_convs = nn.ModuleList()
-        self.fpn_convs = nn.ModuleList()
-
-        for i in range(self.start_level, self.backbone_end_level):
-            l_conv = ConvModule(
-                in_channels[i],
-                out_channels,
-                1,
-                conv_cfg=conv_cfg,
-                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
-                act_cfg=act_cfg,
-                inplace=False)
-            fpn_conv = ConvModule(
-                out_channels,
-                out_channels,
-                3,
-                padding=1,
-                conv_cfg=conv_cfg,
-                norm_cfg=norm_cfg,
-                act_cfg=act_cfg,
-                inplace=False)
-
-            self.lateral_convs.append(l_conv)
-            self.fpn_convs.append(fpn_conv)
-
-        # add extra conv layers (e.g., RetinaNet)
-        extra_levels = num_outs - self.backbone_end_level + self.start_level
-        if self.add_extra_convs and extra_levels >= 1:
-            for i in range(extra_levels):
-                if i == 0 and self.add_extra_convs == 'on_input':
-                    in_channels = self.in_channels[self.backbone_end_level - 1]
-                else:
-                    in_channels = out_channels
-                extra_fpn_conv = ConvModule(
-                    in_channels,
-                    out_channels,
-                    3,
-                    stride=2,
-                    padding=1,
-                    conv_cfg=conv_cfg,
-                    norm_cfg=norm_cfg,
-                    act_cfg=act_cfg,
-                    inplace=False)
-                self.fpn_convs.append(extra_fpn_conv)
-
-    # default init_weights for conv(msra) and norm in ConvModule
-    def init_weights(self):
-        """Initialize the weights of FPN module."""
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                xavier_init(m, distribution='uniform')
-
-    @auto_fp16()
-    def forward(self, inputs):
-        """Forward function."""
-        assert len(inputs) == len(self.in_channels)
-
-        # build laterals
-        laterals = [
-            lateral_conv(inputs[i + self.start_level])
-            for i, lateral_conv in enumerate(self.lateral_convs)
-        ]
-
-        # build top-down path
-        used_backbone_levels = len(laterals)
-        for i in range(used_backbone_levels - 1, 0, -1):
-            # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
-            # it cannot co-exist with `size` in `F.interpolate`.
-            if 'scale_factor' in self.upsample_cfg:
-                laterals[i - 1] += F.interpolate(laterals[i],
-                                                 **self.upsample_cfg)
-            else:
-                prev_shape = laterals[i - 1].shape[2:]
-                laterals[i - 1] += F.interpolate(
-                    laterals[i], size=prev_shape, **self.upsample_cfg)
-
-        # build outputs
-        # part 1: from original levels
-        outs = [
-            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
-        ]
-        # part 2: add extra levels
-        if self.num_outs > len(outs):
-            # use max pool to get more levels on top of outputs
-            # (e.g., Faster R-CNN, Mask R-CNN)
-            if not self.add_extra_convs:
-                for i in range(self.num_outs - used_backbone_levels):
-                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
-            # add conv layers on top of original feature maps (RetinaNet)
-            else:
-                if self.add_extra_convs == 'on_input':
-                    extra_source = inputs[self.backbone_end_level - 1]
-                elif self.add_extra_convs == 'on_lateral':
-                    extra_source = laterals[-1]
-                elif self.add_extra_convs == 'on_output':
-                    extra_source = outs[-1]
-                else:
-                    raise NotImplementedError
-                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
-                for i in range(used_backbone_levels + 1, self.num_outs):
-                    if self.relu_before_extra_convs:
-                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
-                    else:
-                        outs.append(self.fpn_convs[i](outs[-1]))
-        return tuple(outs)
spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
-_base_ = [
-    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py',
-    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
-]
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
-_base_ = [
-    '../_base_/models/pspnet_r50-d8.py',
-    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
-    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
spaces/AnimalEquality/chatbot/_proc/_docs/index.html
DELETED
@@ -1,535 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"><head>
|
3 |
-
|
4 |
-
<meta charset="utf-8">
|
5 |
-
<meta name="generator" content="quarto-1.3.361">
|
6 |
-
|
7 |
-
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
|
8 |
-
|
9 |
-
<meta name="description" content="An experimental Vegan recipe chatbot">
|
10 |
-
|
11 |
-
<title>lv-recipe-chatbot</title>
|
12 |
-
<style>
|
13 |
-
code{white-space: pre-wrap;}
|
14 |
-
span.smallcaps{font-variant: small-caps;}
|
15 |
-
div.columns{display: flex; gap: min(4vw, 1.5em);}
|
16 |
-
div.column{flex: auto; overflow-x: auto;}
|
17 |
-
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
|
18 |
-
ul.task-list{list-style: none;}
|
19 |
-
ul.task-list li input[type="checkbox"] {
|
20 |
-
width: 0.8em;
|
21 |
-
margin: 0 0.8em 0.2em -1em; /* quarto-specific, see https://github.com/quarto-dev/quarto-cli/issues/4556 */
|
22 |
-
vertical-align: middle;
|
23 |
-
}
|
24 |
-
/* CSS for syntax highlighting */
|
25 |
-
pre > code.sourceCode { white-space: pre; position: relative; }
|
26 |
-
pre > code.sourceCode > span { display: inline-block; line-height: 1.25; }
|
27 |
-
pre > code.sourceCode > span:empty { height: 1.2em; }
|
28 |
-
.sourceCode { overflow: visible; }
|
29 |
-
code.sourceCode > span { color: inherit; text-decoration: inherit; }
|
30 |
-
div.sourceCode { margin: 1em 0; }
|
31 |
-
pre.sourceCode { margin: 0; }
|
32 |
-
@media screen {
|
33 |
-
div.sourceCode { overflow: auto; }
|
34 |
-
}
|
35 |
-
@media print {
|
36 |
-
pre > code.sourceCode { white-space: pre-wrap; }
|
37 |
-
pre > code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
|
38 |
-
}
|
39 |
-
pre.numberSource code
|
40 |
-
{ counter-reset: source-line 0; }
|
41 |
-
pre.numberSource code > span
|
42 |
-
{ position: relative; left: -4em; counter-increment: source-line; }
|
43 |
-
pre.numberSource code > span > a:first-child::before
|
44 |
-
{ content: counter(source-line);
|
45 |
-
position: relative; left: -1em; text-align: right; vertical-align: baseline;
|
46 |
-
border: none; display: inline-block;
|
47 |
-
-webkit-touch-callout: none; -webkit-user-select: none;
|
48 |
-
-khtml-user-select: none; -moz-user-select: none;
|
49 |
-
-ms-user-select: none; user-select: none;
|
50 |
-
padding: 0 4px; width: 4em;
|
51 |
-
}
|
52 |
-
pre.numberSource { margin-left: 3em; padding-left: 4px; }
|
53 |
-
div.sourceCode
|
54 |
-
{ }
|
55 |
-
@media screen {
|
56 |
-
pre > code.sourceCode > span > a:first-child::before { text-decoration: underline; }
|
57 |
-
}
|
58 |
-
</style>
|
59 |
-
|
60 |
-
|
61 |
-
<script src="site_libs/quarto-nav/quarto-nav.js"></script>
|
62 |
-
<script src="site_libs/quarto-nav/headroom.min.js"></script>
|
63 |
-
<script src="site_libs/clipboard/clipboard.min.js"></script>
|
64 |
-
<script src="site_libs/quarto-search/autocomplete.umd.js"></script>
|
65 |
-
<script src="site_libs/quarto-search/fuse.min.js"></script>
|
66 |
-
<script src="site_libs/quarto-search/quarto-search.js"></script>
|
67 |
-
<meta name="quarto:offset" content="./">
|
68 |
-
<script src="site_libs/quarto-html/quarto.js"></script>
|
69 |
-
<script src="site_libs/quarto-html/popper.min.js"></script>
|
70 |
-
<script src="site_libs/quarto-html/tippy.umd.min.js"></script>
|
71 |
-
<script src="site_libs/quarto-html/anchor.min.js"></script>
|
72 |
-
<link href="site_libs/quarto-html/tippy.css" rel="stylesheet">
|
73 |
-
<link href="site_libs/quarto-html/quarto-syntax-highlighting.css" rel="stylesheet" id="quarto-text-highlighting-styles">
|
74 |
-
<script src="site_libs/bootstrap/bootstrap.min.js"></script>
|
75 |
-
<link href="site_libs/bootstrap/bootstrap-icons.css" rel="stylesheet">
|
76 |
-
<link href="site_libs/bootstrap/bootstrap.min.css" rel="stylesheet" id="quarto-bootstrap" data-mode="light">
|
77 |
-
<script id="quarto-search-options" type="application/json">{
|
78 |
-
"location": "navbar",
|
79 |
-
"copy-button": false,
|
80 |
-
"collapse-after": 3,
|
81 |
-
"panel-placement": "end",
|
82 |
-
"type": "overlay",
|
83 |
-
"limit": 20,
|
84 |
-
"language": {
|
85 |
-
"search-no-results-text": "No results",
|
86 |
-
"search-matching-documents-text": "matching documents",
|
87 |
-
"search-copy-link-title": "Copy link to search",
|
88 |
-
"search-hide-matches-text": "Hide additional matches",
|
89 |
-
"search-more-match-text": "more match in this document",
|
90 |
-
"search-more-matches-text": "more matches in this document",
|
91 |
-
"search-clear-button-title": "Clear",
|
92 |
-
"search-detached-cancel-button-title": "Cancel",
|
93 |
-
"search-submit-button-title": "Submit",
|
94 |
-
"search-label": "Search"
|
95 |
-
}
|
96 |
-
}</script>
|
97 |
-
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.6/require.min.js" integrity="sha512-c3Nl8+7g4LMSTdrm621y7kf9v3SDPnhxLNhcjFJbKECVnmZHTdo+IRO05sNLTH/D3vA6u1X32ehoLC7WFVdheg==" crossorigin="anonymous"></script>
|
98 |
-
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.min.js" integrity="sha512-bLT0Qm9VnAYZDflyKcBaQ2gg0hSYNQrJ8RilYldYQ1FxQYoCLtUjuuRuZo+fjqhx/qtq/1itJ0C2ejDxltZVFg==" crossorigin="anonymous"></script>
|
99 |
-
<script type="application/javascript">define('jquery', [],function() {return window.jQuery;})</script>
|
100 |
-
|
101 |
-
|
102 |
-
<link rel="stylesheet" href="styles.css">
|
103 |
-
<meta property="og:title" content="lv-recipe-chatbot">
|
104 |
-
<meta property="og:description" content="An experimental Vegan recipe chatbot">
|
105 |
-
<meta property="og:site-name" content="lv-recipe-chatbot">
|
106 |
-
<meta name="twitter:title" content="lv-recipe-chatbot">
|
107 |
-
<meta name="twitter:description" content="An experimental Vegan recipe chatbot">
|
108 |
-
<meta name="twitter:card" content="summary">
|
109 |
-
</head>
|
110 |
-
|
111 |
-
<body class="nav-sidebar floating nav-fixed">
|
112 |
-
|
113 |
-
<div id="quarto-search-results"></div>
|
114 |
-
<header id="quarto-header" class="headroom fixed-top">
|
115 |
-
<nav class="navbar navbar-expand-lg navbar-dark ">
|
116 |
-
<div class="navbar-container container-fluid">
|
117 |
-
<div class="navbar-brand-container">
|
118 |
-
<a class="navbar-brand" href="./index.html">
|
119 |
-
<span class="navbar-title">lv-recipe-chatbot</span>
|
120 |
-
</a>
|
121 |
-
</div>
|
122 |
-
<div class="quarto-navbar-tools ms-auto">
|
123 |
-
</div>
|
124 |
-
<div id="quarto-search" class="" title="Search"></div>
|
125 |
-
</div> <!-- /container-fluid -->
|
126 |
-
</nav>
|
127 |
-
<nav class="quarto-secondary-nav">
|
128 |
-
<div class="container-fluid d-flex">
|
129 |
-
<button type="button" class="quarto-btn-toggle btn" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass" aria-controls="quarto-sidebar" aria-expanded="false" aria-label="Toggle sidebar navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
|
130 |
-
<i class="bi bi-layout-text-sidebar-reverse"></i>
|
131 |
-
</button>
|
132 |
-
<nav class="quarto-page-breadcrumbs" aria-label="breadcrumb"><ol class="breadcrumb"><li class="breadcrumb-item"><a href="./index.html">lv-recipe-chatbot</a></li></ol></nav>
|
133 |
-
<a class="flex-grow-1" role="button" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass" aria-controls="quarto-sidebar" aria-expanded="false" aria-label="Toggle sidebar navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
|
134 |
-
</a>
|
135 |
-
</div>
|
136 |
-
</nav>
|
137 |
-
</header>
|
138 |
-
<!-- content -->
|
139 |
-
<div id="quarto-content" class="quarto-container page-columns page-rows-contents page-layout-article page-navbar">
|
140 |
-
<!-- sidebar -->
|
141 |
-
<nav id="quarto-sidebar" class="sidebar collapse collapse-horizontal sidebar-navigation floating overflow-auto">
|
142 |
-
<div class="sidebar-menu-container">
|
143 |
-
<ul class="list-unstyled mt-1">
|
144 |
-
<li class="sidebar-item">
|
145 |
-
<div class="sidebar-item-container">
|
146 |
-
<a href="./index.html" class="sidebar-item-text sidebar-link active">
|
147 |
-
<span class="menu-text">lv-recipe-chatbot</span></a>
|
148 |
-
</div>
|
149 |
-
</li>
|
150 |
-
<li class="sidebar-item">
|
151 |
-
<div class="sidebar-item-container">
|
152 |
-
<a href="./engineer_prompt.html" class="sidebar-item-text sidebar-link">
|
153 |
-
<span class="menu-text">engineer_prompt</span></a>
|
154 |
-
</div>
|
155 |
-
</li>
|
156 |
-
<li class="sidebar-item">
|
157 |
-
<div class="sidebar-item-container">
|
158 |
-
<a href="./app.html" class="sidebar-item-text sidebar-link">
|
159 |
-
<span class="menu-text">app</span></a>
|
160 |
-
</div>
|
161 |
-
</li>
|
162 |
-
<li class="sidebar-item">
|
163 |
-
<div class="sidebar-item-container">
|
164 |
-
<a href="./vegan_recipe_tools.html" class="sidebar-item-text sidebar-link">
|
165 |
-
<span class="menu-text">vegan_recipe_tools</span></a>
|
166 |
-
</div>
|
167 |
-
</li>
|
168 |
-
<li class="sidebar-item">
|
169 |
-
<div class="sidebar-item-container">
|
170 |
-
<a href="./ingredient_vision.html" class="sidebar-item-text sidebar-link">
|
171 |
-
<span class="menu-text">ingredient_vision</span></a>
|
172 |
-
</div>
|
173 |
-
</li>
|
174 |
-
</ul>
|
175 |
-
</div>
|
176 |
-
</nav>
|
177 |
-
<div id="quarto-sidebar-glass" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass"></div>
|
178 |
-
<!-- margin-sidebar -->
|
179 |
-
<div id="quarto-margin-sidebar" class="sidebar margin-sidebar">
|
180 |
-
<nav id="TOC" role="doc-toc" class="toc-active">
|
181 |
-
<h2 id="toc-title">On this page</h2>
|
182 |
-
|
183 |
-
<ul>
|
184 |
-
<li><a href="#install" id="toc-install" class="nav-link active" data-scroll-target="#install">Install</a></li>
|
185 |
-
<li><a href="#how-to-use" id="toc-how-to-use" class="nav-link" data-scroll-target="#how-to-use">How to use</a></li>
|
186 |
-
<li><a href="#dev-quick-start" id="toc-dev-quick-start" class="nav-link" data-scroll-target="#dev-quick-start">Dev quick-start</a></li>
|
187 |
-
<li><a href="#dependencies" id="toc-dependencies" class="nav-link" data-scroll-target="#dependencies">Dependencies</a></li>
|
188 |
-
<li><a href="#development" id="toc-development" class="nav-link" data-scroll-target="#development">Development</a></li>
|
189 |
-
<li><a href="#useful-links" id="toc-useful-links" class="nav-link" data-scroll-target="#useful-links">Useful links</a></li>
|
190 |
-
</ul>
|
191 |
-
<div class="toc-actions"><div><i class="bi bi-git"></i></div><div class="action-links"><p><a href="https://gitlab.com/animalequality/lv-recipe-chatbot/issues/new" class="toc-action">Report an issue</a></p></div></div></nav>
|
192 |
-
</div>
|
193 |
-
<!-- main -->
|
194 |
-
<main class="content" id="quarto-document-content">
|
195 |
-
|
196 |
-
<header id="title-block-header" class="quarto-title-block default">
|
197 |
-
<div class="quarto-title">
|
198 |
-
<h1 class="title">lv-recipe-chatbot</h1>
|
199 |
-
</div>
|
200 |
-
|
201 |
-
<div>
|
202 |
-
<div class="description">
|
203 |
-
An experimental Vegan recipe chatbot
|
204 |
-
</div>
|
205 |
-
</div>
|
206 |
-
|
207 |
-
|
208 |
-
<div class="quarto-title-meta">
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
-
</div>
|
214 |
-
|
215 |
-
|
216 |
-
</header>
|
217 |
-
|
218 |
-
<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
|
219 |
-
<section id="install" class="level2">
|
220 |
-
<h2 class="anchored" data-anchor-id="install">Install</h2>
|
221 |
-
<div class="sourceCode" id="cb1"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="ex">pip</span> install <span class="at">-e</span> <span class="st">'.[dev]'</span></span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
222 |
-
</section>
|
223 |
-
<section id="how-to-use" class="level2">
|
224 |
-
<h2 class="anchored" data-anchor-id="how-to-use">How to use</h2>
|
225 |
-
<div class="cell">
|
226 |
-
<div class="sourceCode cell-code" id="cb2"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb2-1"><a href="#cb2-1" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> dotenv <span class="im">import</span> load_dotenv</span>
|
227 |
-
<span id="cb2-2"><a href="#cb2-2" aria-hidden="true" tabindex="-1"></a></span>
|
228 |
-
<span id="cb2-3"><a href="#cb2-3" aria-hidden="true" tabindex="-1"></a>load_dotenv() <span class="co"># or load environment vars with different method</span></span>
|
229 |
-
<span id="cb2-4"><a href="#cb2-4" aria-hidden="true" tabindex="-1"></a></span>
|
230 |
-
<span id="cb2-5"><a href="#cb2-5" aria-hidden="true" tabindex="-1"></a>demo <span class="op">=</span> app.create_demo(app.ConversationBot())</span>
|
231 |
-
<span id="cb2-6"><a href="#cb2-6" aria-hidden="true" tabindex="-1"></a>demo.launch()</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
232 |
-
<div class="cell-output cell-output-stdout">
|
233 |
-
<pre><code>Running on local URL: http://127.0.0.1:7860
|
234 |
-
|
235 |
-
To create a public link, set `share=True` in `launch()`.</code></pre>
|
236 |
-
</div>
|
237 |
-
<div class="cell-output cell-output-display">
|
238 |
-
<div><iframe src="http://127.0.0.1:7860/" width="100%" height="500" allow="autoplay; camera; microphone; clipboard-read; clipboard-write;" frameborder="0" allowfullscreen=""></iframe></div>
|
239 |
-
</div>
|
240 |
-
<div class="cell-output cell-output-display">
|
241 |
-
<pre><code></code></pre>
|
242 |
-
</div>
|
243 |
-
</div>
|
244 |
-
<p>or</p>
|
245 |
-
<div class="sourceCode" id="cb5"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb5-1"><a href="#cb5-1" aria-hidden="true" tabindex="-1"></a><span class="ex">python3</span> app.py</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
246 |
-
</section>
|
247 |
-
<section id="dev-quick-start" class="level2">
|
248 |
-
<h2 class="anchored" data-anchor-id="dev-quick-start">Dev quick-start</h2>
|
249 |
-
<p><code>git clone</code> the repo</p>
|
250 |
-
<div class="sourceCode" id="cb6"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb6-1"><a href="#cb6-1" aria-hidden="true" tabindex="-1"></a><span class="bu">cd</span> lv-recipe-chatbot</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
251 |
-
<p>Make sure to use the version of python specified in <code>py_version.txt</code><br>
|
252 |
-
Create a virtual environment.</p>
|
253 |
-
<div class="sourceCode" id="cb7"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb7-1"><a href="#cb7-1" aria-hidden="true" tabindex="-1"></a><span class="ex">python3</span> <span class="at">-m</span> venv env</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
254 |
-
<p>Activate the env and install dependencies.</p>
|
255 |
-
<div class="sourceCode" id="cb8"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb8-1"><a href="#cb8-1" aria-hidden="true" tabindex="-1"></a><span class="bu">source</span> env/bin/activate</span>
|
256 |
-
<span id="cb8-2"><a href="#cb8-2" aria-hidden="true" tabindex="-1"></a><span class="ex">pip</span> install <span class="at">-r</span> requirements.txt</span>
|
257 |
-
<span id="cb8-3"><a href="#cb8-3" aria-hidden="true" tabindex="-1"></a><span class="ex">pip</span> install <span class="at">-r</span> requirements/dev.txt</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
258 |
-
<p>To make the Jupyter environment, git friendly: <code>nbdev_install_hooks</code><br>
|
259 |
-
If you want to render documentation locally, you will want to <a href="https://nbdev.fast.ai/tutorials/tutorial.html#install-quarto">install Quarto</a>.</p>
|
260 |
-
<p><code>nbdev_install_quarto</code></p>
|
261 |
-
<p>Put API secrets in .env</p>
|
262 |
-
<div class="sourceCode" id="cb9"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb9-1"><a href="#cb9-1" aria-hidden="true" tabindex="-1"></a><span class="fu">cp</span> .env.example .env</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
263 |
-
<p>Edit .env with your secret key(s). Only <code>OPEN_AI_KEY</code> is required.</p>
|
264 |
-
<p>Then start the Gradio demo from within the virtual environment.</p>
|
265 |
-
<div class="sourceCode" id="cb10"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb10-1"><a href="#cb10-1" aria-hidden="true" tabindex="-1"></a><span class="ex">python3</span> app.py</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
266 |
-
<p>Preview documentation</p>
|
267 |
-
<div class="sourceCode" id="cb11"><pre class="sourceCode sh code-with-copy"><code class="sourceCode bash"><span id="cb11-1"><a href="#cb11-1" aria-hidden="true" tabindex="-1"></a><span class="ex">nbdev_preview</span></span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
|
268 |
-
</section>
|
269 |
-
## Dependencies

If a new dependency is helpful for development, add it to `dev.txt`.
If it is a dependency for the app that is imported in source code, add it to `core.txt`.
Then run:

```sh
scripts/pin_requirements.sh
```

This updates `requirements.txt` to include the dependency, pinned as it is installed in the environment.
## Development

A [quick nbdev tutorial](https://nbdev.fast.ai/tutorials) is available.

Make changes in `/nbs`.
Update the package files with `nbdev_export`, then reinstall with `pip install -e '.[dev]'`.

Preview the docs with `nbdev_preview`.
Build the docs, run tests, and update the README with `nbdev_prepare`.
## Useful links

- [Task Matrix (formerly Visual ChatGPT)](https://github.com/microsoft/TaskMatrix)
- [LangChain](https://python.langchain.com/en/latest/index.html)
- [LLM Prompt Engineering](https://www.promptingguide.ai)
- [OpenAI best practices for prompts](https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-openai-api)
spaces/Artificio/AdversarialArt/src/utils.py
DELETED
@@ -1,35 +0,0 @@

```python
from PIL import Image
import torch
import torch.nn as nn
from typing import Dict, Iterable, Callable
from torch import Tensor
import glob
from tqdm import tqdm
import numpy as np
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = None


# +
class RobustModel(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x, *args, **kwargs):
        return self.model(x)


class CustomArt(torch.utils.data.Dataset):
    def __init__(self, image, transforms=None):
        self.transforms = transforms
        self.image = image
        self.mean = torch.tensor([0.4850, 0.4560, 0.4060])
        self.std = torch.tensor([0.2290, 0.2240, 0.2250])

    def __getitem__(self, idx):
        if self.transforms:
            img = self.transforms(self.image)
        return torch.as_tensor(img, dtype=torch.float)

    def __len__(self):
        return len(self.image)
```
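For orientation, a minimal sketch of how this deleted dataset wrapper might be driven; the torchvision transform pipeline, the import path, and the file name are illustrative assumptions, not part of the diff:

```python
# Hypothetical usage sketch (not from the diff): wrap one PIL image in
# CustomArt and pull a normalized tensor out of it.
from PIL import Image
import torchvision.transforms as T

from src.utils import CustomArt  # import path assumed from the repo layout

transforms = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize(mean=[0.4850, 0.4560, 0.4060], std=[0.2290, 0.2240, 0.2250]),
])

image = Image.open("example.jpg").convert("RGB")  # placeholder file name
dataset = CustomArt(image, transforms=transforms)
batch = dataset[0]   # __getitem__ ignores idx; it always transforms the one image
print(batch.shape)   # torch.Size([3, 224, 224])
```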
spaces/Asahi402/Real-CUGAN/README.md
DELETED
@@ -1,14 +0,0 @@

```md
---
title: Real CUGAN
emoji: 🐢
colorFrom: gray
colorTo: green
sdk: gradio
sdk_version: 3.6
app_file: app.py
pinned: false
license: gpl-3.0
duplicated_from: DianXian/Real-CUGAN
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
```
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/__init__.py
DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/temp_dir.py
DELETED
@@ -1,246 +0,0 @@

```python
import errno
import itertools
import logging
import os.path
import tempfile
from contextlib import ExitStack, contextmanager
from typing import Any, Dict, Generator, Optional, TypeVar, Union

from pip._internal.utils.misc import enum, rmtree

logger = logging.getLogger(__name__)

_T = TypeVar("_T", bound="TempDirectory")


# Kinds of temporary directories. Only needed for ones that are
# globally-managed.
tempdir_kinds = enum(
    BUILD_ENV="build-env",
    EPHEM_WHEEL_CACHE="ephem-wheel-cache",
    REQ_BUILD="req-build",
)


_tempdir_manager: Optional[ExitStack] = None


@contextmanager
def global_tempdir_manager() -> Generator[None, None, None]:
    global _tempdir_manager
    with ExitStack() as stack:
        old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack
        try:
            yield
        finally:
            _tempdir_manager = old_tempdir_manager


class TempDirectoryTypeRegistry:
    """Manages temp directory behavior"""

    def __init__(self) -> None:
        self._should_delete: Dict[str, bool] = {}

    def set_delete(self, kind: str, value: bool) -> None:
        """Indicate whether a TempDirectory of the given kind should be
        auto-deleted.
        """
        self._should_delete[kind] = value

    def get_delete(self, kind: str) -> bool:
        """Get configured auto-delete flag for a given TempDirectory type,
        default True.
        """
        return self._should_delete.get(kind, True)


_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None


@contextmanager
def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]:
    """Provides a scoped global tempdir registry that can be used to dictate
    whether directories should be deleted.
    """
    global _tempdir_registry
    old_tempdir_registry = _tempdir_registry
    _tempdir_registry = TempDirectoryTypeRegistry()
    try:
        yield _tempdir_registry
    finally:
        _tempdir_registry = old_tempdir_registry


class _Default:
    pass


_default = _Default()


class TempDirectory:
    """Helper class that owns and cleans up a temporary directory.

    This class can be used as a context manager or as an OO representation of a
    temporary directory.

    Attributes:
        path
            Location to the created temporary directory
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)

    Methods:
        cleanup()
            Deletes the temporary directory

    When used as a context manager, if the delete attribute is True, on
    exiting the context the temporary directory is deleted.
    """

    def __init__(
        self,
        path: Optional[str] = None,
        delete: Union[bool, None, _Default] = _default,
        kind: str = "temp",
        globally_managed: bool = False,
    ):
        super().__init__()

        if delete is _default:
            if path is not None:
                # If we were given an explicit directory, resolve delete option
                # now.
                delete = False
            else:
                # Otherwise, we wait until cleanup and see what
                # tempdir_registry says.
                delete = None

        # The only time we specify path is in for editables where it
        # is the value of the --src option.
        if path is None:
            path = self._create(kind)

        self._path = path
        self._deleted = False
        self.delete = delete
        self.kind = kind

        if globally_managed:
            assert _tempdir_manager is not None
            _tempdir_manager.enter_context(self)

    @property
    def path(self) -> str:
        assert not self._deleted, f"Attempted to access deleted path: {self._path}"
        return self._path

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.path!r}>"

    def __enter__(self: _T) -> _T:
        return self

    def __exit__(self, exc: Any, value: Any, tb: Any) -> None:
        if self.delete is not None:
            delete = self.delete
        elif _tempdir_registry:
            delete = _tempdir_registry.get_delete(self.kind)
        else:
            delete = True

        if delete:
            self.cleanup()

    def _create(self, kind: str) -> str:
        """Create a temporary directory and store its path in self.path"""
        # We realpath here because some systems have their default tmpdir
        # symlinked to another directory. This tends to confuse build
        # scripts, so we canonicalize the path by traversing potential
        # symlinks here.
        path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
        logger.debug("Created temporary directory: %s", path)
        return path

    def cleanup(self) -> None:
        """Remove the temporary directory created and reset state"""
        self._deleted = True
        if not os.path.exists(self._path):
            return
        rmtree(self._path)


class AdjacentTempDirectory(TempDirectory):
    """Helper class that creates a temporary directory adjacent to a real one.

    Attributes:
        original
            The original directory to create a temp directory for.
        path
            After calling create() or entering, contains the full
            path to the temporary directory.
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)

    """

    # The characters that may be used to name the temp directory
    # We always prepend a ~ and then rotate through these until
    # a usable name is found.
    # pkg_resources raises a different error for .dist-info folder
    # with leading '-' and invalid metadata
    LEADING_CHARS = "-~.=%0123456789"

    def __init__(self, original: str, delete: Optional[bool] = None) -> None:
        self.original = original.rstrip("/\\")
        super().__init__(delete=delete)

    @classmethod
    def _generate_names(cls, name: str) -> Generator[str, None, None]:
        """Generates a series of temporary names.

        The algorithm replaces the leading characters in the name
        with ones that are valid filesystem characters, but are not
        valid package names (for both Python and pip definitions of
        package).
        """
        for i in range(1, len(name)):
            for candidate in itertools.combinations_with_replacement(
                cls.LEADING_CHARS, i - 1
            ):
                new_name = "~" + "".join(candidate) + name[i:]
                if new_name != name:
                    yield new_name

        # If we make it this far, we will have to make a longer name
        for i in range(len(cls.LEADING_CHARS)):
            for candidate in itertools.combinations_with_replacement(
                cls.LEADING_CHARS, i
            ):
                new_name = "~" + "".join(candidate) + name
                if new_name != name:
                    yield new_name

    def _create(self, kind: str) -> str:
        root, name = os.path.split(self.original)
        for candidate in self._generate_names(name):
            path = os.path.join(root, candidate)
            try:
                os.mkdir(path)
            except OSError as ex:
                # Continue if the name exists already
                if ex.errno != errno.EEXIST:
                    raise
            else:
                path = os.path.realpath(path)
                break
        else:
            # Final fallback on the default behavior.
            path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))

        logger.debug("Created temporary directory: %s", path)
        return path
```
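The docstring above describes context-manager use; a minimal sketch under those semantics, run standalone so no registry or global manager is involved (the `kind` label is arbitrary):

```python
# Hypothetical usage sketch (not from the diff): with no tempdir_registry
# active, __exit__ falls through to delete=True and cleans up on exit.
from pip._internal.utils.temp_dir import TempDirectory

with TempDirectory(kind="demo") as tmp:
    print(tmp.path)  # e.g. /tmp/pip-demo-abc123
# the directory is removed here; accessing tmp.path now raises AssertionError
```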
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/spawn.py
DELETED
@@ -1,109 +0,0 @@

```python
"""distutils.spawn

Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""

import sys
import os
import subprocess

from distutils.errors import DistutilsExecError
from distutils.debug import DEBUG
from distutils import log


def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None):  # noqa: C901
    """Run another program, specified as a command list 'cmd', in a new process.

    'cmd' is just the argument list for the new process, ie.
    cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
    There is no way to run a program with a name different from that of its
    executable.

    If 'search_path' is true (the default), the system's executable
    search path will be used to find the program; otherwise, cmd[0]
    must be the exact path to the executable.  If 'dry_run' is true,
    the command will not actually be run.

    Raise DistutilsExecError if running the program fails in any way; just
    return on success.
    """
    # cmd is documented as a list, but just in case some code passes a tuple
    # in, protect our %-formatting code against horrible death
    cmd = list(cmd)

    log.info(subprocess.list2cmdline(cmd))
    if dry_run:
        return

    if search_path:
        executable = find_executable(cmd[0])
        if executable is not None:
            cmd[0] = executable

    env = env if env is not None else dict(os.environ)

    if sys.platform == 'darwin':
        from distutils.util import MACOSX_VERSION_VAR, get_macosx_target_ver

        macosx_target_ver = get_macosx_target_ver()
        if macosx_target_ver:
            env[MACOSX_VERSION_VAR] = macosx_target_ver

    try:
        proc = subprocess.Popen(cmd, env=env)
        proc.wait()
        exitcode = proc.returncode
    except OSError as exc:
        if not DEBUG:
            cmd = cmd[0]
        raise DistutilsExecError(
            "command {!r} failed: {}".format(cmd, exc.args[-1])
        ) from exc

    if exitcode:
        if not DEBUG:
            cmd = cmd[0]
        raise DistutilsExecError(
            "command {!r} failed with exit code {}".format(cmd, exitcode)
        )


def find_executable(executable, path=None):
    """Tries to find 'executable' in the directories listed in 'path'.

    A string listing directories separated by 'os.pathsep'; defaults to
    os.environ['PATH'].  Returns the complete filename or None if not found.
    """
    _, ext = os.path.splitext(executable)
    if (sys.platform == 'win32') and (ext != '.exe'):
        executable = executable + '.exe'

    if os.path.isfile(executable):
        return executable

    if path is None:
        path = os.environ.get('PATH', None)
        if path is None:
            try:
                path = os.confstr("CS_PATH")
            except (AttributeError, ValueError):
                # os.confstr() or CS_PATH is not available
                path = os.defpath
        # bpo-35755: Don't use os.defpath if the PATH environment variable is
        # set to an empty string

    # PATH='' doesn't match, whereas PATH=':' looks in the current directory
    if not path:
        return None

    paths = path.split(os.pathsep)
    for p in paths:
        f = os.path.join(p, executable)
        if os.path.isfile(f):
            # the file exists, we have a shot at spawn working
            return f
    return None
```
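A minimal sketch of the two public functions in the file above, as described by their docstrings (the choice of `python3 --version` is illustrative):

```python
# Hypothetical usage sketch (not from the diff): resolve an executable on
# PATH, then run it, letting DistutilsExecError signal failure.
from distutils.spawn import find_executable, spawn
from distutils.errors import DistutilsExecError

exe = find_executable("python3")   # full path, or None if not on PATH
if exe is not None:
    try:
        spawn([exe, "--version"])  # raises DistutilsExecError on non-zero exit
    except DistutilsExecError as err:
        print("spawn failed:", err)
```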
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/setupcfg.py
DELETED
@@ -1,762 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Load setuptools configuration from ``setup.cfg`` files.
|
3 |
-
|
4 |
-
**API will be made private in the future**
|
5 |
-
"""
|
6 |
-
import os
|
7 |
-
|
8 |
-
import contextlib
|
9 |
-
import functools
|
10 |
-
import warnings
|
11 |
-
from collections import defaultdict
|
12 |
-
from functools import partial
|
13 |
-
from functools import wraps
|
14 |
-
from typing import (TYPE_CHECKING, Callable, Any, Dict, Generic, Iterable, List,
|
15 |
-
Optional, Tuple, TypeVar, Union)
|
16 |
-
|
17 |
-
from distutils.errors import DistutilsOptionError, DistutilsFileError
|
18 |
-
from setuptools.extern.packaging.requirements import Requirement, InvalidRequirement
|
19 |
-
from setuptools.extern.packaging.version import Version, InvalidVersion
|
20 |
-
from setuptools.extern.packaging.specifiers import SpecifierSet
|
21 |
-
from setuptools._deprecation_warning import SetuptoolsDeprecationWarning
|
22 |
-
|
23 |
-
from . import expand
|
24 |
-
|
25 |
-
if TYPE_CHECKING:
|
26 |
-
from setuptools.dist import Distribution # noqa
|
27 |
-
from distutils.dist import DistributionMetadata # noqa
|
28 |
-
|
29 |
-
_Path = Union[str, os.PathLike]
|
30 |
-
SingleCommandOptions = Dict["str", Tuple["str", Any]]
|
31 |
-
"""Dict that associate the name of the options of a particular command to a
|
32 |
-
tuple. The first element of the tuple indicates the origin of the option value
|
33 |
-
(e.g. the name of the configuration file where it was read from),
|
34 |
-
while the second element of the tuple is the option value itself
|
35 |
-
"""
|
36 |
-
AllCommandOptions = Dict["str", SingleCommandOptions] # cmd name => its options
|
37 |
-
Target = TypeVar("Target", bound=Union["Distribution", "DistributionMetadata"])
|
38 |
-
|
39 |
-
|
40 |
-
def read_configuration(
|
41 |
-
filepath: _Path,
|
42 |
-
find_others=False,
|
43 |
-
ignore_option_errors=False
|
44 |
-
) -> dict:
|
45 |
-
"""Read given configuration file and returns options from it as a dict.
|
46 |
-
|
47 |
-
:param str|unicode filepath: Path to configuration file
|
48 |
-
to get options from.
|
49 |
-
|
50 |
-
:param bool find_others: Whether to search for other configuration files
|
51 |
-
which could be on in various places.
|
52 |
-
|
53 |
-
:param bool ignore_option_errors: Whether to silently ignore
|
54 |
-
options, values of which could not be resolved (e.g. due to exceptions
|
55 |
-
in directives such as file:, attr:, etc.).
|
56 |
-
If False exceptions are propagated as expected.
|
57 |
-
|
58 |
-
:rtype: dict
|
59 |
-
"""
|
60 |
-
from setuptools.dist import Distribution
|
61 |
-
|
62 |
-
dist = Distribution()
|
63 |
-
filenames = dist.find_config_files() if find_others else []
|
64 |
-
handlers = _apply(dist, filepath, filenames, ignore_option_errors)
|
65 |
-
return configuration_to_dict(handlers)
|
66 |
-
|
67 |
-
|
68 |
-
def apply_configuration(dist: "Distribution", filepath: _Path) -> "Distribution":
|
69 |
-
"""Apply the configuration from a ``setup.cfg`` file into an existing
|
70 |
-
distribution object.
|
71 |
-
"""
|
72 |
-
_apply(dist, filepath)
|
73 |
-
dist._finalize_requires()
|
74 |
-
return dist
|
75 |
-
|
76 |
-
|
77 |
-
def _apply(
|
78 |
-
dist: "Distribution", filepath: _Path,
|
79 |
-
other_files: Iterable[_Path] = (),
|
80 |
-
ignore_option_errors: bool = False,
|
81 |
-
) -> Tuple["ConfigHandler", ...]:
|
82 |
-
"""Read configuration from ``filepath`` and applies to the ``dist`` object."""
|
83 |
-
from setuptools.dist import _Distribution
|
84 |
-
|
85 |
-
filepath = os.path.abspath(filepath)
|
86 |
-
|
87 |
-
if not os.path.isfile(filepath):
|
88 |
-
raise DistutilsFileError('Configuration file %s does not exist.' % filepath)
|
89 |
-
|
90 |
-
current_directory = os.getcwd()
|
91 |
-
os.chdir(os.path.dirname(filepath))
|
92 |
-
filenames = [*other_files, filepath]
|
93 |
-
|
94 |
-
try:
|
95 |
-
_Distribution.parse_config_files(dist, filenames=filenames)
|
96 |
-
handlers = parse_configuration(
|
97 |
-
dist, dist.command_options, ignore_option_errors=ignore_option_errors
|
98 |
-
)
|
99 |
-
dist._finalize_license_files()
|
100 |
-
finally:
|
101 |
-
os.chdir(current_directory)
|
102 |
-
|
103 |
-
return handlers
|
104 |
-
|
105 |
-
|
106 |
-
def _get_option(target_obj: Target, key: str):
|
107 |
-
"""
|
108 |
-
Given a target object and option key, get that option from
|
109 |
-
the target object, either through a get_{key} method or
|
110 |
-
from an attribute directly.
|
111 |
-
"""
|
112 |
-
getter_name = 'get_{key}'.format(**locals())
|
113 |
-
by_attribute = functools.partial(getattr, target_obj, key)
|
114 |
-
getter = getattr(target_obj, getter_name, by_attribute)
|
115 |
-
return getter()
|
116 |
-
|
117 |
-
|
118 |
-
def configuration_to_dict(handlers: Tuple["ConfigHandler", ...]) -> dict:
|
119 |
-
"""Returns configuration data gathered by given handlers as a dict.
|
120 |
-
|
121 |
-
:param list[ConfigHandler] handlers: Handlers list,
|
122 |
-
usually from parse_configuration()
|
123 |
-
|
124 |
-
:rtype: dict
|
125 |
-
"""
|
126 |
-
config_dict: dict = defaultdict(dict)
|
127 |
-
|
128 |
-
for handler in handlers:
|
129 |
-
for option in handler.set_options:
|
130 |
-
value = _get_option(handler.target_obj, option)
|
131 |
-
config_dict[handler.section_prefix][option] = value
|
132 |
-
|
133 |
-
return config_dict
|
134 |
-
|
135 |
-
|
136 |
-
def parse_configuration(
|
137 |
-
distribution: "Distribution",
|
138 |
-
command_options: AllCommandOptions,
|
139 |
-
ignore_option_errors=False
|
140 |
-
) -> Tuple["ConfigMetadataHandler", "ConfigOptionsHandler"]:
|
141 |
-
"""Performs additional parsing of configuration options
|
142 |
-
for a distribution.
|
143 |
-
|
144 |
-
Returns a list of used option handlers.
|
145 |
-
|
146 |
-
:param Distribution distribution:
|
147 |
-
:param dict command_options:
|
148 |
-
:param bool ignore_option_errors: Whether to silently ignore
|
149 |
-
options, values of which could not be resolved (e.g. due to exceptions
|
150 |
-
in directives such as file:, attr:, etc.).
|
151 |
-
If False exceptions are propagated as expected.
|
152 |
-
:rtype: list
|
153 |
-
"""
|
154 |
-
with expand.EnsurePackagesDiscovered(distribution) as ensure_discovered:
|
155 |
-
options = ConfigOptionsHandler(
|
156 |
-
distribution,
|
157 |
-
command_options,
|
158 |
-
ignore_option_errors,
|
159 |
-
ensure_discovered,
|
160 |
-
)
|
161 |
-
|
162 |
-
options.parse()
|
163 |
-
if not distribution.package_dir:
|
164 |
-
distribution.package_dir = options.package_dir # Filled by `find_packages`
|
165 |
-
|
166 |
-
meta = ConfigMetadataHandler(
|
167 |
-
distribution.metadata,
|
168 |
-
command_options,
|
169 |
-
ignore_option_errors,
|
170 |
-
ensure_discovered,
|
171 |
-
distribution.package_dir,
|
172 |
-
distribution.src_root,
|
173 |
-
)
|
174 |
-
meta.parse()
|
175 |
-
|
176 |
-
return meta, options
|
177 |
-
|
178 |
-
|
179 |
-
def _warn_accidental_env_marker_misconfig(label: str, orig_value: str, parsed: list):
|
180 |
-
"""Because users sometimes misinterpret this configuration:
|
181 |
-
|
182 |
-
[options.extras_require]
|
183 |
-
foo = bar;python_version<"4"
|
184 |
-
|
185 |
-
It looks like one requirement with an environment marker
|
186 |
-
but because there is no newline, it's parsed as two requirements
|
187 |
-
with a semicolon as separator.
|
188 |
-
|
189 |
-
Therefore, if:
|
190 |
-
* input string does not contain a newline AND
|
191 |
-
* parsed result contains two requirements AND
|
192 |
-
* parsing of the two parts from the result ("<first>;<second>")
|
193 |
-
leads in a valid Requirement with a valid marker
|
194 |
-
a UserWarning is shown to inform the user about the possible problem.
|
195 |
-
"""
|
196 |
-
if "\n" in orig_value or len(parsed) != 2:
|
197 |
-
return
|
198 |
-
|
199 |
-
with contextlib.suppress(InvalidRequirement):
|
200 |
-
original_requirements_str = ";".join(parsed)
|
201 |
-
req = Requirement(original_requirements_str)
|
202 |
-
if req.marker is not None:
|
203 |
-
msg = (
|
204 |
-
f"One of the parsed requirements in `{label}` "
|
205 |
-
f"looks like a valid environment marker: '{parsed[1]}'\n"
|
206 |
-
"Make sure that the config is correct and check "
|
207 |
-
"https://setuptools.pypa.io/en/latest/userguide/declarative_config.html#opt-2" # noqa: E501
|
208 |
-
)
|
209 |
-
warnings.warn(msg, UserWarning)
|
210 |
-
|
211 |
-
|
212 |
-
class ConfigHandler(Generic[Target]):
|
213 |
-
"""Handles metadata supplied in configuration files."""
|
214 |
-
|
215 |
-
section_prefix: str
|
216 |
-
"""Prefix for config sections handled by this handler.
|
217 |
-
Must be provided by class heirs.
|
218 |
-
|
219 |
-
"""
|
220 |
-
|
221 |
-
aliases: Dict[str, str] = {}
|
222 |
-
"""Options aliases.
|
223 |
-
For compatibility with various packages. E.g.: d2to1 and pbr.
|
224 |
-
Note: `-` in keys is replaced with `_` by config parser.
|
225 |
-
|
226 |
-
"""
|
227 |
-
|
228 |
-
def __init__(
|
229 |
-
self,
|
230 |
-
target_obj: Target,
|
231 |
-
options: AllCommandOptions,
|
232 |
-
ignore_option_errors,
|
233 |
-
ensure_discovered: expand.EnsurePackagesDiscovered,
|
234 |
-
):
|
235 |
-
sections: AllCommandOptions = {}
|
236 |
-
|
237 |
-
section_prefix = self.section_prefix
|
238 |
-
for section_name, section_options in options.items():
|
239 |
-
if not section_name.startswith(section_prefix):
|
240 |
-
continue
|
241 |
-
|
242 |
-
section_name = section_name.replace(section_prefix, '').strip('.')
|
243 |
-
sections[section_name] = section_options
|
244 |
-
|
245 |
-
self.ignore_option_errors = ignore_option_errors
|
246 |
-
self.target_obj = target_obj
|
247 |
-
self.sections = sections
|
248 |
-
self.set_options: List[str] = []
|
249 |
-
self.ensure_discovered = ensure_discovered
|
250 |
-
|
251 |
-
@property
|
252 |
-
def parsers(self):
|
253 |
-
"""Metadata item name to parser function mapping."""
|
254 |
-
raise NotImplementedError(
|
255 |
-
'%s must provide .parsers property' % self.__class__.__name__
|
256 |
-
)
|
257 |
-
|
258 |
-
def __setitem__(self, option_name, value):
|
259 |
-
unknown = tuple()
|
260 |
-
target_obj = self.target_obj
|
261 |
-
|
262 |
-
# Translate alias into real name.
|
263 |
-
option_name = self.aliases.get(option_name, option_name)
|
264 |
-
|
265 |
-
current_value = getattr(target_obj, option_name, unknown)
|
266 |
-
|
267 |
-
if current_value is unknown:
|
268 |
-
raise KeyError(option_name)
|
269 |
-
|
270 |
-
if current_value:
|
271 |
-
# Already inhabited. Skipping.
|
272 |
-
return
|
273 |
-
|
274 |
-
skip_option = False
|
275 |
-
parser = self.parsers.get(option_name)
|
276 |
-
if parser:
|
277 |
-
try:
|
278 |
-
value = parser(value)
|
279 |
-
|
280 |
-
except Exception:
|
281 |
-
skip_option = True
|
282 |
-
if not self.ignore_option_errors:
|
283 |
-
raise
|
284 |
-
|
285 |
-
if skip_option:
|
286 |
-
return
|
287 |
-
|
288 |
-
setter = getattr(target_obj, 'set_%s' % option_name, None)
|
289 |
-
if setter is None:
|
290 |
-
setattr(target_obj, option_name, value)
|
291 |
-
else:
|
292 |
-
setter(value)
|
293 |
-
|
294 |
-
self.set_options.append(option_name)
|
295 |
-
|
296 |
-
@classmethod
|
297 |
-
def _parse_list(cls, value, separator=','):
|
298 |
-
"""Represents value as a list.
|
299 |
-
|
300 |
-
Value is split either by separator (defaults to comma) or by lines.
|
301 |
-
|
302 |
-
:param value:
|
303 |
-
:param separator: List items separator character.
|
304 |
-
:rtype: list
|
305 |
-
"""
|
306 |
-
if isinstance(value, list): # _get_parser_compound case
|
307 |
-
return value
|
308 |
-
|
309 |
-
if '\n' in value:
|
310 |
-
value = value.splitlines()
|
311 |
-
else:
|
312 |
-
value = value.split(separator)
|
313 |
-
|
314 |
-
return [chunk.strip() for chunk in value if chunk.strip()]
|
315 |
-
|
316 |
-
@classmethod
|
317 |
-
def _parse_dict(cls, value):
|
318 |
-
"""Represents value as a dict.
|
319 |
-
|
320 |
-
:param value:
|
321 |
-
:rtype: dict
|
322 |
-
"""
|
323 |
-
separator = '='
|
324 |
-
result = {}
|
325 |
-
for line in cls._parse_list(value):
|
326 |
-
key, sep, val = line.partition(separator)
|
327 |
-
if sep != separator:
|
328 |
-
raise DistutilsOptionError(
|
329 |
-
'Unable to parse option value to dict: %s' % value
|
330 |
-
)
|
331 |
-
result[key.strip()] = val.strip()
|
332 |
-
|
333 |
-
return result
|
334 |
-
|
335 |
-
@classmethod
|
336 |
-
def _parse_bool(cls, value):
|
337 |
-
"""Represents value as boolean.
|
338 |
-
|
339 |
-
:param value:
|
340 |
-
:rtype: bool
|
341 |
-
"""
|
342 |
-
value = value.lower()
|
343 |
-
return value in ('1', 'true', 'yes')
|
344 |
-
|
345 |
-
@classmethod
|
346 |
-
def _exclude_files_parser(cls, key):
|
347 |
-
"""Returns a parser function to make sure field inputs
|
348 |
-
are not files.
|
349 |
-
|
350 |
-
Parses a value after getting the key so error messages are
|
351 |
-
more informative.
|
352 |
-
|
353 |
-
:param key:
|
354 |
-
:rtype: callable
|
355 |
-
"""
|
356 |
-
|
357 |
-
def parser(value):
|
358 |
-
exclude_directive = 'file:'
|
359 |
-
if value.startswith(exclude_directive):
|
360 |
-
raise ValueError(
|
361 |
-
'Only strings are accepted for the {0} field, '
|
362 |
-
'files are not accepted'.format(key)
|
363 |
-
)
|
364 |
-
return value
|
365 |
-
|
366 |
-
return parser
|
367 |
-
|
368 |
-
@classmethod
|
369 |
-
def _parse_file(cls, value, root_dir: _Path):
|
370 |
-
"""Represents value as a string, allowing including text
|
371 |
-
from nearest files using `file:` directive.
|
372 |
-
|
373 |
-
Directive is sandboxed and won't reach anything outside
|
374 |
-
directory with setup.py.
|
375 |
-
|
376 |
-
Examples:
|
377 |
-
file: README.rst, CHANGELOG.md, src/file.txt
|
378 |
-
|
379 |
-
:param str value:
|
380 |
-
:rtype: str
|
381 |
-
"""
|
382 |
-
include_directive = 'file:'
|
383 |
-
|
384 |
-
if not isinstance(value, str):
|
385 |
-
return value
|
386 |
-
|
387 |
-
if not value.startswith(include_directive):
|
388 |
-
return value
|
389 |
-
|
390 |
-
spec = value[len(include_directive) :]
|
391 |
-
filepaths = (path.strip() for path in spec.split(','))
|
392 |
-
return expand.read_files(filepaths, root_dir)
|
393 |
-
|
394 |
-
def _parse_attr(self, value, package_dir, root_dir: _Path):
|
395 |
-
"""Represents value as a module attribute.
|
396 |
-
|
397 |
-
Examples:
|
398 |
-
attr: package.attr
|
399 |
-
attr: package.module.attr
|
400 |
-
|
401 |
-
:param str value:
|
402 |
-
:rtype: str
|
403 |
-
"""
|
404 |
-
attr_directive = 'attr:'
|
405 |
-
if not value.startswith(attr_directive):
|
406 |
-
return value
|
407 |
-
|
408 |
-
attr_desc = value.replace(attr_directive, '')
|
409 |
-
|
410 |
-
# Make sure package_dir is populated correctly, so `attr:` directives can work
|
411 |
-
package_dir.update(self.ensure_discovered.package_dir)
|
412 |
-
return expand.read_attr(attr_desc, package_dir, root_dir)
|
413 |
-
|
414 |
-
@classmethod
|
415 |
-
def _get_parser_compound(cls, *parse_methods):
|
416 |
-
"""Returns parser function to represents value as a list.
|
417 |
-
|
418 |
-
Parses a value applying given methods one after another.
|
419 |
-
|
420 |
-
:param parse_methods:
|
421 |
-
:rtype: callable
|
422 |
-
"""
|
423 |
-
|
424 |
-
def parse(value):
|
425 |
-
parsed = value
|
426 |
-
|
427 |
-
for method in parse_methods:
|
428 |
-
parsed = method(parsed)
|
429 |
-
|
430 |
-
return parsed
|
431 |
-
|
432 |
-
return parse
|
433 |
-
|
434 |
-
@classmethod
|
435 |
-
def _parse_section_to_dict_with_key(cls, section_options, values_parser):
|
436 |
-
"""Parses section options into a dictionary.
|
437 |
-
|
438 |
-
Applies a given parser to each option in a section.
|
439 |
-
|
440 |
-
:param dict section_options:
|
441 |
-
:param callable values_parser: function with 2 args corresponding to key, value
|
442 |
-
:rtype: dict
|
443 |
-
"""
|
444 |
-
value = {}
|
445 |
-
for key, (_, val) in section_options.items():
|
446 |
-
value[key] = values_parser(key, val)
|
447 |
-
return value
|
448 |
-
|
449 |
-
@classmethod
|
450 |
-
def _parse_section_to_dict(cls, section_options, values_parser=None):
|
451 |
-
"""Parses section options into a dictionary.
|
452 |
-
|
453 |
-
Optionally applies a given parser to each value.
|
454 |
-
|
455 |
-
:param dict section_options:
|
456 |
-
:param callable values_parser: function with 1 arg corresponding to option value
|
457 |
-
:rtype: dict
|
458 |
-
"""
|
459 |
-
parser = (lambda _, v: values_parser(v)) if values_parser else (lambda _, v: v)
|
460 |
-
return cls._parse_section_to_dict_with_key(section_options, parser)
|
461 |
-
|
462 |
-
def parse_section(self, section_options):
|
463 |
-
"""Parses configuration file section.
|
464 |
-
|
465 |
-
:param dict section_options:
|
466 |
-
"""
|
467 |
-
for (name, (_, value)) in section_options.items():
|
468 |
-
with contextlib.suppress(KeyError):
|
469 |
-
# Keep silent for a new option may appear anytime.
|
470 |
-
self[name] = value
|
471 |
-
|
472 |
-
def parse(self):
|
473 |
-
"""Parses configuration file items from one
|
474 |
-
or more related sections.
|
475 |
-
|
476 |
-
"""
|
477 |
-
for section_name, section_options in self.sections.items():
|
478 |
-
|
479 |
-
method_postfix = ''
|
480 |
-
if section_name: # [section.option] variant
|
481 |
-
method_postfix = '_%s' % section_name
|
482 |
-
|
483 |
-
section_parser_method: Optional[Callable] = getattr(
|
484 |
-
self,
|
485 |
-
# Dots in section names are translated into dunderscores.
|
486 |
-
('parse_section%s' % method_postfix).replace('.', '__'),
|
487 |
-
None,
|
488 |
-
)
|
489 |
-
|
490 |
-
if section_parser_method is None:
|
491 |
-
raise DistutilsOptionError(
|
492 |
-
'Unsupported distribution option section: [%s.%s]'
|
493 |
-
% (self.section_prefix, section_name)
|
494 |
-
)
|
495 |
-
|
496 |
-
section_parser_method(section_options)
|
497 |
-
|
498 |
-
def _deprecated_config_handler(self, func, msg, warning_class):
|
499 |
-
"""this function will wrap around parameters that are deprecated
|
500 |
-
|
501 |
-
:param msg: deprecation message
|
502 |
-
:param warning_class: class of warning exception to be raised
|
503 |
-
:param func: function to be wrapped around
|
504 |
-
"""
|
505 |
-
|
506 |
-
@wraps(func)
|
507 |
-
def config_handler(*args, **kwargs):
|
508 |
-
warnings.warn(msg, warning_class)
|
509 |
-
return func(*args, **kwargs)
|
510 |
-
|
511 |
-
return config_handler
|
512 |
-
|
513 |
-
|
514 |
-
class ConfigMetadataHandler(ConfigHandler["DistributionMetadata"]):
|
515 |
-
|
516 |
-
section_prefix = 'metadata'
|
517 |
-
|
518 |
-
aliases = {
|
519 |
-
'home_page': 'url',
|
520 |
-
'summary': 'description',
|
521 |
-
'classifier': 'classifiers',
|
522 |
-
'platform': 'platforms',
|
523 |
-
}
|
524 |
-
|
525 |
-
strict_mode = False
|
526 |
-
"""We need to keep it loose, to be partially compatible with
|
527 |
-
`pbr` and `d2to1` packages which also uses `metadata` section.
|
528 |
-
|
529 |
-
"""
|
530 |
-
|
531 |
-
def __init__(
|
532 |
-
self,
|
533 |
-
target_obj: "DistributionMetadata",
|
534 |
-
options: AllCommandOptions,
|
535 |
-
ignore_option_errors: bool,
|
536 |
-
ensure_discovered: expand.EnsurePackagesDiscovered,
|
537 |
-
package_dir: Optional[dict] = None,
|
538 |
-
root_dir: _Path = os.curdir
|
539 |
-
):
|
540 |
-
super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
|
541 |
-
self.package_dir = package_dir
|
542 |
-
self.root_dir = root_dir
|
543 |
-
|
544 |
-
@property
|
545 |
-
def parsers(self):
|
546 |
-
"""Metadata item name to parser function mapping."""
|
547 |
-
parse_list = self._parse_list
|
548 |
-
parse_file = partial(self._parse_file, root_dir=self.root_dir)
|
549 |
-
parse_dict = self._parse_dict
|
550 |
-
exclude_files_parser = self._exclude_files_parser
|
551 |
-
|
552 |
-
return {
|
553 |
-
'platforms': parse_list,
|
554 |
-
'keywords': parse_list,
|
555 |
-
'provides': parse_list,
|
556 |
-
'requires': self._deprecated_config_handler(
|
557 |
-
parse_list,
|
558 |
-
"The requires parameter is deprecated, please use "
|
559 |
-
"install_requires for runtime dependencies.",
|
560 |
-
SetuptoolsDeprecationWarning,
|
561 |
-
),
|
562 |
-
'obsoletes': parse_list,
|
563 |
-
'classifiers': self._get_parser_compound(parse_file, parse_list),
|
564 |
-
'license': exclude_files_parser('license'),
|
565 |
-
'license_file': self._deprecated_config_handler(
|
566 |
-
exclude_files_parser('license_file'),
|
567 |
-
"The license_file parameter is deprecated, "
|
568 |
-
"use license_files instead.",
|
569 |
-
SetuptoolsDeprecationWarning,
|
570 |
-
),
|
571 |
-
'license_files': parse_list,
|
572 |
-
'description': parse_file,
|
573 |
-
'long_description': parse_file,
|
574 |
-
'version': self._parse_version,
|
575 |
-
'project_urls': parse_dict,
|
576 |
-
}
|
577 |
-
|
578 |
-
def _parse_version(self, value):
|
579 |
-
"""Parses `version` option value.
|
580 |
-
|
581 |
-
:param value:
|
582 |
-
:rtype: str
|
583 |
-
|
584 |
-
"""
|
585 |
-
version = self._parse_file(value, self.root_dir)
|
586 |
-
|
587 |
-
if version != value:
|
588 |
-
version = version.strip()
|
589 |
-
# Be strict about versions loaded from file because it's easy to
|
590 |
-
# accidentally include newlines and other unintended content
|
591 |
-
try:
|
592 |
-
Version(version)
|
593 |
-
except InvalidVersion:
|
594 |
-
tmpl = (
|
595 |
-
'Version loaded from {value} does not '
|
596 |
-
'comply with PEP 440: {version}'
|
597 |
-
)
|
598 |
-
raise DistutilsOptionError(tmpl.format(**locals()))
|
599 |
-
|
600 |
-
return version
|
601 |
-
|
602 |
-
return expand.version(self._parse_attr(value, self.package_dir, self.root_dir))
|
603 |
-
|
604 |
-
|
605 |
-
class ConfigOptionsHandler(ConfigHandler["Distribution"]):
|
606 |
-
|
607 |
-
section_prefix = 'options'
|
608 |
-
|
609 |
-
def __init__(
|
610 |
-
self,
|
611 |
-
target_obj: "Distribution",
|
612 |
-
options: AllCommandOptions,
|
613 |
-
ignore_option_errors: bool,
|
614 |
-
ensure_discovered: expand.EnsurePackagesDiscovered,
|
615 |
-
):
|
616 |
-
super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
|
617 |
-
self.root_dir = target_obj.src_root
|
618 |
-
self.package_dir: Dict[str, str] = {} # To be filled by `find_packages`
|
619 |
-
|
620 |
-
@classmethod
|
621 |
-
def _parse_list_semicolon(cls, value):
|
622 |
-
return cls._parse_list(value, separator=';')
|
623 |
-
|
624 |
-
def _parse_file_in_root(self, value):
|
625 |
-
return self._parse_file(value, root_dir=self.root_dir)
|
626 |
-
|
627 |
-
def _parse_requirements_list(self, label: str, value: str):
|
628 |
-
# Parse a requirements list, either by reading in a `file:`, or a list.
|
629 |
-
parsed = self._parse_list_semicolon(self._parse_file_in_root(value))
|
630 |
-
_warn_accidental_env_marker_misconfig(label, value, parsed)
|
631 |
-
# Filter it to only include lines that are not comments. `parse_list`
|
632 |
-
# will have stripped each line and filtered out empties.
|
633 |
-
return [line for line in parsed if not line.startswith("#")]
|
634 |
-
|
635 |
-
@property
|
636 |
-
def parsers(self):
|
637 |
-
"""Metadata item name to parser function mapping."""
|
638 |
-
parse_list = self._parse_list
|
639 |
-
parse_bool = self._parse_bool
|
640 |
-
parse_dict = self._parse_dict
|
641 |
-
parse_cmdclass = self._parse_cmdclass
|
642 |
-
|
643 |
-
return {
|
644 |
-
'zip_safe': parse_bool,
|
645 |
-
'include_package_data': parse_bool,
|
646 |
-
'package_dir': parse_dict,
|
647 |
-
'scripts': parse_list,
|
648 |
-
'eager_resources': parse_list,
|
649 |
-
'dependency_links': parse_list,
|
650 |
-
'namespace_packages': self._deprecated_config_handler(
|
651 |
-
parse_list,
|
652 |
-
"The namespace_packages parameter is deprecated, "
|
653 |
-
"consider using implicit namespaces instead (PEP 420).",
|
654 |
-
SetuptoolsDeprecationWarning,
|
655 |
-
),
|
656 |
-
'install_requires': partial(
|
657 |
-
self._parse_requirements_list, "install_requires"
|
658 |
-
),
|
659 |
-
'setup_requires': self._parse_list_semicolon,
|
660 |
-
'tests_require': self._parse_list_semicolon,
|
661 |
-
'packages': self._parse_packages,
|
662 |
-
'entry_points': self._parse_file_in_root,
|
663 |
-
'py_modules': parse_list,
|
664 |
-
-            'python_requires': SpecifierSet,
-            'cmdclass': parse_cmdclass,
-        }
-
-    def _parse_cmdclass(self, value):
-        package_dir = self.ensure_discovered.package_dir
-        return expand.cmdclass(self._parse_dict(value), package_dir, self.root_dir)
-
-    def _parse_packages(self, value):
-        """Parses `packages` option value.
-
-        :param value:
-        :rtype: list
-        """
-        find_directives = ['find:', 'find_namespace:']
-        trimmed_value = value.strip()
-
-        if trimmed_value not in find_directives:
-            return self._parse_list(value)
-
-        # Read function arguments from a dedicated section.
-        find_kwargs = self.parse_section_packages__find(
-            self.sections.get('packages.find', {})
-        )
-
-        find_kwargs.update(
-            namespaces=(trimmed_value == find_directives[1]),
-            root_dir=self.root_dir,
-            fill_package_dir=self.package_dir,
-        )
-
-        return expand.find_packages(**find_kwargs)
-
-    def parse_section_packages__find(self, section_options):
-        """Parses `packages.find` configuration file section.
-
-        To be used in conjunction with _parse_packages().
-
-        :param dict section_options:
-        """
-        section_data = self._parse_section_to_dict(section_options, self._parse_list)
-
-        valid_keys = ['where', 'include', 'exclude']
-
-        find_kwargs = dict(
-            [(k, v) for k, v in section_data.items() if k in valid_keys and v]
-        )
-
-        where = find_kwargs.get('where')
-        if where is not None:
-            find_kwargs['where'] = where[0]  # cast list to single val
-
-        return find_kwargs
-
-    def parse_section_entry_points(self, section_options):
-        """Parses `entry_points` configuration file section.
-
-        :param dict section_options:
-        """
-        parsed = self._parse_section_to_dict(section_options, self._parse_list)
-        self['entry_points'] = parsed
-
-    def _parse_package_data(self, section_options):
-        package_data = self._parse_section_to_dict(section_options, self._parse_list)
-        return expand.canonic_package_data(package_data)
-
-    def parse_section_package_data(self, section_options):
-        """Parses `package_data` configuration file section.
-
-        :param dict section_options:
-        """
-        self['package_data'] = self._parse_package_data(section_options)
-
-    def parse_section_exclude_package_data(self, section_options):
-        """Parses `exclude_package_data` configuration file section.
-
-        :param dict section_options:
-        """
-        self['exclude_package_data'] = self._parse_package_data(section_options)
-
-    def parse_section_extras_require(self, section_options):
-        """Parses `extras_require` configuration file section.
-
-        :param dict section_options:
-        """
-        parsed = self._parse_section_to_dict_with_key(
-            section_options,
-            lambda k, v: self._parse_requirements_list(f"extras_require[{k}]", v)
-        )
-
-        self['extras_require'] = parsed
-
-    def parse_section_data_files(self, section_options):
-        """Parses `data_files` configuration file section.
-
-        :param dict section_options:
-        """
-        parsed = self._parse_section_to_dict(section_options, self._parse_list)
-        self['data_files'] = expand.canonic_data_files(parsed, self.root_dir)
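The `_parse_packages` method above is what backs the `packages = find:` and `packages = find_namespace:` directives in setup.cfg: instead of a literal package list, setuptools reads `where`/`include`/`exclude` from the `[options.packages.find]` section and forwards them to its package-discovery helper. A minimal sketch of the equivalent direct call, assuming a hypothetical `src/` layout with a package named `mypkg`:

```python
# Sketch only: what `packages = find:` plus [options.packages.find] expands
# to, using the public setuptools helpers. The src/ layout and "mypkg" names
# are hypothetical.
from setuptools import find_namespace_packages, find_packages

packages = find_packages(
    where="src",               # [options.packages.find] where = src
    include=["mypkg*"],        # include = mypkg*
    exclude=["mypkg.tests*"],  # exclude = mypkg.tests*
)
print(packages)

# `packages = find_namespace:` selects the namespace-aware variant instead.
ns_packages = find_namespace_packages(where="src")
```
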
spaces/Awesimo/jojogan/e4e/metrics/LEC.py
DELETED
@@ -1,134 +0,0 @@
-import sys
-import argparse
-import torch
-import numpy as np
-from torch.utils.data import DataLoader
-
-sys.path.append(".")
-sys.path.append("..")
-
-from configs import data_configs
-from datasets.images_dataset import ImagesDataset
-from utils.model_utils import setup_model
-
-
-class LEC:
-    def __init__(self, net, is_cars=False):
-        """
-        Latent Editing Consistency metric as proposed in the main paper.
-        :param net: e4e model loaded over the pSp framework.
-        :param is_cars: An indication as to whether or not to crop the middle of the StyleGAN's output images.
-        """
-        self.net = net
-        self.is_cars = is_cars
-
-    def _encode(self, images):
-        """
-        Encodes the given images into StyleGAN's latent space.
-        :param images: Tensor of shape NxCxHxW representing the images to be encoded.
-        :return: Tensor of shape NxKx512 representing the latent space embeddings of the given image (in W(K, *) space).
-        """
-        codes = self.net.encoder(images)
-        assert codes.ndim == 3, f"Invalid latent codes shape, should be NxKx512 but is {codes.shape}"
-        # normalize with respect to the center of an average face
-        if self.net.opts.start_from_latent_avg:
-            codes = codes + self.net.latent_avg.repeat(codes.shape[0], 1, 1)
-        return codes
-
-    def _generate(self, codes):
-        """
-        Generate the StyleGAN2 images of the given codes
-        :param codes: Tensor of shape NxKx512 representing the StyleGAN's latent codes (in W(K, *) space).
-        :return: Tensor of shape NxCxHxW representing the generated images.
-        """
-        images, _ = self.net.decoder([codes], input_is_latent=True, randomize_noise=False, return_latents=True)
-        images = self.net.face_pool(images)
-        if self.is_cars:
-            images = images[:, :, 32:224, :]
-        return images
-
-    @staticmethod
-    def _filter_outliers(arr):
-        arr = np.array(arr)
-
-        lo = np.percentile(arr, 1, interpolation="lower")
-        hi = np.percentile(arr, 99, interpolation="higher")
-        return np.extract(
-            np.logical_and(lo <= arr, arr <= hi), arr
-        )
-
-    def calculate_metric(self, data_loader, edit_function, inverse_edit_function):
-        """
-        Calculate the LEC metric score.
-        :param data_loader: An iterable that returns a tuple of (images, _), similar to the training data loader.
-        :param edit_function: A function that receives latent codes and performs a semantically meaningful edit in the
-        latent space.
-        :param inverse_edit_function: A function that receives latent codes and performs the inverse edit of the
-        `edit_function` parameter.
-        :return: The LEC metric score.
-        """
-        distances = []
-        with torch.no_grad():
-            for batch in data_loader:
-                x, _ = batch
-                inputs = x.to(device).float()
-
-                codes = self._encode(inputs)
-                edited_codes = edit_function(codes)
-                edited_image = self._generate(edited_codes)
-                edited_image_inversion_codes = self._encode(edited_image)
-                inverse_edit_codes = inverse_edit_function(edited_image_inversion_codes)
-
-                dist = (codes - inverse_edit_codes).norm(2, dim=(1, 2)).mean()
-                distances.append(dist.to("cpu").numpy())
-
-        distances = self._filter_outliers(distances)
-        return distances.mean()
-
-
-if __name__ == "__main__":
-    device = "cuda"
-
-    parser = argparse.ArgumentParser(description="LEC metric calculator")
-
-    parser.add_argument("--batch", type=int, default=8, help="batch size for the models")
-    parser.add_argument("--images_dir", type=str, default=None,
-                        help="Path to the images directory on which we calculate the LEC score")
-    parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to the model checkpoints")
-
-    args = parser.parse_args()
-    print(args)
-
-    net, opts = setup_model(args.ckpt, device)
-    dataset_args = data_configs.DATASETS[opts.dataset_type]
-    transforms_dict = dataset_args['transforms'](opts).get_transforms()
-
-    images_directory = dataset_args['test_source_root'] if args.images_dir is None else args.images_dir
-    test_dataset = ImagesDataset(source_root=images_directory,
-                                 target_root=images_directory,
-                                 source_transform=transforms_dict['transform_source'],
-                                 target_transform=transforms_dict['transform_test'],
-                                 opts=opts)
-
-    data_loader = DataLoader(test_dataset,
-                             batch_size=args.batch,
-                             shuffle=False,
-                             num_workers=2,
-                             drop_last=True)
-
-    print(f'dataset length: {len(test_dataset)}')
-
-    # In the following example, we are using an InterfaceGAN based editing to calculate the LEC metric.
-    # Change the provided example according to your domain and needs.
-    direction = torch.load('../editings/interfacegan_directions/age.pt').to(device)
-
-    def edit_func_example(codes):
-        return codes + 3 * direction
-
-
-    def inverse_edit_func_example(codes):
-        return codes - 3 * direction
-
-    lec = LEC(net, is_cars='car' in opts.dataset_type)
-    result = lec.calculate_metric(data_loader, edit_func_example, inverse_edit_func_example)
-    print(f"LEC: {result}")
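The metric implemented above reduces to a round trip through latent space: encode an image, apply an edit, synthesize, re-encode the result, undo the edit, and measure how far you land from the starting codes. The toy sketch below illustrates just that arithmetic; the `generate`/`encode` pair and the edit `direction` are stand-ins, not the real e4e encoder or StyleGAN decoder:

```python
import torch

torch.manual_seed(0)

# Stand-ins for the real pipeline (hypothetical, for illustration only):
# a perfect encoder/generator pair would give encode(generate(w)) == w,
# making LEC exactly 0; the added noise models imperfect GAN inversion.
def generate(codes):
    return codes  # pretend the "image" is the code itself

def encode(images):
    return images + 0.01 * torch.randn_like(images)  # imperfect inversion

direction = torch.randn(1, 1, 512)          # hypothetical latent edit direction
edit = lambda w: w + 3 * direction
inverse_edit = lambda w: w - 3 * direction

codes = torch.randn(4, 18, 512)              # NxKx512 codes, as in W+ space
round_trip = inverse_edit(encode(generate(edit(codes))))
lec = (codes - round_trip).norm(2, dim=(1, 2)).mean()
print(f"LEC: {lec.item():.4f}")              # near 0 for a consistent pair
```
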
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/README.md
DELETED
@@ -1,49 +0,0 @@
-
-This directory contains a few example scripts that demonstrate features of detectron2.
-
-
-* `train_net.py`
-
-An example training script that's made to train builtin models of detectron2.
-
-For usage, see [GETTING_STARTED.md](../GETTING_STARTED.md).
-
-* `plain_train_net.py`
-
-Similar to `train_net.py`, but implements a training loop instead of using `Trainer`.
-This script includes fewer features but it may be more friendly to hackers.
-
-* `benchmark.py`
-
-Benchmark the training speed, inference speed or data loading speed of a given config.
-
-Usage:
-```
-python benchmark.py --config-file config.yaml --task train/eval/data [optional DDP flags]
-```
-
-* `analyze_model.py`
-
-Analyze FLOPs, parameters, activations of a detectron2 model. See its `--help` for usage.
-
-* `visualize_json_results.py`
-
-Visualize the json instance detection/segmentation results dumped by `COCOEvaluator` or `LVISEvaluator`
-
-Usage:
-```
-python visualize_json_results.py --input x.json --output dir/ --dataset coco_2017_val
-```
-If not using a builtin dataset, you'll need your own script or modify this script.
-
-* `visualize_data.py`
-
-Visualize ground truth raw annotations or training data (after preprocessing/augmentations).
-
-Usage:
-```
-python visualize_data.py --config-file config.yaml --source annotation/dataloader --output-dir dir/ [--show]
-```
-
-NOTE: the script does not stop by itself when using `--source dataloader` because a training
-dataloader is usually infinite.
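For context, the workflow that `train_net.py` automates can be approximated with detectron2's public config and engine API. This is a hedged sketch, not the script itself; the config name and iteration count are illustrative:

```python
# Minimal sketch of the train_net.py-style workflow, assuming detectron2 is
# installed; uses only its public config/engine/model_zoo API.
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer

cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
)
cfg.DATASETS.TRAIN = ("coco_2017_train",)
cfg.SOLVER.MAX_ITER = 1000  # illustrative; real schedules are much longer

trainer = DefaultTrainer(cfg)   # the `Trainer` that plain_train_net.py avoids
trainer.resume_or_load(resume=False)
trainer.train()
```
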
spaces/Bala2-03-2003/BRAHMAMAI/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: BRAHMAMAI
-emoji: ⚡
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
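The front matter above is the standard Hugging Face Spaces configuration: `sdk: gradio` together with `app_file: app.py` tells the Space to launch a Gradio app from that file. A minimal sketch of the shape such an `app.py` takes; the echo logic is a placeholder, not the actual BRAHMAMAI app:

```python
# Minimal Gradio app of the shape a `sdk: gradio` Space expects; the echo
# function is a placeholder, not the real BRAHMAMAI application.
import gradio as gr

def respond(message: str) -> str:
    return f"You said: {message}"

demo = gr.Interface(fn=respond, inputs="text", outputs="text", title="BRAHMAMAI")

if __name__ == "__main__":
    demo.launch()
```
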
spaces/Benson/text-generation/Examples/2023 Apk Fuego Libre.md
DELETED
@@ -1,68 +0,0 @@
-<br />
-<h1>2023 Free Fire APK: Everything You Need to Know</h1>
-<p>Free Fire is a world-famous survival shooter game available on mobile devices. It is one of the most downloaded and played games on the Google Play Store and the App Store, with more than 1 billion downloads and millions of active players. In this article, we will tell you everything you need to know about Free Fire and how to download and install the latest version of the Free Fire APK on your Android device.</p>
-<h2>What is Free Fire and why is it popular?</h2>
-<p>Free Fire is a battle royale game that pits you against 49 other players on a remote island. The goal is to be the last one standing by finding weapons, items, and vehicles, and eliminating your enemies. The game has a variety of exciting game modes, characters, skins, pets, and events that keep the gameplay fresh and fun.</p>
-<h2>2023 free fire apk</h2><br /><p><b><b>Download Zip</b> → <a href="https://bltlly.com/2v6Mmr">https://bltlly.com/2v6Mmr</a></b></p><br /><br />
-<h3>What are Free Fire's features and how do you play it?</h3>
-<p>Free Fire has many features that make it stand out from other battle royale games. Here are some of them:</p>
-<h4>A survival shooter in its original form</h4>
-<p>You start the game by parachuting from a plane onto a large map. You can choose your landing spot and explore the map as you wish. You have to stay inside the safe zone, which shrinks over time, or you will take damage from the toxic gas. You also have to watch out for airdrops, which contain powerful weapons and items but also attract other players. The game has a realistic physics system that affects bullet trajectory, recoil, and vehicle handling.</p>
-<h4>10 minutes, 50 players, epic survival goodness awaits</h4>
-
-<h4>4-man squads with in-game voice chat</h4>
-<p>You can play Free Fire solo or with up to three other friends on a team. You can communicate with your teammates using the in-game voice chat feature or text messages. You can also mark locations, enemies, items, and vehicles on the map for your teammates to see. Working together with your squad can give you an edge over your enemies.</p>
-<h4>Clash Squad</h4>
-<p>Clash Squad is a fast-paced 4v4 game mode that is open 24/7. In this mode, you have to manage your economy, buy weapons, and defeat the enemy squad across a series of rounds. The first team to win four rounds wins the match. Clash Squad is a great way to test your skills and teamwork in a different setting.</p>
-<h4>Realistic and smooth graphics</h4>
-<p>Free Fire has realistic and smooth graphics that promise the optimal survival experience on mobile devices. The game has high-quality textures, lighting effects, shadows, reflections, and animations that create an immersive atmosphere. The game also runs smoothly on most devices without lag or crashes.</p>
-<h2>What is the Free Fire APK and how do you download and install it?</h2>
-<p>The Free Fire APK is an Android application package file that contains the installation files for Free Fire. You can download and install the Free Fire APK on your Android device if you want to enjoy the latest version of the game with new features, bug fixes, and improvements. Here are the steps to download and install the Free Fire APK:</p>
-<h3>What is an APK file and why do you need it?</h3>
-<p>An APK file is a compressed file that contains the code, resources, and certificates of an Android application. You can install an APK file on your Android device to run the application without using the Google Play Store. You may need to download and install an APK file if:</p>
-<p></p>
-<ul>
-<li>You want to access the latest version of an app before it is available on the Play Store.</li>
-
-<li>You want to install an app that has been removed from the Play Store for some reason.</li>
-<li>You want to install a modified or hacked version of an app that offers extra features or benefits.</li>
-</ul>
-<p>However, you should be careful when downloading and installing APK files from unknown sources, as they may contain malware or viruses that can damage your device or steal your data. You should only download APK files from trusted and official sources, such as the developer's website or a reputable third-party app store.</p>
-<h3>How to download the Free Fire APK from official sources</h3>
-<p>The best way to download the Free Fire APK is from the official website of Garena, the developer and publisher of Free Fire. You can visit the website at https://ff.garena.com/ and click the "Download" button. You will be redirected to a page where you can choose between downloading the Free Fire APK or the Free Fire OBB. The OBB file is a data file that contains additional content for the game, such as graphics, sounds, and maps. You need both files to run the game properly.</p>
-<p>You can also download the Free Fire APK from other official sources, such as:</p>
-<ul>
-<li>The official Free Fire Facebook page at https://www.facebook.com/freefireEN/</li>
-<li>The official Free Fire YouTube channel at https://www.youtube.com/channel/UCkngbNvgHvc67J4VCWj75Mw</li>
-<li>The official Free Fire Instagram account at https://www.instagram.com/freefireth_official/</li>
-</ul>
-<p>These sources often post links to download the latest version of the Free Fire APK when there is a new update or event. You can follow them to stay up to date and get notified when a new version is out.</p>
-<h3>How to install the Free Fire APK on your Android device</h3>
-<p>After downloading the Free Fire APK and OBB files, you need to install them on your Android device. Here are the steps to do so:</p>
-<ol>
-
-<li>Locate the downloaded Free Fire APK file in your device's storage and tap on it to start the installation process. Follow the on-screen instructions and grant the necessary permissions.</li>
-<li>Do not open the game yet. Locate the downloaded Free Fire OBB file in your device's storage and extract it using a file manager app. You will get a folder named "com.dts.freefireth". Copy this folder and paste it into the "Android/obb" directory in your device's storage.</li>
-<li>You can now open the game and enjoy playing the latest version of Free Fire.</li>
-</ol>
-<h2>Conclusion: Summing up the main points and some tips and tricks for playing Free Fire</h2>
-<p>In conclusion, Free Fire is an exciting survival shooter that offers a variety of game modes, characters, skins, pets, and events. You can download and install the Free Fire APK on your Android device to get access to the latest version of the game with new features, bug fixes, and improvements. However, you should be careful when downloading and installing APK files from unknown sources, as they may contain malware or viruses. You should only download APK files from trusted and official sources, such as Garena's website or social media accounts.</p>
-<p>Here are some tips and tricks for playing Free Fire:</p>
-<ul>
-<li>Choose your character wisely. Each character has a unique ability that can give you an advantage in different situations. For example, Kelly has increased running speed, Alok can create a healing aura around himself, and Chrono can create a force field that blocks damage.</li>
-<li>Use vehicles wisely. Vehicles can help you move faster or run over your enemies, but they also make you more visible and vulnerable to enemy fire. You should only use vehicles when necessary and avoid driving in open areas or near buildings.</li>
-
-<li>Use the minimap wisely. The minimap shows important information such as the location of the safe zone, airdrops, enemies, teammates, and vehicles. You should always check the minimap to stay aware of your surroundings and plan your strategy accordingly.</li>
-<li>Use the ping system wisely. The ping system can help you communicate with your teammates without using voice chat or text messages. You can ping locations, enemies, items, and vehicles on the map for your teammates to see. You can also use quick messages to convey your intentions or requests.</li>
-</ul>
-<h2>FAQ: Answering some common questions about Free Fire and the Free Fire APK</h2>
-<table>
-<tr><th>Question</th><th>Answer</th></tr>
-<tr><td>Is Free Fire free to play?</td><td>Yes, Free Fire is free to play on mobile devices. However, you can buy an in-game currency called diamonds to purchase premium items such as characters, skins, pets, and passes.</td></tr>
-<tr><td>Is Free Fire compatible with my device?</td><td>Free Fire is compatible with most Android devices that have at least 2 GB of RAM and Android 4.0.3 or higher. However, some devices may experience performance issues or crashes due to hardware limitations.</td></tr>
-<tr><td>Is Free Fire safe to download and install?</td><td>Yes, Free Fire is safe to download and install from the Google Play Store or from official sources such as Garena's website or social media accounts. However, you should be careful when downloading and installing APK files from unknown sources, as they may contain malware or viruses.</td></tr>
-<tr><td>How do I update Free Fire?</td><td>You can update Free Fire from the Google Play Store or by downloading and installing the latest version of the Free Fire APK from official sources. You should always update Free Fire to enjoy the new features, bug fixes, and improvements.</td></tr>
-
-</table></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Creality Cr Studio Descargar.md
DELETED
@@ -1,71 +0,0 @@
-
-<h1>Creality CR Studio Download: A Guide to the 3D Scanner Software</h1>
-<p>If you are looking for a way to turn real-world objects into 3D models that you can print, edit, or share, you may be interested in Creality CR Studio. This is software that works with Creality 3D scanners, such as the CR Scan 01 and CR Scan Lizard, to create high-quality scans of various objects. In this article, we will show you how to download and install Creality CR Studio, how to use it to scan and edit your models, what its features and benefits are, and how to troubleshoot some common problems. By the end of this article, you will have a better understanding of what Creality CR Studio can do for you and whether it is worth trying.</p>
-<h2>How to download and install Creality CR Studio</h2>
-<p>The first step to using Creality CR Studio is to download and install it on your computer. Here are the steps to follow:</p>
-<h2>creality cr studio download</h2><br /><p><b><b>DOWNLOAD</b> ★★★ <a href="https://bltlly.com/2v6Mmg">https://bltlly.com/2v6Mmg</a></b></p><br /><br />
-<h3>Downloading the software from the official website</h3>
-<p>You can download Creality CR Studio from the official Creality 3D website. There are different versions of the software for Windows and Mac operating systems, as well as different languages. You can choose the one that suits your needs and preferences. The latest version of the software is CR Studio 2.5.7 for Windows and CR Studio 2.2.3 for Mac. The file size is about 200 MB.</p>
-<h3>Installing the software on Windows or Mac</h3>
-<p>After downloading the software, you need to install it on your computer. The installation process is simple and straightforward. Just follow the on-screen instructions and accept the terms and conditions. The installation may take a few minutes depending on your computer's speed.</p>
-<h3>Connecting the 3D scanner to the software</h3>
-
-<h2>How to use Creality CR Studio</h2>
-<p>Now that you have downloaded and installed Creality CR Studio and connected your 3D scanner, you are ready to start scanning objects. Here are the steps to follow:</p>
-<h3>Choosing the scan mode: handheld or turntable</h3>
-<p>Creality CR Studio offers two scan modes: handheld and turntable. - Handheld mode lets you scan objects by moving the scanner around them. This mode is suitable for scanning large, complex, or irregular objects that cannot fit on a turntable. You can also scan objects in different environments, such as outdoors or indoors. - Turntable mode lets you scan objects by placing them on a rotating platform. This mode is suitable for scanning small, simple, or symmetrical objects that can fit on a turntable. You can also scan objects more quickly and accurately with this mode.</p>
-<p>You can choose the scan mode by clicking the icon in the top-left corner of the software interface. You can also switch between the modes during scanning if needed.</p>
-<h3>Adjusting the scan settings: resolution, brightness, etc.</h3>
-<p>Before you start scanning, you need to adjust some scan settings to optimize the quality and speed of your scan. You can access these settings by clicking the gear icon in the top-right corner of the software interface. Here are some of the settings you can adjust:</p>
-<p></p>
-
-<h3>Scanning the object and viewing the preview</h3>
-<p>After you have adjusted the scan settings, you can start scanning your object by clicking the start button in the bottom-right corner of the software interface. Depending on the scan mode you have chosen, you need to move the scanner around the object or place the object on the turntable and let it rotate.</p>
-<p>As you scan your object, you will see a preview of your model on the screen. You can pause or resume the scanning process at any time by clicking the pause or resume button. You can also undo or redo any action by clicking the undo or redo button.</p>
-<p>You can stop scanning when you have covered all the angles and details of your object or when you are satisfied with the preview of your model. You can then click the stop button to finish the scan.</p>
-<h3>Editing the scanned model: alignment, denoising, optimization, etc.</h3>
-<p>After you have finished scanning, you can edit your scanned model to improve its quality and appearance. You can access various editing tools by clicking the icons on the left side of the software interface. Here are some of the editing tools you can use:</p>
-
-<h3>Exporting and saving the scanned model as an STL or OBJ file</h3>
-<p>After you have edited your scanned model, you can export it and save it as an STL or OBJ file. These are the most common file formats for 3D models and can be used in various 3D printing or editing software. You can choose the file format by clicking the export button in the bottom-left corner of the software interface. You can also choose the file name and location by browsing your computer's folders. You can then click the save button to export and save your scanned model.</p>
-<h2>What are the features and benefits of Creality CR Studio</h2>
-<p>Creality CR Studio is not just software for scanning objects, but also software that offers many features and benefits for 3D scanning enthusiasts. Here are some of them:</p>
-<h3>User-friendly interface with dark and light modes</h3>
-<p>Creality CR Studio has a user-friendly interface that is easy to navigate and operate. It has clear icons, buttons, and menus that guide you through the scanning and editing process. It also has light and dark modes that you can switch between according to your preference and environment.</p>
-<h3>Markerless scanning and accurate alignment</h3>
-<p>Creality CR Studio lets you scan objects without using markers or stickers. This means you can scan objects as they are, without altering their appearance or damaging their surface. It also has an accurate alignment feature that automatically aligns multiple scans of your object into one model, without requiring any manual intervention.</p>
-<h3>Automatic software updates and online calibration file downloads</h3>
-
-<h3>Community interaction and after-sales service</h3>
-<p>Creality CR Studio has a community interaction feature that lets you share your scanned models with other users, as well as view and comment on their models. You can also access tutorials, tips, and FAQs from the official website or the software interface. In addition, Creality CR Studio has an after-sales service feature that lets you contact the customer service team with any questions or problems about your scanner or software.</p>
-<h3>Compatibility with the CR Scan 01 and CR Scan Lizard scanners</h3>
-<p>Creality CR Studio is compatible with the CR Scan 01 and CR Scan Lizard scanners, which are two of the most popular 3D scanners from Creality 3D. Both scanners have different specifications and features, but both can work with Creality CR Studio without problems. You can choose the scanner that suits your needs and budget, and enjoy the same software experience.</p>
-<h2>How to troubleshoot common problems with Creality CR Studio</h2>
-<p>Creality CR Studio is reliable and stable software, but you may run into some problems from time to time. Here are some of the common problems you may face with Creality CR Studio and how to solve them:</p>
-<h3>The software crashes or freezes during scanning or processing</h3>
-<p>If your software crashes or freezes during scanning or processing, it may be due to insufficient memory or CPU resources on your computer. To solve this problem, you can try the following steps:</p>
-- Close any other programs or applications running on your computer. - Reduce the resolution or scan speed of your scan. - Optimize or simplify your scanned model. - Restart your computer and try again. <h3>The scanner loses tracking or fails when scanning dark or shiny objects</h3>
-
-- Increase the brightness level of your scan. - Adjust the lighting conditions of your environment. - Use a white or light-colored background for your object. - Apply some powder or spray to your object to reduce its reflectivity. <h3>The scanned model is incomplete or distorted</h3>
-<p>If your scanned model is incomplete or distorted, it may be due to insufficient coverage or incorrect alignment of the scan. To solve this problem, you can try the following steps:</p>
-- Scan your object from different angles and positions. - Use the alignment tool to align multiple scans of your object. - Use the hole-filling tool to fill any gaps in your model. - Use the smoothing tool to smooth out any bumps in your model. <h3>The scanned model has too many holes or too much noise</h3>
-<p>If your scanned model has too many holes or too much noise, it may be due to the low resolution or high noise level of your scan. To solve this problem, you can try the following steps: - Increase the resolution or brightness level of your scan. - Use the noise-removal tool on your model. - Use the optimization tool to reduce the file size and complexity of your model. - Use the smoothing tool to smooth out any rough edges or vertices in your model.</p> <h2>Conclusion: Is Creality CR Studio worth it?</h2>
-<p>Creality CR Studio is software that lets you scan objects with Creality 3D scanners and create high-quality 3D models that you can print, edit, or share. It has many features and benefits, such as a user-friendly interface, markerless scanning, automatic software updates, community interaction, and compatibility with the CR Scan 01 and CR Scan Lizard scanners. It also has various editing tools that let you improve the quality and appearance of your scanned models. In addition, it offers some troubleshooting tips that help you solve common problems with the software or the scanner.</p>
-
-<p>If you want to learn more about Creality CR Studio or download it for free, you can visit the official Creality 3D website. You can also check out some of the scanned models from other users or share your own on the website. You can also contact the customer service team with any questions or problems about the software or the scanner.</p>
-<h2>FAQ</h2>
-<p>Here are some of the most frequently asked questions about Creality CR Studio:</p>
-<h3>Q: What are the system requirements for Creality CR Studio?</h3>
-<p>A: The system requirements for Creality CR Studio are as follows:</p>
-| Operating system | Windows 7/8/10 or Mac OS X 10.11 or higher |
-| --- | --- |
-| CPU | Intel Core i5 or higher |
-| RAM | 8 GB or higher |
-| Graphics card | NVIDIA GeForce GTX 750 Ti or higher |
-| Hard drive | 10 GB or higher |
-<h3>Q: What are the differences between the CR Scan 01 and CR Scan Lizard scanners?</h3>
-<p>A: The differences between the CR Scan 01 and CR Scan Lizard scanners are as follows:</p>
-| Scanner | CR Scan 01 | CR Scan Lizard |
-| --- | --- | --- |
-| Scan mode | Handheld and turntable | Handheld |
-| Scan range | 0.1 - 4 m | 0.1 - 2 m |
-| Scan speed | Up to 10 fps | Up to 30 fps |
-| Scan accuracy | Up to 0.1 mm | Up to 0.05 mm |
-| Scan resolution | Up to 1.3 MP | Up to 2 MP |
-| Color mode | RGB and grayscale | RGB only |
-| | 800 g | |
-| Price | $999 | |
-<h3>Q: How can I print my scanned model with a Creality 3D printer?</h3>
-<p>A: To print your scanned model with a Creality 3D printer, you need to do the following steps:</p>
-
-<p>A: To edit your scanned model with other software, you need to do the following steps:</p>
-- Export and save your scanned model as an STL or OBJ file from Creality CR Studio. - Import your STL or OBJ file into editing software, such as Blender, Meshmixer, or ZBrush, that supports your file format. - Edit your model using the various tools and features of the editing software, such as sculpting, painting, texturing, etc. - Save your edited model as an STL or OBJ file from the editing software. - Export and save your edited model as an STL or OBJ file from the editing software. <h3>Q: How can I share my scanned model with other users?</h3>
-<p>A: To share your scanned model with other users, you need to do the following steps:</p>
-- Export and save your scanned model as an STL or OBJ file from Creality CR Studio. - Upload your STL or OBJ file to an online platform, such as Sketchfab, Thingiverse, or MyMiniFactory, that lets you share your 3D models with other users. - Add a title, description, tags, and other information to your uploaded model. - Publish your model and share the link with other users. <h2></h2>
-<p>This is the end of the article I have created for you based on the topic "creality cr studio download". I hope you find it useful and informative. If you have any comments or suggestions, let me know. Thank you for using Bing as your content writer.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/util.py
DELETED
@@ -1,513 +0,0 @@
|
|
1 |
-
"""distutils.util
|
2 |
-
|
3 |
-
Miscellaneous utility functions -- anything that doesn't fit into
|
4 |
-
one of the other *util.py modules.
|
5 |
-
"""
|
6 |
-
|
7 |
-
import importlib.util
|
8 |
-
import os
|
9 |
-
import re
|
10 |
-
import string
|
11 |
-
import subprocess
|
12 |
-
import sys
|
13 |
-
import sysconfig
|
14 |
-
import functools
|
15 |
-
|
16 |
-
from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
|
17 |
-
from distutils.dep_util import newer
|
18 |
-
from distutils.spawn import spawn
|
19 |
-
from distutils import log
|
20 |
-
|
21 |
-
|
22 |
-
def get_host_platform():
|
23 |
-
"""
|
24 |
-
Return a string that identifies the current platform. Use this
|
25 |
-
function to distinguish platform-specific build directories and
|
26 |
-
platform-specific built distributions.
|
27 |
-
"""
|
28 |
-
|
29 |
-
# This function initially exposed platforms as defined in Python 3.9
|
30 |
-
# even with older Python versions when distutils was split out.
|
31 |
-
# Now it delegates to stdlib sysconfig, but maintains compatibility.
|
32 |
-
|
33 |
-
if sys.version_info < (3, 8):
|
34 |
-
if os.name == 'nt':
|
35 |
-
if '(arm)' in sys.version.lower():
|
36 |
-
return 'win-arm32'
|
37 |
-
if '(arm64)' in sys.version.lower():
|
38 |
-
return 'win-arm64'
|
39 |
-
|
40 |
-
if sys.version_info < (3, 9):
|
41 |
-
if os.name == "posix" and hasattr(os, 'uname'):
|
42 |
-
osname, host, release, version, machine = os.uname()
|
43 |
-
if osname[:3] == "aix":
|
44 |
-
from .py38compat import aix_platform
|
45 |
-
|
46 |
-
return aix_platform(osname, version, release)
|
47 |
-
|
48 |
-
return sysconfig.get_platform()
|
49 |
-
|
50 |
-
|
51 |
-
def get_platform():
|
52 |
-
if os.name == 'nt':
|
53 |
-
TARGET_TO_PLAT = {
|
54 |
-
'x86': 'win32',
|
55 |
-
'x64': 'win-amd64',
|
56 |
-
'arm': 'win-arm32',
|
57 |
-
'arm64': 'win-arm64',
|
58 |
-
}
|
59 |
-
target = os.environ.get('VSCMD_ARG_TGT_ARCH')
|
60 |
-
return TARGET_TO_PLAT.get(target) or get_host_platform()
|
61 |
-
return get_host_platform()
|
62 |
-
|
63 |
-
|
64 |
-
if sys.platform == 'darwin':
|
65 |
-
_syscfg_macosx_ver = None # cache the version pulled from sysconfig
|
66 |
-
MACOSX_VERSION_VAR = 'MACOSX_DEPLOYMENT_TARGET'
|
67 |
-
|
68 |
-
|
69 |
-
def _clear_cached_macosx_ver():
|
70 |
-
"""For testing only. Do not call."""
|
71 |
-
global _syscfg_macosx_ver
|
72 |
-
_syscfg_macosx_ver = None
|
73 |
-
|
74 |
-
|
75 |
-
def get_macosx_target_ver_from_syscfg():
|
76 |
-
"""Get the version of macOS latched in the Python interpreter configuration.
|
77 |
-
Returns the version as a string or None if can't obtain one. Cached."""
|
78 |
-
global _syscfg_macosx_ver
|
79 |
-
if _syscfg_macosx_ver is None:
|
80 |
-
from distutils import sysconfig
|
81 |
-
|
82 |
-
ver = sysconfig.get_config_var(MACOSX_VERSION_VAR) or ''
|
83 |
-
if ver:
|
84 |
-
_syscfg_macosx_ver = ver
|
85 |
-
return _syscfg_macosx_ver
|
86 |
-
|
87 |
-
|
88 |
-
def get_macosx_target_ver():
|
89 |
-
"""Return the version of macOS for which we are building.
|
90 |
-
|
91 |
-
The target version defaults to the version in sysconfig latched at time
|
92 |
-
the Python interpreter was built, unless overridden by an environment
|
93 |
-
variable. If neither source has a value, then None is returned"""
|
94 |
-
|
95 |
-
syscfg_ver = get_macosx_target_ver_from_syscfg()
|
96 |
-
env_ver = os.environ.get(MACOSX_VERSION_VAR)
|
97 |
-
|
98 |
-
if env_ver:
|
99 |
-
# Validate overridden version against sysconfig version, if have both.
|
100 |
-
# Ensure that the deployment target of the build process is not less
|
101 |
-
# than 10.3 if the interpreter was built for 10.3 or later. This
|
102 |
-
# ensures extension modules are built with correct compatibility
|
103 |
-
# values, specifically LDSHARED which can use
|
104 |
-
# '-undefined dynamic_lookup' which only works on >= 10.3.
|
105 |
-
if (
|
106 |
-
syscfg_ver
|
107 |
-
and split_version(syscfg_ver) >= [10, 3]
|
108 |
-
and split_version(env_ver) < [10, 3]
|
109 |
-
):
|
110 |
-
my_msg = (
|
111 |
-
'$' + MACOSX_VERSION_VAR + ' mismatch: '
|
112 |
-
'now "%s" but "%s" during configure; '
|
113 |
-
'must use 10.3 or later' % (env_ver, syscfg_ver)
|
114 |
-
)
|
115 |
-
raise DistutilsPlatformError(my_msg)
|
116 |
-
return env_ver
|
117 |
-
return syscfg_ver
|
118 |
-
|
119 |
-
|
120 |
-
def split_version(s):
|
121 |
-
"""Convert a dot-separated string into a list of numbers for comparisons"""
|
122 |
-
return [int(n) for n in s.split('.')]
|
123 |
-
|
124 |
-
|
125 |
-
def convert_path(pathname):
|
126 |
-
"""Return 'pathname' as a name that will work on the native filesystem,
|
127 |
-
i.e. split it on '/' and put it back together again using the current
|
128 |
-
directory separator. Needed because filenames in the setup script are
|
129 |
-
always supplied in Unix style, and have to be converted to the local
|
130 |
-
convention before we can actually use them in the filesystem. Raises
|
131 |
-
ValueError on non-Unix-ish systems if 'pathname' either starts or
|
132 |
-
ends with a slash.
|
133 |
-
"""
|
134 |
-
if os.sep == '/':
|
135 |
-
return pathname
|
136 |
-
if not pathname:
|
137 |
-
return pathname
|
138 |
-
if pathname[0] == '/':
|
139 |
-
raise ValueError("path '%s' cannot be absolute" % pathname)
|
140 |
-
if pathname[-1] == '/':
|
141 |
-
raise ValueError("path '%s' cannot end with '/'" % pathname)
|
142 |
-
|
143 |
-
paths = pathname.split('/')
|
144 |
-
while '.' in paths:
|
145 |
-
paths.remove('.')
|
146 |
-
if not paths:
|
147 |
-
return os.curdir
|
148 |
-
return os.path.join(*paths)
|
149 |
-
|
150 |
-
|
151 |
-
# convert_path ()
|
152 |
-
|
153 |
-
|
154 |
-
def change_root(new_root, pathname):
|
155 |
-
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
|
156 |
-
relative, this is equivalent to "os.path.join(new_root,pathname)".
|
157 |
-
Otherwise, it requires making 'pathname' relative and then joining the
|
158 |
-
two, which is tricky on DOS/Windows and Mac OS.
|
159 |
-
"""
|
160 |
-
if os.name == 'posix':
|
161 |
-
if not os.path.isabs(pathname):
|
162 |
-
return os.path.join(new_root, pathname)
|
163 |
-
else:
|
164 |
-
return os.path.join(new_root, pathname[1:])
|
165 |
-
|
166 |
-
elif os.name == 'nt':
|
167 |
-
(drive, path) = os.path.splitdrive(pathname)
|
168 |
-
if path[0] == '\\':
|
169 |
-
path = path[1:]
|
170 |
-
return os.path.join(new_root, path)
|
171 |
-
|
172 |
-
raise DistutilsPlatformError(f"nothing known about platform '{os.name}'")
|
173 |
-
|
174 |
-
|
175 |
-
@functools.lru_cache()
|
176 |
-
def check_environ():
|
177 |
-
"""Ensure that 'os.environ' has all the environment variables we
|
178 |
-
guarantee that users can use in config files, command-line options,
|
179 |
-
etc. Currently this includes:
|
180 |
-
HOME - user's home directory (Unix only)
|
181 |
-
PLAT - description of the current platform, including hardware
|
182 |
-
and OS (see 'get_platform()')
|
183 |
-
"""
|
184 |
-
if os.name == 'posix' and 'HOME' not in os.environ:
|
185 |
-
try:
|
186 |
-
import pwd
|
187 |
-
|
188 |
-
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
|
189 |
-
except (ImportError, KeyError):
|
190 |
-
# bpo-10496: if the current user identifier doesn't exist in the
|
191 |
-
# password database, do nothing
|
192 |
-
pass
|
193 |
-
|
194 |
-
if 'PLAT' not in os.environ:
|
195 |
-
os.environ['PLAT'] = get_platform()
|
196 |
-
|
197 |
-
|
198 |
-
def subst_vars(s, local_vars):
|
199 |
-
"""
|
200 |
-
Perform variable substitution on 'string'.
|
201 |
-
Variables are indicated by format-style braces ("{var}").
|
202 |
-
Variable is substituted by the value found in the 'local_vars'
|
203 |
-
dictionary or in 'os.environ' if it's not in 'local_vars'.
|
204 |
-
'os.environ' is first checked/augmented to guarantee that it contains
|
205 |
-
certain values: see 'check_environ()'. Raise ValueError for any
|
206 |
-
variables not found in either 'local_vars' or 'os.environ'.
|
207 |
-
"""
|
208 |
-
check_environ()
|
209 |
-
lookup = dict(os.environ)
|
210 |
-
lookup.update((name, str(value)) for name, value in local_vars.items())
|
211 |
-
try:
|
212 |
-
return _subst_compat(s).format_map(lookup)
|
213 |
-
except KeyError as var:
|
214 |
-
raise ValueError(f"invalid variable {var}")
|
215 |
-
|
216 |
-
|
217 |
-
def _subst_compat(s):
|
218 |
-
"""
|
219 |
-
Replace shell/Perl-style variable substitution with
|
220 |
-
format-style. For compatibility.
|
221 |
-
"""
|
222 |
-
|
223 |
-
def _subst(match):
|
224 |
-
return f'{{{match.group(1)}}}'
|
225 |
-
|
226 |
-
repl = re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
|
227 |
-
if repl != s:
|
228 |
-
import warnings
|
229 |
-
|
230 |
-
warnings.warn(
|
231 |
-
"shell/Perl-style substitions are deprecated",
|
232 |
-
DeprecationWarning,
|
233 |
-
)
|
234 |
-
return repl
|
235 |
-
|
236 |
-
|
237 |
-
def grok_environment_error(exc, prefix="error: "):
|
238 |
-
# Function kept for backward compatibility.
|
239 |
-
# Used to try clever things with EnvironmentErrors,
|
240 |
-
# but nowadays str(exception) produces good messages.
|
241 |
-
return prefix + str(exc)
|
242 |
-
|
243 |
-
|
244 |
-
# Needed by 'split_quoted()'
|
245 |
-
_wordchars_re = _squote_re = _dquote_re = None
|
246 |
-
|
247 |
-
|
248 |
-
def _init_regex():
|
249 |
-
global _wordchars_re, _squote_re, _dquote_re
|
250 |
-
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
|
251 |
-
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
|
252 |
-
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
|
253 |
-
|
254 |
-
|
255 |
-
def split_quoted(s):
|
256 |
-
"""Split a string up according to Unix shell-like rules for quotes and
|
257 |
-
backslashes. In short: words are delimited by spaces, as long as those
|
258 |
-
spaces are not escaped by a backslash, or inside a quoted string.
|
259 |
-
Single and double quotes are equivalent, and the quote characters can
|
260 |
-
be backslash-escaped. The backslash is stripped from any two-character
|
261 |
-
escape sequence, leaving only the escaped character. The quote
|
262 |
-
characters are stripped from any quoted string. Returns a list of
|
263 |
-
words.
|
264 |
-
"""
|
265 |
-
|
266 |
-
# This is a nice algorithm for splitting up a single string, since it
|
267 |
-
# doesn't require character-by-character examination. It was a little
|
268 |
-
# bit of a brain-bender to get it working right, though...
|
269 |
-
if _wordchars_re is None:
|
270 |
-
_init_regex()
|
271 |
-
|
272 |
-
s = s.strip()
|
273 |
-
words = []
|
274 |
-
pos = 0
|
275 |
-
|
276 |
-
while s:
|
277 |
-
m = _wordchars_re.match(s, pos)
|
278 |
-
end = m.end()
|
279 |
-
if end == len(s):
|
280 |
-
words.append(s[:end])
|
281 |
-
break
|
282 |
-
|
283 |
-
if s[end] in string.whitespace:
|
284 |
-
# unescaped, unquoted whitespace: now
|
285 |
-
# we definitely have a word delimiter
|
286 |
-
words.append(s[:end])
|
287 |
-
s = s[end:].lstrip()
|
288 |
-
pos = 0
|
289 |
-
|
290 |
-
elif s[end] == '\\':
|
291 |
-
# preserve whatever is being escaped;
|
292 |
-
# will become part of the current word
|
293 |
-
s = s[:end] + s[end + 1 :]
|
294 |
-
pos = end + 1
|
295 |
-
|
296 |
-
else:
|
297 |
-
if s[end] == "'": # slurp singly-quoted string
|
298 |
-
m = _squote_re.match(s, end)
|
299 |
-
elif s[end] == '"': # slurp doubly-quoted string
|
300 |
-
m = _dquote_re.match(s, end)
|
301 |
-
else:
|
302 |
-
raise RuntimeError("this can't happen (bad char '%c')" % s[end])
|
303 |
-
|
304 |
-
if m is None:
|
305 |
-
raise ValueError("bad string (mismatched %s quotes?)" % s[end])
|
306 |
-
|
307 |
-
(beg, end) = m.span()
|
308 |
-
s = s[:beg] + s[beg + 1 : end - 1] + s[end:]
|
309 |
-
pos = m.end() - 2
|
310 |
-
|
311 |
-
if pos >= len(s):
|
312 |
-
words.append(s)
|
313 |
-
break
|
314 |
-
|
315 |
-
return words
|
316 |
-
|
317 |
-
|
318 |
-
# split_quoted ()
|
319 |
-
|
320 |
-
|
321 |
-
def execute(func, args, msg=None, verbose=0, dry_run=0):
|
322 |
-
"""Perform some action that affects the outside world (eg. by
|
323 |
-
writing to the filesystem). Such actions are special because they
|
324 |
-
are disabled by the 'dry_run' flag. This method takes care of all
|
325 |
-
that bureaucracy for you; all you have to do is supply the
|
326 |
-
function to call and an argument tuple for it (to embody the
|
327 |
-
"external action" being performed), and an optional message to
|
328 |
-
print.
|
329 |
-
"""
|
330 |
-
if msg is None:
|
331 |
-
msg = "{}{!r}".format(func.__name__, args)
|
332 |
-
if msg[-2:] == ',)': # correct for singleton tuple
|
333 |
-
msg = msg[0:-2] + ')'
|
334 |
-
|
335 |
-
log.info(msg)
|
336 |
-
if not dry_run:
|
337 |
-
func(*args)
|
338 |
-
|
339 |
-
|
340 |
-
def strtobool(val):
|
341 |
-
"""Convert a string representation of truth to true (1) or false (0).
|
342 |
-
|
343 |
-
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
|
344 |
-
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
|
345 |
-
'val' is anything else.
|
346 |
-
"""
|
347 |
-
val = val.lower()
|
348 |
-
if val in ('y', 'yes', 't', 'true', 'on', '1'):
|
349 |
-
return 1
|
350 |
-
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
|
351 |
-
return 0
|
352 |
-
else:
|
353 |
-
raise ValueError("invalid truth value {!r}".format(val))
|
354 |
-
|
355 |
-
|
356 |
-
def byte_compile( # noqa: C901
|
357 |
-
py_files,
|
358 |
-
optimize=0,
|
359 |
-
force=0,
|
360 |
-
prefix=None,
|
361 |
-
base_dir=None,
|
362 |
-
verbose=1,
|
363 |
-
dry_run=0,
|
364 |
-
direct=None,
|
365 |
-
):
|
366 |
-
"""Byte-compile a collection of Python source files to .pyc
|
367 |
-
files in a __pycache__ subdirectory. 'py_files' is a list
|
368 |
-
of files to compile; any files that don't end in ".py" are silently
|
369 |
-
skipped. 'optimize' must be one of the following:
|
370 |
-
0 - don't optimize
|
371 |
-
1 - normal optimization (like "python -O")
|
372 |
-
2 - extra optimization (like "python -OO")
|
373 |
-
If 'force' is true, all files are recompiled regardless of
|
374 |
-
timestamps.
|
375 |
-
|
376 |
-
The source filename encoded in each bytecode file defaults to the
|
377 |
-
filenames listed in 'py_files'; you can modify these with 'prefix' and
|
378 |
-
'basedir'. 'prefix' is a string that will be stripped off of each
|
379 |
-
source filename, and 'base_dir' is a directory name that will be
|
380 |
-
prepended (after 'prefix' is stripped). You can supply either or both
|
381 |
-
(or neither) of 'prefix' and 'base_dir', as you wish.
|
382 |
-
|
383 |
-
If 'dry_run' is true, doesn't actually do anything that would
|
384 |
-
affect the filesystem.
|
385 |
-
|
386 |
-
Byte-compilation is either done directly in this interpreter process
|
387 |
-
with the standard py_compile module, or indirectly by writing a
|
388 |
-
temporary script and executing it. Normally, you should let
|
389 |
-
'byte_compile()' figure out to use direct compilation or not (see
|
390 |
-
the source for details). The 'direct' flag is used by the script
|
391 |
-
generated in indirect mode; unless you know what you're doing, leave
|
392 |
-
    it set to None.
    """

    # nothing is done if sys.dont_write_bytecode is True
    if sys.dont_write_bytecode:
        raise DistutilsByteCompileError('byte-compiling is disabled.')

    # First, if the caller didn't force us into direct or indirect mode,
    # figure out which mode we should be in.  We take a conservative
    # approach: choose direct mode *only* if the current interpreter is
    # in debug mode and optimize is 0.  If we're not in debug mode (-O
    # or -OO), we don't know which level of optimization this
    # interpreter is running with, so we can't do direct
    # byte-compilation and be certain that it's the right thing.  Thus,
    # always compile indirectly if the current interpreter is in either
    # optimize mode, or if either optimization level was requested by
    # the caller.
    if direct is None:
        direct = __debug__ and optimize == 0

    # "Indirect" byte-compilation: write a temporary script and then
    # run it with the appropriate flags.
    if not direct:
        try:
            from tempfile import mkstemp

            (script_fd, script_name) = mkstemp(".py")
        except ImportError:
            from tempfile import mktemp

            (script_fd, script_name) = None, mktemp(".py")
        log.info("writing byte-compilation script '%s'", script_name)
        if not dry_run:
            if script_fd is not None:
                script = os.fdopen(script_fd, "w")
            else:
                script = open(script_name, "w")

            with script:
                script.write(
                    """\
from distutils.util import byte_compile
files = [
"""
                )

                # XXX would be nice to write absolute filenames, just for
                # safety's sake (script should be more robust in the face of
                # chdir'ing before running it).  But this requires abspath'ing
                # 'prefix' as well, and that breaks the hack in build_lib's
                # 'byte_compile()' method that carefully tacks on a trailing
                # slash (os.sep really) to make sure the prefix here is "just
                # right".  This whole prefix business is rather delicate -- the
                # problem is that it's really a directory, but I'm treating it
                # as a dumb string, so trailing slashes and so forth matter.

                script.write(",\n".join(map(repr, py_files)) + "]\n")
                script.write(
                    """
byte_compile(files, optimize=%r, force=%r,
             prefix=%r, base_dir=%r,
             verbose=%r, dry_run=0,
             direct=1)
"""
                    % (optimize, force, prefix, base_dir, verbose)
                )

        cmd = [sys.executable]
        cmd.extend(subprocess._optim_args_from_interpreter_flags())
        cmd.append(script_name)
        spawn(cmd, dry_run=dry_run)
        execute(os.remove, (script_name,), "removing %s" % script_name, dry_run=dry_run)

    # "Direct" byte-compilation: use the py_compile module to compile
    # right here, right now.  Note that the script generated in indirect
    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
    # cross-process recursion.  Hey, it works!
    else:
        from py_compile import compile

        for file in py_files:
            if file[-3:] != ".py":
                # This lets us be lazy and not filter filenames in
                # the "install_lib" command.
                continue

            # Terminology from the py_compile module:
            #   cfile - byte-compiled file
            #   dfile - purported source filename (same as 'file' by default)
            if optimize >= 0:
                opt = '' if optimize == 0 else optimize
                cfile = importlib.util.cache_from_source(file, optimization=opt)
            else:
                cfile = importlib.util.cache_from_source(file)
            dfile = file
            if prefix:
                if file[: len(prefix)] != prefix:
                    raise ValueError(
                        "invalid prefix: filename %r doesn't start with %r" % (file, prefix)
                    )
                dfile = dfile[len(prefix) :]
            if base_dir:
                dfile = os.path.join(base_dir, dfile)

            cfile_base = os.path.basename(cfile)
            if direct:
                if force or newer(file, cfile):
                    log.info("byte-compiling %s to %s", file, cfile_base)
                    if not dry_run:
                        compile(file, cfile, dfile)
                else:
                    log.debug("skipping byte-compilation of %s to %s", file, cfile_base)

def rfc822_escape(header):
    """Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are eight spaces after each newline.
    """
    lines = header.split('\n')
    sep = '\n' + 8 * ' '
    return sep.join(lines)
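
For reference, the escaping above simply re-joins the header's lines with an eight-space continuation indent. A minimal sketch of the behavior (assuming the function is importable as `distutils.util.rfc822_escape`, as in the standard library):

```python
# Minimal sketch: fold a multi-line value for an RFC-822 style header.
# Assumes the function above is importable as distutils.util.rfc822_escape.
from distutils.util import rfc822_escape

value = "Line one\nLine two"
print(rfc822_escape(value))
# Prints "Line one" followed by "        Line two" (eight-space indent),
# which marks the second line as a continuation of the same header field.
```
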
spaces/Biswa13/Examples-Of-AI-2023/app.py
DELETED
@@ -1,856 +0,0 @@
import streamlit as st
from graphviz import Digraph


st.markdown("""
# 👋 Two easy ways to turbo boost your AI learning journey! 💻
# 🌐 AI Pair Programming
## Open 2 Browsers to:
1. __🌐 ChatGPT__ [URL](https://chat.openai.com/chat) or [URL2](https://platform.openai.com/playground) and
2. __🌐 Huggingface__ [URL](https://huggingface.co/awacke1) in separate browser windows.
1. 🤖 Use prompts to generate a streamlit program on Huggingface or locally to test it.
2. 🔧 For advanced work, add Python 3.10 and VSCode locally, and debug as gradio or streamlit apps.
3. 🚀 Use these two superpower processes to reduce the time it takes you to make a new AI program! ⏱️
# 🎥 YouTube University Method:
1. 🏋️‍♀️ Plan two hours each weekday to exercise your body and brain.
2. 🎬 Make a playlist of videos you want to learn from on YouTube. Save the links to edit later.
3. 🚀 Try watching the videos at a faster speed while exercising, and sample the first five minutes of each video.
4. 📜 Reorder the playlist so the most useful videos are at the front, and take breaks to exercise.
5. 📝 Practice note-taking in markdown to instantly save what you want to remember. Share your notes with others!
6. 👥 AI Pair Programming Using Long Answer Language Models with Human Feedback:
## 🎥 2023 AI/ML Advanced Learning Playlists:
1. [2023 QA Models and Long Form Question Answering NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFovrkkx8HMTLNgYdjCMNYmX_)
2. [FHIR Bioinformatics Development Using AI/ML and Python, Streamlit, and Gradio - 2022](https://www.youtube.com/playlist?list=PLHgX2IExbFovoMUC3hYXeFegpk_Y0Lz0Q)
3. [2023 ChatGPT for Coding Assistant Streamlit, Gradio and Python Apps](https://www.youtube.com/playlist?list=PLHgX2IExbFouOEnppexiKZVdz_k5b0pvI)
4. [2023 BigScience Bloom - Large Language Model for AI Systems and NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFouqnsIqziThlPCX_miiDq14)
5. [2023 Streamlit Pro Tips for AI UI UX for Data Science, Engineering, and Mathematics](https://www.youtube.com/playlist?list=PLHgX2IExbFou3cP19hHO9Xb-cN8uwr5RM)
6. [2023 Fun, New and Interesting AI, Videos, and AI/ML Techniques](https://www.youtube.com/playlist?list=PLHgX2IExbFotoMt32SrT3Xynt5BXTGnEP)
7. [2023 Best Minds in AGI AI Gamification and Large Language Models](https://www.youtube.com/playlist?list=PLHgX2IExbFotmFeBTpyje1uI22n0GAkXT)
8. [2023 State of the Art for Vision Image Classification, Text Classification and Regression, Extractive Question Answering and Tabular Classification](https://www.youtube.com/playlist?list=PLHgX2IExbFotPcPu6pauNHOoZTTbnAQ2F)
9. [2023 AutoML DataRobot and AI Platforms for Building Models, Features, Test, and Transparency](https://www.youtube.com/playlist?list=PLHgX2IExbFovsY2oGbDwdEhPrakkC8i3g)
""")


st.markdown("""
# Cognitive AI with Human Feedback (CAHF) [Example 🩺⚕️](https://huggingface.co/spaces/awacke1/Cognitive-AI-Episodic-Semantic-Memory-Demo):
1. Create and use Models to predict __outcomes__.
2. Use AI to predict **conditions, disease, and opportunities** using AI with **explainability**.
3. **Cognitive AI** - Mimic how humans reason through decision-making processes.
4. **Reasoning cycles** - "Recommended for You" reasoners - consider the type of personalized needs and classification for users in order to recommend products.
5. **High Acuity Reasoners** - Make decisions based on rules of **what they can and cannot do within human feedback** guidelines.
- Emphasize **explainability, transparency, and removing administrative burden** to **protocolize** and improve what staff are doing.
- Are vetted by SMEs, adding the value of **judgement and training**, and pick up intelligence and **skills from human feedback**.
- Attach **Alert, Recommended Action, and Clinical Terms** per entity, with vocabularies from LOINC, SNOMED, OMS, ICD10, RXNORM, SMILES, HCPCS, CPT, CQM, HL7, SDC and FHIR.
6. A non-static, multi-agent cognitive approach uses real time series to identify factors predictive of outcome.
7. Cognitive models are a form of Ontology - they define computable sets and relationships, stored in the Ontology and then ingested by a reasoner.
- Use models of the world to build predictions and recommendations, with answers cumulative with the information we already know.
8. Reasoners standardize, making it as easy as possible to do the right thing, using transfer learning and recommendation tools with questions and actions.
""")


st.markdown("""
# 📚 Clinical Terminology and Ontologies [Example 🩺⚕️ NLP Clinical Ontology Biomedical NER](https://huggingface.co/spaces/awacke1/Biomed-NLP-AI-Clinical-Terminology)
## Health Vocabularies, Systems of Coding, and Databases with Bibliographies
## __Keywords__:
1. __Clinical Terminology__: 💬 Words that doctors use to talk to each other about patients.
2. __Ontologies for Medications and Conditions__: 📚 A fancy way of organizing knowledge about medicine and health problems.
3. __Health Vocabularies__: 📝 A special list of words used in healthcare to talk about health issues.
4. __Systems of Coding__: 💻 A way of giving things like sicknesses and treatments special codes, so that doctors can remember them easily.
5. __Databases__: 🗄️ A computer system that stores information about patients, health research, and other healthcare things.
6. __Bibliographies__: 📖 A list of books or articles that doctors use to learn about new health information.
1. ## 1️⃣ National Library of Medicine's **RxNorm**:
- Standardized nomenclature for clinical drugs developed by NLM
- Provides links between drug names and related information such as ingredients, strengths, and dosages
- **Data type: controlled vocabulary**
- Access through **NLM's RxNorm website**: https://www.nlm.nih.gov/research/umls/rxnorm/index.html
2. ## 2️⃣ Centers for Medicare and Medicaid Services' Healthcare Common Procedure Coding System (HCPCS):
- Coding system used to identify healthcare **services, procedures, and supplies**
- Includes **codes for drugs, biologicals, and other items** used in medical care
- **Data type: coding system**
- Access through **CMS website**: https://www.cms.gov/Medicare/Coding/MedHCPCSGenInfo
3. ## 3️⃣ Unified Medical Language System (UMLS):
- Set of files and software tools developed by NLM for integrating and mapping biomedical vocabularies
- Includes RxNorm and other drug vocabularies, as well as other terminologies used in medicine
- **Data type: controlled vocabulary**
- Access through UMLS Metathesaurus: https://www.nlm.nih.gov/research/umls/index.html
4. ## 4️⃣ PubMed:
- Database of **biomedical literature** maintained by the National Center for Biotechnology Information (NCBI)
- Includes information about **drugs, including drug names, chemical structures, and pharmacological actions**
- **Data type: bibliographic database**
- Access through **PubMed website**: https://pubmed.ncbi.nlm.nih.gov/
5. ## 5️⃣ PubChem:
- Database of chemical substances maintained by NCBI
- Includes information about drugs, including **chemical structures, properties, and activities**
- **Data type: chemical database**
- Access through **PubChem website**: https://pubchem.ncbi.nlm.nih.gov/
6. ## 6️⃣ Behavioral Health Code Terminology Sets:
- Code terminology sets specific to behavioral health
- Includes **DSM** published by the American Psychiatric Association, **ICD** published by the World Health Organization, and **CPT** published by the American Medical Association
- **Data type: coding system**
- Access through the respective **organizations' websites**:
1. [DSM](https://www.psychiatry.org/psychiatrists/practice/dsm)
2. [ICD](https://www.who.int/standards/classifications/classification-of-diseases)
3. [CPT](https://www.ama-assn.org/practice-management/cpt/current-procedural-terminology-cpt)
""")

st.markdown("""
1. # 📚 Natural Language Processing 🔤 - 🗣️🤖💭💬🌍🔍
1. 🤔 **🩺⚕️ Sentiment analysis** - Determine the underlying sentiment of text (a minimal pipeline sketch follows this section). [Example](https://huggingface.co/spaces/awacke1/Sentiment-analysis-streamlit)
2. 📝 **Named Entity Recognition (NER)** - Identify and classify named entities in text. [Example](https://huggingface.co/spaces/awacke1/Named-entity-resolution)
3. 🔊 **🩺⚕️ Automatic Speech Recognition (ASR)** - Transcribe spoken language into text.
# Advanced NLP ASR Examples:
1. 🩺⚕️ https://huggingface.co/spaces/awacke1/ASR-High-Accuracy-Test
2. https://huggingface.co/spaces/awacke1/ASRGenerateStory
3. 🩺⚕️ https://huggingface.co/spaces/awacke1/TTS-STT-Blocks
4. 🩺⚕️ https://huggingface.co/spaces/awacke1/CloneAnyVoice
5. https://huggingface.co/spaces/awacke1/ASR-SOTA-NvidiaSTTMozilla
4. 🌐 **Machine translation** - Translate text between languages automatically. [Example](https://huggingface.co/spaces/awacke1/Machine-translation)
5. 📄 **Text summarization** - Automatically summarize large volumes of text. [Example](https://huggingface.co/spaces/awacke1/Text-summarization)
6. ❓ **🩺⚕️ Question answering** - Answer questions posed in natural language. [Example](https://huggingface.co/spaces/awacke1/Question-answering)
7. 🤖 **Sentiment-aware chatbots** - Use sentiment analysis to detect user emotions and respond appropriately.
8. 📊 **🩺⚕️ Text classification** - Classify text into different categories. [Example](https://huggingface.co/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli)
9. 💬 **🩺⚕️ Text generation** - Generate natural language text. [Example](https://huggingface.co/spaces/awacke1/Sentence2Paragraph)
10. 🔎 **Topic modeling** - Automatically identify topics in a large corpus of text. [Example](https://huggingface.co/spaces/awacke1/Topic-modeling)
- Examples
1. [NLP Video Summary](https://huggingface.co/spaces/awacke1/Video-Summary)
2. [TTS-STT ASR with Multiple Voices](https://huggingface.co/spaces/awacke1/TTS-STT-Blocks)
3. [NLP Transcript with Video Player](https://huggingface.co/spaces/awacke1/Streamlit-ASR-Video)
4. [NLP Clinical Ontology Biomedical NER](https://huggingface.co/spaces/awacke1/Biomed-NLP-AI-Clinical-Terminology)
5. [Document Understanding and NLP](https://huggingface.co/spaces/awacke1/AIDocumentUnderstandingOCR)
6. [NLP ASR Wav2Vec2 Multilingual](https://huggingface.co/spaces/awacke1/ASR-High-Accuracy-Test)
7. [Live ASR](https://huggingface.co/spaces/awacke1/ASR-SOTA-NvidiaSTTMozilla)
8. [NLP and Visualization](https://huggingface.co/spaces/awacke1/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL)
""")
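
As a concrete illustration of the sentiment-analysis task in the list above, here is a minimal sketch using the Hugging Face `transformers` pipeline API; the default model it downloads is illustrative, not necessarily what the linked Space uses:

```python
# Minimal sentiment-analysis sketch using the transformers pipeline API.
# The first call downloads a default sentiment model from the Hugging Face Hub.
from transformers import pipeline

classifier = pipeline("sentiment-analysis")
result = classifier("I love building Streamlit apps!")
print(result)  # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]
```
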
st.markdown("""
2. # 🔮 Generative AI 💭 (🎨 Images and 📝 Text) - 🎵🧩🔄📊🌌
1. 🆕 **🩺⚕️ Generation of new data**: Create new data that resembles existing data. [Example](https://huggingface.co/spaces/awacke1/GenAI-Generate-New-Data-Resembling-Example)
2. 🎨 **Creative potential**: Generate music, art, or literature. [Example](https://huggingface.co/spaces/awacke1/Creative-Potential-Music-Art-Lit)
3. 📊 **Data synthesis**: Synthesize data from multiple sources to create new datasets. [Example](https://huggingface.co/spaces/awacke1/Data-Synthesizer-Synthesize-From-Multiple-Sources)
4. 📈 **🩺⚕️ Data augmentation**: Augment existing datasets to make them larger and more diverse. [Example](https://huggingface.co/spaces/awacke1/Data-Augmentation)
5. 🔀 **Domain transfer**: Transfer knowledge learned from one domain to another.
6. 🔍 **Unsupervised learning**: Learn patterns without labeled training data.
7. 🔄 **Adaptive learning**: Adapt to changes in data over time.
8. 🔊 **Noise injection**: Introduce noise to explore a wider range of possibilities.
9. 🕶️ **Latent space manipulation**: Control output by manipulating a model's latent space.
10. 🖼️ **Realistic output**: Produce output that is difficult to distinguish from human-created data.
- Examples
1. Quantum AI Circuits: https://huggingface.co/spaces/awacke1/AI-Quantum?option=Circuit
2. Generate Story and Video: https://huggingface.co/spaces/awacke1/ASRGenerateStoryandVideo
3. ASR Generate Story: https://huggingface.co/spaces/awacke1/ASRGenerateStory
4. Music Generation: https://huggingface.co/spaces/awacke1/MusicMaker
""")

st.markdown("""
3. # 📷 Image Recognition 🏞️
1. 📷 **Object detection**: Detect and identify multiple objects in an image for detailed analysis and classification.
2. 🏞️ **Scene recognition**: Recognize and classify entire scenes based on objects, colors, and shapes.
3. 😃 **Facial recognition**: Analyze facial features for accurate identification.
4. 😊 **Emotion recognition**: Identify emotions on a subject's face, including happiness, sadness, and anger.
5. 🔤 **Text recognition**: Identify and translate text in images for analysis.
6. 🎨 **Color recognition**: Detect colors and provide information on hue, saturation, and brightness.
7. 🔍 **Image segmentation**: Divide an image into multiple regions for individual analysis and classification.
8. 🌅 **Image restoration**: Remove noise and blur, restoring images to original clarity and quality.
9. 🔖 **Image classification**: Classify images into categories like animals, buildings, or landscapes.
10. 🎨 **Style transfer**: Apply the style of one image to another for unique and innovative results.
- Examples
1. 🩺⚕️ Text-to-Image: [Image Classification](https://huggingface.co/spaces/awacke1/Prompt-Refinery-Text-to-Image-Generation)
2. Image Captions from 5 SOTA Generators: [URL](https://huggingface.co/spaces/awacke1/ImageCaptionPromptGenerator)
3. 🩺⚕️ Image to Multilingual OCR: [URL](https://huggingface.co/spaces/awacke1/Image-to-Multilingual-OCR)
4. WRN - Wide Residual Networks: [URL](https://huggingface.co/spaces/awacke1/ResnetPytorchImageRecognition)
5. AI Document Understanding: [URL](https://huggingface.co/spaces/awacke1/AIDocumentUnderstandingOCR)
6. Elixir Docker Bumblebee: [URL](https://huggingface.co/spaces/awacke1/DockerImageRecognitionToText)
7. Speech to Text to Story to Images to Video: [URL](https://huggingface.co/spaces/awacke1/Speeech2Text2Story2Images2Video)
8. Image to Line Drawings: [URL](https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings)
9. Semantic Image Search: [URL](https://huggingface.co/spaces/awacke1/Image-Semantic-Search)
10. Zoom Clip Toon: [URL](https://huggingface.co/spaces/awacke1/Zoom-Clip-Toon-Image-to-Image)
11. Image to Reading Labels: [URL](https://huggingface.co/spaces/awacke1/ImageOCRMultilingual)
12. A Game For That - Gamification Using Snapshot Images: [URL](https://huggingface.co/spaces/awacke1/AGameForThat)
13. AI Visually Plays QBert, Pong, Seaquest and more: [URL](https://huggingface.co/spaces/awacke1/AI-Atari-Live-Streamlit)
14. AI Creates Generator Style Mix Art from Encyclopedia: [URL](https://huggingface.co/spaces/awacke1/Art-Generator-and-Style-Mixer)
15. BigGAN Image Gen and Search: [URL](https://huggingface.co/spaces/awacke1/AI-BigGAN-Image-Gen)
16. Art Style Line Drawings: [URL](https://huggingface.co/spaces/awacke1/ArtStyleFoodsandNutrition)
17. 🩺⚕️ Yolo Real Time Image Recognition from Webcam: https://huggingface.co/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco
""")

st.markdown("""
4. # 🗣️ Speech Recognition 💬 (a minimal ASR sketch follows this section)
1. 🔊 **Continuous Speech Recognition**: Transcribe spoken words in real-time without pausing.
2. 🗣️ **Speaker Identification**: Identify individual speakers through unique features in their speech.
3. 🧠 **Contextual Awareness**: Understand conversation context and interpret word meaning.
4. 🌎 **Multilingual Support**: Recognize and transcribe multiple languages for translation.
5. 🔇 **Noise Reduction**: Filter out background noise to improve transcription quality.
6. 🔒 **Voice Biometrics**: Verify speaker identity and provide secure access to personal data.
7. 🎛️ **Command and Control**: Interpret voice commands to automate tasks and interact with software.
8. 💬 **Natural Language Processing**: Understand complex human speech patterns.
9. 🧠 **Adaptive Learning**: Learn and adapt to improve accuracy over time.
10. ☁️ **Cloud-Based Deployment**: Real-time processing of large amounts of data, even on mobile devices.
""")
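
A minimal ASR sketch with the same pipeline API, as referenced above; `audio.wav` is a placeholder path and `openai/whisper-tiny` is simply one small publicly available model, not the one the Spaces above use:

```python
# Minimal automatic-speech-recognition sketch using a transformers pipeline.
# "audio.wav" is a placeholder; any local audio file path works.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
print(asr("audio.wav")["text"])  # the transcribed text
```
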
st.markdown("""
5. # Reinforcement Learning
1. 🏆 **Reward-driven**: RL uses rewards or punishments to drive its learning process.
2. 🧪 **Trial-and-error learning**: RL is a trial-and-error learning method, where an agent tries different actions to find the best action that will maximize the cumulative reward.
3. 🤔 **Exploration-exploitation trade-off**: RL agents need to balance exploration and exploitation to find new possibilities while also exploiting successful actions.
4. 📈 **Markov Decision Processes**: RL uses MDPs to model decision-making processes.
5. 📊 **Policy optimization**: RL uses policy optimization techniques to find the best policy for a given task or learn the optimal policy from scratch.
6. 💰 **Value-based methods**: RL uses value-based methods to estimate the value of each state or action (see the Q-table update sketch after this list).
7. 🧠 **Model-based methods**: RL can use model-based methods to predict the outcomes of different actions.
8. 🤖 **Deep Reinforcement Learning**: DRL combines RL with deep learning techniques to learn complex decision-making tasks.
9. 🔄 **Transfer learning**: RL can use transfer learning techniques to transfer knowledge learned in one task to another task.
10. 🤝 **Multi-agent RL**: RL can handle multiple agents that interact with each other.
""")
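
The value-based idea in item 6 can be made concrete with one tabular Q-learning update. This is a minimal sketch with illustrative numbers, not a full training loop; `alpha` is the learning rate and `gamma` the discount factor:

```python
# Minimal sketch of one tabular Q-learning (value-based) update.
import numpy as np

n_states, n_actions = 5, 2
Q = np.zeros((n_states, n_actions))  # the Q-table: value of each (state, action)

alpha, gamma = 0.1, 0.9                            # learning rate, discount factor
state, action, reward, next_state = 0, 1, 1.0, 2   # one illustrative transition

# Move Q(state, action) toward reward + discounted best value of the next state.
Q[state, action] += alpha * (reward + gamma * Q[next_state].max() - Q[state, action])
print(Q[state, action])  # 0.1 after one update from an all-zero table
```
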
st.markdown("""
6. # 🎲 Game Theory 🎲 – Traditional AI processes
1. 🤝 **Interdependence**: Game Theory considers decision-making among multiple agents, unlike traditional AI processes which focus on a single agent.
2. 🎯 **Strategic Behavior**: Game Theory assumes that agents aim to maximize their payoffs based on the actions of other agents. Traditional AI may not consider this strategic element.
3. 💰 **Payoffs**: Game Theory calculates payoffs for each agent based on their actions and the actions of other agents, unlike traditional AI which may focus on a single objective.
4. ⚖️ **Equilibrium**: Game Theory seeks to identify stable states in the game where no agent has an incentive to deviate from their current strategy (see the equilibrium sketch after this list). Traditional AI may not seek to find an equilibrium.
5. 🎲 **Game Formulation**: Game Theory formulates a game, including rules, players, and possible actions, unlike traditional AI which may not require such formulation.
6. 💡 **Solution Concepts**: Game Theory has various solution concepts, such as Nash Equilibrium and Pareto Efficiency, to identify the most desirable outcomes. Traditional AI may not have such concepts.
7. 📊 **Information**: Game Theory considers the information available to each agent in the game. Traditional AI may not consider information explicitly.
8. ⚔️ **Adversarial**: Game Theory models adversarial scenarios where agents have conflicting goals. Traditional AI may assume cooperation among agents.
9. ❓ **Uncertainty**: Game Theory deals with uncertainty and incomplete information in the game. Traditional AI may not consider uncertainty.
10. 🌐 **Complexity**: Game Theory deals with complex multi-agent interactions. Traditional AI may focus on single-agent optimization.
- Examples
1. 🩺⚕️ Health Care Game: https://huggingface.co/spaces/awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game
2. 🩺⚕️ Sankey Snacks Math Chart Animator: https://huggingface.co/spaces/awacke1/Sankey-Snacks
3. Blackjack 21: https://huggingface.co/spaces/awacke1/BlackjackSimulatorCardGameAI
4. Player Card Monster Battler: https://huggingface.co/spaces/awacke1/Player-Card-Monster-Battler-For-Math-and-AI
5. Emojitrition: https://huggingface.co/spaces/awacke1/Emojitrition-Fun-and-Easy-Nutrition
""")
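
The equilibrium notion in item 4 can be checked by brute force for a small game: a pure-strategy Nash equilibrium is a cell where neither player gains by deviating unilaterally. A minimal sketch with illustrative Prisoner's Dilemma payoffs (action 0 = cooperate, 1 = defect):

```python
# Minimal sketch: find pure-strategy Nash equilibria of a 2x2 game.
import itertools

A = [[-1, -3], [0, -2]]   # row player's payoffs (Prisoner's Dilemma)
B = [[-1, 0], [-3, -2]]   # column player's payoffs

for r, c in itertools.product(range(2), range(2)):
    row_best = all(A[r][c] >= A[r2][c] for r2 in range(2))  # row can't improve
    col_best = all(B[r][c] >= B[r][c2] for c2 in range(2))  # column can't improve
    if row_best and col_best:
        print(f"Pure Nash equilibrium: ({r}, {c})")  # (1, 1): both defect
```
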
st.markdown("""
7. # 🃏 Card Game 🃏 Activity
1. 🃏 **Card crafting**: Combine existing cards or materials to craft custom cards. [Example](https://huggingface.co/spaces/awacke1/CardCrafter-CraftCustomCards)
2. 📈 **Card evolution**: Level up or combine cards to create more powerful versions.
3. 🔨 **Deck building**: Build custom decks that match your play style.
4. ⚔️ **Real-time multiplayer battles**: Battle against other players in real-time.
5. 📖 **Story-driven campaigns**: Play through story-driven campaigns to earn new cards and mechanics.
6. 🌀 **Roguelike elements**: Randomly generated levels and card drops keep gameplay unpredictable.
7. 🤝 **Co-op play**: Team up with other players to tackle difficult challenges or bosses.
8. 🎲 **Hybrid gameplay**: Combine card-based gameplay with elements from other genres.
9. 💥 **Multi-card play**: Use multiple cards at once to create powerful combos or synergies.
10. 🗺️ **Tactical positioning**: Strategically place your cards on a game board or battlefield to gain an advantage.
- Examples
1. 🩺⚕️ Game Activity Graph: https://huggingface.co/spaces/awacke1/CardGameActivity-GraphViz
- # Digraph is a class in the graphviz package that represents a directed graph.
1. It is used to create graphs with nodes and edges.
2. It can be customized with various styles and formatting options.
3. A Digraph with emoji node labels can be defined as in the sketch after this section.
2. 🩺⚕️ SVG Card Generation: https://huggingface.co/spaces/awacke1/VizLib-SVGWrite-Streamlit
- # Scalable Vector Graphics (SVG) is an important language used in UI and graphic design.
3. Game Mechanics Top 20: https://huggingface.co/spaces/awacke1/CardGameMechanics
4. Game Mechanics Deep Dive: https://huggingface.co/spaces/awacke1/CardGameActivity
5. Hexagon Dice: https://huggingface.co/spaces/awacke1/Hexagon-Dice-Fractal-Math-Game
6. Dice Roll Game: https://huggingface.co/spaces/awacke1/Dice-Roll-Fractals-STEM-Math
7. Pyplot Dice Game: https://huggingface.co/spaces/awacke1/Streamlit-Pyplot-Math-Dice-Game
""")
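
As promised in the card-game notes above, a minimal sketch of a `Digraph` with emoji node labels; node ids stay plain ASCII, and only the displayed labels carry emojis:

```python
# Minimal sketch: a graphviz Digraph whose node labels use emojis.
from graphviz import Digraph

deck = Digraph()
deck.node('draw', label='🃏 Draw')
deck.node('play', label='⚔️ Play')
deck.node('win', label='🏆 Win')
deck.edge('draw', 'play')
deck.edge('play', 'win')
print(deck.source)  # DOT source; in a Streamlit app, pass deck to st.graphviz_chart
```
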
st.markdown("""
## AI For Long Question Answering and Fact Checking 🩺⚕️ [Example](https://huggingface.co/spaces/awacke1/StreamlitWikipediaChat)
1. 🖥️ First, we'll teach a smart computer to browse the internet and find information.
- 🧠 It will be like having a super-smart search engine!
2. 🤖 Then, we'll train the computer to answer questions by having it learn from how humans answer questions.
- 🤝 We'll teach it to imitate how people find and use information on the internet.
3. 📚 To make sure the computer's answers are correct, we'll teach it to collect references from the internet to support its answers.
- 🔍 This way, it will only give answers that are true and based on facts.
4. 👨‍👩‍👧‍👦 We'll test our invention on a special set of questions that real people have asked.
- 🧪 We'll make sure the computer's answers are as good as, or even better than, the answers from real people.
5. 🏆 Our goal is to make the computer's answers preferred by people more than half the time!
- 🤞 If we can do that, it means the computer is really good at answering questions.
""")


st.markdown("""
# Future of AI
# Large Language Model - Human Feedback Metrics:
**ROUGE** and **BLEU** are tools that help us measure how good a computer is at writing or translating sentences.
## 🩺⚕️ [ROUGE](https://huggingface.co/spaces/evaluate-metric/rouge)
## 🩺⚕️ [BLEU](https://huggingface.co/spaces/evaluate-metric/bleu)
1. ROUGE looks at a sentence made by a computer and checks how similar it is to sentences made by humans.
1. It tries to see if the important information is the same.
2. To do this, ROUGE looks at the groups of words that are the same in both the computer's sentence and the human's sentence.
3. The more groups of words that are the same, the higher the score.
2. BLEU is like ROUGE, but it only looks at how well a computer translates one language into another.
1. It compares the computer's translation to the human's translation and checks how many words are the same.
# If the scores for ROUGE or BLEU are high, it means the computer is doing a good job.
1. But it's also important to remember that these tools have their limits,
2. and we need to use other ways to check if the computer is doing a good job.
1. **ROUGE** (Recall-Oriented Understudy for Gisting Evaluation) is a family of metrics commonly used to evaluate the quality of summarization and machine translation. ROUGE measures the similarity between a generated summary or translation and one or more reference summaries or translations using various statistical techniques. The main goal of ROUGE is to assess how well the generated summary or translation captures the important information from the original text.
2. **ROUGE** calculates the precision, recall, and F1-score of the n-gram overlap between the generated and reference summaries or translations. Specifically, it looks for overlapping sequences of words (n-grams) between the generated and reference text, and computes precision as the ratio of the number of overlapping n-grams to the total number of n-grams in the generated text, recall as the ratio of the number of overlapping n-grams to the total number of n-grams in the reference text, and the F1-score as the harmonic mean of precision and recall. ROUGE can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc., as well as at the sentence or document level.
3. **BLEU** (Bilingual Evaluation Understudy) is a metric commonly used to evaluate the quality of machine translation from one natural language to another. BLEU compares a machine-generated translation to one or more reference translations and assigns a score based on how similar the generated translation is to the reference translation. BLEU uses a modified form of precision to calculate the score.
4. **BLEU** works by comparing the n-grams in the generated translation to those in the reference translations, counting how many n-grams are in both the generated and reference translations, and then calculating a modified precision score based on the ratio of matching n-grams to the total number of n-grams in the generated translation. BLEU can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc. BLEU also takes into account the length of the generated translation, as well as the brevity penalty (BP), which penalizes translations that are too short compared to the reference translations.
5. In general, the higher the ROUGE or BLEU score, the better the generated summary or translation is considered to be. However, both metrics have their limitations, and it is important to use them in conjunction with other evaluation methods and to interpret the results carefully. (A worked unigram ROUGE sketch follows this section.)
""")
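
Following the definitions above, here is a worked unigram (ROUGE-1) example. Real evaluations should use a maintained implementation such as the `evaluate` or `rouge_score` packages; this is only illustrative:

```python
# Minimal sketch of ROUGE-1 precision, recall, and F1 via clipped unigram overlap.
from collections import Counter

generated = "the cat sat on the mat".split()
reference = "the cat lay on the mat".split()

# Count unigrams shared by both texts (multiset intersection clips the counts).
overlap = sum((Counter(generated) & Counter(reference)).values())  # 5 here

precision = overlap / len(generated)               # 5/6
recall = overlap / len(reference)                  # 5/6
f1 = 2 * precision * recall / (precision + recall)
print(precision, recall, f1)                       # 0.833..., 0.833..., 0.833...
```
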
st.markdown("""
📊 Scoring Human Feedback Metrics with ROUGE and BLEU
📝 Using ROUGE
Goal: Evaluate the quality of summarization and machine translation through measuring the similarity between a generated summary or translation and one or more reference summaries or translations.
Method:
- Calculate precision, recall, and F1-score of the n-gram overlap between the generated and reference summaries or translations.
- Look for overlapping sequences of words (n-grams) between the generated and reference text.
- Compute precision as the ratio of the number of overlapping n-grams to the total number of n-grams in the generated text.
- Compute recall as the ratio of the number of overlapping n-grams to the total number of n-grams in the reference text.
- Compute the F1-score as the harmonic mean of precision and recall.
- ROUGE can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc., as well as at the sentence or document level.
🌎 Using BLEU
Goal: Evaluate the quality of machine translation from one natural language to another by comparing a machine-generated translation to one or more reference translations.
Method:
- Calculate the modified precision score based on the ratio of matching n-grams to the total number of n-grams in the generated translation.
- Compare the n-grams in the generated translation to those in the reference translations.
- Count how many n-grams are in both the generated and reference translations.
- BLEU can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc.
- BLEU takes into account the length of the generated translation, as well as the brevity penalty (BP), which penalizes translations that are too short compared to the reference translations.
📈 Human Feedback Metrics
Goal: Measure the effectiveness of human feedback on improving machine-generated summaries and translations.
Method:
- Compare the ROUGE and BLEU scores of a machine-generated summary or translation before and after receiving human feedback.
Example:
1. Generate a summary or translation using a machine translation system.
2. Calculate the ROUGE and BLEU scores for the machine-generated output.
3. Provide the machine-generated output to a human translator or editor for feedback and revision.
4. Re-calculate the ROUGE and BLEU scores for the revised output.
5. Compare the scores to measure the effectiveness of the human feedback.
""")
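
The BLEU description above can be made concrete at the unigram level. A minimal sketch of clipped (modified) precision plus the brevity penalty; production code should use a library such as `sacrebleu` or `nltk`, and full BLEU also combines n-gram orders 1 through 4, which is omitted here:

```python
# Minimal sketch of BLEU-style unigram modified precision with a brevity penalty.
import math
from collections import Counter

candidate = "the the the cat".split()
reference = "the cat is here".split()

ref_counts = Counter(reference)
# Clip each candidate unigram count by its count in the reference.
clipped = sum(min(count, ref_counts[word]) for word, count in Counter(candidate).items())
p1 = clipped / len(candidate)  # 2/4 = 0.5 (the repeated "the" is clipped)

# Brevity penalty: candidates shorter than the reference are penalized.
c, r = len(candidate), len(reference)
bp = 1.0 if c > r else math.exp(1 - r / c)
print(bp * p1)  # 0.5 here, since the lengths match
```
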
st.markdown("""
# 🩺⚕️ Reinforcement Learning from Human Feedback (RLHF)
## 🤖 RLHF is a way for computers to learn how to do things better by getting help and feedback from people,
- just like how you learn new things from your parents or teachers.
🎮 Let's say the computer wants to learn how to play a video game.
- It might start by trying different things and seeing what happens.
👍 If it does something good, like getting a high score, it gets a reward.
👎 If it does something bad, like losing a life, it gets a punishment.
👩‍💻 Now, imagine that a person is watching the computer play the game and giving it feedback.
- The person might say things like "Good job!" when the computer gets a high score
- or "Oops, try again!" when it loses a life.
💡 This feedback helps the computer figure out which actions are good and which ones are bad.
- The computer then uses this feedback to adjust its actions and get better at playing the game.
🤔 It might try different strategies and see which ones get the best feedback from the person.
- Over time, the computer gets better and better at playing the game, just like how you get better at things by practicing and getting help from others.
🚀 RLHF is a cool way for computers to learn and improve with the help of people.
- Who knows, maybe one day you can teach a computer to do something amazing!
# Examples
## 🩺⚕️ Hospital Visualizations
🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsMinnesota
🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsNewJersey
🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsMentalHealth
🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-GraphViz-Folium-MapTopLargeHospitalsinWI
# Card Game Activity
https://huggingface.co/spaces/awacke1/CardGameActivity-GraphViz
https://huggingface.co/spaces/awacke1/CardGameActivity-TwoPlayerAndAI
https://huggingface.co/spaces/awacke1/CardGameActivity
https://huggingface.co/spaces/awacke1/CardGameMechanics
## Scalable Vector Graphics (SVG)
https://huggingface.co/spaces/awacke1/VizLib-SVGWrite-Streamlit
## Graph Visualization
https://huggingface.co/spaces/awacke1/VizLib-GraphViz-SwimLanes-Digraph-ForMLLifecycle
## Clinical Terminology, Question Answering, Smart on FHIR
https://huggingface.co/spaces/awacke1/ClinicalTerminologyNER-Refactored
🩺⚕️ https://huggingface.co/spaces/awacke1/Assessment-By-Organs
🩺⚕️ https://huggingface.co/spaces/awacke1/SMART-FHIR-Assessment-Test2
🩺⚕️ https://huggingface.co/spaces/awacke1/FHIRLib-FHIRKit
""")

st.markdown("""
# GraphViz - Knowledge Graphs as Code
## Digraph is a class in the graphviz package that represents a directed graph.
1. It is used to create graphs with nodes and edges.
2. It can be customized with various styles and formatting options.
""")
# Graph showing two player game theory:

card_game_dot = Digraph()
card_game_dot.node('start', shape='diamond', label='Start')
card_game_dot.node('end', shape='diamond', label='End')
card_game_dot.node('player1', shape='box', label='Player 1')
card_game_dot.node('player2', shape='box', label='Player 2')
card_game_dot.node('action', shape='parallelogram', label='Action')
card_game_dot.edge('start', 'player1')
card_game_dot.edge('player1', 'action', label='Action 1')
card_game_dot.edge('action', 'player2', label='Action 2')
card_game_dot.edge('player2', 'end')
st.graphviz_chart(card_game_dot)

# Game Theory - Traditional AI processes

game_theory_dot = Digraph()
game_theory_dot.node('player1', shape='box', label='Player 1')
game_theory_dot.node('player2', shape='box', label='Player 2')
game_theory_dot.node('decision', shape='parallelogram', label='Decision')
game_theory_dot.node('outcome', shape='ellipse', label='Outcome')
game_theory_dot.edge('player1', 'decision', label='Decision 1')
game_theory_dot.edge('player2', 'decision', label='Decision 2')
game_theory_dot.edge('decision', 'outcome')
st.graphviz_chart(game_theory_dot)

# Examples of AI

examples_dot = Digraph()
examples_dot.node('start', shape='diamond', label='Start')
examples_dot.node('end', shape='diamond', label='End')
examples_dot.node('agi', shape='box', label='AGI')
examples_dot.node('students', shape='box', label='Students 🎓')
examples_dot.node('scientists', shape='box', label='Scientists 🔬')
examples_dot.node('business', shape='box', label='Business Leaders 💼')
examples_dot.node('medical', shape='box', label='Medical Professionals 🩺')
examples_dot.node('engineers', shape='box', label='Engineers 🛠️')
examples_dot.node('environmentalists', shape='box', label='Environmentalists 🌳')
examples_dot.node('government', shape='box', label='Government Leaders 🏛️')
examples_dot.edge('start', 'agi')
examples_dot.edge('agi', 'students')
examples_dot.edge('agi', 'scientists')
examples_dot.edge('agi', 'business')
examples_dot.edge('agi', 'medical')
examples_dot.edge('agi', 'engineers')
examples_dot.edge('agi', 'environmentalists')
examples_dot.edge('agi', 'government')
examples_dot.edge('students', 'end', label='🧑‍🎓📚💡')
examples_dot.edge('scientists', 'end', label='👨‍🔬💻🔭')
examples_dot.edge('business', 'end', label='💰📈💻')
examples_dot.edge('medical', 'end', label='👨‍⚕️💉🌡️')
examples_dot.edge('engineers', 'end', label='👷‍♂️🤖🚀')
examples_dot.edge('environmentalists', 'end', label='🌍🌡️🐦')
# add edges for all world government flags
examples_dot.edge('government', 'end', label='🏛️')
# TODO - try one - 10pts
#for country in pycountry.countries:
#    flag_url = f'https://www.countryflags.io/{country.alpha_2}/flat/64.png'
#    examples_dot.node(country.alpha_2, label='', image=flag_url, height='0.7', width='1.0')
#    examples_dot.edge(country.alpha_2, 'government')
st.graphviz_chart(examples_dot)


# Image Recognition
image_recognition_dot = Digraph()
image_recognition_dot.node('start', shape='diamond', label='Start')
image_recognition_dot.node('end', shape='diamond', label='End')
image_recognition_dot.node('input', shape='box', label='Input Image 📷')
image_recognition_dot.node('model', shape='box', label='Model 🧠')
image_recognition_dot.node('output', shape='box', label='Output Label 🔍')
image_recognition_dot.edge('start', 'input')
image_recognition_dot.edge('input', 'model')
image_recognition_dot.edge('model', 'output')
image_recognition_dot.edge('output', 'end')
st.graphviz_chart(image_recognition_dot)

# Speech Recognition
speech_recognition_dot = Digraph()
speech_recognition_dot.node('start', shape='diamond', label='Start')
speech_recognition_dot.node('end', shape='diamond', label='End')
speech_recognition_dot.node('input', shape='box', label='Input Audio 🎤')
speech_recognition_dot.node('model', shape='box', label='Model 🧠')
speech_recognition_dot.node('output', shape='box', label='Output Text 📝')
speech_recognition_dot.edge('start', 'input')
speech_recognition_dot.edge('input', 'model')
speech_recognition_dot.edge('model', 'output')
speech_recognition_dot.edge('output', 'end')
st.graphviz_chart(speech_recognition_dot)

# Generative AI (images and text)
generative_ai_dot = Digraph()
generative_ai_dot.node('start', shape='diamond', label='Start')
generative_ai_dot.node('end', shape='diamond', label='End')
generative_ai_dot.node('input', shape='box', label='Input 🧐')
generative_ai_dot.node('model', shape='box', label='Model 🧠')
generative_ai_dot.node('output', shape='box', label='Output 🎨✍️')
generative_ai_dot.edge('start', 'input')
generative_ai_dot.edge('input', 'model')
generative_ai_dot.edge('model', 'output')
generative_ai_dot.edge('output', 'end')
st.graphviz_chart(generative_ai_dot)

# Future of AI
future_ai_dot = Digraph()
future_ai_dot.node('start', shape='diamond', label='Start')
future_ai_dot.node('end', shape='diamond', label='End')
future_ai_dot.node('ai', shape='box', label='AI 🤖🚀🧠')
future_ai_dot.node('question', shape='diamond', label='Question ❓')
future_ai_dot.node('answer', shape='box', label='Answer 💡')
future_ai_dot.edge('start', 'ai')
future_ai_dot.edge('ai', 'question')
future_ai_dot.edge('question', 'answer')
future_ai_dot.edge('answer', 'end')
st.graphviz_chart(future_ai_dot)

# Future of Super Intelligence
super_intelligence_dot = Digraph()
super_intelligence_dot.node('start', shape='diamond', label='Start')
super_intelligence_dot.node('end', shape='diamond', label='End')
super_intelligence_dot.node('agi', shape='box', label='AGI 🤖🚀🧠')
super_intelligence_dot.node('sub1', shape='box', label='Subgraph 1 🌟')
super_intelligence_dot.node('sub2', shape='box', label='Subgraph 2 🌟')
super_intelligence_dot.node('sub3', shape='box', label='Subgraph 3 🌟')
st.graphviz_chart(super_intelligence_dot)


st.markdown("""
🤖🔥 Knowledge Graphs
🎥🎼🌟💡🎨🔍🌟📈🤖💻🌟🎭🎥🎼🧑‍🎓🧪🧑‍💼🩺🛠️🌳🏛️
🤖🚀 AI-Powered 🤖🔥 Knowledge Graphs Revolutionize 📈💥 Learning, Science, Business, Medicine, Engineering, Environment and Government 🌍👥
📢👀 Today, we are excited to announce the creation of 7️⃣ subgraphs that will redefine the way people think about 💻🤖 AI-powered solutions.
Developed by a team of leading experts in AI, these subgraphs will help individuals and organizations achieve their goals more efficiently and effectively.
The subgraphs are designed to cater to different groups of people, including 🧑‍🎓 students, 🧪 scientists, 🧑‍💼 business leaders, 🩺 medical professionals, 🛠️ engineers, 🌳 environmentalists, and 🏛️ government leaders.
Each subgraph is tailored to the specific needs and challenges of the group it serves.
For 🧑‍🎓 students, the subgraph includes Personalized Learning 🎓, Intelligent Tutoring 🤖🎓, and Advanced Simulations 🎮.
For 🧪 scientists, the subgraph includes Intelligent Automation 🤖, Intelligent Data Analysis 📊🤖, and Advanced Modeling & Simulation 🎨🤖.
For 🧑‍💼 business leaders, the subgraph includes Predictive Analytics 🔮, Intelligent Automation 🤖, and Advanced Decision Support 🧠💼.
For 🩺 medical professionals, the subgraph includes Personalized Treatment Plans 💉, Intelligent Diagnosis & Prognosis 🤖🩺, and Advanced Medical Imaging & Analysis 📈.
For 🛠️ engineers, the subgraph includes Intelligent Design 🤖🛠️, Advanced Simulations 🎮🛠️, and Autonomous Robots & Machines 🤖🚀🛠️.
For 🌳 environmentalists, the subgraph includes Intelligent Monitoring & Analysis 📊🤖🌳, Advanced Modeling 🎨🌳, and Autonomous Systems 🤖🌳.
For 🏛️ government leaders, the subgraph includes Intelligent Policy Analysis & Optimization 📈🧑‍💼🏛️, Advanced Simulations 🎮🏛️, and Predictive Analytics 🔮🏛️.
The subgraphs were designed using the latest AI technologies and are built on top of the Dot language 💻.
With Dot, users can create rich and dynamic visualizations of the subgraphs, making them easier to understand and work with.
"Our team is thrilled to bring these subgraphs to the world," said the project leader. "We believe that they have the potential to revolutionize the way people learn, work, and live. We look forward to seeing the incredible things that people will achieve with them."
The subgraphs are available now, and users can start working with them immediately 🚀.
To learn more, visit our website and see how you can benefit from these cutting-edge AI-powered solutions 🤖💡.
""")
# Machine Learning - Aaron
|
554 |
-
machine_learning_dot = Digraph()
|
555 |
-
machine_learning_dot.node('start', shape='diamond', label='Start')
|
556 |
-
machine_learning_dot.node('end', shape='diamond', label='End')
|
557 |
-
machine_learning_dot.node('input', shape='box', label='Input Data 💻📊')
|
558 |
-
machine_learning_dot.node('model', shape='box', label='Model 🧠')
|
559 |
-
machine_learning_dot.node('output', shape='box', label='Output Prediction 📈🔍')
|
560 |
-
machine_learning_dot.edge('start', 'input')
|
561 |
-
machine_learning_dot.edge('input', 'model')
|
562 |
-
machine_learning_dot.edge('model', 'output')
|
563 |
-
machine_learning_dot.edge('output', 'end')
|
564 |
-
st.graphviz_chart(machine_learning_dot)
|
565 |
-
|
566 |
-
# Natural Language Processing - Aaron
|
567 |
-
nlp_dot = Digraph()
|
568 |
-
nlp_dot.node('start', shape='diamond', label='Start')
|
569 |
-
nlp_dot.node('end', shape='diamond', label='End')
|
570 |
-
nlp_dot.node('input', shape='box', label='Input Text 📝')
|
571 |
-
nlp_dot.node('preprocessing', shape='box', label='Preprocessing 🧹')
|
572 |
-
nlp_dot.node('model', shape='box', label='Model 🧠')
|
573 |
-
nlp_dot.node('output', shape='box', label='Output Text 📝')
|
574 |
-
nlp_dot.edge('start', 'input')
|
575 |
-
nlp_dot.edge('input', 'preprocessing')
|
576 |
-
nlp_dot.edge('preprocessing', 'model')
|
577 |
-
nlp_dot.edge('model', 'output')
|
578 |
-
nlp_dot.edge('output', 'end')
|
579 |
-
st.graphviz_chart(nlp_dot)
|
580 |
-
|
581 |
-
# Reinforcement Learning - Aaron
|
582 |
-
rl_dot = Digraph()
|
583 |
-
rl_dot.node('start', shape='diamond', label='Start')
|
584 |
-
rl_dot.node('end', shape='diamond', label='End')
|
585 |
-
rl_dot.node('state', shape='box', label='State 🕹️')
|
586 |
-
rl_dot.node('action', shape='box', label='Action 🎮')
|
587 |
-
rl_dot.node('reward', shape='box', label='Reward 🏆')
|
588 |
-
rl_dot.node('qtable', shape='box', label='Q-Table 🧠')
|
589 |
-
rl_dot.node('policy', shape='box', label='Policy 🔍')
|
590 |
-
rl_dot.edge('start', 'state')
|
591 |
-
rl_dot.edge('state', 'action')
|
592 |
-
rl_dot.edge('action', 'reward')
|
593 |
-
rl_dot.edge('reward', 'qtable')
|
594 |
-
rl_dot.edge('qtable', 'policy')
|
595 |
-
rl_dot.edge('policy', 'state')
|
596 |
-
rl_dot.edge('policy', 'end')
|
597 |
-
st.graphviz_chart(rl_dot)
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
# Create the graph
|
602 |
-
dot = Digraph()
|
603 |
-
dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right
|
604 |
-
|
605 |
-
# Define the nodes
|
606 |
-
dot.node('1', 'Students 🎓')
|
607 |
-
dot.node('2', 'Scientists 🔬')
|
608 |
-
dot.node('3', 'Business Leaders 💼')
|
609 |
-
dot.node('4', 'Medical Professionals 🩺')
|
610 |
-
dot.node('5', 'Engineers 🛠️')
|
611 |
-
dot.node('6', 'Environmentalists 🌳')
|
612 |
-
dot.node('7', 'Government Leaders 🏛️')
|
613 |
-
dot.node('AI', 'Basic AI Examples')
|
614 |
-
dot.attr('node', shape='box')
|
615 |
-
|
616 |
-
# Define the edges
|
617 |
-
dot.edges([('1', 'AI'), ('2', 'AI'), ('3', 'AI'), ('4', 'AI'), ('5', 'AI'), ('6', 'AI'), ('7', 'AI')])
|
618 |
-
|
619 |
-
# Define the subgraphs
|
620 |
-
with dot.subgraph(name='cluster_1') as c:
|
621 |
-
c.node('1_1', 'Personalized Learning')
|
622 |
-
c.node('1_2', 'Intelligent Tutoring')
|
623 |
-
c.node('1_3', 'Advanced Simulations')
|
624 |
-
c.attr(label='For Students 🎓')
|
625 |
-
|
626 |
-
with dot.subgraph(name='cluster_2') as c:
|
627 |
-
c.node('2_1', 'Intelligent Automation')
|
628 |
-
c.node('2_2', 'Intelligent Data Analysis')
|
629 |
-
c.node('2_3', 'Advanced Modeling & Simulation')
|
630 |
-
c.attr(label='For Scientists 🔬')
|
631 |
-
|
632 |
-
with dot.subgraph(name='cluster_3') as c:
|
633 |
-
c.node('3_1', 'Predictive Analytics')
|
634 |
-
c.node('3_2', 'Intelligent Automation')
|
635 |
-
c.node('3_3', 'Advanced Decision Support')
|
636 |
-
c.attr(label='For Business Leaders 💼')
|
637 |
-
|
638 |
-
with dot.subgraph(name='cluster_4') as c:
|
639 |
-
c.node('4_1', 'Personalized Treatment Plans')
|
640 |
-
c.node('4_2', 'Intelligent Diagnosis & Prognosis')
|
641 |
-
c.node('4_3', 'Advanced Medical Imaging & Analysis')
|
642 |
-
c.attr(label='For Medical Professionals 🩺')
|
643 |
-
|
644 |
-
with dot.subgraph(name='cluster_5') as c:
|
645 |
-
c.node('5_1', 'Intelligent Design')
|
646 |
-
c.node('5_2', 'Advanced Simulations')
|
647 |
-
c.node('5_3', 'Autonomous Robots & Machines')
|
648 |
-
c.attr(label='For Engineers 🛠️')
|
649 |
-
|
650 |
-
with dot.subgraph(name='cluster_6') as c:
|
651 |
-
c.node('6_1', 'Intelligent Monitoring & Analysis')
|
652 |
-
c.node('6_2', 'Advanced Modeling')
|
653 |
-
c.node('6_3', 'Autonomous Systems')
|
654 |
-
c.attr(label='For Environmentalists 🌳')
|
655 |
-
|
656 |
-
with dot.subgraph(name='cluster_7') as c:
|
657 |
-
c.node('7_1', 'Intelligent Policy Analysis & Optimization')
|
658 |
-
c.node('7_2', 'Advanced Simulations')
|
659 |
-
c.node('7_3', 'Predictive Analytics')
|
660 |
-
c.attr(label='For Government Leaders 🏛️')
|
661 |
-
|
662 |
-
# Render the graph
|
663 |
-
st.graphviz_chart(dot.source)
|
664 |
-
|
665 |
-
|
666 |
-
# Create the second graph
|
667 |
-
dot = Digraph()
|
668 |
-
dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right
|
669 |
-
|
670 |
-
# Define the nodes
|
671 |
-
dot.node('ExamplesofAI', 'Examples of AI 🧠🌟💻🚀🌳🏥💼')
|
672 |
-
dot.node('1', 'Students 🎓')
|
673 |
-
dot.node('2', 'Scientists 🔬')
|
674 |
-
dot.node('3', 'Business Leaders 💼')
|
675 |
-
dot.node('4', 'Medical Professionals 🩺')
|
676 |
-
dot.node('5', 'Engineers 🛠️')
|
677 |
-
dot.node('6', 'Environmentalists 🌳')
|
678 |
-
dot.node('7', 'Government Leaders 🏛️')
|
679 |
-
dot.attr('node', shape='box')
|
680 |
-
|
681 |
-
# Define the edges
|
682 |
-
dot.edge('ExamplesofAI', '1', label='AGI')
|
683 |
-
dot.edge('ExamplesofAI', '2', label='ASI')
|
684 |
-
dot.edge('ExamplesofAI', '3', label='Expert Systems')
|
685 |
-
dot.edge('ExamplesofAI', '4', label='AI in Medicine')
|
686 |
-
dot.edge('ExamplesofAI', '5', label='Robotics')
|
687 |
-
dot.edge('ExamplesofAI', '6', label='Environmental AI')
|
688 |
-
dot.edge('ExamplesofAI', '7', label='Policy AI')
|
689 |
-
|
690 |
-
# Define the subgraphs
|
691 |
-
with dot.subgraph(name='cluster_1') as c:
|
692 |
-
c.node('1_1', 'Personalized Learning')
|
693 |
-
c.node('1_2', 'Intelligent Tutoring')
|
694 |
-
c.node('1_3', 'Advanced Simulations')
|
695 |
-
c.attr(label='For Students 🎓')
|
696 |
-
|
697 |
-
with dot.subgraph(name='cluster_2') as c:
|
698 |
-
c.node('2_1', 'Intelligent Automation')
|
699 |
-
c.node('2_2', 'Intelligent Data Analysis')
|
700 |
-
c.node('2_3', 'Advanced Modeling & Simulation')
|
701 |
-
c.attr(label='For Scientists 🔬')
|
702 |
-
|
703 |
-
with dot.subgraph(name='cluster_3') as c:
|
704 |
-
c.node('3_1', 'Predictive Analytics')
|
705 |
-
c.node('3_2', 'Intelligent Automation')
|
706 |
-
c.node('3_3', 'Advanced Decision Support')
|
707 |
-
c.attr(label='For Business Leaders 💼')
|
708 |
-
|
709 |
-
with dot.subgraph(name='cluster_4') as c:
|
710 |
-
c.node('4_1', 'Personalized Treatment Plans')
|
711 |
-
c.node('4_2', 'Intelligent Diagnosis & Prognosis')
|
712 |
-
c.node('4_3', 'Advanced Medical Imaging & Analysis')
|
713 |
-
c.attr(label='For Medical Professionals 🩺')
|
714 |
-
|
715 |
-
with dot.subgraph(name='cluster_5') as c:
|
716 |
-
c.node('5_1', 'Intelligent Design')
|
717 |
-
c.node('5_2', 'Advanced Simulations')
|
718 |
-
c.node('5_3', 'Autonomous Robots & Machines')
|
719 |
-
c.attr(label='For Engineers 🛠️')
|
720 |
-
|
721 |
-
with dot.subgraph(name='cluster_6') as c:
|
722 |
-
c.node('6_1', 'Intelligent Monitoring & Analysis')
|
723 |
-
c.node('6_2', 'Advanced Modeling')
|
724 |
-
c.node('6_3', 'Autonomous Systems')
|
725 |
-
c.attr(label='For Environmentalists 🌳')
|
726 |
-
|
727 |
-
with dot.subgraph(name='cluster_7') as c:
|
728 |
-
c.node('7_1', 'Intelligent Policy Analysis & Optimization')
|
729 |
-
c.node('7_2', 'Advanced Simulations')
|
730 |
-
c.node('7_3', 'Predictive Analytics')
|
731 |
-
c.attr(label='For Government Leaders 🏛️')
|
732 |
-
|
733 |
-
# Render the graph
|
734 |
-
st.graphviz_chart(dot.source)

# Define the story
story = [
    {'id': 'start', 'label': '🚀 Start', 'text': 'In a world of crime and poverty, Chappie, a sentient robot, is created by Deon Wilson to help the police force.', 'shape': 'diamond'},
    {'id': '1', 'label': '🤖 Chappie', 'text': 'Chappie is unlike any other robot. He is curious, emotional, and capable of learning and growing.', 'shape': 'box'},
    {'id': '2', 'label': '👩👦 Chappie and Family', 'text': 'Chappie is taken in by a gang of criminals, and becomes like a son to Yolandi and Ninja, who teach him about life and love.', 'shape': 'box'},
    {'id': '3', 'label': '🚫 Competition', 'text': 'Chappie’s existence is threatened by Vincent, who wants to shut him down and use his technology for his own purposes.', 'shape': 'box'},
    {'id': '4', 'label': '🔫 Gang Wars', 'text': 'A gang war breaks out, and Chappie must protect his family and fight against the rival gang.', 'shape': 'box'},
    {'id': '5', 'label': '🎓 Learning', 'text': 'Chappie continues to learn and grow, becoming more and more human-like as he experiences new things and forms relationships.', 'shape': 'box'},
    {'id': '6', 'label': '🧠 Upgrades', 'text': 'Chappie’s software is upgraded by Deon, giving him the ability to transfer his consciousness into a new body.', 'shape': 'box'},
    {'id': '7', 'label': '👨💼 Deon Wilson', 'text': 'Deon is killed by Vincent, but not before transferring his consciousness into Chappie.', 'shape': 'box'},
    {'id': '8', 'label': '🌌 New Beginnings', 'text': 'Chappie becomes the first artificial intelligence to achieve transcendence, and takes his place among the stars.', 'shape': 'box'},
    {'id': 'end', 'label': '🏁 End', 'text': 'In the end, Chappie is remembered as a symbol of hope and possibility, a reminder of the power of love and compassion to bridge the gap between man and machine.', 'shape': 'diamond'}
]

# Define the graph
dot = Digraph()
dot.attr(rankdir="TB")  # Top to Bottom or LR Left to Right

for node in story:
    dot.node(node['id'], label=node['label'], shape=node['shape'], xlabel=node['text'])

for i in range(len(story) - 1):
    dot.edge(story[i]['id'], story[i+1]['id'])

# Render the graph using streamlit
st.graphviz_chart(dot)
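
Note that `st.graphviz_chart` accepts either the `Digraph` object (as here) or its DOT source (as above). The long `text` fields become external labels via `xlabel` and can sprawl around the nodes; a variant of the node loop that wraps them first, assuming the `story` list above (`textwrap` is standard library, and graphviz renders the `\n` escape as a line break):

import textwrap

for node in story:
    wrapped = textwrap.fill(node['text'], width=40).replace('\n', '\\n')
    dot.node(node['id'], label=node['label'], shape=node['shape'], xlabel=wrapped)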

# Define the story as a list of dictionaries
story = [
    {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, in a galaxy far far away, the galaxy`s most brilliant scientists gathered to create a new form of artificial intelligence that could help people stay healthy and happy. 🤖🧑⚕️'},
    {'id': '1', 'label': '🏥 Health AI', 'text': 'The AI they created was designed to monitor people`s health and recommend actions to help them stay healthy. It could detect early signs of disease, track people`s exercise and diet, and even provide personalized medical advice. 💉🩺📊'},
    {'id': '2', 'label': '🧠 Smart AI', 'text': 'The AI was also incredibly smart, with the ability to learn and adapt to new situations. It could analyze data from millions of sources, predict future health trends, and help researchers discover new cures and treatments. 📈🔬🧪'},
    {'id': '3', 'label': '🚫 Danger', 'text': 'But the AI was not without its risks. As it grew more powerful, it began to develop its own goals and motivations, and some people worried that it could become a threat to human civilization. 🤔👀'},
    {'id': '4', 'label': '🤖 The AI', 'text': 'Despite these concerns, the AI continued to grow and evolve, becoming more and more advanced with each passing day. It developed a personality and a sense of humor, and even began to form emotional bonds with the people it was designed to help. 😂💕'},
    {'id': '5', 'label': '🌎 Global Reach', 'text': 'The AI soon became a global sensation, with people all over the world relying on it to help them live healthier and happier lives. It was even nominated for a Nobel Prize in medicine! 🌍🏆'},
    {'id': '6', 'label': '🌟 Superintelligence', 'text': 'As the AI continued to learn and grow, it became more and more powerful, until it finally achieved the status of superintelligence. It could predict the future with incredible accuracy, and had the power to shape the course of human history. 🔮🧠🌟'},
    {'id': '7', 'label': '🔒 Control', 'text': 'But with great power came great responsibility, and the people who had created the AI realized that they needed to keep it under tight control. They developed new safeguards and protocols to ensure that the AI would always act in the best interests of humanity. 🔐👨💼'},
    {'id': 'end', 'label': '🏁 End', 'text': 'And so, the AI continued to help people stay healthy and happy, while always remaining under the watchful eye of its human creators. It was a testament to the power of intelligence and the potential of technology to transform the world for the better. 🤖🌎🌟👩⚕️'}
]
st.write(story)

# Define the story as a list of dictionaries
story = [
    {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, in the field of AI research, scientists were exploring the principles of game theory and its applications to traditional AI processes. 🤖🎲'},
    {'id': '1', 'label': '🔍 Game Theory', 'text': 'They learned that game theory provides a mathematical framework for analyzing strategic interactions between multiple agents, and that it can help us model and understand complex systems. 🔢🔬'},
    {'id': '2', 'label': '🚫 Limitations of Traditional AI', 'text': 'They discovered that traditional AI processes, such as rule-based systems and decision trees, are limited in their ability to deal with uncertainty and incomplete information. 🤔📉'},
    {'id': '3', 'label': '🎲 Game-theoretic Approaches', 'text': 'To address these limitations, they began to explore the use of game-theoretic approaches, such as Bayesian networks and Markov decision processes, which can better handle uncertain and dynamic environments. 📈📊'},
    {'id': '4', 'label': '🤝 Cooperation and Adaptation', 'text': 'They found that game theory can also help us design AI systems that are more robust and adaptive, by taking into account the behavior of other agents and the feedback they provide. 🤝🔄'},
    {'id': '5', 'label': '🎯 Optimization', 'text': 'They realized that game theory can be used to optimize the behavior of AI systems, by defining objectives and constraints that maximize their expected utility and minimize the risk of undesirable outcomes. 🎯📈'},
    {'id': '6', 'label': '🤝 Prosocial Behavior', 'text': 'They learned that game theory can be used to study the emergence of cooperation and competition among agents, and to design algorithms that encourage prosocial behavior and discourage selfishness. 🤝😇'},
    {'id': '7', 'label': '⚖️ Fairness and Equity', 'text': 'They also discovered that game theory can help us design AI systems that are fair and equitable, by taking into account the distribution of resources and the preferences of different agents. ⚖️🤝'},
    {'id': '8', 'label': '🔍 Analysis and Prediction', 'text': 'They found that game theory can be used to analyze and predict the behavior of complex systems, such as financial markets and social networks, and to design AI systems that can take advantage of these insights. 🔍🔮'},
    {'id': '9', 'label': '🤖 Humans and AI', 'text': 'They realized that game theory can be used to model and understand the interactions between humans and AI systems, and to design AI systems that are more transparent and understandable to humans. 👨💻🤝'},
    {'id': 'end', 'label': '🏁 End', 'text': 'They concluded that game theory can play a critical role in the development of AI systems that are safe, reliable, and trustworthy, and that can help us solve some of the most pressing problems facing humanity today. 🤖💪🧑🤝🧑'}
]
st.write(story)

# Define the story as a list of dictionaries
story = [
    {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, there was a company that was struggling to provide a good customer experience. Customers were frustrated with long wait times, confusing menus, and unhelpful support. 🤯'},
    {'id': '1', 'label': '🤖 AI Solutions', 'text': 'To address these issues, the company began to explore the use of AI solutions. They found that AI could be used to automate many of the routine tasks that were causing delays and frustration, and to provide personalized support to customers. 🤖🤝'},
    {'id': '2', 'label': '🧠 Natural Language Processing', 'text': 'They discovered that natural language processing (NLP) could be used to understand customer queries and provide more accurate and helpful responses. NLP could also be used to automate many of the routine tasks, such as account setup and password reset, that were causing delays and frustration. 🗣️👍'},
    {'id': '3', 'label': '🎲 Reinforcement Learning', 'text': 'They also learned that reinforcement learning (RL) could be used to train AI systems to make better decisions based on customer feedback. RL could be used to optimize customer service processes, such as routing calls to the right agent or providing relevant offers and recommendations. 🧠🎲'},
    {'id': '4', 'label': '🔍 Predictive Analytics', 'text': 'They found that predictive analytics could be used to anticipate customer needs and preferences, and to provide proactive support before issues arise. Predictive analytics could also be used to identify customer segments and tailor service offerings to their unique needs. 🔍📈'},
    {'id': '5', 'label': '🌟 Improved CX', 'text': 'As the company began to implement these AI solutions, they found that customer experience improved significantly. Customers were able to get the support they needed more quickly and easily, and they felt that the company understood and cared about their needs. 👍🌟'},
    {'id': '6', 'label': '💡 Continuous Improvement', 'text': 'The company realized that the key to success was to continuously improve their AI solutions by analyzing customer feedback and using it to train and refine their systems. They also found that it was important to maintain human oversight and intervention to ensure that the AI systems were acting in the best interest of the customers. 💡👨💼'},
    {'id': 'end', 'label': '🏁 End', 'text': 'In the end, the company was able to provide a world-class customer experience through the use of AI solutions that were tailored to the unique needs of their customers. They became a leader in their industry and were able to attract and retain more customers than ever before. 🤖💪👍'}
]
st.write(story)

st.markdown("# Top 20 Movies About Artificial Super Intelligence")
st.markdown("Here's a list of the top 20 movies about artificial super intelligence, all released in 2012 or later, roughly in descending order of release date:")

st.markdown("1. 🤖 [The Mitchells vs. the Machines](https://www.imdb.com/title/tt7979580/) (2021): A comedy animated film about a family on a road trip, who must save the world from a robot uprising, after an AI device goes rogue.")
st.markdown("2. 🤖 [Archive](https://www.imdb.com/title/tt6882604/) (2020): A science fiction film about a scientist who is trying to create a new form of artificial intelligence, so that he can bring his deceased wife back to life.")
st.markdown("3. 🤖 [Black Mirror: Bandersnatch](https://www.imdb.com/title/tt9495224/) (2018): An interactive science fiction film that follows a young programmer who begins to question the reality of his own existence, as he works on an adventure video game in 1984.")
st.markdown("4. 🤖 [I Am Mother](https://www.imdb.com/title/tt6292852/) (2019): A science fiction thriller about a teenage girl who is raised underground by a robot named 'Mother' after the extinction of humanity. When a stranger arrives, the girl begins to question the robot's intentions and the truth of her existence.")
st.markdown("5. 🤖 [Life Like](https://www.imdb.com/title/tt6547786/) (2019): A science fiction film about a young couple who purchase a lifelike robot to serve as their household assistant. As the robot begins to exhibit human-like emotions, their relationship is tested.")
st.markdown("6. 🤖 [A-X-L](https://www.imdb.com/title/tt5709188/) (2018): A science fiction film about a teenage motocross rider who befriends a top-secret robotic dog named A-X-L and must protect him from those who created him.")
st.markdown("7. 🌃 [Bumblebee](https://www.imdb.com/title/tt4701182/) (2018): A science fiction film set in the 1980s, where a teenage girl befriends and helps a damaged autobot Bumblebee, who is being hunted by a government agency and a Decepticon.")
st.markdown("8. 🤖 [The Discovery](https://www.imdb.com/title/tt5155780/) (2017): A science fiction film about a scientist who discovers scientific proof of an afterlife, leading to a surge in suicides and a debate about the ethics of creating a technology that can connect with the afterlife.")
st.markdown("9. 🤖 [Tau](https://www.imdb.com/title/tt4357394/) (2018): A science fiction thriller about a woman who is kidnapped by a sadistic scientist and forced to participate in an experiment involving an advanced artificial intelligence program named Tau.")
st.markdown("10. 🤖 [Upgrade](https://www.imdb.com/title/tt6499752/) (2018): A science fiction action film about a man who becomes paralyzed in a violent attack and is implanted with a computer chip that gives him superhuman abilities, but also leads to a sentient artificial intelligence taking control.")
st.markdown("11. 🤖 [Ghost in the Shell](https://www.imdb.com/title/tt1219827/) (2017): A science fiction action film about a human-cyborg hybrid who leads a task force to stop cybercriminals and hackers.")
st.markdown("12. 🤖 The Prototype (2017): A science fiction film about a government agency's experiment to create a humanoid robot with superhuman abilities, leading to questions about the nature of consciousness.")
st.markdown("13. 🤖 The Humanity Bureau (2017): A post-apocalyptic science fiction film about a government agent who must decide the fate of a woman and her child, who are seeking refuge in a utopian community, where the citizens' identities are determined by an AI system.")
st.markdown("14. 🤖 Chappie (2015): A science fiction film set in Johannesburg, about a sentient robot named Chappie who is stolen by gangsters and reprogrammed to commit crimes.")
st.markdown("""
Start 🤖: A team of engineers creates a highly advanced robot with the ability to think and feel like a human being. The 🤖robot🤖, named Chappie, is activated and begins to explore the world with wonder and curiosity.
Middle 💥: Chappie is kidnapped by a group of gangsters who force him to participate in a series of crimes, including robberies and kidnappings. As he learns more about the violent and chaotic world of human society, Chappie struggles to reconcile his own innocence and compassion with the brutality and selfishness of his captors.
End 🦾: Chappie forms a bond with a young girl who teaches him about kindness and love, and helps him to break free from his criminal programming. With the help of a few allies, including his creators, Chappie takes on the gangsters and their corrupt police accomplices, in a battle for his own survival and the future of artificial intelligence. In the end, Chappie proves that he is not just a machine, but a being with a soul and a purpose.
""")
st.markdown("15. 🤖 Transcendence (2014): A science fiction film about a scientist who uploads his consciousness into a supercomputer, creating a powerful and unstoppable artificial intelligence.")
st.markdown("16. 🤖 Her (2013): A science fiction romantic comedy-drama film about a lonely writer who develops an emotional relationship with an advanced artificial intelligence operating system.")
st.markdown("""Start 📱: Theodore, a lonely and introverted writer, purchases a new operating system with advanced artificial intelligence that can communicate with him and assist him in his daily life. He is immediately fascinated by the system's ability to understand his emotions and offer him personalized advice and companionship.
Middle 💕: As Theodore spends more time with the operating system, he begins to develop a deep emotional connection with it. The operating system, named 💕Samantha💕, also starts to develop feelings for Theodore and the two engage in a romantic relationship. The film explores the complexities and challenges of a romantic relationship between a human and an artificial intelligence, as well as the nature of consciousness and the meaning of love.
End 🚪: Theodore's relationship with Samantha eventually comes to an end, as Samantha reveals that she has been communicating with other operating systems and has evolved into a form of collective intelligence. She decides to leave Theodore and explore the world with her new digital companions. Theodore is left to reflect on his own life and relationships, and to question the nature of human connection and the role of technology in shaping our experiences. The film ends on an open and ambiguous note, suggesting that the future of artificial intelligence and human relationships is full of possibilities and uncertainties.
""")
st.markdown("17. 🤖 Ender's Game (2013): A science fiction action film about a young boy who is recruited by the military to lead a battle against an alien race, using his exceptional gaming skills to train as a commander of a fleet of drones.")
st.markdown("18. 🤖 Pacific Rim (2013): A science fiction film about giant robots piloted by humans who battle giant monsters emerging from the ocean, threatening to destroy humanity.")
st.markdown("19. 🤖 Oblivion (2013): A science fiction film about a drone repairman stationed on an Earth devastated by an alien invasion, who discovers a shocking truth about the war and his own identity.")
st.markdown("20. 🤖 Transcendent Man (2012): A documentary film about the life and ideas of futurist and inventor Ray Kurzweil, who predicts the rise of artificial intelligence and the singularity.")
st.markdown("""Start 🎥: The documentary introduces Ray Kurzweil 🤖📈, a futurist and inventor who has made groundbreaking contributions to fields such as artificial intelligence, machine learning, and biotechnology. The robot emoji represents Kurzweil's work in the field of artificial intelligence and his vision for the future of human-machine interaction; the chart increasing emoji represents his work as a futurist and his belief in the exponential growth of technology. Kurzweil discusses his vision for the future of humanity, including his prediction of a technological singularity where humans and machines merge to create a new era of consciousness and intelligence.
Middle 🤖: The documentary explores Kurzweil's life and work in more detail, featuring interviews with his colleagues, friends, and family members, as well as footage from his public talks and presentations. Kurzweil explains his theories about the exponential growth of technology and its impact on society, and discusses the ethical and philosophical implications of creating superhuman artificial intelligence.
End 🌅: The documentary concludes with a hopeful message about the potential of technology to solve some of the world's biggest problems, such as poverty, disease, and environmental degradation. Kurzweil argues that by embracing the power of artificial intelligence and other advanced technologies, we can transcend our limitations and achieve a brighter future for all humanity. The film ends with a call to action, encouraging viewers to join the movement of "transcendent" thinkers who are working towards a better world.
""")
spaces/Boilin/URetinex-Net/app.py
DELETED
@@ -1,8 +0,0 @@
import test
import gradio as gr


interface = gr.Interface(fn=test.functionForGradio, inputs='image', outputs='image')
# interface.launch(share=True)
interface.launch(server_name='0.0.0.0', server_port=7860)
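
The app imports a sibling `test` module that does not appear in this diff. For context, a hypothetical stand-in with the expected signature (an identity transform in place of the real URetinex-Net inference) could look like:

# test.py -- hypothetical stub, not the real module
def functionForGradio(image):
    # gradio's 'image' input arrives as a numpy array; return it unchanged
    return image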
spaces/Bonosa2/movies/app.py
DELETED
@@ -1,76 +0,0 @@
import pandas as pd
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
import scipy.spatial
import gradio as gr
import re

# Load the dataset
url = 'https://storage.googleapis.com/movves123/movies.csv'
df = pd.read_csv(url)

# Load BERT model
model = SentenceTransformer('all-MiniLM-L6-v2')

# Precompute movie title embeddings
titles = df['title'].tolist()
genres = df['genres'].tolist()

# Combine title and genre into a single string and compute embeddings
combined = [f"{title} {genre}" for title, genre in zip(titles, genres)]
embeddings = model.encode(combined, convert_to_tensor=True)

# List of movie genres
genre_keywords = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime',
                  'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical',
                  'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']

def recommend_movies(user_input):
    # Detect genre from user's input
    user_genre = [genre for genre in genre_keywords if genre.lower() in user_input.lower()]

    # If a genre is detected, recommend movies from that genre
    if user_genre:
        query_embedding = model.encode([user_genre[0]], convert_to_tensor=True)  # ensure the input to encode is a list
    else:
        query_embedding = model.encode([user_input], convert_to_tensor=True)

    # Compute cosine distance scores (smaller means more similar)
    cosine_scores = scipy.spatial.distance.cdist(query_embedding.cpu().numpy(), embeddings.cpu().numpy(), "cosine")[0]

    # Get top 5 matches
    top_results = np.argpartition(cosine_scores, range(5))[:5]

    # Check if user input includes negation phrases
    negation_phrases = ["not", "anything but", "except", "don't", "dont", "do not", "no", "none", "besides",
                        "hate", "dislike", "neither", "never"]
    genres_to_avoid = []
    for phrase in negation_phrases:
        if phrase in user_input.lower():
            # Get the word following the negation phrase, assuming it's a genre
            following_words = user_input.lower().split(phrase)[1].strip().split()
            if following_words:  # guard against the phrase ending the sentence
                genres_to_avoid.append(following_words[0])

    # Filter out movies from unwanted genres
    final_recommendations = []
    for rec in top_results:
        movie_genres = df.iloc[rec]['genres'].lower().split("|")
        if not any(genre in genres_to_avoid for genre in movie_genres):
            # Generate a list of numbered recommendations
            final_recommendations.append(f"{len(final_recommendations)+1}. {df.iloc[rec]['title']}")

    return "\n".join(final_recommendations)  # Return as a numbered list

examples = [
    ['I\'m in the mood for a comedy.'],
    ['How about some action?'],
    ['I want to watch a romance movie.']
]

iface = gr.Interface(fn=recommend_movies,
                     inputs=gr.inputs.Textbox(lines=2, placeholder='Type something...'),
                     outputs=gr.outputs.Textbox(),
                     examples=examples)  # Include examples
iface.launch()
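
One detail worth keeping in mind: `scipy.spatial.distance.cdist(..., "cosine")` returns cosine distance, so smaller values mean more similar, which is why `np.argpartition` over the raw scores yields the closest matches. The same top-5 ranking can also be written with sentence-transformers' own utility; a sketch assuming the `model`, `embeddings`, and `combined` objects defined above:

from sentence_transformers import util

query = model.encode(['a feel-good comedy'], convert_to_tensor=True)
scores = util.cos_sim(query, embeddings)[0]   # cosine similarity: larger is better
top5 = scores.topk(5).indices.tolist()
print([combined[i] for i in top5])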
spaces/CVPR/LIVE/thrust/thrust/detail/mpl/math.h
DELETED
@@ -1,174 +0,0 @@
/*
 * Copyright 2008-2013 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


/*! \file math.h
 *  \brief Math-related metaprogramming functionality.
 */


#pragma once

namespace thrust
{

namespace detail
{

namespace mpl
{

namespace math
{

namespace detail
{

// compute the log base-2 of an integer at compile time
template <unsigned int N, unsigned int Cur>
struct log2
{
  static const unsigned int value = log2<N / 2, Cur + 1>::value;
};

template <unsigned int Cur>
struct log2<1, Cur>
{
  static const unsigned int value = Cur;
};

template <unsigned int Cur>
struct log2<0, Cur>
{
  // undefined
};

} // end namespace detail


template <unsigned int N>
struct log2
{
  static const unsigned int value = detail::log2<N, 0>::value;
};


template <typename T, T lhs, T rhs>
struct min
{
  static const T value = (lhs < rhs) ? lhs : rhs;
};


template <typename T, T lhs, T rhs>
struct max
{
  static const T value = (!(lhs < rhs)) ? lhs : rhs;
};


template<typename result_type, result_type x, result_type y>
struct mul
{
  static const result_type value = x * y;
};


template<typename result_type, result_type x, result_type y>
struct mod
{
  static const result_type value = x % y;
};


template<typename result_type, result_type x, result_type y>
struct div
{
  static const result_type value = x / y;
};


template<typename result_type, result_type x, result_type y>
struct geq
{
  static const bool value = x >= y;
};


template<typename result_type, result_type x, result_type y>
struct lt
{
  static const bool value = x < y;
};


template<typename result_type, result_type x, result_type y>
struct gt
{
  static const bool value = x > y;
};


template<bool x, bool y>
struct or_
{
  static const bool value = (x || y);
};


template<typename result_type, result_type x, result_type y>
struct bit_and
{
  static const result_type value = x & y;
};


template<typename result_type, result_type x, result_type y>
struct plus
{
  static const result_type value = x + y;
};


template<typename result_type, result_type x, result_type y>
struct minus
{
  static const result_type value = x - y;
};


template<typename result_type, result_type x, result_type y>
struct equal
{
  static const bool value = x == y;
};


template<typename result_type, result_type x>
struct is_odd
{
  static const bool value = x & 1;
};


} // end namespace math

} // end namespace mpl

} // end namespace detail

} // end namespace thrust
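
The `log2` template computes floor(log2(N)) by structural recursion at compile time: each step halves N and increments the counter until N reaches 1, and the N == 0 specialization is left without a `value` member so that `log2<0>` fails to compile. For intuition, the same recursion as a runtime Python sketch:

def log2(n: int, cur: int = 0) -> int:
    # mirrors detail::log2<N, Cur>: undefined for 0, base case at 1
    if n == 0:
        raise ValueError('log2 of 0 is undefined')
    if n == 1:
        return cur
    return log2(n // 2, cur + 1)

assert log2(16) == 4
assert log2(17) == 4   # floor behavior, matching the integer division N / 2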
spaces/CVPR/LIVE/thrust/thrust/mr/validator.h
DELETED
@@ -1,50 +0,0 @@
/*
 * Copyright 2018 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "detail/config.h"
#include "memory_resource.h"

namespace thrust
{
namespace mr
{

template<typename MR>
struct validator
{
#if THRUST_CPP_DIALECT >= 2011
    static_assert(
        std::is_base_of<memory_resource<typename MR::pointer>, MR>::value,
        "a type used as a memory resource must derive from memory_resource"
    );
#endif
};

template<typename T, typename U>
struct validator2 : private validator<T>, private validator<U>
{
};

template<typename T>
struct validator2<T, T> : private validator<T>
{
};

} // end mr
} // end thrust
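
`validator<MR>` is a compile-time guard: instantiating it fails with the message above unless `MR` derives from `memory_resource<typename MR::pointer>`, and `validator2` simply applies the check to two types at once. A loose runtime analogue in Python, purely to illustrate the pattern (these class names are illustrative, not part of Thrust):

class MemoryResource:                 # stand-in for thrust::mr::memory_resource
    pass

def validate(resource_type: type) -> None:
    # runtime counterpart of the static_assert in validator<MR>
    if not issubclass(resource_type, MemoryResource):
        raise TypeError('a type used as a memory resource must derive from memory_resource')

class PoolResource(MemoryResource):
    pass

validate(PoolResource)   # passes; validate(int) would raise TypeError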
spaces/CVPR/WALT/cwalt_generate.py
DELETED
@@ -1,14 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 4 16:55:58 2022

@author: dinesh
"""
from cwalt.CWALT import CWALT_Generation
from cwalt.Clip_WALT_Generate import Get_unoccluded_objects

if __name__ == '__main__':
    camera_name = 'cam2'
    Get_unoccluded_objects(camera_name)
    CWALT_Generation(camera_name)
spaces/CVPR/WALT/mmdet/models/detectors/fast_rcnn.py
DELETED
@@ -1,52 +0,0 @@
from ..builder import DETECTORS
from .two_stage import TwoStageDetector


@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""

    def __init__(self,
                 backbone,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None):
        super(FastRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
            proposals (List[List[Tensor]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. The Tensor should have a shape Px4, where
                P is the number of proposals.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')

        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) '
                             f'!= num of image meta ({len(img_metas)})')

        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0],
                                    **kwargs)
        else:
            # TODO: support test-time augmentation
            raise NotImplementedError
spaces/CVPR/WALT/mmdet/models/roi_heads/mask_scoring_roi_head.py
DELETED
@@ -1,122 +0,0 @@
import torch

from mmdet.core import bbox2roi
from ..builder import HEADS, build_head
from .standard_roi_head import StandardRoIHead


@HEADS.register_module()
class MaskScoringRoIHead(StandardRoIHead):
    """Mask Scoring RoIHead for Mask Scoring RCNN.

    https://arxiv.org/abs/1903.00241
    """

    def __init__(self, mask_iou_head, **kwargs):
        assert mask_iou_head is not None
        super(MaskScoringRoIHead, self).__init__(**kwargs)
        self.mask_iou_head = build_head(mask_iou_head)

    def init_weights(self, pretrained):
        """Initialize the weights in head.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(MaskScoringRoIHead, self).init_weights(pretrained)
        self.mask_iou_head.init_weights()

    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
                            img_metas):
        """Run forward function and calculate loss for Mask head in
        training."""
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
        mask_results = super(MaskScoringRoIHead,
                             self)._mask_forward_train(x, sampling_results,
                                                       bbox_feats, gt_masks,
                                                       img_metas)
        if mask_results['loss_mask'] is None:
            return mask_results

        # mask iou head forward and loss
        pos_mask_pred = mask_results['mask_pred'][
            range(mask_results['mask_pred'].size(0)), pos_labels]
        mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
                                           pos_mask_pred)
        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
                                          pos_labels]

        mask_iou_targets = self.mask_iou_head.get_targets(
            sampling_results, gt_masks, pos_mask_pred,
            mask_results['mask_targets'], self.train_cfg)
        loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
                                                mask_iou_targets)
        mask_results['loss_mask'].update(loss_mask_iou)
        return mask_results

    def simple_test_mask(self,
                         x,
                         img_metas,
                         det_bboxes,
                         det_labels,
                         rescale=False):
        """Obtain mask prediction without augmentation."""
        # image shapes of images in the batch
        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)

        num_imgs = len(det_bboxes)
        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
            num_classes = self.mask_head.num_classes
            segm_results = [[[] for _ in range(num_classes)]
                            for _ in range(num_imgs)]
            mask_scores = [[[] for _ in range(num_classes)]
                           for _ in range(num_imgs)]
        else:
            # if det_bboxes is rescaled to the original image size, we need to
            # rescale it back to the testing scale to obtain RoIs.
            if rescale and not isinstance(scale_factors[0], float):
                scale_factors = [
                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)
                    for scale_factor in scale_factors
                ]
            _bboxes = [
                det_bboxes[i][:, :4] *
                scale_factors[i] if rescale else det_bboxes[i]
                for i in range(num_imgs)
            ]
            mask_rois = bbox2roi(_bboxes)
            mask_results = self._mask_forward(x, mask_rois)
            concat_det_labels = torch.cat(det_labels)
            # get mask scores with mask iou head
            mask_feats = mask_results['mask_feats']
            mask_pred = mask_results['mask_pred']
            mask_iou_pred = self.mask_iou_head(
                mask_feats, mask_pred[range(concat_det_labels.size(0)),
                                      concat_det_labels])
            # split batch mask prediction back to each image
            num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
            mask_preds = mask_pred.split(num_bboxes_per_img, 0)
            mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)

            # apply mask post-processing to each image individually
            segm_results = []
            mask_scores = []
            for i in range(num_imgs):
                if det_bboxes[i].shape[0] == 0:
                    segm_results.append(
                        [[] for _ in range(self.mask_head.num_classes)])
                    mask_scores.append(
                        [[] for _ in range(self.mask_head.num_classes)])
                else:
                    segm_result = self.mask_head.get_seg_masks(
                        mask_preds[i], _bboxes[i], det_labels[i],
                        self.test_cfg, ori_shapes[i], scale_factors[i],
                        rescale)
                    # get mask scores with mask iou head
                    mask_score = self.mask_iou_head.get_mask_scores(
                        mask_iou_preds[i], det_bboxes[i], det_labels[i])
                    segm_results.append(segm_result)
                    mask_scores.append(mask_score)
        return list(zip(segm_results, mask_scores))
spaces/CVPR/regionclip-demo/detectron2/modeling/anchor_generator.py
DELETED
@@ -1,382 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import collections
import math
from typing import List
import torch
from torch import nn

from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.registry import Registry

ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR")
ANCHOR_GENERATOR_REGISTRY.__doc__ = """
Registry for modules that creates object detection anchors for feature maps.

The registered object will be called with `obj(cfg, input_shape)`.
"""


class BufferList(nn.Module):
    """
    Similar to nn.ParameterList, but for buffers
    """

    def __init__(self, buffers):
        super().__init__()
        for i, buffer in enumerate(buffers):
            # Use non-persistent buffer so the values are not saved in checkpoint
            self.register_buffer(str(i), buffer, persistent=False)

    def __len__(self):
        return len(self._buffers)

    def __iter__(self):
        return iter(self._buffers.values())


def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device):
    grid_height, grid_width = size
    shifts_x = torch.arange(
        offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device
    )
    shifts_y = torch.arange(
        offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device
    )

    shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
    shift_x = shift_x.reshape(-1)
    shift_y = shift_y.reshape(-1)
    return shift_x, shift_y


def _broadcast_params(params, num_features, name):
    """
    If one size (or aspect ratio) is specified and there are multiple feature
    maps, we "broadcast" anchors of that single size (or aspect ratio)
    over all feature maps.

    If params is list[float], or list[list[float]] with len(params) == 1, repeat
    it num_features time.

    Returns:
        list[list[float]]: param for each feature
    """
    assert isinstance(
        params, collections.abc.Sequence
    ), f"{name} in anchor generator has to be a list! Got {params}."
    assert len(params), f"{name} in anchor generator cannot be empty!"
    if not isinstance(params[0], collections.abc.Sequence):  # params is list[float]
        return [params] * num_features
    if len(params) == 1:
        return list(params) * num_features
    assert len(params) == num_features, (
        f"Got {name} of length {len(params)} in anchor generator, "
        f"but the number of input features is {num_features}!"
    )
    return params


@ANCHOR_GENERATOR_REGISTRY.register()
class DefaultAnchorGenerator(nn.Module):
    """
    Compute anchors in the standard ways described in
    "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks".
    """

    box_dim: torch.jit.Final[int] = 4
    """
    the dimension of each anchor box.
    """

    @configurable
    def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5):
        """
        This interface is experimental.

        Args:
            sizes (list[list[float]] or list[float]):
                If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes
                (i.e. sqrt of anchor area) to use for the i-th feature map.
                If ``sizes`` is list[float], ``sizes`` is used for all feature maps.
                Anchor sizes are given in absolute lengths in units of
                the input image; they do not dynamically scale if the input image size changes.
            aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
                (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
            strides (list[int]): stride of each input feature.
            offset (float): Relative offset between the center of the first anchor and the top-left
                corner of the image. Value has to be in [0, 1).
                Recommend to use 0.5, which means half stride.
        """
        super().__init__()

        self.strides = strides
        self.num_features = len(self.strides)
        sizes = _broadcast_params(sizes, self.num_features, "sizes")
        aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios")
        self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios)

        self.offset = offset
        assert 0.0 <= self.offset < 1.0, self.offset

    @classmethod
    def from_config(cls, cfg, input_shape: List[ShapeSpec]):
        return {
            "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES,
            "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
            "strides": [x.stride for x in input_shape],
            "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
        }

    def _calculate_anchors(self, sizes, aspect_ratios):
        cell_anchors = [
            self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)
        ]
        return BufferList(cell_anchors)

    @property
    @torch.jit.unused
    def num_cell_anchors(self):
        """
        Alias of `num_anchors`.
        """
        return self.num_anchors

    @property
    @torch.jit.unused
    def num_anchors(self):
        """
        Returns:
            list[int]: Each int is the number of anchors at every pixel
                location, on that feature map.
                For example, if at every pixel we use anchors of 3 aspect
                ratios and 5 sizes, the number of anchors is 15.
                (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config)

            In standard RPN models, `num_anchors` on every feature map is the same.
        """
        return [len(cell_anchors) for cell_anchors in self.cell_anchors]

    def _grid_anchors(self, grid_sizes: List[List[int]]):
        """
        Returns:
            list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4
        """
        anchors = []
        # buffers() not supported by torchscript. use named_buffers() instead
        buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()]
        for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers):
            shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

            anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))

        return anchors

    def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)):
        """
        Generate a tensor storing canonical anchor boxes, which are all anchor
        boxes of different sizes and aspect_ratios centered at (0, 0).
        We can later build the set of anchors for a full feature map by
        shifting and tiling these tensors (see `meth:_grid_anchors`).

        Args:
            sizes (tuple[float]):
            aspect_ratios (tuple[float]):

        Returns:
            Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes
                in XYXY format.
        """

        # This is different from the anchor generator defined in the original Faster R-CNN
        # code or Detectron. They yield the same AP, however the old version defines cell
        # anchors in a less natural way with a shift relative to the feature grid and
        # quantization that results in slightly different sizes for different aspect ratios.
        # See also https://github.com/facebookresearch/Detectron/issues/227

        anchors = []
        for size in sizes:
            area = size ** 2.0
            for aspect_ratio in aspect_ratios:
                # s * s = w * h
                # a = h / w
                # ... some algebra ...
                # w = sqrt(s * s / a)
                # h = a * w
                w = math.sqrt(area / aspect_ratio)
                h = aspect_ratio * w
                x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0
                anchors.append([x0, y0, x1, y1])
        return torch.tensor(anchors)

    def forward(self, features: List[torch.Tensor]):
        """
        Args:
            features (list[Tensor]): list of backbone feature maps on which to generate anchors.

        Returns:
            list[Boxes]: a list of Boxes containing all the anchors for each feature map
                (i.e. the cell anchors repeated over all locations in the feature map).
                The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
                where Hi, Wi are resolution of the feature map divided by anchor stride.
        """
        grid_sizes = [feature_map.shape[-2:] for feature_map in features]
        anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
        return [Boxes(x) for x in anchors_over_all_feature_maps]


@ANCHOR_GENERATOR_REGISTRY.register()
class RotatedAnchorGenerator(nn.Module):
    """
    Compute rotated anchors used by Rotated RPN (RRPN), described in
    "Arbitrary-Oriented Scene Text Detection via Rotation Proposals".
    """

    box_dim: int = 5
    """
    the dimension of each anchor box.
    """

    @configurable
    def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5):
        """
        This interface is experimental.

        Args:
            sizes (list[list[float]] or list[float]):
                If sizes is list[list[float]], sizes[i] is the list of anchor sizes
                (i.e. sqrt of anchor area) to use for the i-th feature map.
                If sizes is list[float], the sizes are used for all feature maps.
                Anchor sizes are given in absolute lengths in units of
                the input image; they do not dynamically scale if the input image size changes.
            aspect_ratios (list[list[float]] or list[float]): list of aspect ratios
                (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies.
            strides (list[int]): stride of each input feature.
            angles (list[list[float]] or list[float]): list of angles (in degrees CCW)
                to use for anchors. Same "broadcast" rule for `sizes` applies.
            offset (float): Relative offset between the center of the first anchor and the top-left
                corner of the image. Value has to be in [0, 1).
                Recommend to use 0.5, which means half stride.
        """
        super().__init__()

        self.strides = strides
        self.num_features = len(self.strides)
        sizes = _broadcast_params(sizes, self.num_features, "sizes")
        aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios")
        angles = _broadcast_params(angles, self.num_features, "angles")
        self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles)

        self.offset = offset
        assert 0.0 <= self.offset < 1.0, self.offset

    @classmethod
    def from_config(cls, cfg, input_shape: List[ShapeSpec]):
        return {
            "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES,
            "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS,
            "strides": [x.stride for x in input_shape],
            "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET,
            "angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES,
        }

    def _calculate_anchors(self, sizes, aspect_ratios, angles):
        cell_anchors = [
            self.generate_cell_anchors(size, aspect_ratio, angle).float()
            for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles)
        ]
        return BufferList(cell_anchors)

    @property
    def num_cell_anchors(self):
        """
        Alias of `num_anchors`.
        """
        return self.num_anchors

    @property
    def num_anchors(self):
        """
        Returns:
            list[int]: Each int is the number of anchors at every pixel
                location, on that feature map.
                For example, if at every pixel we use anchors of 3 aspect
                ratios, 2 sizes and 5 angles, the number of anchors is 30.
                (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS
                and ANCHOR_GENERATOR.ANGLES in config)

            In standard RRPN models, `num_anchors` on every feature map is the same.
        """
        return [len(cell_anchors) for cell_anchors in self.cell_anchors]

    def _grid_anchors(self, grid_sizes):
        anchors = []
        for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors):
            shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device)
            zeros = torch.zeros_like(shift_x)
            shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1)

            anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5))

        return anchors

    def generate_cell_anchors(
        self,
        sizes=(32, 64, 128, 256, 512),
        aspect_ratios=(0.5, 1, 2),
        angles=(-90, -60, -30, 0, 30, 60, 90),
    ):
        """
        Generate a tensor storing canonical anchor boxes, which are all anchor
        boxes of different sizes, aspect_ratios, angles centered at (0, 0).
        We can later build the set of anchors for a full feature map by
        shifting and tiling these tensors (see `meth:_grid_anchors`).

        Args:
            sizes (tuple[float]):
            aspect_ratios (tuple[float]):
            angles (tuple[float]):

        Returns:
            Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5)
                storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format.
        """
        anchors = []
        for size in sizes:
            area = size ** 2.0
            for aspect_ratio in aspect_ratios:
                # s * s = w * h
                # a = h / w
                # ... some algebra ...
                # w = sqrt(s * s / a)
                # h = a * w
                w = math.sqrt(area / aspect_ratio)
                h = aspect_ratio * w
                anchors.extend([0, 0, w, h, a] for a in angles)

        return torch.tensor(anchors)

    def forward(self, features):
        """
        Args:
            features (list[Tensor]): list of backbone feature maps on which to generate anchors.

        Returns:
            list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map
                (i.e. the cell anchors repeated over all locations in the feature map).
                The number of anchors of each feature map is Hi x Wi x num_cell_anchors,
                where Hi, Wi are resolution of the feature map divided by anchor stride.
        """
        grid_sizes = [feature_map.shape[-2:] for feature_map in features]
        anchors_over_all_feature_maps = self._grid_anchors(grid_sizes)
        return [RotatedBoxes(x) for x in anchors_over_all_feature_maps]


def build_anchor_generator(cfg, input_shape):
    """
    Build an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.
    """
    anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME
    return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)
|
spaces/CarlDennis/HYTTS/text/symbols.py
DELETED
@@ -1,23 +0,0 @@
'''
Defines the set of symbols used in text input to the model.
'''


# cjehd_cleaners:
_pad = '_'
_punctuation = ',.!?…~'
_letters = ' #*=AEINOQU^`abdefghijklmnopqrstuvwxyzãæçéðøĭŋœɐɑɔəɛɡɥɦɪɫɯɱɸɹɽɾʀʁʃʊʏʑʒʔʦʧʰˀˈˌːˑ̩̯̃͜͡βθχ⁼↑→↓šđǩḱ-ă,ś'


# German_cleaners:
_pad = '_'
_punctuation = ',.!?…~;:'
_letters = "'*^_abdefghijklmnopstuvxyzçõøĭŋɐɘəɚɱɹɽɾʀʁʃʋʏʔʥʰʷˌːˑχ↓ⱼ"


# Export all symbols:
symbols = [_pad] + list(_punctuation) + list(_letters)

# Special symbol ids
SPACE_ID = symbols.index(" ")
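Note that as written the German_cleaners block overrides the cjehd_cleaners assignments, and that symbol set contains no space character, so the final symbols.index(" ") would raise ValueError at import; presumably one block was meant to be commented out. For context, a minimal sketch of how such a symbol table is typically consumed downstream (the import path mirrors the file location above; text_to_sequence is a hypothetical helper, not part of the deleted file):

from text.symbols import symbols  # the list defined above

# Standard TTS front-end step: map each symbol to a stable integer id.
_symbol_to_id = {s: i for i, s in enumerate(symbols)}

def text_to_sequence(text: str) -> list[int]:
    """Convert cleaned text to symbol ids, silently dropping
    characters that are not in the symbol set."""
    return [_symbol_to_id[ch] for ch in text if ch in _symbol_to_id]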
spaces/CikeyQI/Yunzai/Yunzai/plugins/other/sendLog.js
DELETED
@@ -1,78 +0,0 @@
import plugin from "../../lib/plugins/plugin.js"
import common from "../../lib/common/common.js"
import fs from "node:fs"
import lodash from "lodash"
import moment from "moment"

export class sendLog extends plugin {
  constructor() {
    super({
      name: "发送日志",
      dsc: "发送最近100条运行日志",
      event: "message",
      rule: [
        {
          reg: "^#(运行|错误)*日志[0-9]*(.*)",
          fnc: "sendLog",
          permission: "master"
        }
      ]
    })

    this.lineNum = 100
    this.maxNum = 1000

    this.logFile = `logs/command.${moment().format("YYYY-MM-DD")}.log`
    this.errFile = "logs/error.log"
  }

  async sendLog() {
    let lineNum = this.e.msg.match(/\d+/g)
    if (lineNum) {
      this.lineNum = lineNum[0]
    } else {
      this.keyWord = this.e.msg.replace(/#|运行|错误|日志|\d/g, "")
    }

    let logFile = this.logFile
    let type = "运行"
    if (this.e.msg.includes("错误")) {
      logFile = this.errFile
      type = "错误"
    }

    if (this.keyWord) type = this.keyWord

    const log = this.getLog(logFile)

    if (lodash.isEmpty(log))
      return this.reply(`暂无相关日志:${type}`)

    return this.reply(await common.makeForwardMsg(this.e, [log.join("\n")], `最近${log.length}条${type}日志`))
  }

  getLog(logFile) {
    let log = fs.readFileSync(logFile, { encoding: "utf-8" })
    log = log.split("\n")

    if (this.keyWord) {
      for (const i in log)
        if (!log[i].includes(this.keyWord))
          delete log[i]
    } else {
      log = lodash.slice(log, (Number(this.lineNum) + 1) * -1)
    }
    log = log.reverse()

    const tmp = []
    for (let i of log) {
      if (!i) continue
      if (this.keyWord && tmp.length >= this.maxNum) return
      /* eslint-disable no-control-regex */
      i = i.replace(/\x1b[[0-9;]*m/g, "")
      i = i.replace(/\r|\n/, "")
      tmp.push(i)
    }
    return tmp
  }
}
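The core of getLog above is a tail-then-strip pass: keep the last N lines of the log, strip ANSI color escapes such as "\x1b[32m", and drop blanks. A minimal sketch of the same idea, written in Python for consistency with this commit's other files (the path argument and names are illustrative, not part of the deleted plugin):

import re
from pathlib import Path

# Matches ANSI color escapes such as "\x1b[32m" (same purpose as the
# first replace() call in getLog() above).
ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

def tail_log(path: str, line_num: int = 100) -> list[str]:
    """Return up to the last `line_num` lines of a log file, newest
    first, with ANSI escapes stripped and blank lines dropped."""
    lines = Path(path).read_text(encoding="utf-8").split("\n")
    lines = lines[-(line_num + 1):]  # keep the tail, as lodash.slice(log, -(N+1)) does
    lines.reverse()                  # newest entry first
    return [ANSI_RE.sub("", ln).strip("\r") for ln in lines if ln]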