parquet-converter committed
Commit de8234d · 1 Parent(s): 27a1767

Update parquet files (step 73 of 121)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Plugins on M1 Macs A Bad Idea for Your System and Your Work.md +0 -31
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dcs A-10c Warthog Keygen Download.md +0 -130
  3. spaces/1gistliPinn/ChatGPT4/Examples/Arnold 2019 64 Bit Adlmint.dll Crack Download [REPACK].md +0 -9
  4. spaces/1gistliPinn/ChatGPT4/Examples/Car Mechanic Simulator 2018 [1.6.4 11 DLC] RePack [full Fix].md +0 -7
  5. spaces/1gistliPinn/ChatGPT4/Examples/Dangal Tamil Full Movie Download 720p [PATCHED].md +0 -28
  6. spaces/1line/AutoGPT/autogpt/speech/brian.py +0 -40
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing 2 Mod Apk 1.22.0 and Become a Drift King.md +0 -110
  8. spaces/1phancelerku/anime-remove-background/Bubble Shooter A Free and Fun Game for Your Laptop.md +0 -148
  9. spaces/1phancelerku/anime-remove-background/FIFA 22 Offline Apk Download Zip File with Obb and Data Included.md +0 -103
  10. spaces/22h/vintedois-diffusion-v0-1/README.md +0 -12
  11. spaces/232labs/VToonify/vtoonify/style_transfer.py +0 -232
  12. spaces/232labs/VToonify/vtoonify/train_vtoonify_d.py +0 -515
  13. spaces/AIGC-Audio/AudioGPT/sound_extraction/model/modules.py +0 -483
  14. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/attention.py +0 -261
  15. spaces/AJRFan/dreambooth-training/app.py +0 -340
  16. spaces/AONYLMR/anime-remove-background/README.md +0 -14
  17. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/mobilenet-v2_4xb32_2000e_3c_noF/mobilenet-v2_1xb32_300e_3c_noF.py +0 -140
  18. spaces/AchyuthGamer/Free-Accounts-Generator/fortnite/js/d140ouchebag.js +0 -65
  19. spaces/AchyuthGamer/OpenGPT/client/css/select.css +0 -35
  20. spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/__init__.py +0 -14
  21. spaces/AgentVerse/agentVerse/dataloader/mgsm.py +0 -23
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/input/OnPanPad.js +0 -90
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnySizer.js +0 -30
  24. spaces/Aki004/herta-so-vits/modules/crepe.py +0 -331
  25. spaces/Akshat231/super_space/README.md +0 -12
  26. spaces/AlterM/Zaglyt2-transformer-test/app.py +0 -14
  27. spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py +0 -2
  28. spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +0 -11
  29. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py +0 -65
  30. spaces/Andy1621/uniformer_image_detection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py +0 -97
  31. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py +0 -9
  32. spaces/AngoHF/ANGO-Leaderboard/components/__init__.py +0 -0
  33. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/benchmark.py +0 -72
  34. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/file_client.py +0 -1148
  35. spaces/Anthos23/hummus/app.py +0 -38
  36. spaces/AsakuraMizu/moe-tts/text/cantonese.py +0 -59
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py +0 -104
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/jupyter.py +0 -101
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_adapters.py +0 -170
  40. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/unicode.py +0 -352
  41. spaces/Atualli/yoloxTeste/checkYoloxGPU.sh +0 -16
  42. spaces/Awiny/Image2Paragraph/models/grit_src/image_dense_captions.py +0 -69
  43. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp +0 -507
  44. spaces/Benson/text-generation/Examples/Blackjack 21 Blackjackist Descargar.md +0 -93
  45. spaces/Benson/text-generation/Examples/Clash Royale Bluestacks Apk.md +0 -35
  46. spaces/BirdL/DONOTUSEDemo/app.py +0 -34
  47. spaces/CVPR/LIVE/thrust/thrust/system/cpp/vector.h +0 -69
  48. spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/transforms.py +0 -1955
  49. spaces/ChandraMohanNayal/AutoGPT/tests/test_config.py +0 -84
  50. spaces/CofAI/chat.b4/client/js/sidebar-toggler.js +0 -34
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cracked Plugins on M1 Macs A Bad Idea for Your System and Your Work.md DELETED
@@ -1,31 +0,0 @@
- <br />
- <h1>Why You Should Avoid Cracked Plugins on M1 Macs</h1>
- <p>If you are a music producer or a hobbyist who likes to use plugins for your audio projects, you might be tempted to download cracked plugins from the internet. Cracked plugins are plugins that have been illegally modified or hacked to bypass the license or registration process. They are often available for free or at a very low price on various websites or forums.</p>
- <h2>cracked plugins on m1</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734; <a href="https://byltly.com/2uKz1C">https://byltly.com/2uKz1C</a></b></p><br /><br />
- <p>However, using cracked plugins on your M1 Mac can have serious consequences for your system and your work. Here are some of the reasons why you should avoid cracked plugins on M1 Macs:</p>
- <ul>
- <li>Cracked plugins can contain malware or viruses that can harm your computer. Some of the malware can steal your personal information, damage your files, or hijack your system. Some of the viruses can corrupt your data, slow down your performance, or cause crashes or freezes.</li>
- <li>Cracked plugins can be incompatible with your M1 Mac. Since the M1 Mac uses a different architecture than the Intel-based Macs, some cracked plugins might not work properly or at all on your system. They might cause errors, glitches, or conflicts with other plugins or software. They might also prevent you from updating your system or software to the latest versions.</li>
- <li>Cracked plugins are unethical and illegal. By using cracked plugins, you are violating the terms and conditions of the plugin developers and distributors. You are also depriving them of their rightful income and recognition for their hard work and creativity. You might also face legal action or penalties if you are caught using cracked plugins.</li>
- </ul>
- <p>Therefore, it is better to avoid cracked plugins on M1 Macs and use legitimate plugins instead. Legitimate plugins are plugins that you have purchased or obtained legally from the official sources. They are safe, compatible, and reliable for your M1 Mac. They also come with technical support, updates, and warranties from the developers and distributors.</p>
- <p>Legitimate plugins might cost more than cracked plugins, but they are worth the investment in the long run. They can enhance your audio quality, productivity, and creativity without compromising your system or your work. They can also help you support the plugin industry and encourage more innovation and development.</p>
- <p>So, next time you are looking for a plugin for your M1 Mac, think twice before downloading a cracked plugin from the internet. Choose a legitimate plugin instead and enjoy the benefits of using it on your M1 Mac.</p>
-
- <h2>How to Find Legitimate Plugins for M1 Macs</h2>
- <p>Now that you know why you should avoid cracked plugins on M1 Macs, you might be wondering how to find legitimate plugins for your system. Here are some tips that can help you find and choose the best plugins for your M1 Mac:</p>
- <ul>
- <li>Do your research. Before you buy or download a plugin, make sure you check its features, specifications, reviews, and ratings. You can also watch or read tutorials, demos, or testimonials from other users or experts. This can help you determine if the plugin is suitable for your needs and preferences.</li>
- <li>Compare prices and offers. There are many sources where you can buy or download legitimate plugins for your M1 Mac, such as the official websites of the plugin developers or distributors, online marketplaces, online music stores, or online subscription services. You can compare the prices and offers of different sources and find the best deal for your budget.</li>
- <li>Look for discounts and promotions. Sometimes, you can find legitimate plugins for your M1 Mac at a lower price or even for free. This can happen when the plugin developers or distributors offer discounts, coupons, bundles, giveaways, or free trials. You can look for these opportunities on their websites, social media pages, newsletters, or blogs.</li>
- <li>Ask for recommendations. Another way to find legitimate plugins for your M1 Mac is to ask for recommendations from other users or experts. You can join online forums, groups, communities, or networks where you can interact with other music producers or hobbyists who use M1 Macs. You can also follow online influencers, bloggers, podcasters, or reviewers who specialize in plugins or audio production.</li>
- </ul>
- <p>By following these tips, you can find and choose the best legitimate plugins for your M1 Mac and enjoy using them on your system.</p>
-
- <h2>Conclusion</h2>
- <p>Cracked plugins on M1 Macs are not worth the risk or the hassle. They can harm your computer, your work, and your reputation. They can also prevent you from getting the most out of your M1 Mac and its capabilities.</p>
- <p>Legitimate plugins on M1 Macs are the way to go. They are safe, compatible, and reliable for your system. They can also enhance your audio quality, productivity, and creativity without compromising anything.</p>
- <p>So, avoid cracked plugins on M1 Macs and use legitimate plugins instead. You will be glad you did.</p><br />
- <br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dcs A-10c Warthog Keygen Download.md DELETED
@@ -1,130 +0,0 @@
- <br />
- <h1>Introduction</h1>
- <p>DCS: A-10C Warthog is a PC simulation of the U.S. premier Close Air Support attack aircraft. This is the second aircraft in the DCS series, following DCS: Black Shark, and it raises the bar even higher. Warthog brings the most realistic PC simulation of a modern fixed-wing combat aircraft in regards to flight dynamics, avionics, sensors, and weapon systems. You also have the option to play Warthog in "Game" mode for a casual game experience.</p>
- <h2>dcs a-10c warthog keygen download</h2><br /><p><b><b>Download Zip</b> &mdash; <a href="https://byltly.com/2uKvLV">https://byltly.com/2uKvLV</a></b></p><br /><br />
- <p>The A-10C is an enhanced version of the famous A-10A that served as a major close air support aircraft for the U.S. Air Force, Air National Guard, and Reserves for almost 30 years. The A-10C has been upgraded to meet 21st-century standards, using systems such as Multi-Function Color Displays (MFCD), GPS-guided weapons, and data-link support. Retaining all the features of the older A-10A, the A-10C has turned into a true precision strike fighter with the most modern navigation systems, precision attack weapons (Maverick, JDAM, WCMD, and laser-guided bombs), and an integrated countermeasures system.</p>
- <p>The A-10C has participated in operations over Iraq and Afghanistan and proved to be a precise and effective weapon in the "War on Terrorism". Its advanced equipment has greatly reduced the number of "friendly fire" incidents - thanks largely to the Situational Awareness Datalink (SADL) and the ability to better identify targets using the Litening II AT targeting pod. The A-10C of course retains its ability to do what it was originally designed to do: kill tanks in a conventional force-on-force battlefield.</p>
- <p>As with previous versions, the A-10C is very easy to fly and is a stable and survivable weapons platform. For those familiar with DCS: Black Shark, we feel that the A-10C will be much easier to fly.</p>
- <p>The DCS A-10C cockpit is a 100% six-degrees-of-freedom (6 DOF) cockpit that allows complete freedom of movement around the cockpit. Each panel is reproduced in exacting detail to match operational A-10Cs (Suite 3.1). This includes all panels, switches, dials, and buttons being animated, rendered in 3D, and given high-resolution textures. Day, night, and Night Vision Goggle (NVG) lighting are all available. When the mouse is hovered over a cockpit control, a tool tip is displayed to indicate the control's function.</p>
- <p>Fly missions in the Caucasus region of the Black Sea against and with a wide array of air, land, and sea forces with new and improved intelligence. Create your own missions and campaigns with the included Mission and Campaign Editors, and fly with and against friends online using the included online game browser.</p>
- <h1>Downloading and installing the game</h1>
- <p>There are several ways to get DCS: A-10C Warthog on your PC. You can buy it from various online stores such as Steam, Amazon, or directly from Eagle Dynamics.</p>
- <h1>Learning the basics</h1>
- <p>Before you can unleash the full potential of the A-10C Warthog, you need to learn how to operate its complex systems and procedures. Fortunately, the game provides you with several ways to do so, ranging from interactive tutorials to detailed manuals and guides.</p>
- <p>The most recommended way to start learning the basics is to play the interactive training missions that are included with the game. These missions will guide you step by step through various aspects of flying and fighting with the A-10C, such as navigation, communication, sensors, weapons, and countermeasures. You will be able to follow the instructions of a virtual instructor, who will demonstrate and explain each action and control. You will also be able to pause and resume the training at any time, as well as replay any part you want.</p>
- <p>To access the interactive training missions, go to the main menu and select TRAINING. You will see a list of 25 training missions, covering topics such as:</p>
- <ul>
- <li>Cockpit familiarization</li>
- <li>Engine start-up and shutdown</li>
- <li>Takeoff and landing</li>
- <li>Flight controls and autopilot</li>
- <li>Navigation systems</li>
- <li>Radio communication</li>
- <li>Data-link and situational awareness</li>
- <li>Targeting pod operation</li>
- <li>Air-to-air and air-to-ground weapons</li>
- <li>Countermeasures and threat avoidance</li>
- <li>Air refueling</li>
- <li>Emergency procedures</li>
- </ul>
- <p>Select the mission you want to play and click BRIEFING. You will see a summary of the mission objectives, as well as a map of the area. You can also access the kneeboard, which contains useful information such as checklists, frequencies, and coordinates. Click FLY when you are ready to start the mission.</p>
- <p>Once in the cockpit, you will hear the voice of the instructor, who will introduce you to the topic of the mission and tell you what to do. You can also see the instructions on the top left corner of the screen, as well as some visual cues that highlight the relevant controls or indicators. You can use your mouse to interact with the cockpit controls, or use your keyboard or joystick if you have them configured. You can also use some keyboard commands to control the training session, such as:</p>
- <table>
- <tr><th>Key</th><th>Function</th></tr>
- <tr><td>P</td><td>Pause or resume the training</td></tr>
- <tr><td>LCTRL+P</td><td>Replay the last instruction</td></tr>
- <tr><td>LALT+P</td><td>Skip to the next instruction</td></tr>
- <tr><td>LWIN+P</td><td>Restart the current instruction</td></tr>
- <tr><td>LCTRL+LALT+P</td><td>End the training mission</td></tr>
- <tr><td>LCTRL+LALT+R</td><td>Restart the training mission</td></tr>
- <tr><td>LCTRL+LALT+B</td><td>Return to briefing screen</td></tr>
- <tr><td>LCTRL+LALT+E</td><td>Eject from the aircraft (not recommended)</td></tr>
- </table>
- <p>The interactive training missions are a great way to learn by doing, but they are not enough to cover everything you need to know about the A-10C. For more in-depth information, you can refer to the manuals and guides that are provided with the game. These documents are available in PDF format and can be accessed from the game folder or from the main menu by selecting MANUALS.</p>
- <p>The most important document is the Flight Manual, which is a 669-page book that covers everything from the history and specifications of the A-10C to its systems, weapons, procedures, and tactics. This manual is based on real-world documentation and is very detailed and accurate. However, it is also very technical and dense, so it may not be very easy to read or understand for beginners. Therefore, it is recommended that you use it as a reference rather than a tutorial.</p>
- <p>A more user-friendly document is Chuck's Guide for DCS: A-10C Warthog, which is a 176-page guide that summarizes and explains the most essential aspects of flying and fighting with the A-10C in a clear and concise way. This guide is written by an experienced flight simmer and includes many screenshots, diagrams, tips, and tricks. It is a great resource for beginners and intermediate pilots who want to learn more about the A-10C without getting overwhelmed by too much information.</p>
- <p>Another useful document is The Enemy Within 3.0 Campaign Guide, which is a 64-page guide that accompanies a story-based campaign for the A-10C that features 21 missions and a dynamic storyline. This guide provides you with the background, objectives, and tips for each mission, as well as some general advice on how to plan and execute your flights. This guide is a good way to practice your skills and enjoy a realistic and immersive scenario with the A-10C.</p>
- <h1>Playing the game</h1>
- <p>Once you have learned the basics of the A-10C, you are ready to play the game and have some fun. The game offers you several options to choose from, depending on your preferences and goals. You can play single-player or multiplayer modes, and you can create your own missions and campaigns or download them from other users.</p>
- <p>The simplest way to play the game is to select INSTANT ACTION from the main menu. This will allow you to jump into the cockpit of the A-10C and fly a short mission with a predefined objective and scenario. You can choose from different difficulty levels, weather conditions, and locations. Instant action missions are a good way to test your skills and have some quick action without too much preparation.</p>
- <p>If you want more variety and challenge, you can select MISSIONS from the main menu. This will allow you to choose from a list of single-player missions that are included with the game or downloaded from other sources. These missions vary in length, complexity, and difficulty, and cover different aspects of flying and fighting with the A-10C. You can also see a briefing screen that gives you some information about the mission objectives, situation, and loadout. You can also modify some parameters such as time of day, weather, and enemy skill level. Missions are a good way to experience different scenarios and situations with the A-10C.</p>
- <p>If you want more continuity and immersion, you can select CAMPAIGNS from the main menu. This will allow you to choose from a list of single-player campaigns that are included with the game or downloaded from other sources. These campaigns consist of a series of missions that are connected by a storyline and have persistent consequences. You will have to follow the orders of your commander, plan your flights, manage your resources, and deal with the changing situation on the ground. Campaigns are a good way to feel like a part of a larger conflict and see how your actions affect the outcome.</p>
- <p>If you want more interaction and competition, you can select MULTIPLAYER from the main menu. This will allow you to join or host online sessions with other players around the world. You can choose from different modes such as cooperative, team versus team, or free-for-all. You can also see a list of available servers that show their name, ping, players, mission, rules, and password. You can also use the chat function to communicate with other players before or during the game. Multiplayer is a good way to cooperate or compete with other pilots and have some fun and social interaction.</p>
- <h1>Tips and tricks</h1>
- <p>Now that you know how to play the game, here are some tips and tricks that will help you improve your performance and enjoyment of the game:</p>
- <ul>
- <li>Use TrackIR or VR devices if you have them. They will greatly enhance your situational awareness and immersion by allowing you to look around the cockpit and outside the aircraft with natural head movements.</li>
- <li>Use HOTAS (Hands On Throttle And Stick) controllers if you have them. They will make your flying more precise and comfortable by allowing you to control most of the functions without taking your hands off the throttle and stick.</li>
- <li>Use voice control software if you have it. It will make your communication easier and faster by allowing you to use voice commands instead of keyboard or mouse clicks.</li>
- <li>Use keyboard shortcuts if you don't have any of the above devices. They will make your life easier by allowing you to access some of the most common functions without moving your mouse around the cockpit.</li>
- <li>Use labels if you have trouble spotting or identifying targets. They will make your targets more visible by adding colored symbols or text over them.</li>
- <li>Use pause if you need to take a break or review something. It will freeze the game and allow you to catch your breath or check something in the manual or kneeboard.</li>
- <li>Use external views if you want to admire your aircraft or take screenshots. They will give you different perspectives of your aircraft and its surroundings.</li>
- <li>Use replays if you want to analyze your performance or learn from your mistakes. They will allow you to watch your flight again from different angles and speeds.</li>
- <li>Use mods if you want to customize or enhance your game experience. They will allow you to add new features, functions, graphics, sounds, missions, campaigns, or aircraft to your game.</li>
- <li>Use forums if you want to ask questions, share opinions, or get feedback from other players.</li>
- </ul>
- <h1>Features and reviews</h1>
- <ul>
- <li>The game allows you to use various sensors and systems such as the HUD, MFCD, CDU, RWR, TGP, SADL, and UFC. The game also allows you to use various weapons such as the GAU-8/A Avenger cannon, AGM-65 Maverick missiles, GBU-12/38/54 laser-guided bombs, CBU-87/97/103/105 cluster bombs, and AIM-9 Sidewinder missiles.</li>
- <li>The game features a dynamic and realistic combat environment that simulates the geography, weather, and forces of the Caucasus region of the Black Sea. The game also features a sophisticated artificial intelligence that controls the behavior and actions of friendly and enemy units. The game also features a realistic radio communication system that allows you to communicate with your wingmen, AWACS, JTAC, and other units.</li>
- <li>The game features a variety of single-player and multiplayer modes that offer different challenges and experiences. The game includes 25 interactive training missions, 21 single-player missions, 4 single-player campaigns, and several multiplayer modes. The game also includes a powerful mission and campaign editor that allows you to create your own scenarios and share them with others.</li>
- <li>The game has received positive reviews from many sources, such as PC Gamer, IGN, GameSpot, and Metacritic. The game has been praised for its realism, depth, quality, and longevity. Some of the quotes from the reviews are:</li>
- <ul>
- <li>"DCS: A-10C Warthog isn't for everyone—this is a simulation that demands dedication to reveal its true worth, and only those prepared to put in the time and effort to mine its treasure trove of avionics challenges will see that reward." - PC Gamer</li>
- <li>"DCS: A-10C Warthog is a tremendously - and at times obscenely - demanding game. You will experience cranial overload, and you will, unless you're a devout ED disciple, curse the developer for being so damned cryptic. But you will also feel the exhilaration of accomplishment." - IGN</li>
- <li>"DCS: A-10C Warthog is an incredibly realistic simulation of the U.S. Air Force's premier close air support aircraft. It's not for everyone--the complexity of its avionics and high fidelity flight model require extensive study--but those who make the effort are rewarded with an unparalleled gaming experience." - GameSpot</li>
- <li>"DCS: A-10C Warthog is one of the most complex simulations ever created for PC. It's not easy to learn but once you do it's very rewarding. If you are looking for a real challenge and have a passion for flying this is the game for you." - Metacritic user review</li>
- </ul>
- </ul>
- <h1>Conclusion</h1>
- <p>DCS: A-10C Warthog is a game that offers a realistic and immersive simulation of the U.S. premier Close Air Support attack aircraft. It is a game that requires a lot of dedication, knowledge, and skill to master, but it is also a game that provides a rewarding and satisfying experience that will make you feel like a real pilot.</p>
- <p>If you are interested in flying and fighting with the A-10C Warthog, you can download the game from various sources and install it on your PC. You can also learn the basics by using the interactive tutorials, manuals, and guides that are provided with the game. You can also play the game by choosing from different single-player or multiplayer modes, or by creating your own missions and campaigns. You can also improve your performance and enjoyment by using some tips and tricks that will help you along the way.</p>
- <p>DCS: A-10C Warthog is a game that has been praised by many critics and players for its realism, depth, and quality. It is a game that features a highly detailed and accurate 3D model of the A-10C Warthog, a realistic flight model, a comprehensive avionics and weapon system, a dynamic and realistic combat environment, a variety of modes, and a powerful mission and campaign editor. It is a game that will challenge you and reward you like no other.</p>
- <h1>FAQs</h1>
- <p>Here are some frequently asked questions and answers about the game:</p>
- <ol>
- <li>Q: What are the system requirements for the game?<br>
- A: The minimum system requirements for the game are: <ul>
- <li>OS: Windows 7/8/10 (64-bit)</li>
- <li>CPU: Intel Core i3 or AMD FX</li>
- <li>RAM: 8 GB</li>
- <li>GPU: NVIDIA GeForce GTX 760 or AMD Radeon HD 7950 with 2 GB VRAM</li>
- <li>Storage: 60 GB of free space</li>
- <li>Sound: DirectX compatible sound card</li>
- <li>Internet: Broadband connection for online play</li>
- </ul>
- The recommended system requirements for the game are: <ul>
- <li>OS: Windows 10 (64-bit)</li>
- <li>CPU: Intel Core i5 or AMD Ryzen</li>
- <li>RAM: 16 GB</li>
- <li>GPU: NVIDIA GeForce GTX 1070 or AMD Radeon RX Vega 56 with 8 GB VRAM</li>
- <li>Storage: 60 GB of free space on SSD</li>
- <li>Sound: DirectX compatible sound card with surround sound support</li>
- <li>Internet: Broadband connection for online play</li>
- </ul>
- You can also check the performance of your system by using the built-in benchmark tool in the game.</li>
- <li>Q: How can I update the game to the latest version?<br>
- A: The game is updated regularly with new features, fixes, and improvements. You can update the game automatically by using the DCS Updater, which is a tool that checks and downloads the latest updates for the game. You can also update the game manually by downloading and installing the update files from the official website or from other sources. You can also use the DCS Updater to switch between different versions of the game, such as stable, beta, or alpha.</li>
- <li>Q: How can I get more content for the game?<br>
- A: The game offers a lot of content by default, but you can also get more content by purchasing or downloading additional modules, maps, missions, campaigns, or mods. You can purchase official modules and maps from the official website or from other online stores. These modules and maps add new aircraft, vehicles, weapons, systems, and terrains to the game. You can also download free or paid missions and campaigns from the official website or from other sources. These missions and campaigns add new scenarios and stories to the game. You can also download free mods from various sources. These mods add new features, functions, graphics, sounds, or tweaks to the game.</li>
- <li>Q: How can I get help or support for the game?<br>
- A: The game is complex and challenging, and you may encounter some issues or difficulties while playing it. If you need help or support for the game, you can use various resources such as: <ul>
- <li>The manuals and guides that are provided with the game. They contain a lot of information and instructions on how to play and troubleshoot the game.</li>
- <li>The forums that are hosted by Eagle Dynamics and other communities. They contain a lot of discussions, questions, answers, feedback, and suggestions on various topics related to the game.</li>
- <li>The wiki that is maintained by Eagle Dynamics and other contributors. It contains a lot of articles, tutorials, tips, and references on various aspects of the game.</li>
- <li>The support team that is available by email or phone. They can help you with technical issues, bugs, crashes, activation, or refunds.</li>
- </ul></li>
- <li>Q: How can I give feedback or suggestions for the game?<br>
- A: The game is constantly being improved and updated based on user feedback and suggestions. If you want to give feedback or suggestions for the game, you can use various channels such as: <ul>
- <li>The forums that are hosted by Eagle Dynamics and other communities. They contain a lot of discussions, questions, answers, feedback, and suggestions on various topics related to the game.</li>
- <li>The bug tracker that is used by Eagle Dynamics and other users. It contains a list of reported issues, bugs, crashes, or errors that affect the game. You can also report new issues or vote for existing ones.</li>
- <li>The survey that is conducted by Eagle Dynamics periodically. It contains a set of questions that ask for your opinion on various aspects of the game.</li></ul></li></ol>
- <h1>Summary</h1>
- <p>DCS: A-10C Warthog is a realistic simulation of the U.S. premier Close Air Support attack aircraft. This game is not for the faint of heart, as it requires a lot of dedication, knowledge, and skill to master the complex systems and procedures of the A-10C. However, if you are up for the challenge, you will find a rewarding and immersive experience that will make you feel like a real pilot.</p>
- <p>This article has provided you with some information, tips, and tricks on how to download, install, and play the game, as well as some features and reviews of the game.</p>
- <p>If you are interested in flying and fighting with the A-10C Warthog, you can follow these steps:</p>
- <ol>
- <li>Download the game from various sources and install it on your PC.</li>
- <li>Learn the basics by using the interactive tutorials, manuals, and guides that are provided with the game.</li>
- <li>Play the game by choosing from different single-player or multiplayer modes, or by creating your own missions and campaigns.</li>
- <li>Improve your performance and enjoyment by using some tips and tricks that will help you along the way.</li>
- <li>Give feedback or suggestions for the game by using various channels such as forums, bug tracker, or survey.</li>
- </ol>
- <p>DCS: A-10C Warthog is a game that offers a realistic and immersive simulation of the U.S. premier Close Air Support attack aircraft. It is a game that will challenge you and reward you like no other.</p><br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Arnold 2019 64 Bit Adlmint.dll Crack Download [REPACK].md DELETED
@@ -1,9 +0,0 @@
-
- <p><strong>Autodesk 3ds Max 2019 Crack + Serial Full Direct Download</strong> is a comprehensive, professional tool to help you create 3D designs and animation. Although a lot of new 3D design and modeling programs have been developed lately, Autodesk 3DS Max still remains a key player within the industry. The version of Autodesk 3ds Max that you can download from GigaHax now contains more flexible options for Relax, the tool that averages UVs and allows for the automatic relief of texture distortion. If it is used in conjunction with another function, Show Edge Distortion, the mapping of your characters becomes all the easier.</p>
- <p>Network License for Maya 2017 and Mudbox 2017:<br>Use "\x64\Tools\NLM\NLM.msi" from Maya 2016 installer. Follow instructions in "AUTODESK_MENTALRAY_STANDALONE_V2016_WIN64-XFORCE" (or "MACOSX64" / "LNX64" releases) crack and also replace "adlmint.dll" in "C:\Program Files\Common Files\Autodesk Shared\CLM\V4\MSVC11". In "lic.dat", add the following lines:<br><br>FEATURE 86618MAYA_2017_0F adskflex 1.000 permanent 100 VENDOR_STRING=commercial:permanent SUPERSEDE DUP_GROUP=UH ISSUED=01-janv-2013 SN=666-66666666 TS_OK SIGN="1745 D487 C07B 1B0D 10C0 555A B147 1372 8DBF 1E14 ECFC 870D FC59 5ECC 9156 1814 B16F 2E7B 4760 2A4C 745E 732E 5A7D 9A3C E3D4 0359 562E 9B90 713D 3708" SIGN2="100D 7553 E295 6170 A0C2 9567 8124 C44F 22C3 81B1 E629 EA7D 21A5 E308 1BD3 1D1F 0650 B3DC E78C 2AB0 C055 DB08 A9DE 12DB FA5C 3AF6 FFC3 A3EA A323 4699"<br><br>FEATURE 86624MBXPRO_2017_0F adskflex 1.000 permanent 100 VENDOR_STRING=commercial:permanent SUPERSEDE DUP_GROUP=UH ISSUED=01-janv-2013 SN=666-66666666 TS_OK SIGN="1745 D487 C07B 1B0D 10C0 555A B147 1372 8DBF 1E14 ECFC 870D FC59 5ECC 9156 1814 B16F 2E7B 4760 2A4C 745E 732E 5A7D 9A3C E3D4 0359 562E 9B90 713D 3708" SIGN2="100D 7553 E295 6170 A0C2 9567 8124 C44F 22C3 81B1 E629 EA7D 21A5 E308 1BD3 1D1F 0650 B3DC E78C 2AB0 C055 DB08 A9DE 12DB FA5C 3AF6 FFC3 A3EA A323 4699"<br><br>Thanks to:<br> -2017-direct-links-no-requests-thanks-spam-ot-137100/index4.html<br>The FLEXnet codes should one day be in the link below but currently are not.<br> -result/caas/sfdcarticles/sfdcarticles/2017-FLEXnet-feature-codes-for-Autodesk-products.html</p>
- <h2>Arnold 2019 64 bit adlmint.dll crack download</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://imgfil.com/2uxZ8Q">https://imgfil.com/2uxZ8Q</a></b></p><br /><br />
- <p>Avid Pro Tools 10 - 10.3.10. This is what the pros use.<br> _US/download/Pro-Tools-10-3-10-Downloads<br> -Tools-10-3-9-Downloads<br> -Tools-10-3-Downloads<br> =43572<br>Windows Cracks:<br> -download_patch-for-pro-tools-1039-win.html<br> <br> <br>Mac cracked release (?):<br>Pro Tools 10.3.10-openssh from</p>
- <p>Here are some links (bottom) for Autodesk 2016 and Adobe CC 2014 & 2015 products. You can download all Autodesk at once in Tonec Internet Download Manager. Just click "Add batch download from clipboard". *.rar or *.001 files can be opened with WinRAR. You can open *.nfo files with Notepad. (You can use the cracks that are available for Autodesk and Adobe by XFORCE or ISO releases WIN or MAC)<br>AUTODESK.MAYA.V2016.WIN64-ISO<br>AUTODESK_MAYA_V2016_MACOSX-XFORCE<br>AUTODESK_MAYA_V2016_LNX64-XFORCE<br>ADOBE_CC_V2014_KEYGEN_WIN_MACOSX-XFORCE<br>New network cracks available for Autodesk in:<br>AUTODESK_MENTALRAY_STANDALONE_V2016_LNX64-XFORCE<br>AUTODESK_MENTALRAY_STANDALONE_V2016_MACOSX64-XFORCE<br>AUTODESK_MENTALRAY_STANDALONE_V2016_WIN64-XFORCE</p>
- <p>Autodesk 2017 links are below. You can use the crack/keygen from any of the ISO/XFORCE 2017 releases.<br><br>INFO about Moldflow crack<br>To anyone who might be interested, old 2016 XForce FLEXNet crack still works for 2017 softwares:<br>replace original adlmint.dll with the cracked one in C:\Program Files\Common Files\Autodesk Shared\CLM\V3\MSVC14, and edit the XF license file by adding the following:<br>FEATURE ************ adskflex 1.000 permanent 100 VENDOR_STRING=commercial:permanent SUPERSEDE DUP_GROUP=UH ISSUED=01-janv-2013 SN=666-66666666 TS_OK SIGN="1745 D487 C07B 1B0D 10C0 555A B147 1372 8DBF 1E14 ECFC 870D FC59 5ECC 9156 1814 B16F 2E7B 4760 2A4C 745E 732E 5A7D 9A3C E3D4 0359 562E 9B90 713D 3708" SIGN2="100D 7553 E295 6170 A0C2 9567 8124 C44F 22C3 81B1 E629 EA7D 21A5 E308 1BD3 1D1F 0650 B3DC E78C 2AB0 C055 DB08 A9DE 12DB FA5C 3AF6 FFC3 A3EA A323 4699"<br> where ************ is the proper FLEXNet feature code for AD2017 software you want to use (check FLEXNet link below): now you have a multiple license (up to 100: not uncounted, but better than MAGNiTUDE's 2) you can use with your multicore CPU, and useful for all AD2017 softwares. Of course, if you use this one, delete all crack files related to MAGNiTUDE crack and restore the original onesSimStudioTools R2: replace the original adlmint.dll with the cracked one in C:\Program Files\Autodesk\SimStudio Tools 2016 R2 (default installation folder) and use the correct FLEXNet code in the license file<br>Autodesk 2017 product keys:<br> -service/installation-activation-licensing/get-ready/find-serial-number-product-key/product-key-look/2017-product-keys<br>Autodesk 2017 FLEXnet keys (for network license):<br> -result/caas/sfdcarticles/sfdcarticles/2017-FLEXnet-feature-codes-for-Autodesk-products.html<br>Accumulated hotfix 1 for AutoCAD 2017 based products<br> _downloads/AutoCAD_2017_Hotfix_1_x64.exe<br> _downloads/AutoCAD_2017_Hotfix_1_x86.exe<br>This hotfix applies to the following releases:<br>- Autodesk AutoCAD 2017<br>- Autodesk AutoCAD Architecture 2017<br>- Autodesk AutoCAD Civil 3D 2017<br>- Autodesk AutoCAD Electrical 2017<br>- Autodesk AutoCAD Map 3D 2017<br>- Autodesk AutoCAD Mechanical 2017<br>- Autodesk AutoCAD MEP 2017<br>- Autodesk AutoCAD P&ID 2017<br>- Autodesk AutoCAD Plant 3D 2017<br>- Autodesk AutoCAD Utility Design 2017 <br>Autodesk Inventor 2017 fails to install due to failure to install .NET Framework Runtime 4.6<br>Applies to:<br>- Factory Design Suite 2017<br>- Inventor 2017<br>- Inventor LT 2017<br>- and Product Design Suite 2017<br>Issue:<br>Autodesk Inventor 2017 requires .NET 4.6 to successfully install Inventor 2017 products.<br>The Inventor, Inventor LT, and Inventor OEM 2017 installers will stop if they fail to install .NET 4.6 on your computer.<br>The log file reports: Install .NET Framework Runtime 4.6 - Failed - Failure is ignored, Result=1603<br>Notes:<br>- Windows 7 SP1 and Windows 8.1 do not come with .Net Framework 4.6 pre-installed.<br>- Windows 10 comes with .Net Framework 4.6 pre-installed.<br>Solution:<br>1. 
Manually Install Microsoft .NET Framework 4.6 from:<br> -us/download/details.aspx?id=48137<br>or choose this direct link to download the Microsoft .NET Framework 4.6 Offline Installer (62.4 Mo)<br>(for Vista SP2, 7 SP1, 8, 8.1, Server 2008 SP2, 2008 R2 SP1, 2012 & 2012 R2)<br> -D33C-47E9-9D70-2F7C65DAAD94/NDP46-KB3045557-x86-x64-AllOS-ENU.exe<br>Important note: KB 2919442 and KB 2919355 are pre-requisite of .NET 4.6 on Windows 8.1 OS.<br>Get the KB 2919442 (4.6 Mo) and the KB 2919355 (319 Mo) from:<br> -us/download/details.aspx?id=42135<br> -FR/download/details.aspx?id=42327<br>or choose direct links:<br> -9E65-4681-BBBE-A8F73A5C116F/Windows8.1-KB2919442-x86.msu<br> -1E15-43FD-B591-63FB7A1A5C04/Windows8.1-KB2919355-x86.msu<br>2. Restart your computer.<br>3. Restart the Autodesk Inventor installer.<br>Additionnal notes:<br>To check for .Net 4.6 installation on your computer:<br>- Microsoft .NET Framework 4.6 list under Programs and Features in Control Panel as an installed product on Windows7 SP1 OS.<br>- Microsoft .NET Framework 4.6 display as Update for Microsoft Windows (KB3045563) under Installed Updates in Control Panel on Windows8.1 OS.<br>- Or run Regedit, and confirm ".NETFramework,Version = v4.6" displays under the following path: \HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\.NETFramewo rk\v4.0.30319\SKUs\<br><br>Replace English with your language (French, Italian, German, Spanish, Simplified_Chinese, etc.)<br>AutoCAD 2017<br> _2017_English_Win_32bit_dlm.sfx.exe<br> _2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _2017_English_Win_64bit_dlm_002_002.sfx.exe<br>AutoCAD LT 2017<br> _LT_2017_NWL_English_Win_64bit_dlm.sfx.exe<br> _LT_2017_NWL_English_Win_32bit_dlm.sfx.exe<br> _LT_2017_English_LP_Win_64bit_dlm.sfx.exe<br> _LT_2017_English_LP_Win_32bit_dlm.sfx.exe<br>AutoCAD Architecture 2017<br> _Architecture_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Architecture_2017_English_Win_64bit_dlm_002_002.sfx.exe<br> _Architecture_2017_English_Win_32bit_dlm_001_002.sfx.exe<br> _Architecture_2017_English_Win_32bit_dlm_002_002.sfx.exe<br>AutoCAD Electrical 2017<br> _E/DLM/AutoCAD_Electrical_2017_English_Win_32bit_dlm_001_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_English_Win_32bit_dlm_002_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>AutoCAD MAP 3D 2017<br> _Map_2017_English_Win_64bit_DLM_001_002.sfx.exe<br> _Map_2017_English_Win_64bit_DLM_002_002.sfx.exe<br>AutoCAD MEP 2017<br> _MEP_2017_English_Win_32bit_dlm_001_003.sfx.exe<br> _MEP_2017_English_Win_32bit_dlm_002_003.sfx.exe<br> _MEP_2017_English_Win_32bit_dlm_003_003.sfx.exe<br> _MEP_2017_English_Win_64bit_dlm_001_003.sfx.exe<br> _MEP_2017_English_Win_64bit_dlm_002_003.sfx.exe<br> _MEP_2017_English_Win_64bit_dlm_003_003.sfx.exe<br>AutoCAD Mechanical 2017<br> _PP/DLM/AutoCAD_Mechanical_2017_English_Win_32bit_dlm.sfx.exe<br> _PP/DLM/AutoCAD_Mechanical_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _PP/DLM/AutoCAD_Mechanical_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>AutoCAD Raster Design 2017<br> _Raster_Design_2017_English_Win_32bit_dlm.sfx.exe<br> _Raster_Design_2017_English_Win_64bit_dlm.sfx.exe<br>AutoCAD Plant 3D 2017<br> _Plant_3D_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Plant_3D_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>AutoCAD P&ID 2017<br> _PNID_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _PNID_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD Civil 3D 2017<br> 
_Civil3D_2017_English_Win_64bit_dlm_001_003.sfx.exe<br> _Civil3D_2017_English_Win_64bit_dlm_002_003.sfx.exe<br> _Civil3D_2017_English_Win_64bit_dlm_003_003.sfx.exe<br>AutoCAD Utility Design 2017<br> _Utility_Design_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Utility_Design_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk Revit 2017<br> _Revit_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Revit_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk Revit LT 2017<br> _Revit_LT_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Revit_LT_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Inventor 2017<br> _2017_English_Win_64bit_dlm_001_003.sfx.exe<br> _2017_English_Win_64bit_dlm_002_003.sfx.exe<br> _2017_English_Win_64bit_dlm_003_003.sfx.exe<br>Inventor LT 2017<br> _LT_2017_English_Win_32bit_dlm.sfx.exe<br> _LT_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _LT_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Vault Basic 2017<br> _ENU_32_64bit_dlm.sfx.exe<br> _ENU_64bit_dlm.sfx.exe<br>Vault Professional 2017<br> _ENU_32_64bit_dlm.sfx.exe<br> _ENU_64bit_dlm.sfx.exe<br>Vault Workgroup 2017<br> _ENU_32_64bit_dlm.sfx.exe<br> _ENU_64bit_dlm.sfx.exe<br>Autodesk Advance Steel 2017<br> _2017_ML_WIN_64BIT_DLM.sfx.exe<br>Autodesk Navisworks Manage 2017<br> _Navisworks_Manage_2017_Multilingual_Win_64bit_dlm_001_002.sfx.exe<br> _Navisworks_Manage_2017_Multilingual_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk Navisworks Simulate 2017<br> _Navisworks_Simulate_2017_Multilingual_Win_64bit_dlm_001_002.sfx.exe<br> _Navisworks_Simulate_2017_Multilingual_Win_64bit_dlm_002_002.sfx.exe<br>Moldflow Adviser Ultimate 2017<br> _2017_Multilingual_Win_64bit_dlm_001_002.sfx.exe<br> _2017_Multilingual_Win_64bit_dlm_002_002.sfx.exe<br>Moldflow CAD Doctor 2017<br> _2017_Multilingual_Win_64bit_dlm.sfx.exe<br>Moldflow Design (formerly Simulation DFM) 2017<br> _2017_Multilingual_Win_64bit_dlm.sfx.exe<br>Moldflow Insight Ultimate 2017<br> _2017_Multilingual_Win_64bit_dlm.sfx.exe<br>Moldflow Synergy 2017<br> _2017_Multilingual_Win_64bit_dlm_001_002.sfx.exe<br> _2017_Multilingual_Win_64bit_dlm_002_002.sfx.exe<br>Robot Structural Analysis Pro 2017<br> _Structural_Analysis_Professional_2017_Multilingual_Win_64bit_dlm.sfx.exe<br>Autodesk Vehicle Tracking English 2017<br> _Vehicle_Tracking_2017_English_Win_32_64bit_DLM.sfx.exe<br>VRED 2017<br> _VRED_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Design 2017<br> _VREDDES_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Professional 2017<br> _VREDPRO_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Presenter 2017<br> _VREDPRS_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Server 2017<br> _VREDSRV_2017_Enu_Win_64bit_dlm.sfx.exe<br>Autodesk Nastran In-CAD 2017<br> _INCAD_2017_Win_64bit_dlm.sfx.exe<br>Autodesk Nastran 2017<br> _2017_Win_64bit_dlm.sfx.exe<br>Showcase 2017 <br> _2017_English_Win_64bit_dlm_001_003.sfx.exe<br> _2017_English_Win_64bit_dlm_002_003.sfx.exe<br> _2017_English_Win_64bit_dlm_003_003.sfx.exe<br>CFD 2017 <br> _CFD_2017_Win_64bit_dlm_001_002.sfx.exe<br> _CFD_2017_Win_64bit_dlm_002_002.sfx.exe<br>Simulation Mechanical 2017<br> _Simulation_Mechanical_2017_Win_64bit_dlm_001_002.sfx.exe<br> _Simulation_Mechanical_2017_Win_64bit_dlm_002_002.sfx.exe<br>Fabrication CADmep 2017<br> _Fabrication_CADmep_2017_win_64bit_dlm.sfx.exe<br>Fabrication CAMduct 2017<br> _Fabrication_CAMduct_2017_win_64bit_dlm.sfx.exe<br>Fabrication ESTmep2017<br> _Fabrication_ESTmep_2017_win_64bit_dlm.sfx.exe<br>Autodesk InfraWorks 360 2017<br> _InfraWorks_2017_Win_64bit_DLM.sfx.exe<br>Point Layout 2017<br> 
_Point_Layout_2017_Win_32-64bit_en-us.exe<br>ReCap 360 Pro 2017<br> _ReCap360_30052_Multilingual_Win_64bit_dlm.sfx.exe<br>Design and Creation suites<br>Product Design Suite 2017<br> _2017_Enu_Win_64bit_dlm_001_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_006.sfx.exe<br>AutoCAD Design Suite Ultimate 2017<br> _Ultimate_2017_English_Win_32bit_dlm_001_002.sfx.exe<br> _Ultimate_2017_English_Win_32bit_dlm_002_002.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_001_004.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_002_004.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_003_004.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_004_004.sfx.exe<br>Autodesk Factory Design Suite Ultimate 2017<br> _2017_Enu_Win_64bit_dlm_001_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_007_007.sfx.exe<br>Infrastructure Design Suite Ultimate 2017<br> _2017_Enu_Win_64bit_dlm_001_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_007_007.sfx.exe<br>Building Design Suite Ultimate 2017<br> _2017_Enu_Win_64bit_dlm_001_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_007_007.sfx.exe<br>Documentation<br> <br> _2017_help_download/AutoCAD_2017_Product_Help_English_Win_32_64bit_dlm.sfx.exe<br> _lt_2017_help_download/AutoCAD_LT_2017_Product_Help_English_Win_32_64bit_dlm.sfx.exe<br> _and_lt_local_help/Autodesk_Inventor_2017_Help.exe<br> _civil_3d_2017/Autodesk_AutoCAD_Civil_3D_2017_Help_English.exe<br> <br> _2017_install_help/autodesk_alias_2017_help.exe<br>Autodesk 3ds max 2017 EFGJKPS (x64 Only) - F for French<br> _3ds_Max_2017_EFGJKPS_Win_64bit_001_002.sfx.exe<br> _3ds_Max_2017_EFGJKPS_Win_64bit_002_002.sfx.exe<br>Autodesk AutoCAD 2017 French<br> _2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _2017_French_Win_64bit_dlm_002_002.sfx.exe<br> _2017_French_Win_32bit_dlm.sfx.exe<br>Autodesk AutoCAD LT 2017 French<br> _LT_2017_NWL_French_Win_64bit_dlm.sfx.exe<br> _LT_2017_French_LP_Win_64bit_dlm.sfx.exe<br> _LT_2017_NWL_French_Win_32bit_dlm.sfx.exe<br> _LT_2017_French_LP_Win_32bit_dlm.sfx.exe<br>Autodesk AutoCAD Architecture 2017 French<br> _Architecture_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _Architecture_2017_French_Win_64bit_dlm_002_002.sfx.exe<br> _Architecture_2017_French_Win_32bit_dlm_001_002.sfx.exe<br> _Architecture_2017_French_Win_32bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD Electrical 2017 French<br> _E/DLM/AutoCAD_Electrical_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_French_Win_64bit_dlm_002_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_French_Win_32bit_dlm_001_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_French_Win_32bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD Mechanical 2017 French<br> _PP/DLM/AutoCAD_Mechanical_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> 
_PP/DLM/AutoCAD_Mechanical_2017_French_Win_64bit_dlm_002_002.sfx.exe<br> _PP/DLM/AutoCAD_Mechanical_2017_French_Win_32bit_dlm.sfx.exe<br>Autodesk AutoCAD MEP 2017 French<br> _MEP_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _MEP_2017_French_Win_64bit_dlm_002_002.sfx.exe<br> _MEP_2017_French_Win_32bit_dlm_001_002.sfx.exe<br> _MEP_2017_French_Win_32bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD MAP 3D 2017 (x64 Only) French<br> _Map_2017_French_Win_64bit_DLM_001_002.sfx.exe<br> _Map_2017_French_Win_64bit_DLM_002_002.sfx.exe<br>Autodesk AutoCAD Plant 3D 2017 (x64 Only) French<br> _Plant_3D_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _Plant_3D_2017_French_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD P&ID 2017 (x64 Only) French<br> _PNID_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _PNID_2017_French_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD Raster Design 2017 French<br> _Raster_Design_2017_French_Win_64bit_dlm.sfx.exe<br> _Raster_Design_2017_French_Win_32bit_dlm.sfx.exe<br>Autodesk AutoCAD Civil 3D 2017 (x64 Only) French<br> _Civil3D_2017_French_Win_64bit_dlm_001_003.sfx.exe<br> _Civil3D_2017_French_Win_64bit_dlm_002_003.sfx.exe<br> _Civil3D_2017_French_Win_64bit_dlm_003_003.sfx.exe<br>Autodesk Inventor 2017 (X64 Only) French<br> _2017_French_Win_64bit_dlm_001_003.sfx.exe<br> _2017_French_Win_64bit_dlm_002_003.sfx.exe<br> _2017_French_Win_64bit_dlm_003_003.sfx.exe<br>Autodesk Inventor LT 2017 French <br> _LT_2017_French_Win_64bit_dlm_001_002.sfx.exe<br> _LT_2017_French_Win_64bit_dlm_002_002.sfx.exe<br> _LT_2017_French_Win_32bit_dlm.sfx.exe<br>Autodesk Revit 2017 (X64 Only) Non-Specific-Language (French included)<br> _Revit_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Revit_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Offline Help Installers French<br> _max_2017_help/3dsMaxHelp_fra.exe<br> _2017_offline_help_installer/AutoCAD_2017_Product_Help_French_Win_32_64bit_dlm.sfx.exe<br> _lt_2017_offline_help/AutoCAD_LT_2017_Product_Help_French_Win_32_64bit_dlm.sfx.exe<br> _architecture_2017_product_help/AutoCAD_Architecture_Help_2017_French_Win_32_64bit_dlm.sfx.exe<br> _electrical_2017_help_download/AutoCAD_Electrical_2017_French_help_Win_32_64bit_dlm.sfx.exe<br> _mechanical_help_2017/AutoCAD_Mechanical_Help_2017_French_Win_32_64bit_dlm.sfx.exe<br> _map_3d_2017_product_help/Autodesk_AutoCAD_Map_3D_2017_Help_French.exe<br> _mep_2017_product_help/AutoCAD_MEP_Help_2017_French_Win_32_64bit_dlm.sfx.exe<br> _civil_3d_2017/Autodesk_AutoCAD_Civil_3D_2017_Help_French.exe<br> _and_lt_local_help/Autodesk_Inventor_2017_Help_FRA.exe<br> _and_lt_local_help/Autodesk_Inventor_LT_2017_Help_FRA.exe<br>Additional Notes:<br>How to get Autodesk Revit 2017 (X64 Only) Non-Specific Language in French language:<br>You must be vigilant when installing and well select the desired installation language before entering its serial number.<br>By chance, multiple languages are available after installing the new Revit 2017 software and can be changed.<br>In order to benefit from a new interface:<br>- Copy the Revit shortcut on your desktop<br>- Right click on the new icon and choose "Properties"<br>- In the "Target" field, simply change the last three letters of the line with three new ones: FRA<br>- FRA must be put in place of ENU.<br>... 
/Language=FRA<br>Autodesk Alias Design 2017<br> _Alias_Design_2017_English_Mac_OSX.dmg<br>ALIAS AutoStudio 2017<br> _Alias_AutoStudio_2017_English_Mac_OSX.dmg<br>Autodesk Alias Surface 2017<br> _Alias_Surface_2017_English_Mac_OSX.dmg<br>Autodesk Autocad Mechanicel German 64 Bit<br> _PP/DLM/AutoCAD_Mechanical_2017_German_Win_64bit_dlm_001_002.sfx.exe<br> _PP/DLM/AutoCAD_Mechanical_2017_German_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk Raster Design 2017 German 64 Bit<br> _Raster_Design_2017_German_Win_64bit_dlm.sfx.exe<br>Autodesk Autocad 2017 German 32Bit 64Bit<br> _2017_German_Win_32bit_dlm.sfx.exe<br> _2017_German_Win_64bit_dlm_001_002.sfx.exe<br> _2017_German_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk Inventor 2017 German 64 Bit<br> _2017_German_Win_64bit_dlm_001_003.sfx.exe<br> _2017_German_Win_64bit_dlm_002_003.sfx.exe<br> _2017_German_Win_64bit_dlm_003_003.sfx.exe<br>AutoCAD Architecture 2017 x64<br> _Architecture_2017_Italian_Win_64bit_dlm_001_002.sfx.exe<br> _Architecture_2017_Italian_Win_64bit_dlm_002_002.sfx.exe<br>AutoCAD LT 2017 x64/x86<br> _LT_2017_NWL_Italian_Win_64bit_dlm.sfx.exe<br> _LT_2017_NWL_Italian_Win_32bit_dlm.sfx.exe<br> _LT_2017_Italian_LP_Win_64bit_dlm.sfx.exe<br> _LT_2017_Italian_LP_Win_32bit_dlm.sfx.exe<br>AutoCAD 2017 x64/x86<br> _2017_Italian_Win_32bit_dlm.sfx.exe<br> _2017_Italian_Win_64bit_dlm_001_002.sfx.exe<br> _2017_Italian_Win_64bit_dlm_002_002.sfx.exe<br>AutoCAD Electrical 2017 x64<br> _E/DLM/AutoCAD_Electrical_2017_Italian_Win_64bit_dlm_001_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_Italian_Win_64bit_dlm_002_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_Italian_Win_32bit_dlm_001_002.sfx.exe<br> _E/DLM/AutoCAD_Electrical_2017_Italian_Win_32bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD Mechanical 2017<br> _PP/DLM/AutoCAD_Mechanical_2017_Italian_Win_64bit_dlm_001_002.sfx.exe<br> _PP/DLM/AutoCAD_Mechanical_2017_Italian_Win_64bit_dlm_002_002.sfx.exe<br> _PP/DLM/AutoCAD_Mechanical_2017_Italian_Win_32bit_dlm.sfx.exe<br>Autodesk AutoCAD MEP 2017<br> _MEP_2017_Italian_Win_64bit_dlm_001_002.sfx.exe<br> _MEP_2017_Italian_Win_64bit_dlm_002_002.sfx.exe<br> _MEP_2017_Italian_Win_32bit_dlm_001_002.sfx.exe<br> _MEP_2017_Italian_Win_32bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD MAP 3D 2017 x64<br> _Map_2017_Italian_Win_64bit_DLM_001_002.sfx.exe<br> _Map_2017_Italian_Win_64bit_DLM_002_002.sfx.exe<br>Autodesk AutoCAD Raster Design 2017<br> _Raster_Design_2017_Italian_Win_64bit_dlm.sfx.exe<br> _Raster_Design_2017_Italian_Win_32bit_dlm.sfx.exe<br>Autodesk Inventor 2017 X64<br> _2017_Italian_Win_64bit_dlm_001_003.sfx.exe<br> _2017_Italian_Win_64bit_dlm_002_003.sfx.exe<br> _2017_Italian_Win_64bit_dlm_003_003.sfx.exe<br>Autodesk Inventor LT 2017<br> _LT_2017_Italian_Win_64bit_dlm_001_002.sfx.exe<br> _LT_2017_Italian_Win_64bit_dlm_002_002.sfx.exe<br> _LT_2017_Italian_Win_32bit_dlm.sfx.exe<br>Offline Help Installers Italian<br> _2017_offline_help_installer/AutoCAD_2017_Product_Help_Italian_Win_32_64bit_dlm.sfx.exe<br> _lt_2017_offline_help/AutoCAD_LT_2017_Product_Help_Italian_Win_32_64bit_dlm.sfx.exe<br> _architecture_2017_product_help/AutoCAD_Architecture_Help_2017_Italian_Win_32_64bit_dlm.sfx.exe<br> _electrical_2017_help_download/AutoCAD_Electrical_2017_Italian_help_Win_32_64bit_dlm.sfx.exe<br> _mechanical_help_2017/AutoCAD_Mechanical_Help_2017_Italian_Win_32_64bit_dlm.sfx.exe<br> _mep_2017_product_help/AutoCAD_MEP_Help_2017_Italian_Win_32_64bit_dlm.sfx.exe<br>Product Design Suite 2017<br> _2017_Enu_Win_64bit_dlm_001_006.sfx.exe<br> 
_2017_Enu_Win_64bit_dlm_002_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_006.sfx.exe<br>AutoCAD Design Suite Ultimate 2017 English<br> _Ultimate_2017_English_Win_32bit_dlm_001_002.sfx.exe<br> _Ultimate_2017_English_Win_32bit_dlm_002_002.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_001_004.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_002_004.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_003_004.sfx.exe<br> _Ultimate_2017_English_Win_64bit_dlm_004_004.sfx.exe<br>Vault Professional 2017<br> _ENU_32_64bit_dlm.sfx.exe<br> _ENU_64bit_dlm.sfx.exe<br>Vault Workgroup 2017<br> _ENU_32_64bit_dlm.sfx.exe<br> _ENU_64bit_dlm.sfx.exe<br>Autodesk Advance Steel 2017<br> _2017_ML_WIN_64BIT_DLM.sfx.exe<br>Autodesk Vehicle Tracking English (32-64)bit 2017<br> _Vehicle_Tracking_2017_English_Win_32_64bit_DLM.sfx.exe<br>AutoCAD Raster Design 2017<br> _Raster_Design_2017_English_Win_32bit_dlm.sfx.exe<br> _Raster_Design_2017_English_Win_64bit_dlm.sfx.exe<br>Inventor 2017 local help: <br> _and_lt_local_help/Autodesk_Inventor_2017_Help.exe<br>Inventor 2017 sample files:<br> _sample_files/autodesk_inventor_2017_samples.sfx.exe<br>VRED Presenter 2017<br> _VREDPRS_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Server 2017<br> _VREDSRV_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED 2017<br> _VRED_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Design 2017<br> _VREDDES_2017_Enu_Win_64bit_dlm.sfx.exe<br>VRED Professional 2017<br> _VREDPRO_2017_Enu_Win_64bit_dlm.sfx.exe<br>AutoCAD Plant 3D 2017<br> _Plant_3D_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Plant_3D_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk AutoCAD P&ID 2017<br> _PNID_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _PNID_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Mac_OSX Versions<br>Autodesk Alias Design 2017<br> _Alias_Design_2017_English_Mac_OSX.dmg<br>ALIAS AutoStudio 2017 for Mac<br> _Alias_AutoStudio_2017_English_Mac_OSX.dmg<br>Autodesk Alias Surface 2017<br> _Alias_Surface_2017_English_Mac_OSX.dmg<br>Autodesk Nastran In-CAD 2017<br> _INCAD_2017_Win_64bit_dlm.sfx.exe<br>Autodesk Nastran 2017<br> _2017_Win_64bit_dlm.sfx.exe<br>Autodesk_AutoCAD_Civil_3D_2017 Documentation<br> _civil_3d_2017/Autodesk_AutoCAD_Civil_3D_2017_Help_English.exe<br>Autodesk AutoCAD Civil 3D 2017<br> _Civil3D_2017_English_Win_64bit_dlm_001_003.sfx.exe<br> _Civil3D_2017_English_Win_64bit_dlm_002_003.sfx.exe<br> _Civil3D_2017_English_Win_64bit_dlm_003_003.sfx.exe<br>Infrastructure Design Suite Ultimate 2017 Win 64bit<br> _2017_Enu_Win_64bit_dlm_001_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_007_007.sfx.exe<br>Building Design Suite Ultimate 2017 Win 64bit<br> _2017_Enu_Win_64bit_dlm_001_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_007_007.sfx.exe<br>Documentation Alias 2017 Product Help<br>Online Help<br> <br>Help Install Instructions<br>English<br> _2017_install_help/installing_autodesk_alias_2017_help.html<br>Japanese<br> _2017_install_help/JPN/JPN/installing_autodesk_alias_2017_help_jpn.html<br>Simplified Chinese<br> 
_2017_install_help/CHS/CHS/installing_autodesk_alias_2017_help_chs.html<br>Windows Help Installer<br>English<br> _2017_install_help/autodesk_alias_2017_help.exe<br>Japanese<br> _2017_install_help/JPN/JPN/alias_help_2017_jpn.exe<br>Simplified Chinese<br> _2017_install_help/CHS/CHS/alias_help_2017_chs.exe<br>Mac OS X Help Installer<br>English<br> _2017_install_help/autodesk_alias_2017_help.dmg<br>Japanese<br> _2017_install_help/JPN/JPN/AliasDocs2017_Japanese_Mac.dmg<br>Simplified Chinese<br> _2017_install_help/CHS/CHS/AliasDocs2017_Chinese_Mac.dmg<br>Learning Movies<br>Japanese<br> _2017_install_help/JPN/JPN/learningmovies_jpn.exe<br>Simplified Chinese<br> _2017_install_help/CHS/CHS/learningmovies_chs.exe<br>Factory Design Suite<br> _2017_Enu_Win_64bit_dlm_001_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_007.sfx.exe<br> _2017_Enu_Win_64bit_dlm_007_007.sfx.exe<br>Autodesk Revit 2017<br> _Revit_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Revit_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Autodesk Revit LT 2017<br> _Revit_LT_2017_English_Win_64bit_dlm_001_002.sfx.exe<br> _Revit_LT_2017_English_Win_64bit_dlm_002_002.sfx.exe<br>Showcase 2017<br> _2017_English_Win_64bit_dlm_001_003.sfx.exe<br> _2017_English_Win_64bit_dlm_002_003.sfx.exe<br> _2017_English_Win_64bit_dlm_003_003.sfx.exe<br>CFD 2017 <br> _CFD_2017_Win_64bit_dlm_001_002.sfx.exe<br> _CFD_2017_Win_64bit_dlm_002_002.sfx.exe<br>Simulation Mechanical 2017<br> _Simulation_Mechanical_2017_Win_64bit_dlm_001_002.sfx.exe<br> _Simulation_Mechanical_2017_Win_64bit_dlm_002_002.sfx.exe<br>Fabrication CADmep 2017<br> _Fabrication_CADmep_2017_win_64bit_dlm.sfx.exe<br>Fabrication CAMduct 2017<br> _Fabrication_CAMduct_2017_win_64bit_dlm.sfx.exe<br>Fabrication ESTmep 2017<br> _Fabrication_ESTmep_2017_win_64bit_dlm.sfx.exe<br>Autodesk InfraWorks 360 2017<br> _InfraWorks_2017_Win_64bit_DLM.sfx.exe<br>Point Layout 2017<br> _Point_Layout_2017_Win_32-64bit_en-us.exe<br>ReCap 360 Pro 2017<br> _ReCap360_30052_Multilingual_Win_64bit_dlm.sfx.exe<br>Alias Design 2017<br> _ALSDES_2017_Enu_64bit_dlm.sfx.exe<br>Alias Surface 2017<br> _ASURF_2017_Enu_64bit_dlm_001_002.sfx.exe<br> _ASURF_2017_Enu_64bit_dlm_002_002.sfx.exe<br>Alias Speedform 2017<br> _ALSSF_2017_Enu_Win_64bit_dlm.sfx.exe<br>Alias Autostudio 2017<br> _ALAUST_2017_Enu_64bit_dlm_001_003.sfx.exe<br> _ALAUST_2017_Enu_64bit_dlm_002_003.sfx.exe<br> _ALAUST_2017_Enu_64bit_dlm_003_003.sfx.exe<br>3ds Max 2017<br> _3ds_Max_2017_EFGJKPS_Win_64bit_001_002.sfx.exe<br> _3ds_Max_2017_EFGJKPS_Win_64bit_002_002.sfx.exe<br>Online Help for 3dsmax<br> <br>3dsmax OFFLINE Help<br> _max_2017_help/3dsMaxHelp.exe<br>for other languages go to:<br> -max/downloads/caas/downloads/content/download-and-install-3ds-max-product-help.html<br> -general-discussion/apple-mac-os-10-11-x-el-capitan-is-not-supported/m-p/5983674#M6245<br>mental ray Plugin, Satellte and Standalone for Maya 2016 Extension 2 (Direct links)<br>Maya 2016.5 is a part of Alias AutoStudio 2017<br>Windows<br> _2016_extension_2/mentalray_Plugin_for_Maya_2016_EXT2_EN_JP_ZH_Win_64bit_dlm.sfx.exe<br> _2016_extension_2/mentalray_Satellite_3_13_1_for_Maya_2016_EN_JP_ZH_Win_64bit.exe<br> _2016_extension_2/mentalray_Standalone_3_13_1_for_Autodesk_2016_EN_Win_64bit.exe<br>Linux<br> _2016_extension_2/mentalray_Plugin_for_Maya_2016_EXT2_EN_Linux_64bit.tgz<br> 
_2016_extension_2/mentalray_Satellite_3_13_1_for_Maya_2016_EN_Linux_64bit.tgz<br> _2016_extension_2/mentalray_Standalone_3_13_1_for_Autodesk_2016_EN_Linux_64bit.tgz<br>OSX<br> _2016_extension_2/mentalray_Plugin_for_Maya_2016_EXT2_EN_JP_ZH_Mac_OSX.dmg<br> _2016_extension_2/mentalray_Satellite_3_13_1_for_Maya_2016_EN_JP_ZH_Mac_OSX.dmg<br> _2016_extension_2/mentalray_Standalone_3_13_1_for_Autodesk_2016_EN_Mac_OSX.dmg<br>Offline help for Autodesk Maya 2016 Extension 2<br> _2016/MayaHelp2016_Ext2_enu.zip<br>Autodesk 3ds Max 2017 Sample Files<br> _sample_files/2017/Autodesk_3ds_Max_2017_English_Win_Samples_Files.exe<br>Open Light 2017 (32-bit and 64-bit)<br>Applies to AutoCAD Architecture 2017, and AutoCAD MEP 2017 (32-bit and 64-bit)<br>Open Light is a plug-in for AutoCAD Architecture / MEP and offers standard labels for objects, such as openings, windows and doors, which are common in Austria and part of Switzerland.<br>Open Light provides additional display properties for Plan 1-50 and Plan 1-100 representation to show dimensions of doors and windows automatically. <br> _downloads/Open_Light_2017_x64.exe<br> _downloads/Open_Light_2017.exe<br>Open Light 2017 Object Enabler (32-bit and 64-bit)<br>Applies to AutoCAD 2017, AutoCAD Architecture 2017, AutoCAD Civil 3D 2017, AutoCAD Electrical 2017, AutoCAD MEP 2017, AutoCAD Map 3D 2017, and AutoCAD Mechanical 2017Open Light Object Enabler is a freeware application distributed to Autodesk customers at no charge for the purpose of fully accessing Open Light objects in drawing files. Without this object enabler installed, you can share drawings using proxy graphics representations or the Export to AutoCAD command.<br> _downloads/Open_Light_2017_OE_x64.exe<br> _downloads/Open_Light_2017_OE.exe<br>Building Design Suite Premium 2017<br> _2017_Enu_Win_64bit_dlm_001_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_002_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_003_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_004_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_005_006.sfx.exe<br> _2017_Enu_Win_64bit_dlm_006_006.sfx.exe</p> aaccfb2cb3<br />
8
- <br />
9
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Car Mechanic Simulator 2018 [1.6.4 11 DLC] RePack [full Fix].md DELETED
@@ -1,7 +0,0 @@
1
- <h2>Car Mechanic Simulator 2018 [1.6.4 11 DLC] RePack [Full]</h2><br /><p><b><b>Download</b> &harr; <a href="https://imgfil.com/2uy1X4">https://imgfil.com/2uy1X4</a></b></p><br /><br />
2
-
3
- car mechanic simulator 2018 [1.6.4 11 dlc] repack [full] [eng]
4
- Release date: 2018; Genre: Simulator, Racing, Driving, 3D; Developer: Daedalic Entertainment; Publisher: Daedalic Entertainment; Platform: PC; Engine: Unity 5; Version: 1.1; Edition type: RePack; Interface language: English; Voice language: Russian; Tabletka (crack): Not required; System requirements: OS Windows Vista SP2, Intel Pentium 4 2.0 GHz or higher, 1 GB RAM, 11 GB hard disk space 8a78ff9644<br />
5
- <br />
6
- <br />
7
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Dangal Tamil Full Movie Download 720p [PATCHED].md DELETED
@@ -1,28 +0,0 @@
1
- <h2>Dangal Tamil Full Movie Download 720p</h2><br /><p><b><b>DOWNLOAD</b> &#10027; <a href="https://imgfil.com/2uy0P3">https://imgfil.com/2uy0P3</a></b></p><br /><br />
2
- <br />
3
- More about Dangal Stalkers 2: Online Hindi Dubbed 720p Movie We are living in a time where magic is not a myth.
4
-
5
- Designed for watching over 50 televisions. Since its release on DVD, the film has been available for all audiences and audiences of all kinds: Here we have selected the very best serials that will help you to pass the boring time.
6
-
7
- There are a lot of reasons why people love this show! Subscribe to Torrentfreak! With the help of the latest technology, we provide you with the latest torrent files in just a click.
8
-
9
- So, look no further for finding the best movies to stream free. Latest Tamil HD Bollywood Movies. The best Indian movies from the year Tamil cinemas are just starting. Do you need to download a torrent for a movie you have just seen?
10
-
11
- If so, here are the very best sites to get the latest and best Indian films online.
12
-
13
- You can download the latest movie for free in just a few seconds. We strive to bring you the very best in movie streaming.
14
-
15
- Comments about this video are disabled. It appears that you already have a YouTube account. Subscribe to TorrentFreak. I have lost my car keys.
16
-
17
- Best Movies at TorrentFiles
18
-
19
- The crew took me to the parking lot and I couldn't find my keys. When I was trying to find my keys, I started to cry because I was so angry. The next morning when I woke up, my wife asked me why I didn't talk to her last night.
20
-
21
- When I told her what happened, she said that I have lost my key because of her. I asked her why she lied to me, I think she tried to make me think that I didn't talk to her because of her. I told her that she can't do that to me because I love her and I would never leave her. I asked her what the hell she was thinking? I love you and I don't want to lose you like that!
22
-
23
- I have never felt this way about anyone before. I didn't know how to react to her, she was just hugging and kissing me all over my body.
24
-
25
- She grabbed my cock and started to play with it. As she played with my cock, she was moaning louder and louder. She started to lick the head of my cock and she kept sucking and licking my cock. As she was sucking my cock, I could feel that I was about to cum. 4fefd39f24<br />
26
- <br />
27
- <br />
28
- <p></p>
spaces/1line/AutoGPT/autogpt/speech/brian.py DELETED
@@ -1,40 +0,0 @@
1
- """ Brian speech module for autogpt """
2
- import os
3
-
4
- import requests
5
- from playsound import playsound
6
-
7
- from autogpt.speech.base import VoiceBase
8
-
9
-
10
- class BrianSpeech(VoiceBase):
11
- """Brian speech module for autogpt"""
12
-
13
- def _setup(self) -> None:
14
- """Setup the voices, API key, etc."""
15
- pass
16
-
17
- def _speech(self, text: str, _: int = 0) -> bool:
18
- """Speak text using Brian with the streamelements API
19
-
20
- Args:
21
- text (str): The text to speak
22
-
23
- Returns:
24
- bool: True if the request was successful, False otherwise
25
- """
26
- tts_url = (
27
- f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
28
- )
29
- response = requests.get(tts_url)
30
-
31
- if response.status_code == 200:
32
- with open("speech.mp3", "wb") as f:
33
- f.write(response.content)
34
- playsound("speech.mp3")
35
- os.remove("speech.mp3")
36
- return True
37
- else:
38
- print("Request failed with status code:", response.status_code)
39
- print("Response content:", response.content)
40
- return False
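The deleted module above builds its request by interpolating raw text straight into the URL and calls `requests.get` with no timeout. For anyone replacing this file, here is a minimal standalone sketch of the same StreamElements call with those two gaps closed; the endpoint, voice name, and `playsound` dependency are taken from the module itself, while the temp-file handling is an added assumption rather than AutoGPT's actual behavior:

```python
"""Sketch: the Brian TTS request with URL encoding and a timeout added."""
import os
import tempfile

import requests
from playsound import playsound


def speak_brian(text: str) -> bool:
    """Fetch Brian TTS audio from StreamElements and play it."""
    response = requests.get(
        "https://api.streamelements.com/kappa/v2/speech",
        params={"voice": "Brian", "text": text},  # params= URL-encodes the text
        timeout=30,  # avoid hanging forever if the API stalls
    )
    if response.status_code != 200:
        print("Request failed with status code:", response.status_code)
        return False
    # A named temp file avoids clobbering a shared speech.mp3 between calls.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as f:
        f.write(response.content)
        path = f.name
    try:
        playsound(path)
    finally:
        os.remove(path)
    return True
```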
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download CarX Drift Racing 2 Mod Apk 1.22.0 and Become a Drift King.md DELETED
@@ -1,110 +0,0 @@
1
- <br />
2
- <h1>Download CarX Drift Racing 2 Mod Apk 1.22.0 and Enjoy the Ultimate Drifting Experience</h1>
3
- <p>Do you love racing games? Do you want to feel the thrill of drifting around the corners and burning rubber on the asphalt? If yes, then you should download CarX Drift Racing 2 mod apk 1.22.0, the best drifting game for Android devices.</p>
4
- <p>CarX Drift Racing 2 is a sequel to the popular CarX Drift Racing game, which has over 50 million downloads on Google Play Store. In this game, you can choose from hundreds of cars, customize them, and drift on various tracks with realistic physics and graphics.</p>
5
- <h2>download carx drift racing 2 mod apk 1.22.0</h2><br /><p><b><b>Download Zip</b> ->->->-> <a href="https://urlin.us/2uT13B">https://urlin.us/2uT13B</a></b></p><br /><br />
6
- <p>In this article, we will tell you everything you need to know about CarX Drift Racing 2, why you should download its mod apk version, and some tips and tricks to improve your drifting skills.</p>
7
- <h2>What is CarX Drift Racing 2?</h2>
8
- <p>CarX Drift Racing 2 is a racing game that focuses on drifting, which is a driving technique where the driver intentionally oversteers the car to make it slide sideways. Drifting is not only fun, but also challenging and rewarding, as it requires skill and precision.</p>
9
- <h3>Features of CarX Drift Racing 2</h3>
10
- <p>Some of the features that make CarX Drift Racing 2 stand out from other racing games are:</p>
11
- <ul>
12
- <li>Over 200 cars from different brands and categories, such as sports cars, muscle cars, supercars, etc.</li>
13
- <li>A wide range of customization options, such as paint, vinyls, wheels, spoilers, etc.</li>
14
- <li>More than 60 tracks with different layouts, weather conditions, and time of day.</li>
15
- <li>Realistic car physics and sound effects that simulate the behavior and performance of real cars.</li>
16
- <li>A career mode where you can progress through various levels and championships.</li>
17
- <li>A multiplayer mode where you can compete with other players online in different modes, such as tandem drifting, sprint racing, etc.</li>
18
- <li>A ghost mode where you can race against your own or other players' best times.</li>
19
- <li>A tuning mode where you can adjust various parameters of your car, such as engine power, suspension stiffness, tire pressure, etc.</li>
20
- <li>A garage mode where you can store and manage your car collection.</li>
21
- </ul>
22
- <h3>How to play CarX Drift Racing 2</h3>
23
- <p>The gameplay of CarX Drift Racing 2 is simple and intuitive. You can control your car using various options, such as tilt, buttons, or steering wheel. You can also choose between automatic or manual transmission.</p>
24
- <p>The main goal of the game is to drift as much as possible and earn points based on your speed, angle, and duration of your drifts. You can also perform combos by linking multiple drifts together without losing control or hitting obstacles.</p>
50
- <p>The game has a scoring system that evaluates your performance based on various criteria, such as style, speed, line, angle, etc. You can also earn coins and gold by completing missions, achievements, and events.</p>
51
- <p>You can use these currencies to buy new cars or upgrade your existing ones. You can also unlock new tracks and modes by increasing your reputation level.</p>
52
- <h2>Why download CarX Drift Racing 2 mod apk 1.22.0?</h2>
53
- <p>While CarX Drift Racing 2 is a free game, it also has some limitations and drawbacks, such as ads, in-app purchases, and limited resources. If you want to enjoy the game without any restrictions or interruptions, you should download CarX Drift Racing 2 mod apk 1.22.0.</p>
54
- <h3>Benefits of CarX Drift Racing 2 mod apk 1.22.0</h3>
55
- <p>Some of the benefits of downloading CarX Drift Racing 2 mod apk 1.22.0 are:</p>
56
- <ul>
57
- <li>You can get unlimited coins and gold, which you can use to buy and upgrade any car you want.</li>
58
- <li>You can unlock all the tracks and modes, which will give you more variety and fun.</li>
59
- <li>You can remove all the ads, which will make your gaming experience smoother and more enjoyable.</li>
60
- <li>You can get access to premium features, such as VIP cars, exclusive events, and special rewards.</li>
61
- <li>You can enjoy the game with high-quality graphics and sound effects, which will enhance your immersion and realism.</li>
62
- </ul>
63
- <h3>How to download and install CarX Drift Racing 2 mod apk 1.22.0</h3>
64
- <p>Downloading and installing CarX Drift Racing 2 mod apk 1.22.0 is easy and fast. Just follow these simple steps:</p>
65
- <ol>
66
- <li>Click on the link below to download the CarX Drift Racing 2 mod apk 1.22.0 file.</li>
67
- <li>Allow your device to install apps from unknown sources by going to Settings > Security > Unknown Sources.</li>
68
- <li>Locate the downloaded file in your file manager and tap on it to install it (a command-line alternative for installing from a computer is sketched just after this list).</li>
69
- <li>Launch the game and enjoy the ultimate drifting experience.</li>
70
- </ol>
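The steps above describe the on-device flow; an install driven from a computer can script the same sideload through adb, the standard Android Debug Bridge CLI. A minimal sketch, assuming USB debugging is enabled, a device is connected, and a hypothetical APK file name (the article itself does not name the downloaded file):

```python
"""Sketch: sideloading a downloaded APK from a computer via adb."""
import subprocess

APK_PATH = "carx_drift_racing_2.apk"  # hypothetical file name


def sideload(apk_path: str) -> None:
    # `adb install -r` pushes and installs the APK on the connected
    # device, reinstalling over an existing copy while keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)


if __name__ == "__main__":
    sideload(APK_PATH)
```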
71
- <p><a href="">Download CarX Drift Racing 2 mod apk 1.22.0 here</a></p>
72
- <h2>Tips and tricks for CarX Drift Racing 2</h2>
73
- <p>If you want to improve your drifting skills and become a master of CarX Drift Racing 2, you should follow these tips and tricks:</p>
74
- <h3>Choose the right car and tune it</h3>
75
- <p>Not all cars are created equal in CarX Drift Racing 2. Some cars are better suited for drifting than others, depending on their power, weight, handling, and grip. You should choose a car that matches your style and preference, and experiment with different settings and configurations.</p>
76
- <p>You can tune your car in the tuning mode, where you can adjust various parameters, such as engine power, suspension stiffness, tire pressure, etc. You can also customize your car in the garage mode, where you can change its appearance, such as paint, vinyls, wheels, spoilers, etc.</p>
77
- <p>Tuning and customizing your car can make a big difference in your performance and score. You should try to find the optimal balance between speed and stability, and make your car look cool and unique.</p>
78
- <h3>Master the drifting techniques</h3>
79
- <p>Drifting is not just about sliding sideways. It is also about controlling your car's movement and direction with skill and precision. You should master the drifting techniques that will help you achieve better results and impress your opponents.</p>
80
- <p>Some of the drifting techniques that you should learn are:</p>
81
- <ul>
82
- <li>Handbrake drift: This is the most basic technique, where you use the handbrake to initiate a drift by locking the rear wheels.</li>
83
- <li>Clutch kick drift: This is a technique where you use the clutch to momentarily disengage the engine from the transmission, causing a sudden burst of power that breaks traction.</li>
84
- <li>Feint drift: This is a technique where you use the steering wheel to create a weight transfer that causes the rear end of the car to swing out.</li>
85
- <li>Braking drift: This is a technique where you use the brakes to reduce speed and induce oversteer while entering a corner.</li>
86
- <li>Power over drift: This is a technique where you use the throttle to increase power and maintain a drift by overcoming the grip of the tires.</li>
87
- </ul>
88
- <p>You should practice these techniques on different tracks and situations, and find out which ones work best for you. You should also learn how to control your car's angle, speed, and line while drifting, as these factors will affect your score and style.</p>
89
- <h3>Compete with other players online</h3>
90
- <p>If you want to test your skills and have more fun, you should compete with other players online in the multiplayer mode of CarX Drift Racing 2. You can choose from different modes, such as tandem drifting, sprint racing, etc., and challenge players from all over the world.</p>
91
- <p>You can also join or create a club or a team, where you can chat with other members, share your cars and tunes, and participate in tournaments and events.</p>
92
- <p>Competing with other players online will not only give you more excitement and challenge, but also help you improve your skills and learn from others. You can also earn more coins and gold, as well as reputation points, by winning races and drifting battles.</p>
93
- <h2>Conclusion</h2>
94
- <p>CarX Drift Racing 2 is a game that will satisfy your need for speed and adrenaline. It is a game that will let you experience the thrill of drifting on realistic tracks with realistic cars. It is a game that will let you customize your car and tune it to your liking. It is a game that will let you compete with other players online and show off your skills and style.</p>
95
- <p>If you want to enjoy the game to the fullest, you should download CarX Drift Racing 2 mod apk 1.22.0, which will give you unlimited resources, premium features, and no ads. You can download it from the link below, and follow the instructions to install it on your device.</p>
96
- <p>Download CarX Drift Racing 2 mod apk 1.22.0 now and enjoy the ultimate drifting experience.</p>
97
- <h2>FAQs</h2>
98
- <p>Here are some frequently asked questions about CarX Drift Racing 2 and its mod apk version:</p>
99
- <h3>Q: Is CarX Drift Racing 2 mod apk 1.22.0 safe to download and use?</h3>
100
- <p>A: Yes, CarX Drift Racing 2 mod apk 1.22.0 is safe to download and use, as long as you download it from a trusted source, such as the link we provided. It does not contain any viruses or malware, and it does not require any root or jailbreak access.</p>
101
- <h3>Q: Will I get banned from the game if I use CarX Drift Racing 2 mod apk 1.22.0?</h3>
102
- <p>A: No, you will not get banned from the game if you use CarX Drift Racing 2 mod apk 1.22.0, as it has an anti-ban feature that protects your account from detection. However, you should use it at your own risk, and be respectful of other players online.</p>
103
- <h3>Q: Can I update CarX Drift Racing 2 mod apk 1.22.0 to the latest version?</h3>
104
- <p>A: Yes, you can update CarX Drift Racing 2 mod apk 1.22.0 to the latest version, as long as you download it from the same source as before. You can also check for updates regularly on our website, where we will post the latest versions of the mod apk.</p>
105
- <h3>Q: Can I play CarX Drift Racing 2 offline?</h3>
106
- <p>A: Yes, you can play CarX Drift Racing 2 offline, as it does not require an internet connection to run. However, you will not be able to access some features of the game, such as multiplayer mode, online events, etc.</p>
107
- <h3>Q: How can I contact the developers of CarX Drift Racing 2?</h3>
108
- <p>A: You can contact the developers of CarX Drift Racing 2 by visiting their official website, where you can find their email address, social media accounts, and support forum.</p> 197e85843d<br />
109
- <br />
110
- <br />
spaces/1phancelerku/anime-remove-background/Bubble Shooter A Free and Fun Game for Your Laptop.md DELETED
@@ -1,148 +0,0 @@
1
- <br />
2
- <h1>Bubble Shooter Free Download for Laptop: How to Play and Enjoy this Classic Game</h1>
3
- <p>Do you love popping bubbles and solving puzzles? If so, you might want to try Bubble Shooter, one of the most popular and addictive games ever created. Bubble Shooter is a classic game that has been enjoyed by millions of people around the world for decades. In this article, we will tell you everything you need to know about Bubble Shooter, including what it is, how to download it for free on your laptop, and how to play and enjoy it.</p>
4
- <h2>What is Bubble Shooter?</h2>
5
- <p>Bubble Shooter is a simple yet challenging game that involves shooting colored bubbles at a cluster of bubbles on the top of the screen. The goal is to match three or more bubbles of the same color to make them pop and clear the board. The game ends when there are no more bubbles left or when the bubbles reach the bottom of the screen.</p>
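The matching rule just described (a group pops once three or more same-colored bubbles touch) can be pictured as a flood fill over a grid of colors. A toy illustration, using a plain square grid for simplicity rather than the staggered layout a real board uses; the grid and colors below are made up:

```python
"""Toy illustration of the "three or more of the same color" rule."""
from collections import deque


def same_color_cluster(grid, row, col):
    """Return all cells connected to (row, col) that share its color."""
    color = grid[row][col]
    seen = {(row, col)}
    queue = deque([(row, col)])
    while queue:
        r, c = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if (0 <= nr < len(grid) and 0 <= nc < len(grid[0])
                    and (nr, nc) not in seen and grid[nr][nc] == color):
                seen.add((nr, nc))
                queue.append((nr, nc))
    return seen


grid = [
    ["R", "R", "G"],
    ["R", "G", "G"],
    ["B", "B", "G"],
]
cluster = same_color_cluster(grid, 0, 0)  # the three connected "R" cells
print(len(cluster) >= 3)                  # True: this group would pop
```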
6
- <h2>bubble shooter free download for laptop</h2><br /><p><b><b>Download Zip</b> &#127775; <a href="https://jinyurl.com/2uNUdR">https://jinyurl.com/2uNUdR</a></b></p><br /><br />
7
- <h3>The history of Bubble Shooter</h3>
8
- <p>Bubble Shooter has a long and interesting history that dates back to the 1980s. The game was inspired by two arcade games: Bubble Bobble, released by Taito in 1986, and Puzzle Bobble, also known as Bust-a-Move, released by Taito in 1994. Puzzle Bobble was the first game to feature the bubble shooting mechanic that became the core of Bubble Shooter. In 2000, Puzzle Bobble was ported to Windows and renamed as Bubble Shooter. Since then, the game has been adapted and modified by many developers and publishers, resulting in hundreds of variations and versions of Bubble Shooter.</p>
9
- <h3>The gameplay of Bubble Shooter</h3>
10
- <p>The gameplay of Bubble Shooter is very simple and intuitive. You use your mouse or touchpad to aim and shoot bubbles at the cluster of bubbles on the top of the screen. You can see the color of the next bubble in the launcher at the bottom of the screen. You can also bounce the bubbles off the walls to reach tricky spots. When you match three or more bubbles of the same color, they pop and disappear, along with any bubbles that are hanging from them. You get points for every bubble you pop, and bonus points for popping more bubbles at once or dropping large groups of bubbles. You can also earn special bubbles that have different effects, such as bombs, stars, rainbows, or fireballs.</p>
11
- <h3>The benefits of playing Bubble Shooter</h3>
12
- <p>Bubble Shooter is not only fun and entertaining, but also beneficial for your brain and mood. Playing Bubble Shooter can help you improve your concentration, memory, logic, problem-solving, and spatial awareness skills. It can also help you relax, reduce stress, and boost your happiness. Moreover, playing Bubble Shooter can be a great way to pass time, kill boredom, or challenge yourself.</p>
13
- <h2>How to download Bubble Shooter for free on your laptop</h2>
14
- <p>If you want to play Bubble Shooter on your laptop, you have several options to choose from. One of the easiest and safest ways is to download it from Microsoft Store, which offers a variety of free and paid versions of Bubble Shooter for Windows 10 devices. Here are the steps to do so:</p>
15
- <h3>The requirements for running Bubble Shooter on your laptop</h3>
16
- <p>Before you download Bubble Shooter from Microsoft Store, make sure that your laptop meets the minimum requirements for running the game. These are:</p>
17
- <ul>
18
- <li>Operating system: Windows 10 version 17763.0 or higher</li>
19
- <li>Architecture: x64</li>
20
- <li>Internet connection: Required for some features</li>
21
- <li>Memory: 4 GB RAM</li>
22
- <li>Storage: 300 MB available space</li>
23
- </ul>
24
- <p>If your laptop does not meet these requirements, you may experience some issues or errors while playing the game. You may also need to update your Windows 10 to the latest version.</p>
25
- <h3>The steps to download and install Bubble Shooter from Microsoft Store</h3>
26
- <p>Once you have checked the requirements, you can follow these steps to download and install Bubble Shooter from Microsoft Store:</p>
27
- <ol>
28
- <li>Open Microsoft Store on your laptop. You can find it in the Start menu or by typing "Microsoft Store" in the search bar.</li>
29
- <li>In the search box, type "Bubble Shooter" and press Enter. You will see a list of results with different versions of Bubble Shooter.</li>
30
- <li>Select the version of Bubble Shooter that you want to download. You can read the description, reviews, and ratings of each version to help you decide. Some of the most popular and recommended versions are Bubble Shooter Classic, Bubble Shooter POP, and Bubble Shooter Legend.</li>
31
- <li>Click on the "Get" button to start the download. You may need to sign in with your Microsoft account if you have not done so already.</li>
32
- <li>Wait for the download to finish. It may take a few minutes depending on your internet speed and the size of the game.</li>
33
- <li>Once the download is complete, click on the "Install" button to install the game on your laptop.</li>
34
- <li>After the installation is done, you can launch the game by clicking on the "Play" button or by finding it in your Start menu or desktop.</li>
35
- </ol>
36
- <p>Congratulations! You have successfully downloaded and installed Bubble Shooter on your laptop. You can now enjoy playing this classic game anytime and anywhere.</p>
82
- <h3>The alternative ways to play Bubble Shooter online or offline</h3>
83
- <p>If you do not want to download Bubble Shooter from Microsoft Store, or if you want to try other versions of Bubble Shooter, you have some alternative ways to play this game online or offline. Here are some of them:</p>
84
- <ul>
85
- <li>Play Bubble Shooter online on your browser. There are many websites that offer free online versions of Bubble Shooter that you can play on your laptop without downloading anything. Some of these websites are <a href="">BubbleShooter.net</a>, <a href="">BubbleShooter.com</a>, and <a href="">Arkadium.com</a>. Just make sure that you have a stable internet connection and a compatible browser.</li>
86
- <li>Play Bubble Shooter offline on your laptop. If you do not have an internet connection or if you want to save some data, you can play Bubble Shooter offline on your laptop by downloading a standalone version of the game. You can find some free and safe downloads of Bubble Shooter for Windows 10 on websites like <a href="">GameTop.com</a>, <a href="">MyRealGames.com</a>, and <a href="">Softonic.com</a>. Just make sure that you scan the files for viruses before installing them.</li>
87
- <li>Play Bubble Shooter on your mobile device. If you prefer playing games on your smartphone or tablet, you can also download Bubble Shooter for free from Google Play Store or Apple App Store. There are many versions of Bubble Shooter available for Android and iOS devices, such as <a href="">Bubble Witch 3 Saga</a>, <a href="">Bubble Shooter 2021</a>, and <a href="">Angry Birds POP!</a>. Just make sure that you have enough space and battery on your device.</li>
88
- </ul>
89
- <p>As you can see, there are many ways to play Bubble Shooter on your laptop or other devices. You can choose the one that suits your preferences and needs best.</p>
90
- <h2>How to play and enjoy Bubble Shooter</h2>
91
- <p>Now that you have downloaded or accessed Bubble Shooter on your laptop, you may wonder how to play and enjoy this game. Don't worry, we will guide you through the basics and give you some tips and tricks to make the most out of this game.</p>
92
- <h3>The basic rules and tips for playing Bubble Shooter</h3>
93
- <p>The basic rules for playing Bubble Shooter are very simple and easy to follow. Here are some tips to help you get started:</p>
94
- <ul>
95
- <li>Aim carefully and shoot bubbles at the cluster of bubbles on the top of the screen. Try to match three or more bubbles of the same color to pop them and clear them from the board.</li>
96
- <li>Use your mouse or touchpad to move the launcher at the bottom of the screen. You can see the color of the next bubble in the launcher and adjust your angle accordingly. You can also bounce the bubbles off the walls to reach difficult spots.</li>
97
- <li>Be quick and efficient. The more bubbles you pop, the more points you get. However, if you miss or waste too many shots, the cluster of bubbles will move down one row. If the bubbles reach the bottom of the screen, you lose the game.</li>
98
- <li>Plan ahead and strategize. Try to pop large groups of bubbles or drop entire sections of bubbles to get bonus points. Also, try to clear the bubbles that are blocking other bubbles of the same color. This way, you can create more opportunities for popping and scoring.</li>
99
- <li>Use the special bubbles wisely. Some bubbles have different effects that can help you or hinder you in the game. For example, bombs can explode and pop all the nearby bubbles, stars can change the color of any bubble, rainbows can match any color, and fireballs can burn through any bubble. Learn how to use these special bubbles to your advantage and avoid using them when they are not needed.</li>
100
- </ul>
101
- <p>By following these basic rules and tips, you can play Bubble Shooter like a pro and have fun while doing so.</p>
102
- <h3>The different modes and levels of Bubble Shooter</h3>
103
- <p>Bubble Shooter is a game that never gets old or boring. There are many different modes and levels of Bubble Shooter that you can choose from, depending on your mood and preference. Here are some of them:</p>
104
- <ul>
105
- <li>Classic mode: This is the original and most popular mode of Bubble Shooter. It has a simple and elegant design, with no time limit or extra features. You just have to clear all the bubbles from the board and score as high as possible.</li>
106
- <li>Arcade mode: This is a more challenging and exciting mode of Bubble Shooter. It has a time limit and a faster pace, with more bubbles added every few seconds. You have to pop as many bubbles as you can before the time runs out or the board fills up.</li>
107
- <li>Puzzle mode: This is a more creative and strategic mode of Bubble Shooter. It has different levels with different layouts and objectives. You have to pop all the bubbles in each level using a limited number of shots or moves.</li>
108
- <li>Adventure mode: This is a more immersive and story-driven mode of Bubble Shooter. It has different worlds with different themes and backgrounds. You have to pop all the bubbles in each world and unlock new ones as you progress.</li>
109
- </ul>
110
- <p>By playing these different modes and levels of Bubble Shooter, you can experience different aspects and challenges of this game and keep yourself entertained for hours.</p>
111
- <h3>The best strategies and tricks for scoring high in Bubble Shooter</h3>
112
- <p>Bubble Shooter is a game that requires both skill and luck. However, there are some strategies and tricks that you can use to improve your chances of scoring high in this game. Here are some of them:</p>
113
- <ul>
114
- <li>Aim for the top: One of the best ways to score high in Bubble Shooter is to aim for the top rows of bubbles. By popping the top bubbles, you can drop large sections of bubbles below them and get massive points.</li>
115
- <li>Look for clusters: Another way to score high in Bubble Shooter is to look for clusters of bubbles that have the same color or are connected by one or two bubbles. By popping these clusters, you can clear more space on the board and create more opportunities for matching.</li>
116
- <li>Save the special bubbles: A third way to score high in Bubble Shooter is to save the special bubbles for later use. Instead of using them right away, try to save them until you have a large group of bubbles that you want to pop or clear. This way, you can maximize their effects and get more points.</li>
117
- <li>Use the walls: A fourth way to score high in Bubble Shooter is to use the walls to your advantage. By bouncing the bubbles off the walls, you can reach spots that are otherwise impossible or difficult to reach. You can also create angles that can help you match more bubbles.</li>
118
- </ul>
119
- <p>By using these strategies and tricks, you can score high in Bubble Shooter and impress yourself and others with your skills.</p>
120
- <h2>Conclusion</h2>
121
- <p>Bubble Shooter is a classic game that has been loved by millions of people for decades. It is a simple yet challenging game that involves shooting colored bubbles at a cluster of bubbles on the top of the screen. The goal is to match three or more bubbles of the same color to make them pop and clear the board.</p>
122
- <h3>Summary of the main points</h3>
123
- <p>In this article, we have covered everything you need to know about Bubble Shooter, including:</p>
124
- <ul>
125
- <li>What is Bubble Shooter? We have explained the history, gameplay, and benefits of this game.</li>
126
- <li>How to download Bubble Shooter for free on your laptop? We have shown you the easiest and safest way to download it from Microsoft Store, as well as some alternative ways to play it online or offline.</li>
127
- <li>How to play and enjoy Bubble Shooter? We have given you the basic rules and tips, the different modes and levels, and the best strategies and tricks for playing and scoring high in this game.</li>
128
- </ul>
129
- <p>By following this guide, you can play and enjoy Bubble Shooter on your laptop anytime and anywhere.</p>
130
- <h3>Call to action</h3>
131
- <p>What are you waiting for? Download Bubble Shooter for free on your laptop today and start popping bubbles and having fun. You will not regret it. Bubble Shooter is a game that can keep you entertained for hours, challenge your brain, and make you happy. It is a game that everyone can play and enjoy, regardless of age or skill level. It is a game that never gets old or boring. It is a game that you will love.</p>
132
- <p>Download Bubble Shooter for free on your laptop now and join the millions of people who are already addicted to this classic game. You will be glad you did.</p>
133
- <h2>FAQs</h2>
134
- <p>Here are some of the most frequently asked questions about Bubble Shooter:</p>
135
- <ul>
136
- <li><b>Q: How many levels are there in Bubble Shooter?</b></li>
137
- <li>A: The number of levels in Bubble Shooter depends on the version of the game you are playing. Some versions have a fixed number of levels, while others have an infinite number of levels that get harder as you progress. You can check the level number on the top of the screen or in the menu of the game.</li>
138
- <li><b>Q: How do I pause or resume the game?</b></li>
139
- <li>A: To pause or resume the game, you can click on the pause button on the bottom right corner of the screen or press the Esc key on your keyboard. You can also access the settings, sound, and help options from the pause menu.</li>
140
- <li><b>Q: How do I save or load my progress?</b></li>
141
- <li>A: To save or load your progress, you need to sign in with your Microsoft account if you are playing Bubble Shooter from Microsoft Store. You can also use cloud saving or local saving features if they are available in the version of the game you are playing. You can check the save or load options in the menu of the game.</li>
142
- <li><b>Q: How do I change the difficulty or mode of the game?</b></li>
143
- <li>A: To change the difficulty or mode of the game, you can select the option that suits your preference from the main menu of the game. You can choose from classic, arcade, puzzle, or adventure modes, and from easy, medium, hard, or expert difficulties. You can also change these options during the game from the pause menu.</li>
144
- <li><b>Q: How do I get more special bubbles or power-ups?</b></li>
145
- <li>A: To get more special bubbles or power-ups, you can either earn them by popping certain bubbles or buy them with coins or gems that you can collect by playing the game or watching ads. You can also get free gifts or rewards by logging in daily or completing achievements. You can check your inventory and shop options in the menu of the game.</li>
146
- </ul></p> 197e85843d<br />
147
- <br />
148
- <br />
spaces/1phancelerku/anime-remove-background/FIFA 22 Offline Apk Download Zip File with Obb and Data Included.md DELETED
@@ -1,103 +0,0 @@
1
-
2
- <h1>FIFA 22 Zip APK Download: Everything You Need to Know</h1>
3
- <p>If you are a fan of soccer games, you must have heard of FIFA 22, the latest installment in the popular FIFA series by EA Sports. FIFA 22 is a realistic and immersive soccer simulation game that features hundreds of teams, players, stadiums, and modes. You can play as your favorite soccer stars, create your own custom player or manager, compete with other players online, or enjoy the street-style Volta Football mode.</p>
4
- <p>But what if you want to play FIFA 22 on your mobile device without spending too much storage space or data? Well, there is a solution for that. You can download FIFA 22 zip apk, which is a compressed version of the game that you can install on your Android or iOS device. In this article, we will show you how to download FIFA 22 zip apk, what are its features and benefits, and what are the risks involved. Let's get started!</p>
5
- <h2>fifa 22 zip apk download</h2><br /><p><b><b>Download File</b> &#10022;&#10022;&#10022; <a href="https://jinyurl.com/2uNOTh">https://jinyurl.com/2uNOTh</a></b></p><br /><br />
6
- <h2>How to Download FIFA 22 Zip APK for Android</h2>
7
- <p>If you have an Android device, you can follow these steps to download and install FIFA 22 zip apk:</p>
8
- <ol>
9
- <li>Find a reliable source for the zip file. There are many websites that claim to offer FIFA 22 zip apk download, but not all of them are trustworthy. Some may contain malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download from reputable sources that have positive reviews and feedback from other users. For example, you can try this link that offers a safe and secure download of FIFA 22 zip apk.</li>
10
- <li>Download and extract the zip file. Once you have found a good source, you can download the zip file to your device. The file size may vary depending on the source, but it should be around 1 GB. After downloading, you need to extract the zip file using a file manager app that supports zip files. You can use apps like ZArchiver or RAR for this purpose. You should see two files inside the zip file: an APK file and a data folder.</li>
11
- <li>Install the APK file and copy the data folder. Before installing the APK file, you need to enable unknown sources in your device settings so that apps from outside Google Play Store can be installed: go to Settings > Security > Unknown Sources and toggle it on. Then locate the APK file in your file manager and tap on it to install it. After installing, do not launch the game yet; you need to copy the data folder to your internal storage first. The data folder contains all the game data, such as graphics, sounds, and settings. To copy it, open your file manager, long-press the extracted data folder, and select Copy. Then navigate to your internal storage and open the folder named Android. Look for a folder named obb; if it does not exist, create it by tapping the + icon and naming it obb. Finally, open the obb folder and paste the data folder inside it (a scripted sketch of this extract-and-copy flow follows this list).</li>
12
- <li>Launch the game and enjoy. Now, you are ready to play FIFA 22 on your Android device. You can find the game icon on your app drawer or home screen. Tap on it to launch the game and wait for it to load. You may need to accept some permissions and terms of service before you can start playing. You can also adjust the game settings according to your preferences and device specifications. Enjoy the game!</li>
13
- </ol>
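Steps 2 and 3 above boil down to plain file handling: unpack the archive, then place its data folder under Android/obb on internal storage. A scripted sketch of that flow with hypothetical file and path names (on a real phone the copy is done through a file manager or adb rather than by running Python on the device):

```python
"""Sketch: unpack a game zip and stage its data folder under Android/obb."""
import shutil
import zipfile
from pathlib import Path

ZIP_PATH = Path("fifa22.zip")                       # hypothetical download name
WORK_DIR = Path("unpacked")
OBB_ROOT = Path("/storage/emulated/0/Android/obb")  # typical internal-storage path

with zipfile.ZipFile(ZIP_PATH) as zf:
    zf.extractall(WORK_DIR)  # yields the APK plus the game's data folder

# Pick out the extracted data folder and copy it into Android/obb.
data_dir = next(p for p in WORK_DIR.iterdir() if p.is_dir())
OBB_ROOT.mkdir(parents=True, exist_ok=True)         # create Android/obb if missing
shutil.copytree(data_dir, OBB_ROOT / data_dir.name, dirs_exist_ok=True)
```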
14
- <h2>How to Download FIFA 22 Zip APK for iOS</h2>
15
- <p>If you have an iOS device, you can follow these steps to download and install FIFA 22 zip apk:</p>
16
- <ol>
17
- <li>Install AltStore on your device. AltStore is an alternative app store that allows you to install apps that are not available on the official App Store. You need a computer and a USB cable to install AltStore on your device. You can follow this guide to learn how to install AltStore on your device.</li>
18
- <li>Download the IPA file from a trusted source. An IPA file is the equivalent of an APK file for iOS devices. It contains the app data and installation instructions for iOS devices. You need to find a reliable source for the FIFA 22 IPA file, just like you did for the zip file for Android devices. You can try this link that offers a safe and secure download of FIFA 22 IPA file.</li>
19
- <li>Install the IPA file using AltStore. After downloading the IPA file, you need to transfer it to your device using a USB cable or a cloud service like Dropbox or Google Drive. Then, open AltStore on your device and tap on the + icon at the top left corner. You should see a list of IPA files that are available on your device or cloud service. Tap on the FIFA 22 IPA file and enter your Apple ID and password when prompted. AltStore will then install the app on your device.</li>
20
- <li>Trust the app and start playing. Before you can play FIFA 22 on your iOS device, you need to trust the app developer on your device settings. To do this, go to Settings > General > Device Management and look for the developer name that matches your Apple ID. Tap on it and then tap on Trust. Now, you can find FIFA 22 on your home screen or app library. Tap on it to launch the game and enjoy!</li>
21
- </ol>
22
- <h2>Features of FIFA 22 Zip APK</h2>
23
- <p>FIFA 22 zip apk is not just a compressed version of the game, but also a full-featured one that offers all the same features as the original game. Here are some of the features that you can enjoy with FIFA 22 zip apk:</p>
24
- <ul>
25
- <li>HyperMotion Technology: A new level of realism and immersion. FIFA 22 uses advanced motion capture technology that records the movements of real players in high-intensity situations. This allows the game to create more realistic animations, behaviors, and emotions for the players on the pitch. You can feel the intensity, passion, and drama of every match with HyperMotion Technology.</li>
26
- <li>Career Mode: Create your own legacy as a player or manager. FIFA 22 lets you choose your own path in Career Mode, where you can create your own custom player or manager and lead them to glory. You can customize your appearance, skills, attributes, personality, and style of play. You can also manage your team's tactics, transfers, finances, and morale. You can experience different scenarios and challenges depending on your choices and actions.</li>
27
- <li>Ultimate Team: Build your dream squad and compete online. FIFA 22 Ultimate Team is the most popular mode in FIFA 22, where you can create your own fantasy team from thousands of players from different leagues, nations, and eras. You can earn coins, packs, rewards, and players by playing matches, completing objectives, and participating in events. You can also trade players, items, and coins with other players online. You can compete with other players in various modes such as Division Rivals, Squad Battles, FUT Champions, FUT Draft, FUT Co-Op, FUT Friendlies, and FUT Events.</li>
28
- <li>Volta Football: Experience street soccer with style and flair. FIFA 22 Volta Football is a mode that lets you play soccer in different street locations around the world with different rules, styles, and cultures. You can create your own custom character and customize their appearance, clothing, accessories, tattoos, hairstyles, and celebrations. You can play solo or with your friends in different modes such as Volta Story, Volta Arcade, Volta Squads, Volta League, and Volta Tournament. You can also customize your own pitch, rules, and difficulty.</li>
29
- </ul>
30
- <h2>Benefits of FIFA 22 Zip APK Download</h2>
31
- <p>Downloading FIFA 22 zip apk has some advantages over downloading the original game from the official app stores. Here are some of the benefits that you can get with FIFA 22 zip apk download:</p>
32
- <ul>
33
- <li>Save storage space and data usage. FIFA 22 zip apk is a compressed version of the game that reduces the file size significantly. This means that you can save more storage space on your device and use less data when downloading the game. The original game may require up to 4 GB of storage space and data, while the zip apk may only require around 1 GB.</li>
34
- <li>Play offline without internet connection. FIFA 22 zip apk allows you to play the game offline without needing an internet connection. This is useful if you have a limited or unstable internet connection, or if you want to save your battery life. You can still enjoy most of the game features and modes offline, such as Career Mode, Volta Football, and Ultimate Team (offline matches only).</li>
35
- <li>Enjoy the latest updates and patches. FIFA 22 zip apk is updated regularly with the latest updates and patches from EA Sports. This means that you can enjoy the newest features, improvements, fixes, and content that are added to the game. You can also get access to the latest players, teams, kits, ratings, and events that are updated in the game.</li>
36
- </ul>
37
- <h2>Risks of FIFA 22 Zip APK Download</h2>
38
- <p>However, downloading FIFA 22 zip apk also has some risks and drawbacks that you should be aware of before you decide to do it. Here are some of the risks that you may face with FIFA 22 zip apk download:</p>
79
- <ul>
80
- <li>Potential malware or viruses in the zip file. As mentioned earlier, not all sources that offer FIFA 22 zip apk download are trustworthy. Some may contain malware or viruses that can infect your device or steal your personal information. To avoid this, you should only download from reputable sources that have positive reviews and feedback from other users. You should also scan the zip file with an antivirus app before extracting it, and verify its checksum if one is published (see the sketch after this list).</li>
81
- <li>Possible legal issues or bans from EA Sports. Downloading FIFA 22 zip apk may violate the terms of service and license agreement of EA Sports. This means that you may face legal issues or bans from EA Sports if they detect that you are using an unauthorized version of the game. To avoid this, you should only download FIFA 22 zip apk for personal use and not for commercial purposes. You should also not share or distribute the zip file to others.</li>
82
- <li>Compatibility or performance issues on some devices. FIFA 22 zip apk may not work properly on some devices or operating systems. This may cause compatibility or performance issues such as crashes, glitches, errors, or lagging. To avoid this, you should check the minimum requirements and specifications of the game before downloading it. You should also adjust the game settings according to your device specifications.</li>
83
- </ul>
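- <p>As a concrete precaution, if your download source publishes a SHA-256 checksum for the zip file, you can verify it before extracting. The sketch below is a minimal Python example; the file name and the expected hash are placeholders, not values from any official source.</p>
- <pre><code>import hashlib
- 
- # Placeholders: substitute the real file path and the hash published by your trusted source.
- ZIP_PATH = "fifa22.zip"
- EXPECTED_SHA256 = "replace-with-the-published-hash"
- 
- def sha256_of(path):
-     # Hash the file in 1 MB chunks so large downloads do not need to fit in memory.
-     h = hashlib.sha256()
-     with open(path, "rb") as f:
-         for chunk in iter(lambda: f.read(1024 * 1024), b""):
-             h.update(chunk)
-     return h.hexdigest()
- 
- if sha256_of(ZIP_PATH) == EXPECTED_SHA256:
-     print("Checksum matches - safe to extract.")
- else:
-     print("Checksum mismatch - do not extract this file.")
- </code></pre>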
84
- <h2>Conclusion</h2>
85
- <p>FIFA 22 is a realistic and immersive soccer simulation game that features hundreds of teams, players, stadiums, and modes. You can play as your favorite soccer stars, create your own custom player or manager, compete with other players online, or enjoy the street-style Volta Football mode.</p>
86
- <p>If you want to play FIFA 22 on your mobile device without using too much storage space or data, you can download FIFA 22 zip apk, a compressed version of the game that you can install on your Android device (APK packages are Android-only and do not run on iOS).</p>
87
- <p>In this article, we showed you how to download FIFA 22 zip apk, what are its features and benefits, and what are the risks involved. We hope that this article was helpful and informative for you.</p>
88
- <p>If you have any questions or comments about FIFA 22 zip apk download, feel free to leave them below. We would love to hear from you!</p>
89
- <h3>FAQs</h3>
90
- <ul>
91
- <li>Q: Is FIFA 22 zip apk free to download?</li>
92
- <li>A: Yes, FIFA 22 zip apk is free to download from various sources online. However, you may need to complete some surveys or offers before you can access the download link.</li>
93
- <li>Q: Is FIFA 22 zip apk safe to download?</li>
94
- <li>A: It depends on the source that you download from. Some sources may contain malware or viruses that can harm your device or steal your personal information. To avoid this, you should only download from reputable sources that have positive reviews and feedback from other users. You should also scan the zip file with an antivirus app before extracting it.</li>
95
- <li>Q: Is FIFA 22 zip apk legal to download?</li>
96
- <li>A: Downloading FIFA 22 zip apk may violate the terms of service and license agreement of EA Sports. This means that you may face legal issues or bans from EA Sports if they detect that you are using an unauthorized version of the game. To avoid this, you should only download FIFA 22 zip apk for personal use and not for commercial purposes. You should also not share or distribute the zip file to others.</li>
97
- <li>Q: Is FIFA 22 zip apk compatible with my device?</li>
98
- <li>A: FIFA 22 zip apk may not work properly on some devices or operating systems. This may cause compatibility or performance issues such as crashes, glitches, errors, or lagging. To avoid this, you should check the minimum requirements and specifications of the game before downloading it. You should also adjust the game settings according to your device specifications.</li>
99
- <li>Q: How can I update FIFA 22 zip apk?</li>
100
- <li>A: New builds of FIFA 22 zip apk are released regularly with the latest patches and content from EA Sports, including updated players, teams, kits, ratings, and events. To update, download the latest version of the zip file from a trusted source and follow the same installation steps as before.</li>
101
- </ul>
spaces/22h/vintedois-diffusion-v0-1/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Vintedois Diffusion V0 1
3
- emoji: 📚
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/232labs/VToonify/vtoonify/style_transfer.py DELETED
@@ -1,232 +0,0 @@
1
- import os
2
- #os.environ['CUDA_VISIBLE_DEVICES'] = "0"
3
- import argparse
4
- import numpy as np
5
- import cv2
6
- import dlib
7
- import torch
8
- from torchvision import transforms
9
- import torch.nn.functional as F
10
- from tqdm import tqdm
11
- from model.vtoonify import VToonify
12
- from model.bisenet.model import BiSeNet
13
- from model.encoder.align_all_parallel import align_face
14
- from util import save_image, load_image, visualize, load_psp_standalone, get_video_crop_parameter, tensor2cv2
15
-
16
-
17
- class TestOptions():
18
- def __init__(self):
19
-
20
- self.parser = argparse.ArgumentParser(description="Style Transfer")
21
- self.parser.add_argument("--content", type=str, default='./data/077436.jpg', help="path of the content image/video")
22
- self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image")
23
- self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D")
24
- self.parser.add_argument("--color_transfer", action="store_true", help="transfer the color of the style")
25
- self.parser.add_argument("--ckpt", type=str, default='./checkpoint/vtoonify_d_cartoon/vtoonify_s_d.pt', help="path of the saved model")
26
- self.parser.add_argument("--output_path", type=str, default='./output/', help="path of the output images")
27
- self.parser.add_argument("--scale_image", action="store_true", help="resize and crop the image to best fit the model")
28
- self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder")
29
- self.parser.add_argument("--exstyle_path", type=str, default=None, help="path of the extrinsic style code")
30
- self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model")
31
- self.parser.add_argument("--video", action="store_true", help="if true, video stylization; if false, image stylization")
32
- self.parser.add_argument("--cpu", action="store_true", help="if true, only use cpu")
33
- self.parser.add_argument("--backbone", type=str, default='dualstylegan', help="dualstylegan | toonify")
34
- self.parser.add_argument("--padding", type=int, nargs=4, default=[200,200,200,200], help="left, right, top, bottom paddings to the face center")
35
- self.parser.add_argument("--batch_size", type=int, default=4, help="batch size of frames when processing video")
36
- self.parser.add_argument("--parsing_map_path", type=str, default=None, help="path of the refined parsing map of the target video")
37
-
38
- def parse(self):
39
- self.opt = self.parser.parse_args()
40
- if self.opt.exstyle_path is None:
41
- self.opt.exstyle_path = os.path.join(os.path.dirname(self.opt.ckpt), 'exstyle_code.npy')
42
- args = vars(self.opt)
43
- print('Load options')
44
- for name, value in sorted(args.items()):
45
- print('%s: %s' % (str(name), str(value)))
46
- return self.opt
47
-
48
- if __name__ == "__main__":
49
-
50
- parser = TestOptions()
51
- args = parser.parse()
52
- print('*'*98)
53
-
54
-
55
- device = "cpu" if args.cpu else "cuda"
56
-
57
- transform = transforms.Compose([
58
- transforms.ToTensor(),
59
- transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]),
60
- ])
61
-
62
- vtoonify = VToonify(backbone = args.backbone)
63
- vtoonify.load_state_dict(torch.load(args.ckpt, map_location=lambda storage, loc: storage)['g_ema'])
64
- vtoonify.to(device)
65
-
66
- parsingpredictor = BiSeNet(n_classes=19)
67
- parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage))
68
- parsingpredictor.to(device).eval()
69
-
70
- modelname = './checkpoint/shape_predictor_68_face_landmarks.dat'
71
- if not os.path.exists(modelname):
72
- import wget, bz2
73
- wget.download('http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2', modelname+'.bz2')
74
- zipfile = bz2.BZ2File(modelname+'.bz2')
75
- data = zipfile.read()
76
- open(modelname, 'wb').write(data)
77
- landmarkpredictor = dlib.shape_predictor(modelname)
78
-
79
- pspencoder = load_psp_standalone(args.style_encoder_path, device)
80
-
81
- if args.backbone == 'dualstylegan':
82
- exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item()
83
- stylename = list(exstyles.keys())[args.style_id]
84
- exstyle = torch.tensor(exstyles[stylename]).to(device)
85
- with torch.no_grad():
86
- exstyle = vtoonify.zplus2wplus(exstyle)
87
-
88
- if args.video and args.parsing_map_path is not None:
89
- x_p_hat = torch.tensor(np.load(args.parsing_map_path))
90
-
91
- print('Load models successfully!')
92
-
93
-
94
- filename = args.content
95
- basename = os.path.basename(filename).split('.')[0]
96
- scale = 1
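- # the [0.125, 0.375, 0.375, 0.125] kernel below is a binomial approximation of a Gaussian low-pass filter; applied via cv2.sepFilter2D before downsampling, it reduces aliasing and flicker.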
97
- kernel_1d = np.array([[0.125],[0.375],[0.375],[0.125]])
98
- print('Processing ' + os.path.basename(filename) + ' with vtoonify_' + args.backbone[0])
99
- if args.video:
100
- cropname = os.path.join(args.output_path, basename + '_input.mp4')
101
- savename = os.path.join(args.output_path, basename + '_vtoonify_' + args.backbone[0] + '.mp4')
102
-
103
- video_cap = cv2.VideoCapture(filename)
104
- num = int(video_cap.get(7))
105
-
106
- first_valid_frame = True
107
- batch_frames = []
108
- for i in tqdm(range(num)):
109
- success, frame = video_cap.read()
110
- if not success:
111
- raise RuntimeError('failed to read video frame %d' % i)
112
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
113
- # We preprocess the video by detecting the face in the first frame,
114
- # and resizing the frame so that the eye distance is 64 pixels.
115
- # Centered on the eyes, we crop the first frame to almost 400x400 (based on args.padding).
116
- # All other frames use the same resizing and cropping parameters as the first frame.
117
- if first_valid_frame:
118
- if args.scale_image:
119
- paras = get_video_crop_parameter(frame, landmarkpredictor, args.padding)
120
- if paras is None:
121
- continue
122
- h,w,top,bottom,left,right,scale = paras
123
- H, W = int(bottom-top), int(right-left)
124
- # for HR video, we apply gaussian blur to the frames to avoid flickers caused by bilinear downsampling
125
- # this can also prevent over-sharp stylization results.
126
- if scale <= 0.75:
127
- frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
128
- if scale <= 0.375:
129
- frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
130
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
131
- else:
132
- H, W = frame.shape[0], frame.shape[1]
133
-
134
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
135
- videoWriter = cv2.VideoWriter(cropname, fourcc, video_cap.get(5), (W, H))
136
- videoWriter2 = cv2.VideoWriter(savename, fourcc, video_cap.get(5), (4*W, 4*H))
137
-
138
- # For each video, we detect and align the face in the first frame for pSp to obtain the style code.
139
- # This style code is used for all other frames.
140
- with torch.no_grad():
141
- I = align_face(frame, landmarkpredictor)
142
- I = transform(I).unsqueeze(dim=0).to(device)
143
- s_w = pspencoder(I)
144
- s_w = vtoonify.zplus2wplus(s_w)
145
- if vtoonify.backbone == 'dualstylegan':
146
- if args.color_transfer:
147
- s_w = exstyle
148
- else:
149
- s_w[:,:7] = exstyle[:,:7]
150
- first_valid_frame = False
151
- elif args.scale_image:
152
- if scale <= 0.75:
153
- frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
154
- if scale <= 0.375:
155
- frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
156
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
157
-
158
- videoWriter.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
159
-
160
- batch_frames += [transform(frame).unsqueeze(dim=0).to(device)]
161
-
162
- if len(batch_frames) == args.batch_size or (i+1) == num:
163
- x = torch.cat(batch_frames, dim=0)
164
- batch_frames = []
165
- with torch.no_grad():
166
- # parsing network works best on 512x512 images, so we predict parsing maps on upsmapled frames
167
- # followed by downsampling the parsing maps
168
- if args.video and args.parsing_map_path is not None:
169
- x_p = x_p_hat[i+1-x.size(0):i+1].to(device)
170
- else:
171
- x_p = F.interpolate(parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0],
172
- scale_factor=0.5, recompute_scale_factor=False).detach()
173
- # we give parsing maps lower weight (1/16)
174
- inputs = torch.cat((x, x_p/16.), dim=1)
175
- # d_s has no effect when backbone is toonify
176
- y_tilde = vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = args.style_degree)
177
- y_tilde = torch.clamp(y_tilde, -1, 1)
178
- for k in range(y_tilde.size(0)):
179
- videoWriter2.write(tensor2cv2(y_tilde[k].cpu()))
180
-
181
- videoWriter.release()
182
- videoWriter2.release()
183
- video_cap.release()
184
-
185
-
186
- else:
187
- cropname = os.path.join(args.output_path, basename + '_input.jpg')
188
- savename = os.path.join(args.output_path, basename + '_vtoonify_' + args.backbone[0] + '.jpg')
189
-
190
- frame = cv2.imread(filename)
191
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
192
-
193
- # We detect the face in the image, and resize the image so that the eye distance is 64 pixels.
194
- # Centered on the eyes, we crop the image to almost 400x400 (based on args.padding).
195
- if args.scale_image:
196
- paras = get_video_crop_parameter(frame, landmarkpredictor, args.padding)
197
- if paras is not None:
198
- h,w,top,bottom,left,right,scale = paras
199
- H, W = int(bottom-top), int(right-left)
200
- # for HR image, we apply gaussian blur to it to avoid over-sharp stylization results
201
- if scale <= 0.75:
202
- frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
203
- if scale <= 0.375:
204
- frame = cv2.sepFilter2D(frame, -1, kernel_1d, kernel_1d)
205
- frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
206
-
207
- with torch.no_grad():
208
- I = align_face(frame, landmarkpredictor)
209
- I = transform(I).unsqueeze(dim=0).to(device)
210
- s_w = pspencoder(I)
211
- s_w = vtoonify.zplus2wplus(s_w)
212
- if vtoonify.backbone == 'dualstylegan':
213
- if args.color_transfer:
214
- s_w = exstyle
215
- else:
216
- s_w[:,:7] = exstyle[:,:7]
217
-
218
- x = transform(frame).unsqueeze(dim=0).to(device)
219
- # parsing network works best on 512x512 images, so we predict parsing maps on upsampled frames
220
- # followed by downsampling the parsing maps
221
- x_p = F.interpolate(parsingpredictor(2*(F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)))[0],
222
- scale_factor=0.5, recompute_scale_factor=False).detach()
223
- # we give parsing maps lower weight (1/16)
224
- inputs = torch.cat((x, x_p/16.), dim=1)
225
- # d_s has no effect when backbone is toonify
226
- y_tilde = vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = args.style_degree)
227
- y_tilde = torch.clamp(y_tilde, -1, 1)
228
-
229
- cv2.imwrite(cropname, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
230
- save_image(y_tilde[0].cpu(), savename)
231
-
232
- print('Style transferred successfully!')
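- 
- # Example invocations (file names are illustrative; flags follow the argparse defaults above):
- #   python style_transfer.py --content ./data/077436.jpg --style_id 26 --style_degree 0.5 --scale_image
- #   python style_transfer.py --content ./data/your_video.mp4 --video --batch_size 4 --scale_image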
spaces/232labs/VToonify/vtoonify/train_vtoonify_d.py DELETED
@@ -1,515 +0,0 @@
1
- import os
2
- #os.environ['CUDA_VISIBLE_DEVICES'] = "0"
3
- import argparse
4
- import math
5
- import random
6
-
7
- import numpy as np
8
- import torch
9
- from torch import nn, optim
10
- from torch.nn import functional as F
11
- from torch.utils import data
12
- import torch.distributed as dist
13
- from torchvision import transforms, utils
14
- from tqdm import tqdm
15
- from PIL import Image
16
- from util import *
17
-
18
- from model.stylegan import lpips
19
- from model.stylegan.model import Generator, Downsample
20
- from model.vtoonify import VToonify, ConditionalDiscriminator
21
- from model.bisenet.model import BiSeNet
22
- from model.simple_augment import random_apply_affine
23
- from model.stylegan.distributed import (
24
- get_rank,
25
- synchronize,
26
- reduce_loss_dict,
27
- reduce_sum,
28
- get_world_size,
29
- )
30
-
31
- class TrainOptions():
32
- def __init__(self):
33
-
34
- self.parser = argparse.ArgumentParser(description="Train VToonify-D")
35
- self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations")
36
- self.parser.add_argument("--batch", type=int, default=8, help="batch sizes for each gpus")
37
- self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate")
38
- self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training")
39
- self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration")
40
- self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint")
41
- self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint")
42
- self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving a checkpoint")
43
-
44
- self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss")
45
- self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse recontruction loss")
46
- self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss")
47
- self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss")
48
- self.parser.add_argument("--msk_loss", type=float, default=0.0005, help="the weight of attention mask loss")
49
-
50
- self.parser.add_argument("--fix_degree", action="store_true", help="use a fixed style degree")
51
- self.parser.add_argument("--fix_style", action="store_true", help="use a fixed style image")
52
- self.parser.add_argument("--fix_color", action="store_true", help="use the original color (no color transfer)")
53
- self.parser.add_argument("--exstyle_path", type=str, default='./checkpoint/cartoon/refined_exstyle_code.npy', help="path of the extrinsic style code")
54
- self.parser.add_argument("--style_id", type=int, default=26, help="the id of the style image")
55
- self.parser.add_argument("--style_degree", type=float, default=0.5, help="style degree for VToonify-D")
56
-
57
- self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model")
58
- self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents")
59
- self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/cartoon/generator.pt', help="path to the stylegan model")
60
- self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model")
61
- self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder")
62
-
63
- self.parser.add_argument("--name", type=str, default='vtoonify_d_cartoon', help="saved model name")
64
- self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder")
65
-
66
- def parse(self):
67
- self.opt = self.parser.parse_args()
68
- if self.opt.encoder_path is None:
69
- self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt')
70
- args = vars(self.opt)
71
- if self.opt.local_rank == 0:
72
- print('Load options')
73
- for name, value in sorted(args.items()):
74
- print('%s: %s' % (str(name), str(value)))
75
- return self.opt
76
-
77
-
78
- # pretrain E of vtoonify.
79
- # We train E so that its last-layer feature matches the original 8th-layer input feature of G1
80
- # See Model initialization in Sec. 4.2.2 for the detail
81
- def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device):
82
- pbar = range(args.iter)
83
-
84
- if get_rank() == 0:
85
- pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
86
-
87
- recon_loss = torch.tensor(0.0, device=device)
88
- loss_dict = {}
89
-
90
- if args.distributed:
91
- g_module = generator.module
92
- else:
93
- g_module = generator
94
-
95
- accum = 0.5 ** (32 / (10 * 1000))
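- # 0.5 ** (32 / 10000) is the standard StyleGAN EMA decay: g_ema's weights have a half-life of roughly 10k images at an effective batch size of 32.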
96
-
97
- requires_grad(g_module.encoder, True)
98
-
99
- for idx in pbar:
100
- i = idx + args.start_iter
101
-
102
- if i > args.iter:
103
- print("Done!")
104
- break
105
-
106
- # during pretraining, the last 11 layers of DualStyleGAN (for color transfer) are not used,
107
- # so args.fix_color has no effect and the last 11 elements in weight are unused.
108
- if args.fix_degree:
109
- d_s = args.style_degree
110
- else:
111
- d_s = 0 if i <= args.iter / 4.0 else np.random.rand(1)[0]
112
- weight = [d_s] * 18
113
-
114
- # sample pre-saved w''=E_s(s)
115
- if args.fix_style:
116
- style = styles[args.style_id:args.style_id+1].repeat(args.batch,1,1)
117
- else:
118
- style = styles[torch.randint(0, styles.size(0), (args.batch,))]
119
-
120
- with torch.no_grad():
121
- # during pretraining, no geometric transformations are applied.
122
- noise_sample = torch.randn(args.batch, 512).cuda()
123
- ws_ = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w
124
- ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n
125
- img_gen, _ = g_ema.stylegan()([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0)
126
- img_gen = torch.clamp(img_gen, -1, 1).detach() # x''
127
- img_gen512 = down(img_gen.detach())
128
- img_gen256 = down(img_gen512.detach()) # image part of x''_down
129
- mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0]
130
- real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1) # x''_down
131
- # f_G1^(8)(w', w'', d_s)
132
- real_feat, real_skip = g_ema.generator([ws_], style, input_is_latent=True, return_feat=True,
133
- truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight)
134
-
135
- real_input = real_input.detach()
136
- real_feat = real_feat.detach()
137
- real_skip = real_skip.detach()
138
-
139
- # f_E^(last)(x''_down, w'', d_s)
140
- fake_feat, fake_skip = generator(real_input, style, d_s, return_feat=True)
141
-
142
- # L_E in Eq.(8)
143
- recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip)
144
-
145
- loss_dict["emse"] = recon_loss
146
-
147
- generator.zero_grad()
148
- recon_loss.backward()
149
- g_optim.step()
150
-
151
- accumulate(g_ema.encoder, g_module.encoder, accum)
152
-
153
- loss_reduced = reduce_loss_dict(loss_dict)
154
-
155
- emse_loss_val = loss_reduced["emse"].mean().item()
156
-
157
- if get_rank() == 0:
158
- pbar.set_description(
159
- (
160
- f"iter: {i:d}; emse: {emse_loss_val:.3f}"
161
- )
162
- )
163
-
164
- if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter:
165
- if (i+1) == args.iter:
166
- savename = f"checkpoint/%s/pretrain.pt"%(args.name)
167
- else:
168
- savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1)
169
- torch.save(
170
- {
171
- #"g": g_module.encoder.state_dict(),
172
- "g_ema": g_ema.encoder.state_dict(),
173
- },
174
- savename,
175
- )
176
-
177
-
178
- # generate paired data and train vtoonify, see Sec. 4.2.2 for the detail
179
- def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device):
180
- pbar = range(args.iter)
181
-
182
- if get_rank() == 0:
183
- pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=130, dynamic_ncols=False)
184
-
185
- d_loss = torch.tensor(0.0, device=device)
186
- g_loss = torch.tensor(0.0, device=device)
187
- grec_loss = torch.tensor(0.0, device=device)
188
- gfeat_loss = torch.tensor(0.0, device=device)
189
- temporal_loss = torch.tensor(0.0, device=device)
190
- gmask_loss = torch.tensor(0.0, device=device)
191
- loss_dict = {}
192
-
193
- suffix = '_s'
194
- if args.fix_style:
195
- suffix += '%03d' % (args.style_id)
196
- suffix += '_d'
197
- if args.fix_degree:
198
- suffix += '%1.1f' % (args.style_degree)
199
- if not args.fix_color:
200
- suffix += '_c'
201
-
202
- if args.distributed:
203
- g_module = generator.module
204
- d_module = discriminator.module
205
-
206
- else:
207
- g_module = generator
208
- d_module = discriminator
209
-
210
- accum = 0.5 ** (32 / (10 * 1000))
211
-
212
- for idx in pbar:
213
- i = idx + args.start_iter
214
-
215
- if i > args.iter:
216
- print("Done!")
217
- break
218
-
219
- # sample style degree
220
- if args.fix_degree or idx == 0 or i == 0:
221
- d_s = args.style_degree
222
- else:
223
- d_s = np.random.randint(0,6) / 5.0
224
- if args.fix_color:
225
- weight = [d_s] * 7 + [0] * 11
226
- else:
227
- weight = [d_s] * 7 + [1] * 11
228
- # style degree condition for discriminator
229
- degree_label = torch.zeros(args.batch, 1).to(device) + d_s
230
-
231
- # style index condition for discriminator
232
- style_ind = torch.randint(0, styles.size(0), (args.batch,))
233
- if args.fix_style or idx == 0 or i == 0:
234
- style_ind = style_ind * 0 + args.style_id
235
- # sample pre-saved E_s(s)
236
- style = styles[style_ind]
237
-
238
- with torch.no_grad():
239
- noise_sample = torch.randn(args.batch, 512).cuda()
240
- wc = g_ema.stylegan().style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w
241
- wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n
242
- wc = wc.detach()
243
- xc, _ = g_ema.stylegan()([wc], input_is_latent=True, truncation=0.5, truncation_latent=0)
244
- xc = torch.clamp(xc, -1, 1).detach() # x''
245
- if not args.fix_color and args.fix_style: # only transfer this fixed style's color
246
- xl = style.clone()
247
- else:
248
- xl = pspencoder(F.adaptive_avg_pool2d(xc, 256))
249
- xl = g_ema.zplus2wplus(xl) # E_s(x''_down)
250
- xl = torch.cat((style[:,0:7], xl[:,7:18]), dim=1).detach() # w'' = concatenate E_s(s) and E_s(x''_down)
251
- xs, _ = g_ema.generator([wc], xl, input_is_latent=True,
252
- truncation=0.5, truncation_latent=0, use_res=True, interp_weights=weight)
253
- xs = torch.clamp(xs, -1, 1).detach() # y'=G1(w', w'', d_s, d_c)
254
- # apply color jitter to w'. we fuse w' of the current iteration with w' of the last iteration
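- # the blend weights i/(iter/2) - 1 and 2 - i/(iter/2) sum to 1 and move linearly from (0, 1) to (1, 0) over the second half of training.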
255
- if idx > 0 and i >= (args.iter/2.0) and (not args.fix_color and not args.fix_style):
256
- wcfuse = wc.clone()
257
- wcfuse[:,7:] = wc_[:,7:] * (i/(args.iter/2.0)-1) + wcfuse[:,7:] * (2-i/(args.iter/2.0))
258
- xc, _ = g_ema.stylegan()([wcfuse], input_is_latent=True, truncation=0.5, truncation_latent=0)
259
- xc = torch.clamp(xc, -1, 1).detach() # x'
260
- wc_ = wc.clone() # wc_ is the w' in the last iteration
261
- # during training, random geometric transformations are applied.
262
- imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None)
263
- real_input1024 = imgs[:,0:3].detach() # image part of x
264
- real_input512 = down(real_input1024).detach()
265
- real_input256 = down(real_input512).detach()
266
- mask512 = parsingpredictor(2*real_input512)[0]
267
- mask256 = down(mask512).detach()
268
- mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x
269
- real_output = imgs[:,3:].detach() # y
270
- real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down
271
- # for log, sample a fixed input-output pair (x_down, y, w'', d_s)
272
- if idx == 0 or i == 0:
273
- samplein = real_input.clone().detach()
274
- sampleout = real_output.clone().detach()
275
- samplexl = xl.clone().detach()
276
- sampleds = d_s
277
-
278
- ###### This part is for training discriminator
279
-
280
- requires_grad(g_module.encoder, False)
281
- requires_grad(g_module.fusion_out, False)
282
- requires_grad(g_module.fusion_skip, False)
283
- requires_grad(discriminator, True)
284
-
285
- fake_output = generator(real_input, xl, d_s)
286
- fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind)
287
- real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256), degree_label, style_ind)
288
-
289
- # L_adv in Eq.(3)
290
- d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss
291
- loss_dict["d"] = d_loss
292
-
293
- discriminator.zero_grad()
294
- d_loss.backward()
295
- d_optim.step()
296
-
297
- ###### This part is for training generator (encoder and fusion modules)
298
-
299
- requires_grad(g_module.encoder, True)
300
- requires_grad(g_module.fusion_out, True)
301
- requires_grad(g_module.fusion_skip, True)
302
- requires_grad(discriminator, False)
303
-
304
- fake_output, m_Es = generator(real_input, xl, d_s, return_mask=True)
305
- fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256), degree_label, style_ind)
306
-
307
- # L_adv in Eq.(3)
308
- g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss
309
- # L_rec in Eq.(2)
310
- grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss
311
- gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 will out of memory
312
- F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 will get blurry output
313
-
314
- # L_msk in Eq.(9)
315
- gmask_loss = torch.tensor(0.0, device=device)
316
- if not args.fix_degree or args.msk_loss > 0:
317
- for jj, m_E in enumerate(m_Es):
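- # gd_s maps the style degree d_s in [0, 1] to a mask-mean budget shrinking from 1.0 (d_s = 0) to 0.1 (d_s = 1); only mask means above this budget are penalized below.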
318
- gd_s = (1 - d_s) ** 2 * 0.9 + 0.1
319
- gmask_loss += F.relu(torch.mean(m_E)-gd_s) * args.msk_loss
320
-
321
- loss_dict["g"] = g_loss
322
- loss_dict["gr"] = grec_loss
323
- loss_dict["gf"] = gfeat_loss
324
- loss_dict["msk"] = gmask_loss
325
-
326
- w = random.randint(0,1024-896)
327
- h = random.randint(0,1024-896)
328
- crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach()
329
- crop_input = down(down(crop_input))
330
- crop_fake_output = fake_output[:,:,w:w+896,h:h+896]
331
- fake_crop_output = generator(crop_input, xl, d_s)
332
- # L_tmp in Eq.(4), gradually increase the weight of L_tmp
333
- temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss
334
- loss_dict["tp"] = temporal_loss
335
-
336
- generator.zero_grad()
337
- (g_loss + grec_loss + gfeat_loss + temporal_loss + gmask_loss).backward()
338
- g_optim.step()
339
-
340
- accumulate(g_ema.encoder, g_module.encoder, accum)
341
- accumulate(g_ema.fusion_out, g_module.fusion_out, accum)
342
- accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum)
343
-
344
- loss_reduced = reduce_loss_dict(loss_dict)
345
-
346
- d_loss_val = loss_reduced["d"].mean().item()
347
- g_loss_val = loss_reduced["g"].mean().item()
348
- gr_loss_val = loss_reduced["gr"].mean().item()
349
- gf_loss_val = loss_reduced["gf"].mean().item()
350
- tmp_loss_val = loss_reduced["tp"].mean().item()
351
- msk_loss_val = loss_reduced["msk"].mean().item()
352
-
353
- if get_rank() == 0:
354
- pbar.set_description(
355
- (
356
- f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; "
357
- f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}; msk: {msk_loss_val:.3f}"
358
- )
359
- )
360
-
361
- if i == 0 or (i+1) % args.log_every == 0 or (i+1) == args.iter:
362
- with torch.no_grad():
363
- g_ema.eval()
364
- sample1 = g_ema(samplein, samplexl, sampleds)
365
- if args.fix_degree:
366
- sample = F.interpolate(torch.cat((sampleout, sample1), dim=0), 256)
367
- else:
368
- sample2 = g_ema(samplein, samplexl, d_s)
369
- sample = F.interpolate(torch.cat((sampleout, sample1, sample2), dim=0), 256)
370
- utils.save_image(
371
- sample,
372
- f"log/%s/%05d.jpg"%(args.name, (i+1)),
373
- nrow=int(args.batch),
374
- normalize=True,
375
- range=(-1, 1),
376
- )
377
-
378
- if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter:
379
- if (i+1) == args.iter:
380
- savename = f"checkpoint/%s/vtoonify%s.pt"%(args.name, surffix)
381
- else:
382
- savename = f"checkpoint/%s/vtoonify%s_%05d.pt"%(args.name, surffix, i+1)
383
- torch.save(
384
- {
385
- #"g": g_module.state_dict(),
386
- #"d": d_module.state_dict(),
387
- "g_ema": g_ema.state_dict(),
388
- },
389
- savename,
390
- )
391
-
392
-
393
-
394
- if __name__ == "__main__":
395
-
396
- device = "cuda"
397
- parser = TrainOptions()
398
- args = parser.parse()
399
- if args.local_rank == 0:
400
- print('*'*98)
401
- if not os.path.exists("log/%s/"%(args.name)):
402
- os.makedirs("log/%s/"%(args.name))
403
- if not os.path.exists("checkpoint/%s/"%(args.name)):
404
- os.makedirs("checkpoint/%s/"%(args.name))
405
-
406
- n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
407
- args.distributed = n_gpu > 1
408
-
409
- if args.distributed:
410
- torch.cuda.set_device(args.local_rank)
411
- torch.distributed.init_process_group(backend="nccl", init_method="env://")
412
- synchronize()
413
-
414
- generator = VToonify(backbone = 'dualstylegan').to(device)
415
- generator.apply(weights_init)
416
- g_ema = VToonify(backbone = 'dualstylegan').to(device)
417
- g_ema.eval()
418
-
419
- ckpt = torch.load(args.stylegan_path, map_location=lambda storage, loc: storage)
420
- generator.generator.load_state_dict(ckpt["g_ema"], strict=False)
421
- # load ModRes blocks of DualStyleGAN into the modified ModRes blocks (with dilation)
422
- generator.res.load_state_dict(generator.generator.res.state_dict(), strict=False)
423
- g_ema.generator.load_state_dict(ckpt["g_ema"], strict=False)
424
- g_ema.res.load_state_dict(g_ema.generator.res.state_dict(), strict=False)
425
- requires_grad(generator.generator, False)
426
- requires_grad(generator.res, False)
427
- requires_grad(g_ema.generator, False)
428
- requires_grad(g_ema.res, False)
429
-
430
- if not args.pretrain:
431
- generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"])
432
- # we initialize the fusion modules to map f_G \otimes f_E to f_G.
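- # scaling weights near zero and adding an identity at the 3x3 kernel center makes each fusion layer start as an approximate pass-through of the StyleGAN features.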
433
- for k in generator.fusion_out:
434
- k.conv.weight.data *= 0.01
435
- k.conv.weight[:,0:k.conv.weight.shape[0],1,1].data += torch.eye(k.conv.weight.shape[0]).cuda()
436
- for k in generator.fusion_skip:
437
- k.weight.data *= 0.01
438
- k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda()
439
-
440
- accumulate(g_ema.encoder, generator.encoder, 0)
441
- accumulate(g_ema.fusion_out, generator.fusion_out, 0)
442
- accumulate(g_ema.fusion_skip, generator.fusion_skip, 0)
443
-
444
- g_parameters = list(generator.encoder.parameters())
445
- if not args.pretrain:
446
- g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters())
447
-
448
- g_optim = optim.Adam(
449
- g_parameters,
450
- lr=args.lr,
451
- betas=(0.9, 0.99),
452
- )
453
-
454
- if args.distributed:
455
- generator = nn.parallel.DistributedDataParallel(
456
- generator,
457
- device_ids=[args.local_rank],
458
- output_device=args.local_rank,
459
- broadcast_buffers=False,
460
- find_unused_parameters=True,
461
- )
462
-
463
- parsingpredictor = BiSeNet(n_classes=19)
464
- parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage))
465
- parsingpredictor.to(device).eval()
466
- requires_grad(parsingpredictor, False)
467
-
468
- # we apply gaussian blur to the images to avoid flickers caused during downsampling
469
- down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device)
470
- requires_grad(down, False)
471
-
472
- directions = torch.tensor(np.load(args.direction_path)).to(device)
473
-
474
- # load style codes of DualStyleGAN
475
- exstyles = np.load(args.exstyle_path, allow_pickle='TRUE').item()
476
- if args.local_rank == 0 and not os.path.exists('checkpoint/%s/exstyle_code.npy'%(args.name)):
477
- np.save('checkpoint/%s/exstyle_code.npy'%(args.name), exstyles, allow_pickle=True)
478
- styles = []
479
- with torch.no_grad():
480
- for stylename in exstyles.keys():
481
- exstyle = torch.tensor(exstyles[stylename]).to(device)
482
- exstyle = g_ema.zplus2wplus(exstyle)
483
- styles += [exstyle]
484
- styles = torch.cat(styles, dim=0)
485
-
486
- if not args.pretrain:
487
- discriminator = ConditionalDiscriminator(256, use_condition=True, style_num = styles.size(0)).to(device)
488
-
489
- d_optim = optim.Adam(
490
- discriminator.parameters(),
491
- lr=args.lr,
492
- betas=(0.9, 0.99),
493
- )
494
-
495
- if args.distributed:
496
- discriminator = nn.parallel.DistributedDataParallel(
497
- discriminator,
498
- device_ids=[args.local_rank],
499
- output_device=args.local_rank,
500
- broadcast_buffers=False,
501
- find_unused_parameters=True,
502
- )
503
-
504
- percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank])
505
- requires_grad(percept.model.net, False)
506
-
507
- pspencoder = load_psp_standalone(args.style_encoder_path, device)
508
-
509
- if args.local_rank == 0:
510
- print('Models and data loaded successfully!')
511
-
512
- if args.pretrain:
513
- pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, styles, device)
514
- else:
515
- train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, styles, device)
spaces/AIGC-Audio/AudioGPT/sound_extraction/model/modules.py DELETED
@@ -1,483 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import math
5
- from .film import Film
6
-
7
- class ConvBlock(nn.Module):
8
- def __init__(self, in_channels, out_channels, kernel_size, activation, momentum):
9
- super(ConvBlock, self).__init__()
10
-
11
- self.activation = activation
12
- padding = (kernel_size[0] // 2, kernel_size[1] // 2)
13
-
14
- self.conv1 = nn.Conv2d(
15
- in_channels=in_channels,
16
- out_channels=out_channels,
17
- kernel_size=kernel_size,
18
- stride=(1, 1),
19
- dilation=(1, 1),
20
- padding=padding,
21
- bias=False,
22
- )
23
-
24
- self.bn1 = nn.BatchNorm2d(out_channels, momentum=momentum)
25
-
26
- self.conv2 = nn.Conv2d(
27
- in_channels=out_channels,
28
- out_channels=out_channels,
29
- kernel_size=kernel_size,
30
- stride=(1, 1),
31
- dilation=(1, 1),
32
- padding=padding,
33
- bias=False,
34
- )
35
-
36
- self.bn2 = nn.BatchNorm2d(out_channels, momentum=momentum)
37
-
38
- self.init_weights()
39
-
40
- def init_weights(self):
41
- init_layer(self.conv1)
42
- init_layer(self.conv2)
43
- init_bn(self.bn1)
44
- init_bn(self.bn2)
45
-
46
- def forward(self, x):
47
- x = act(self.bn1(self.conv1(x)), self.activation)
48
- x = act(self.bn2(self.conv2(x)), self.activation)
49
- return x
50
-
51
-
52
- class EncoderBlock(nn.Module):
53
- def __init__(self, in_channels, out_channels, kernel_size, downsample, activation, momentum):
54
- super(EncoderBlock, self).__init__()
55
-
56
- self.conv_block = ConvBlock(
57
- in_channels, out_channels, kernel_size, activation, momentum
58
- )
59
- self.downsample = downsample
60
-
61
- def forward(self, x):
62
- encoder = self.conv_block(x)
63
- encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
64
- return encoder_pool, encoder
65
-
66
-
67
- class DecoderBlock(nn.Module):
68
- def __init__(self, in_channels, out_channels, kernel_size, upsample, activation, momentum):
69
- super(DecoderBlock, self).__init__()
70
- self.kernel_size = kernel_size
71
- self.stride = upsample
72
- self.activation = activation
73
-
74
- self.conv1 = torch.nn.ConvTranspose2d(
75
- in_channels=in_channels,
76
- out_channels=out_channels,
77
- kernel_size=self.stride,
78
- stride=self.stride,
79
- padding=(0, 0),
80
- bias=False,
81
- dilation=(1, 1),
82
- )
83
-
84
- self.bn1 = nn.BatchNorm2d(out_channels, momentum=momentum)
85
-
86
- self.conv_block2 = ConvBlock(
87
- out_channels * 2, out_channels, kernel_size, activation, momentum
88
- )
89
-
90
- def init_weights(self):
91
- init_layer(self.conv1)
92
- init_bn(self.bn)
93
-
94
- def prune(self, x):
95
- """Prune the shape of x after transpose convolution."""
96
- padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)
97
- x = x[
98
- :,
99
- :,
100
- padding[0] : padding[0] - self.stride[0],
101
- padding[1] : padding[1] - self.stride[1]]
102
- return x
103
-
104
- def forward(self, input_tensor, concat_tensor):
105
- x = act(self.bn1(self.conv1(input_tensor)), self.activation)
106
- # from IPython import embed; embed(using=False); os._exit(0)
107
- # x = self.prune(x)
108
- x = torch.cat((x, concat_tensor), dim=1)
109
- x = self.conv_block2(x)
110
- return x
111
-
112
-
113
- class EncoderBlockRes1B(nn.Module):
114
- def __init__(self, in_channels, out_channels, downsample, activation, momentum):
115
- super(EncoderBlockRes1B, self).__init__()
116
- size = (3,3)
117
-
118
- self.conv_block1 = ConvBlockRes(in_channels, out_channels, size, activation, momentum)
119
- self.conv_block2 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
120
- self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
121
- self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
122
- self.downsample = downsample
123
-
124
- def forward(self, x):
125
- encoder = self.conv_block1(x)
126
- encoder = self.conv_block2(encoder)
127
- encoder = self.conv_block3(encoder)
128
- encoder = self.conv_block4(encoder)
129
- encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
130
- return encoder_pool, encoder
131
-
132
- class DecoderBlockRes1B(nn.Module):
133
- def __init__(self, in_channels, out_channels, stride, activation, momentum):
134
- super(DecoderBlockRes1B, self).__init__()
135
- size = (3,3)
136
- self.activation = activation
137
-
138
- self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
139
- out_channels=out_channels, kernel_size=size, stride=stride,
140
- padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
141
-
142
- self.bn1 = nn.BatchNorm2d(in_channels)
143
- self.conv_block2 = ConvBlockRes(out_channels * 2, out_channels, size, activation, momentum)
144
- self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
145
- self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
146
- self.conv_block5 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
147
-
148
- def init_weights(self):
149
- init_layer(self.conv1)
150
-
151
- def prune(self, x, both=False):
152
- """Prune the shape of x after transpose convolution.
153
- """
154
- if(both): x = x[:, :, 0 : - 1, 0:-1]
155
- else: x = x[:, :, 0: - 1, :]
156
- return x
157
-
158
- def forward(self, input_tensor, concat_tensor,both=False):
159
- x = self.conv1(F.relu_(self.bn1(input_tensor)))
160
- x = self.prune(x,both=both)
161
- x = torch.cat((x, concat_tensor), dim=1)
162
- x = self.conv_block2(x)
163
- x = self.conv_block3(x)
164
- x = self.conv_block4(x)
165
- x = self.conv_block5(x)
166
- return x
167
-
168
-
169
- class EncoderBlockRes2BCond(nn.Module):
170
- def __init__(self, in_channels, out_channels, downsample, activation, momentum, cond_embedding_dim):
171
- super(EncoderBlockRes2BCond, self).__init__()
172
- size = (3, 3)
173
-
174
- self.conv_block1 = ConvBlockResCond(in_channels, out_channels, size, activation, momentum, cond_embedding_dim)
175
- self.conv_block2 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
176
- self.downsample = downsample
177
-
178
- def forward(self, x, cond_vec):
179
- encoder = self.conv_block1(x, cond_vec)
180
- encoder = self.conv_block2(encoder, cond_vec)
181
- encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
182
- return encoder_pool, encoder
183
-
184
- class DecoderBlockRes2BCond(nn.Module):
185
- def __init__(self, in_channels, out_channels, stride, activation, momentum, cond_embedding_dim):
186
- super(DecoderBlockRes2BCond, self).__init__()
187
- size = (3, 3)
188
- self.activation = activation
189
-
190
- self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
191
- out_channels=out_channels, kernel_size=size, stride=stride,
192
- padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
193
-
194
- self.bn1 = nn.BatchNorm2d(in_channels)
195
- self.conv_block2 = ConvBlockResCond(out_channels * 2, out_channels, size, activation, momentum, cond_embedding_dim)
196
- self.conv_block3 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
197
-
198
- def init_weights(self):
199
- init_layer(self.conv1)
200
-
201
- def prune(self, x, both=False):
202
- """Prune the shape of x after transpose convolution.
203
- """
204
- if(both): x = x[:, :, 0 : - 1, 0:-1]
205
- else: x = x[:, :, 0: - 1, :]
206
- return x
207
-
208
- def forward(self, input_tensor, concat_tensor, cond_vec, both=False):
209
- x = self.conv1(F.relu_(self.bn1(input_tensor)))
210
- x = self.prune(x, both=both)
211
- x = torch.cat((x, concat_tensor), dim=1)
212
- x = self.conv_block2(x, cond_vec)
213
- x = self.conv_block3(x, cond_vec)
214
- return x
215
-
216
- class EncoderBlockRes4BCond(nn.Module):
217
- def __init__(self, in_channels, out_channels, downsample, activation, momentum, cond_embedding_dim):
218
- super(EncoderBlockRes4BCond, self).__init__()
219
- size = (3,3)
220
-
221
- self.conv_block1 = ConvBlockResCond(in_channels, out_channels, size, activation, momentum, cond_embedding_dim)
222
- self.conv_block2 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
223
- self.conv_block3 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
224
- self.conv_block4 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
225
- self.downsample = downsample
226
-
227
- def forward(self, x, cond_vec):
228
- encoder = self.conv_block1(x, cond_vec)
229
- encoder = self.conv_block2(encoder, cond_vec)
230
- encoder = self.conv_block3(encoder, cond_vec)
231
- encoder = self.conv_block4(encoder, cond_vec)
232
- encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
233
- return encoder_pool, encoder
234
-
235
- class DecoderBlockRes4BCond(nn.Module):
236
- def __init__(self, in_channels, out_channels, stride, activation, momentum, cond_embedding_dim):
237
- super(DecoderBlockRes4BCond, self).__init__()
238
- size = (3, 3)
239
- self.activation = activation
240
-
241
- self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
242
- out_channels=out_channels, kernel_size=size, stride=stride,
243
- padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
244
-
245
- self.bn1 = nn.BatchNorm2d(in_channels)
246
- self.conv_block2 = ConvBlockResCond(out_channels * 2, out_channels, size, activation, momentum, cond_embedding_dim)
247
- self.conv_block3 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
248
- self.conv_block4 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
249
- self.conv_block5 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
250
-
251
- def init_weights(self):
252
- init_layer(self.conv1)
253
-
254
- def prune(self, x, both=False):
255
- """Prune the shape of x after transpose convolution.
256
- """
257
- if(both): x = x[:, :, 0 : - 1, 0:-1]
258
- else: x = x[:, :, 0: - 1, :]
259
- return x
260
-
261
- def forward(self, input_tensor, concat_tensor, cond_vec, both=False):
262
- x = self.conv1(F.relu_(self.bn1(input_tensor)))
263
- x = self.prune(x,both=both)
264
- x = torch.cat((x, concat_tensor), dim=1)
265
- x = self.conv_block2(x, cond_vec)
266
- x = self.conv_block3(x, cond_vec)
267
- x = self.conv_block4(x, cond_vec)
268
- x = self.conv_block5(x, cond_vec)
269
- return x
270
-
271
- class EncoderBlockRes4B(nn.Module):
272
- def __init__(self, in_channels, out_channels, downsample, activation, momentum):
273
- super(EncoderBlockRes4B, self).__init__()
274
- size = (3, 3)
275
-
276
- self.conv_block1 = ConvBlockRes(in_channels, out_channels, size, activation, momentum)
277
- self.conv_block2 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
278
- self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
279
- self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
280
- self.downsample = downsample
281
-
282
- def forward(self, x):
283
- encoder = self.conv_block1(x)
284
- encoder = self.conv_block2(encoder)
285
- encoder = self.conv_block3(encoder)
286
- encoder = self.conv_block4(encoder)
287
- encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
288
- return encoder_pool, encoder
289
-
290
- class DecoderBlockRes4B(nn.Module):
291
- def __init__(self, in_channels, out_channels, stride, activation, momentum):
292
- super(DecoderBlockRes4B, self).__init__()
293
- size = (3,3)
294
- self.activation = activation
295
-
296
- self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
297
- out_channels=out_channels, kernel_size=size, stride=stride,
298
- padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
299
-
300
- self.bn1 = nn.BatchNorm2d(in_channels)
301
- self.conv_block2 = ConvBlockRes(out_channels * 2, out_channels, size, activation, momentum)
302
- self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
303
- self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
304
- self.conv_block5 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
305
-
306
- def init_weights(self):
307
- init_layer(self.conv1)
308
-
309
- def prune(self, x, both=False):
310
- """Prune the shape of x after transpose convolution.
311
- """
312
- if(both): x = x[:, :, 0 : - 1, 0:-1]
313
- else: x = x[:, :, 0: - 1, :]
314
- return x
315
-
316
-     def forward(self, input_tensor, concat_tensor, both=False):
-         x = self.conv1(F.relu_(self.bn1(input_tensor)))
-         x = self.prune(x, both=both)
-         x = torch.cat((x, concat_tensor), dim=1)
-         x = self.conv_block2(x)
-         x = self.conv_block3(x)
-         x = self.conv_block4(x)
-         x = self.conv_block5(x)
-         return x
-
-
- class ConvBlockResCond(nn.Module):
-     def __init__(self, in_channels, out_channels, kernel_size, activation, momentum, cond_embedding_dim):
-         r"""Residual block."""
-         super(ConvBlockResCond, self).__init__()
-
-         self.activation = activation
-         padding = [kernel_size[0] // 2, kernel_size[1] // 2]
-
-         self.bn1 = nn.BatchNorm2d(in_channels)
-         self.bn2 = nn.BatchNorm2d(out_channels)
-
-         self.conv1 = nn.Conv2d(in_channels=in_channels,
-                                out_channels=out_channels,
-                                kernel_size=kernel_size, stride=(1, 1),
-                                dilation=(1, 1), padding=padding, bias=False)
-         self.film1 = Film(channels=out_channels, cond_embedding_dim=cond_embedding_dim)
-         self.conv2 = nn.Conv2d(in_channels=out_channels,
-                                out_channels=out_channels,
-                                kernel_size=kernel_size, stride=(1, 1),
-                                dilation=(1, 1), padding=padding, bias=False)
-         self.film2 = Film(channels=out_channels, cond_embedding_dim=cond_embedding_dim)
-
-         if in_channels != out_channels:
-             self.shortcut = nn.Conv2d(in_channels=in_channels,
-                                       out_channels=out_channels, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))
-             self.film_res = Film(channels=out_channels, cond_embedding_dim=cond_embedding_dim)
-             self.is_shortcut = True
-         else:
-             self.is_shortcut = False
-
-         self.init_weights()
-
-     def init_weights(self):
-         init_bn(self.bn1)
-         init_bn(self.bn2)
-         init_layer(self.conv1)
-         init_layer(self.conv2)
-
-         if self.is_shortcut:
-             init_layer(self.shortcut)
-
-     def forward(self, x, cond_vec):
-         origin = x
-         x = self.conv1(F.leaky_relu_(self.bn1(x), negative_slope=0.01))
-         x = self.film1(x, cond_vec)
-         x = self.conv2(F.leaky_relu_(self.bn2(x), negative_slope=0.01))
-         x = self.film2(x, cond_vec)
-         if self.is_shortcut:
-             residual = self.shortcut(origin)
-             residual = self.film_res(residual, cond_vec)
-             return residual + x
-         else:
-             return origin + x
-
-
- class ConvBlockRes(nn.Module):
-     def __init__(self, in_channels, out_channels, kernel_size, activation, momentum):
-         r"""Residual block."""
-         super(ConvBlockRes, self).__init__()
-
-         self.activation = activation
-         padding = [kernel_size[0] // 2, kernel_size[1] // 2]
-
-         self.bn1 = nn.BatchNorm2d(in_channels)
-         self.bn2 = nn.BatchNorm2d(out_channels)
-
-         self.conv1 = nn.Conv2d(in_channels=in_channels,
-                                out_channels=out_channels,
-                                kernel_size=kernel_size, stride=(1, 1),
-                                dilation=(1, 1), padding=padding, bias=False)
-
-         self.conv2 = nn.Conv2d(in_channels=out_channels,
-                                out_channels=out_channels,
-                                kernel_size=kernel_size, stride=(1, 1),
-                                dilation=(1, 1), padding=padding, bias=False)
-
-         if in_channels != out_channels:
-             self.shortcut = nn.Conv2d(in_channels=in_channels,
-                                       out_channels=out_channels, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))
-             self.is_shortcut = True
-         else:
-             self.is_shortcut = False
-
-         self.init_weights()
-
-     def init_weights(self):
-         init_bn(self.bn1)
-         init_bn(self.bn2)
-         init_layer(self.conv1)
-         init_layer(self.conv2)
-
-         if self.is_shortcut:
-             init_layer(self.shortcut)
-
-     def forward(self, x):
-         origin = x
-         x = self.conv1(F.leaky_relu_(self.bn1(x), negative_slope=0.01))
-         x = self.conv2(F.leaky_relu_(self.bn2(x), negative_slope=0.01))
-
-         if self.is_shortcut:
-             return self.shortcut(origin) + x
-         else:
-             return origin + x
-
-
- def init_layer(layer):
-     """Initialize a Linear or Convolutional layer."""
-     nn.init.xavier_uniform_(layer.weight)
-
-     if hasattr(layer, 'bias'):
-         if layer.bias is not None:
-             layer.bias.data.fill_(0.)
-
-
- def init_bn(bn):
-     """Initialize a Batchnorm layer."""
-     bn.bias.data.fill_(0.)
-     bn.weight.data.fill_(1.)
-
-
- def init_gru(rnn):
-     """Initialize a GRU layer."""
-
-     def _concat_init(tensor, init_funcs):
-         (length, fan_out) = tensor.shape
-         fan_in = length // len(init_funcs)
-
-         for (i, init_func) in enumerate(init_funcs):
-             init_func(tensor[i * fan_in: (i + 1) * fan_in, :])
-
-     def _inner_uniform(tensor):
-         fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')
-         nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))
-
-     for i in range(rnn.num_layers):
-         _concat_init(
-             getattr(rnn, 'weight_ih_l{}'.format(i)),
-             [_inner_uniform, _inner_uniform, _inner_uniform]
-         )
-         torch.nn.init.constant_(getattr(rnn, 'bias_ih_l{}'.format(i)), 0)
-
-         _concat_init(
-             getattr(rnn, 'weight_hh_l{}'.format(i)),
-             [_inner_uniform, _inner_uniform, nn.init.orthogonal_]
-         )
-         torch.nn.init.constant_(getattr(rnn, 'bias_hh_l{}'.format(i)), 0)
-
-
- def act(x, activation):
-     if activation == 'relu':
-         return F.relu_(x)
-     elif activation == 'leaky_relu':
-         return F.leaky_relu_(x, negative_slope=0.2)
-     elif activation == 'swish':
-         return x * torch.sigmoid(x)
-     else:
-         raise Exception('Incorrect activation!')
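
Note: the residual blocks above follow the pre-activation pattern (BN -> leaky ReLU -> conv), with a 1x1 shortcut conv only when the channel count changes. A minimal shape check, assuming the definitions above are importable and torch is installed; the sizes are illustrative, not taken from this repo:

import torch

block = ConvBlockRes(in_channels=32, out_channels=64,
                     kernel_size=(3, 3), activation='leaky_relu', momentum=0.01)
x = torch.randn(2, 32, 128, 128)   # (batch, channels, freq_bins, time_steps) -- made-up sizes
y = block(x)                       # the 1x1 shortcut reconciles the 32 -> 64 channel change
assert y.shape == (2, 64, 128, 128)
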
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/attention.py DELETED
@@ -1,261 +0,0 @@
- from inspect import isfunction
- import math
- import torch
- import torch.nn.functional as F
- from torch import nn, einsum
- from einops import rearrange, repeat
-
- from ldm.modules.diffusionmodules.util import checkpoint
-
-
- def exists(val):
-     return val is not None
-
-
- def uniq(arr):
-     return {el: True for el in arr}.keys()
-
-
- def default(val, d):
-     if exists(val):
-         return val
-     return d() if isfunction(d) else d
-
-
- def max_neg_value(t):
-     return -torch.finfo(t.dtype).max
-
-
- def init_(tensor):
-     dim = tensor.shape[-1]
-     std = 1 / math.sqrt(dim)
-     tensor.uniform_(-std, std)
-     return tensor
-
-
- # feedforward
- class GEGLU(nn.Module):
-     def __init__(self, dim_in, dim_out):
-         super().__init__()
-         self.proj = nn.Linear(dim_in, dim_out * 2)
-
-     def forward(self, x):
-         x, gate = self.proj(x).chunk(2, dim=-1)
-         return x * F.gelu(gate)
-
-
- class FeedForward(nn.Module):
-     def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
-         super().__init__()
-         inner_dim = int(dim * mult)
-         dim_out = default(dim_out, dim)
-         project_in = nn.Sequential(
-             nn.Linear(dim, inner_dim),
-             nn.GELU()
-         ) if not glu else GEGLU(dim, inner_dim)
-
-         self.net = nn.Sequential(
-             project_in,
-             nn.Dropout(dropout),
-             nn.Linear(inner_dim, dim_out)
-         )
-
-     def forward(self, x):
-         return self.net(x)
-
-
- def zero_module(module):
-     """
-     Zero out the parameters of a module and return it.
-     """
-     for p in module.parameters():
-         p.detach().zero_()
-     return module
-
-
- def Normalize(in_channels):
-     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
-
- class LinearAttention(nn.Module):
-     def __init__(self, dim, heads=4, dim_head=32):
-         super().__init__()
-         self.heads = heads
-         hidden_dim = dim_head * heads
-         self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
-         self.to_out = nn.Conv2d(hidden_dim, dim, 1)
-
-     def forward(self, x):
-         b, c, h, w = x.shape
-         qkv = self.to_qkv(x)
-         q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
-         k = k.softmax(dim=-1)
-         context = torch.einsum('bhdn,bhen->bhde', k, v)
-         out = torch.einsum('bhde,bhdn->bhen', context, q)
-         out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
-         return self.to_out(out)
-
-
- class SpatialSelfAttention(nn.Module):
-     def __init__(self, in_channels):
-         super().__init__()
-         self.in_channels = in_channels
-
-         self.norm = Normalize(in_channels)
-         self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-         self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-         self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-         self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-
-     def forward(self, x):
-         h_ = x
-         h_ = self.norm(h_)
-         q = self.q(h_)
-         k = self.k(h_)
-         v = self.v(h_)
-
-         # compute attention
-         b, c, h, w = q.shape
-         q = rearrange(q, 'b c h w -> b (h w) c')
-         k = rearrange(k, 'b c h w -> b c (h w)')
-         w_ = torch.einsum('bij,bjk->bik', q, k)
-
-         w_ = w_ * (int(c) ** (-0.5))
-         w_ = torch.nn.functional.softmax(w_, dim=2)
-
-         # attend to values
-         v = rearrange(v, 'b c h w -> b c (h w)')
-         w_ = rearrange(w_, 'b i j -> b j i')
-         h_ = torch.einsum('bij,bjk->bik', v, w_)
-         h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
-         h_ = self.proj_out(h_)
-
-         return x + h_
-
-
- class CrossAttention(nn.Module):
-     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):  # if context_dim is set, this is cross- rather than self-attention
-         super().__init__()
-         inner_dim = dim_head * heads  # inner_dim == SpatialTransformer.model_channels
-         context_dim = default(context_dim, query_dim)
-
-         self.scale = dim_head ** -0.5
-         self.heads = heads
-
-         self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
-         self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
-         self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
-
-         self.to_out = nn.Sequential(
-             nn.Linear(inner_dim, query_dim),
-             nn.Dropout(dropout)
-         )
-
-     def forward(self, x, context=None, mask=None):  # x: (b, h*w, c), context: (b, seq_len, context_dim)
-         h = self.heads
-
-         q = self.to_q(x)  # q: (b, h*w, inner_dim)
-         context = default(context, x)
-         k = self.to_k(context)  # (b, seq_len, inner_dim)
-         v = self.to_v(context)  # (b, seq_len, inner_dim)
-
-         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))  # n is seq_len for k and v
-
-         sim = einsum('b i d, b j d -> b i j', q, k) * self.scale  # (b*head, h*w, seq_len)
-
-         if exists(mask):
-             mask = rearrange(mask, 'b ... -> b (...)')
-             max_neg_value = -torch.finfo(sim.dtype).max
-             mask = repeat(mask, 'b j -> (b h) () j', h=h)
-             sim.masked_fill_(~mask, max_neg_value)
-
-         # attention, what we cannot get enough of
-         attn = sim.softmax(dim=-1)
-
-         out = einsum('b i j, b j d -> b i d', attn, v)  # (b*head, h*w, inner_dim/head)
-         out = rearrange(out, '(b h) n d -> b n (h d)', h=h)  # (b, h*w, inner_dim)
-         return self.to_out(out)
-
-
- class BasicTransformerBlock(nn.Module):
-     def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True):
-         super().__init__()
-         self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)  # is a self-attention
-         self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
-         self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
-                                     heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
-         self.norm1 = nn.LayerNorm(dim)
-         self.norm2 = nn.LayerNorm(dim)
-         self.norm3 = nn.LayerNorm(dim)
-         self.checkpoint = checkpoint
-
-     def forward(self, x, context=None):
-         return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
-
-     def _forward(self, x, context=None):
-         x = self.attn1(self.norm1(x)) + x
-         x = self.attn2(self.norm2(x), context=context) + x
-         x = self.ff(self.norm3(x)) + x
-         return x
-
-
- class SpatialTransformer(nn.Module):
-     """
-     Transformer block for image-like data.
-     First, project the input (aka embedding) and reshape to b, t, d.
-     Then apply standard transformer action.
-     Finally, reshape back to an image.
-     """
-     def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None):
-         super().__init__()
-         self.in_channels = in_channels
-         inner_dim = n_heads * d_head
-         self.norm = Normalize(in_channels)
-
-         self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
-
-         self.transformer_blocks = nn.ModuleList(
-             [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
-              for d in range(depth)]
-         )
-
-         self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))
-
-     def forward(self, x, context=None):
-         # note: if no context is given, cross-attention defaults to self-attention
-         b, c, h, w = x.shape  # e.g. [2, 320, 10, 106]
-         x_in = x
-         x = self.norm(x)  # group norm
-         x = self.proj_in(x)  # no shape change
-         x = rearrange(x, 'b c h w -> b (h w) c')
-         for block in self.transformer_blocks:
-             x = block(x, context=context)  # context shape: [b, seq_len=77, context_dim]
-         x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
-         x = self.proj_out(x)
-         return x + x_in
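
Note: CrossAttention degrades to self-attention when no context is given (context_dim defaults to query_dim). A quick shape walk-through, assuming the class above plus torch; all dimensions are made up for illustration:

import torch

attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
x = torch.randn(2, 1060, 320)   # (batch, h*w flattened tokens, query_dim)
ctx = torch.randn(2, 77, 768)   # (batch, seq_len, context_dim), e.g. text embeddings
out = attn(x, context=ctx)      # q from x; k, v from ctx; softmax over seq_len
assert out.shape == x.shape     # to_out projects inner_dim back to query_dim
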
spaces/AJRFan/dreambooth-training/app.py DELETED
@@ -1,340 +0,0 @@
- import gradio as gr
- import os
- from pathlib import Path
- import argparse
- import shutil
- from train_dreambooth import run_training
- from convertosd import convert
- from PIL import Image
- from slugify import slugify
- import requests
- import torch
- import zipfile
- from diffusers import StableDiffusionPipeline
-
- css = '''
- .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
- .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
- #component-4, #component-3, #component-10{min-height: 0}
- '''
- model_to_load = "multimodalart/sd-fine-tunable"
- maximum_concepts = 3
- # Pre-download the files even if we don't use them here
- StableDiffusionPipeline.from_pretrained(model_to_load)
-
- def zipdir(path, ziph):
-     # ziph is a zipfile handle
-     for root, dirs, files in os.walk(path):
-         for file in files:
-             ziph.write(os.path.join(root, file),
-                        os.path.relpath(os.path.join(root, file),
-                                        os.path.join(path, '..')))
-
- def swap_text(option):
-     mandatory_liability = "You must have the right to do so and you are liable for the images you use, example:"
-     if option == "object":
-         instance_prompt_example = "cttoy"
-         freeze_for = 50
-         return [f"You are going to train `object`(s), upload 5-10 images of each object you are planning on training on from different angles/perspectives. {mandatory_liability}:", '''<img src="file/cat-toy.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to 512x512.", freeze_for]
-     elif option == "person":
-         instance_prompt_example = "julcto"
-         freeze_for = 100
-         return [f"You are going to train a `person`(s), upload 10-20 images of each person you are planning on training on from different angles/perspectives. {mandatory_liability}:", '''<img src="file/person.png" />''', f"You should name the files with a unique word that represents your concept (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to 512x512.", freeze_for]
-     elif option == "style":
-         instance_prompt_example = "trsldamrl"
-         freeze_for = 10
-         return [f"You are going to train a `style`, upload 10-20 images of the style you are planning on training on. Name the files with the words you would like {mandatory_liability}:", '''<img src="file/trsl_style.png" />''', f"You should name your files with a unique word that represents your concept (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to 512x512.", freeze_for]
-
- def count_files(*inputs):
-     file_counter = 0
-     concept_counter = 0
-     for i, input in enumerate(inputs):
-         if i < maximum_concepts - 1:
-             files = inputs[i]
-             if files:
-                 concept_counter += 1
-                 file_counter += len(files)
-     uses_custom = inputs[-1]
-     type_of_thing = inputs[-4]
-     if uses_custom:
-         Training_Steps = int(inputs[-3])
-     else:
-         if type_of_thing == "person":
-             Training_Steps = file_counter * 200 * 2
-         else:
-             Training_Steps = file_counter * 200
-     return gr.update(visible=True, value=f"You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. This should take around {round(Training_Steps/1.5, 2)} seconds, or {round((Training_Steps/1.5)/3600, 2)} hours. As a reminder, the T4 GPU costs US$0.60 for 1h. Once training is over, don't forget to swap the hardware back to CPU.")
-
- def train(*inputs):
-     if "IS_SHARED_UI" in os.environ:
-         raise gr.Error("This Space only works in duplicated instances")
-     if os.path.exists("output_model"): shutil.rmtree('output_model')
-     if os.path.exists("instance_images"): shutil.rmtree('instance_images')
-     if os.path.exists("diffusers_model.zip"): os.remove("diffusers_model.zip")
-     if os.path.exists("model.ckpt"): os.remove("model.ckpt")
-     file_counter = 0
-     for i, input in enumerate(inputs):
-         if i < maximum_concepts - 1:
-             if input:
-                 os.makedirs('instance_images', exist_ok=True)
-                 files = inputs[i + (maximum_concepts * 2)]
-                 prompt = inputs[i + maximum_concepts]
-                 if prompt == "" or prompt is None:
-                     raise gr.Error("You forgot to define your concept prompt")
-                 for j, file_temp in enumerate(files):
-                     file = Image.open(file_temp.name)
-                     width, height = file.size
-                     side_length = min(width, height)
-                     left = (width - side_length) / 2
-                     top = (height - side_length) / 2
-                     right = (width + side_length) / 2
-                     bottom = (height + side_length) / 2
-                     image = file.crop((left, top, right, bottom))
-                     image = image.resize((512, 512))
-                     extension = file_temp.name.split(".")[1]
-                     image = image.convert('RGB')
-                     image.save(f'instance_images/{prompt}_({j+1}).jpg', format="JPEG", quality=100)
-                     file_counter += 1
-
-     os.makedirs('output_model', exist_ok=True)
-     uses_custom = inputs[-1]
-     type_of_thing = inputs[-4]
-     if uses_custom:
-         Training_Steps = int(inputs[-3])
-         Train_text_encoder_for = int(inputs[-2])
-     else:
-         Training_Steps = file_counter * 200
-         if type_of_thing == "object":
-             Train_text_encoder_for = 30
-         elif type_of_thing == "person":
-             Train_text_encoder_for = 60
-         elif type_of_thing == "style":
-             Train_text_encoder_for = 15
-
-     class_data_dir = None
-     stptxt = int((Training_Steps * Train_text_encoder_for) / 100)
-     args_general = argparse.Namespace(
-         image_captions_filename=True,
-         train_text_encoder=True,
-         stop_text_encoder_training=stptxt,
-         save_n_steps=0,
-         pretrained_model_name_or_path=model_to_load,
-         instance_data_dir="instance_images",
-         class_data_dir=class_data_dir,
-         output_dir="output_model",
-         instance_prompt="",
-         seed=42,
-         resolution=512,
-         mixed_precision="fp16",
-         train_batch_size=1,
-         gradient_accumulation_steps=1,
-         use_8bit_adam=True,
-         learning_rate=2e-6,
-         lr_scheduler="polynomial",
-         lr_warmup_steps=0,
-         max_train_steps=Training_Steps,
-     )
-     run_training(args_general)
-     torch.cuda.empty_cache()
-     # convert("output_model", "model.ckpt")
-     # shutil.rmtree('instance_images')
-     # shutil.make_archive("diffusers_model", 'zip', "output_model")
-     with zipfile.ZipFile('diffusers_model.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
-         zipdir('output_model/', zipf)
-     torch.cuda.empty_cache()
-     return [gr.update(visible=True, value=["diffusers_model.zip"]), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)]
-
- def generate(prompt):
-     from diffusers import StableDiffusionPipeline
-
-     pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
-     pipe = pipe.to("cuda")
-     image = pipe(prompt).images[0]
-     return image
-
- def push(model_name, where_to_upload, hf_token):
-     if not os.path.exists("model.ckpt"):
-         convert("output_model", "model.ckpt")
-     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
-     from huggingface_hub import create_repo
-     api = HfApi()
-     model_name_slug = slugify(model_name)
-     if where_to_upload == "My personal profile":
-         your_username = api.whoami(token=hf_token)["name"]
-         model_id = f"{your_username}/{model_name_slug}"
-     else:
-         model_id = f"sd-dreambooth-library/{model_name_slug}"
-         headers = {"Authorization": f"Bearer {hf_token}", "Content-Type": "application/json"}
-         response = requests.post("https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", headers=headers)
-
-     images_upload = os.listdir("instance_images")
-     image_string = ""
-     instance_prompt_list = []
-     previous_instance_prompt = ''
-     for i, image in enumerate(images_upload):
-         instance_prompt = image.split("_")[0]
-         if instance_prompt != previous_instance_prompt:
-             title_instance_prompt_string = instance_prompt
-             instance_prompt_list.append(instance_prompt)
-         else:
-             title_instance_prompt_string = ''
-         previous_instance_prompt = instance_prompt
-         image_string = f'''{title_instance_prompt_string}
- {image_string}![{instance_prompt} {i}](https://huggingface.co/{model_id}/resolve/main/sample_images/{image})'''
-     readme_text = f'''---
- license: creativeml-openrail-m
- tags:
- - text-to-image
- ---
- ### {model_name} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training)
-
- You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
-
- Sample pictures of this concept:
- {image_string}
- '''
-     # Save the readme to a file
-     readme_file = open("README.md", "w")
-     readme_file.write(readme_text)
-     readme_file.close()
-     # Save the token identifier to a file
-     text_file = open("token_identifier.txt", "w")
-     text_file.write(', '.join(instance_prompt_list))
-     text_file.close()
-     create_repo(model_id, private=True, token=hf_token)
-     operations = [
-         CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"),
-         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
-         CommitOperationAdd(path_in_repo="model.ckpt", path_or_fileobj="model.ckpt")
-     ]
-     api.create_commit(
-         repo_id=model_id,
-         operations=operations,
-         commit_message=f"Upload the model {model_name}",
-         token=hf_token
-     )
-     api.upload_folder(
-         folder_path="output_model",
-         repo_id=model_id,
-         token=hf_token
-     )
-     api.upload_folder(
-         folder_path="instance_images",
-         path_in_repo="concept_images",
-         repo_id=model_id,
-         token=hf_token
-     )
-     return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.zip", "model.ckpt"])]
-
- def convert_to_ckpt():
-     convert("output_model", "model.ckpt")
-     return gr.update(visible=True, value=["diffusers_model.zip", "model.ckpt"])
-
- with gr.Blocks(css=css) as demo:
-     with gr.Box():
-         if "IS_SHARED_UI" in os.environ:
-             gr.HTML('''
-             <div class="gr-prose" style="max-width: 80%">
-             <h2>Attention - This Space doesn't work in this shared UI</h2>
-             <p>For it to work, you have to duplicate the Space and run it on your own profile where a (paid) private GPU will be attributed to it during runtime. As each T4 costs US$0.60/h, it should cost < US$1 to train a model with less than 100 images on default settings!</p>
-             <img class="instruction" src="file/duplicate.png">
-             <img class="arrow" src="file/arrow.png" />
-             </div>
-             ''')
-         else:
-             gr.HTML('''
-             <div class="gr-prose" style="max-width: 80%">
-             <h2>You have successfully cloned the Dreambooth Training Space</h2>
-             <p>If you haven't already, attribute a T4 GPU to it (via the Settings tab) and run the training below. You will be billed by the minute from when you activate the GPU until when you turn it off.</p>
-             </div>
-             ''')
-     gr.Markdown("# Dreambooth training")
-     gr.Markdown("Customize Stable Diffusion by giving it a few example images")
-     with gr.Row():
-         type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True)
-
-     with gr.Row():
-         with gr.Column():
-             thing_description = gr.Markdown("You are going to train an `object`, upload 5-10 images of the object you are planning on training on from different angles/perspectives. You must have the right to do so and you are liable for the images you use, example:")
-             thing_image_example = gr.HTML('''<img src="file/cat-toy.png" />''')
-             things_naming = gr.Markdown("You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `cttoy` here). Images will be automatically cropped to 512x512.")
-         with gr.Column():
-             file_collection = []
-             concept_collection = []
-             buttons_collection = []
-             delete_collection = []
-             is_visible = []
-
-             row = [None] * maximum_concepts
-             for x in range(maximum_concepts):
-                 ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
-                 if x == 0:
-                     visible = True
-                     is_visible.append(gr.State(value=True))
-                 else:
-                     visible = False
-                     is_visible.append(gr.State(value=False))
-
-                 file_collection.append(gr.File(label=f"Upload the images for your {ordinal(x+1)} concept", file_count="multiple", interactive=True, visible=visible))
-                 with gr.Column(visible=visible) as row[x]:
-                     concept_collection.append(gr.Textbox(label=f"{ordinal(x+1)} concept prompt - use a unique, made up word to avoid collisions"))
-                     with gr.Row():
-                         if x < maximum_concepts - 1:
-                             buttons_collection.append(gr.Button(value="Add +1 concept", visible=visible))
-                         if x > 0:
-                             delete_collection.append(gr.Button(value=f"Delete {ordinal(x+1)} concept"))
-
-             counter_add = 1
-             for button in buttons_collection:
-                 if counter_add < len(buttons_collection):
-                     button.click(lambda:
-                                  [gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), True, None],
-                                  None,
-                                  [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], buttons_collection[counter_add], is_visible[counter_add], file_collection[counter_add]], queue=False)
-                 else:
-                     button.click(lambda: [gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), True], None, [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], is_visible[counter_add]], queue=False)
-                 counter_add += 1
-
-             counter_delete = 1
-             for delete_button in delete_collection:
-                 if counter_delete < len(delete_collection) + 1:
-                     delete_button.click(lambda: [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), False], None, [file_collection[counter_delete], row[counter_delete], buttons_collection[counter_delete-1], is_visible[counter_delete]], queue=False)
-                 counter_delete += 1
-
-     with gr.Accordion("Custom Settings", open=False):
-         swap_auto_calculated = gr.Checkbox(label="Use custom settings")
-         gr.Markdown("If not checked, the number of steps and % of frozen encoder will be tuned automatically according to the amount of images you upload and whether you are training an `object`, `person` or `style`: the number of steps is the number of images uploaded multiplied by 200, and the text-encoder is frozen after 15% of the steps for a style, 30% for an object, and 60% for a person.")
-         steps = gr.Number(label="How many steps", value=800)
-         perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)
-
-     type_of_thing.change(fn=swap_text, inputs=[type_of_thing], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder], queue=False)
-     training_summary = gr.Textbox("", visible=False, label="Training Summary")
-     steps.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
-     perc_txt_encoder.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
-     for file in file_collection:
-         file.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
-     train_btn = gr.Button("Start Training")
-     with gr.Box(visible=False) as try_your_model:
-         gr.Markdown("## Try your model")
-         with gr.Row():
-             prompt = gr.Textbox(label="Type your prompt")
-             result_image = gr.Image()
-         generate_button = gr.Button("Generate Image")
-     with gr.Box(visible=False) as push_to_hub:
-         gr.Markdown("## Push to Hugging Face Hub")
-         model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style")
-         where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to")
-         gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.")
-         hf_token = gr.Textbox(label="Hugging Face Write Token")
-         push_button = gr.Button("Push to the Hub")
-     result = gr.File(label="Download the uploaded models in the diffusers format", visible=True)
-     success_message_upload = gr.Markdown(visible=False)
-     convert_button = gr.Button("Convert to CKPT", visible=False)
-
-     train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button])
-     generate_button.click(fn=generate, inputs=prompt, outputs=result_image)
-     push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token], outputs=[success_message_upload, result])
-     convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result)
- demo.launch()
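
Note: the per-image preprocessing in train() above is a square center-crop followed by a 512x512 resize. The same logic as a stand-alone helper, assuming only Pillow is installed; the input path is hypothetical:

from PIL import Image

def center_crop_512(path):
    img = Image.open(path).convert('RGB')
    side = min(img.size)                   # largest centered square
    left = (img.width - side) / 2
    top = (img.height - side) / 2
    img = img.crop((left, top, left + side, top + side))
    return img.resize((512, 512))

# square = center_crop_512("cat-toy.png")  # hypothetical input file
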
spaces/AONYLMR/anime-remove-background/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Anime Remove Background
- emoji: 🪄🖼️
- colorFrom: indigo
- colorTo: pink
- sdk: gradio
- sdk_version: 3.1.4
- app_file: app.py
- pinned: false
- license: apache-2.0
- duplicated_from: skytnt/anime-remove-background
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/mobilenet-v2_4xb32_2000e_3c_noF/mobilenet-v2_1xb32_300e_3c_noF.py DELETED
@@ -1,140 +0,0 @@
- model = dict(
-     type='ImageClassifier',
-     backbone=dict(type='MobileNetV2', widen_factor=1.0),
-     neck=dict(type='GlobalAveragePooling'),
-     head=dict(
-         type='LinearClsHead',
-         num_classes=7,
-         in_channels=1280,
-         loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
-         topk=(1, 3)))
- dataset_type = 'CustomDataset'
- data_preprocessor = dict(
-     num_classes=7,
-     mean=[123.675, 116.28, 103.53],
-     std=[58.395, 57.12, 57.375],
-     to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='RandomResizedCrop', scale=224, backend='pillow'),
-     dict(type='RandomFlip', prob=0.5, direction='horizontal'),
-     dict(type='PackInputs'),
- ]
- val_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
-     dict(type='CenterCrop', crop_size=224),
-     dict(type='PackInputs'),
- ]
- train_dataloader = dict(
-     pin_memory=True,
-     persistent_workers=True,
-     collate_fn=dict(type='default_collate'),
-     batch_size=32,
-     num_workers=5,
-     dataset=dict(
-         type='CustomDataset',
-         data_root='data',
-         with_label=True,
-         ann_file='',
-         data_prefix='train',
-         pipeline=[
-             dict(type='LoadImageFromFile'),
-             dict(type='RandomResizedCrop', scale=224, backend='pillow'),
-             dict(type='RandomFlip', prob=0.5, direction='horizontal'),
-             dict(type='PackInputs'),
-         ]),
-     sampler=dict(type='DefaultSampler', shuffle=True))
- val_dataloader = dict(
-     pin_memory=True,
-     persistent_workers=True,
-     collate_fn=dict(type='default_collate'),
-     batch_size=32,
-     num_workers=5,
-     dataset=dict(
-         type='CustomDataset',
-         data_root='data',
-         with_label=True,
-         ann_file='',
-         data_prefix='val',
-         pipeline=[
-             dict(type='LoadImageFromFile'),
-             dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
-             dict(type='CenterCrop', crop_size=224),
-             dict(type='PackInputs'),
-         ]),
-     sampler=dict(type='DefaultSampler', shuffle=False))
- val_evaluator = dict(type='Accuracy', topk=(1, 3))
- test_dataloader = dict(
-     pin_memory=True,
-     persistent_workers=True,
-     collate_fn=dict(type='default_collate'),
-     batch_size=32,
-     num_workers=5,
-     dataset=dict(
-         type='CustomDataset',
-         data_root='data',
-         with_label=True,
-         ann_file='',
-         data_prefix='val',
-         pipeline=[
-             dict(type='LoadImageFromFile'),
-             dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'),
-             dict(type='CenterCrop', crop_size=224),
-             dict(type='PackInputs'),
-         ]),
-     sampler=dict(type='DefaultSampler', shuffle=False))
- test_evaluator = dict(type='Accuracy', topk=(1, 3))
- optim_wrapper = dict(
-     optimizer=dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=4e-05))
- param_scheduler = dict(type='StepLR', by_epoch=True, step_size=10, gamma=0.98)
- train_cfg = dict(by_epoch=True, max_epochs=2000, val_interval=10)
- val_cfg = dict()
- test_cfg = dict()
- auto_scale_lr = dict(base_batch_size=256)
- default_scope = 'mmpretrain'
- default_hooks = dict(
-     timer=dict(type='IterTimerHook'),
-     logger=dict(type='LoggerHook', interval=10),
-     param_scheduler=dict(type='ParamSchedulerHook'),
-     checkpoint=dict(type='CheckpointHook', save_best='auto', interval=10),
-     sampler_seed=dict(type='DistSamplerSeedHook'),
-     visualization=dict(type='VisualizationHook', enable=False))
- env_cfg = dict(
-     cudnn_benchmark=False,
-     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
-     dist_cfg=dict(backend='nccl'))
- vis_backends = [
-     dict(type='LocalVisBackend'),
- ]
- visualizer = dict(
-     type='UniversalVisualizer',
-     vis_backends=[
-         dict(type='LocalVisBackend'),
-         dict(type='WandbVisBackend'),
-     ])
- log_level = 'INFO'
- load_from = None
- resume = False
- randomness = dict(seed=None, deterministic=False)
- launcher = 'pytorch'
- work_dir = 'work_dirs/mobilenet-v2_4xb32_2000e_3c_noF'
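
Note: auto_scale_lr declares a base batch size of 256 with an SGD lr of 0.045. If the run is launched with automatic LR scaling enabled, the linear rule rescales the lr by the ratio of actual to base batch; a back-of-the-envelope check, assuming 4 GPUs x batch 32 as suggested by the work_dir name:

base_lr = 0.045
base_batch_size = 256        # from auto_scale_lr above
actual_batch = 4 * 32        # assumption: 4 GPUs, batch_size=32 per GPU
scaled_lr = base_lr * actual_batch / base_batch_size
print(scaled_lr)             # -> 0.0225
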
spaces/AchyuthGamer/Free-Accounts-Generator/fortnite/js/d140ouchebag.js DELETED
@@ -1,65 +0,0 @@
- var NumberOfWords = 45
- var words = new BuildArray(NumberOfWords)
-
- // Use the following variables to
- // define your random words:
- words[1] = "https://cuty.io/NVohC0B"
- words[2] = "https://cuty.io/ApmaP7LV"
- words[3] = "https://cuty.io/fnacc3"
- words[4] = "https://cuty.io/5xDANQ5"
- words[5] = "https://cuty.io/fnacc5"
- words[6] = "https://cuty.io/qgeg"
- words[7] = "https://cuty.io/fnacc7"
- words[8] = "https://cuty.io/fnacc8"
- words[9] = "https://cuty.io/fnacc9"
- words[10] = "https://cuty.io/utMTVJooF"
- words[11] = "https://cuty.io/b39f"
- words[12] = "https://cuty.io/fnacc12"
- words[13] = "https://cuty.io/szZEPhy78v"
- words[14] = "https://cuty.io/fnacc14"
- words[15] = "https://cuty.io/eUaQe"
- words[16] = "https://cuty.io/VRUGIe"
- words[17] = "https://cuty.io/l6wa"
- words[18] = "https://cuty.io/WnlwopvX"
- words[19] = "https://cuty.io/sHMps1"
- words[20] = "https://cuty.io/j0Am8PZnBKkg"
- words[21] = "https://cuty.io/gT2uasHcl"
- words[22] = "https://cuty.io/UVRGq1f"
- words[23] = "https://cuty.io/six3gSRXEll"
- words[24] = "https://cuty.io/eDLT"
- words[25] = "https://cuty.io/pSvYxDQKV1NV"
- words[26] = "https://cuty.io/GNJniEyoC4"
- words[27] = "https://cuty.io/Hr3cPonuhQ"
- words[28] = "https://cuty.io/QGEzeBeD"
- words[29] = "https://cuty.io/b0apHN"
- words[30] = ""
- words[31] = ""
- words[32] = "https://cuty.io/OWtYHuEyL"
- words[33] = ""
- words[34] = "https://cuty.io/kQRXj"
- words[35] = ""
- words[36] = "https://cuty.io/CAJtlKvjX"
- words[37] = "https://cuty.io/PwMVd"
- words[38] = ""
- words[39] = "https://cuty.io/U4wgd"
- words[40] = ""
- words[41] = "https://cuty.io/SwTU5"
- words[42] = "https://cuty.io/r5Hryv6IV2Eh"
- words[43] = ""
- words[44] = "https://cuty.io/EuxDqLR0oFT"
- words[45] = "https://cuty.io/lflibkkVkK"
-
- function BuildArray(size){
-     this.length = size
-     for (var i = 1; i <= size; i++){
-         this[i] = null
-     }
-     return this
- }
-
- function PickRandomWord(frm) {
-     // Generate a random number between 1 and NumberOfWords
-     var rnd = Math.ceil(Math.random() * NumberOfWords)
-
-     // Display the word inside the text box
-     frm.WordBox.value = words[rnd]
- }
spaces/AchyuthGamer/OpenGPT/client/css/select.css DELETED
@@ -1,35 +0,0 @@
- select {
-     -webkit-border-radius: 8px;
-     -moz-border-radius: 8px;
-     border-radius: 8px;
-
-     -webkit-backdrop-filter: blur(20px);
-     backdrop-filter: blur(20px);
-
-     cursor: pointer;
-     background-color: var(--blur-bg);
-     border: 1px solid var(--blur-border);
-     color: var(--colour-3);
-     display: block;
-     position: relative;
-     overflow: hidden;
-     outline: none;
-     padding: 8px 16px;
-
-     appearance: none;
- }
-
- /* scrollbar */
- select.dropdown::-webkit-scrollbar {
-     width: 4px;
-     padding: 8px 0px;
- }
-
- select.dropdown::-webkit-scrollbar-track {
-     background-color: #ffffff00;
- }
-
- select.dropdown::-webkit-scrollbar-thumb {
-     background-color: #555555;
-     border-radius: 10px;
- }
spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/__init__.py DELETED
@@ -1,14 +0,0 @@
- from .AiService import AiService
- from .CodeLinkAva import CodeLinkAva
- from .DfeHub import DfeHub
- from .EasyChat import EasyChat
- from .Forefront import Forefront
- from .GetGpt import GetGpt
- from .Opchatgpts import Opchatgpts
- from .Lockchat import Lockchat
- from .Wewordle import Wewordle
- from .Equing import Equing
- from .Wuguokai import Wuguokai
- from .V50 import V50
- from .FastGpt import FastGpt
- from .ChatgptLogin import ChatgptLogin
spaces/AgentVerse/agentVerse/dataloader/mgsm.py DELETED
@@ -1,23 +0,0 @@
- from .dataloader import DataLoader
- from . import dataloader_registry
- import json
- import re
-
-
- @dataloader_registry.register("tasksolving/mgsm/gpt-4")
- @dataloader_registry.register("tasksolving/mgsm/gpt-3.5")
- class MGSMLoader(DataLoader):
-     def __init__(self, path: str):
-         self.answer_pat = re.compile(r"#### (-?\d+)")
-         super().__init__(path)
-
-     def load(self):
-         with open(self.path) as f:
-             for line in f:
-                 line = json.loads(line)
-                 self.examples.append(
-                     {
-                         "input": line["question"],
-                         "answer": line["answer_number"],
-                     }
-                 )
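
Note: answer_pat is compiled in __init__ but load() reads answer_number straight from the JSON, so the GSM8K-style pattern appears unused in this loader. For reference, what it would match (the example string is invented):

import re

answer_pat = re.compile(r"#### (-?\d+)")
m = answer_pat.search("...so she has 5 apples left.\n#### 5")
assert m is not None and m.group(1) == "5"   # captures the final integer answer
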
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/input/OnPanPad.js DELETED
@@ -1,90 +0,0 @@
- import IsLocalPointInKnob from './IsLocalPointInKnob.js';
-
- const GetAngle = Phaser.Math.Angle.Between;
- const WrapAngle = Phaser.Math.Angle.Wrap;
-
- var OnPointerDown = function (pointer, localX, localY) {
-     if ((!this.enable) || (this.panPointer)) {
-         return;
-     }
-     var knob = this.sizerChildren.knob;
-     if (!IsLocalPointInKnob(knob, localX, localY)) {
-         return;
-     }
-
-     OnPanStart.call(this, pointer);
- }
-
- var OnPointerMove = function (pointer, localX, localY) {
-     if (!this.enable) {
-         return;
-     }
-     if (!pointer.isDown) {
-         return;
-     }
-
-     var knob = this.sizerChildren.knob;
-     switch (this.panState) {
-         case TOUCH0:
-             if (IsLocalPointInKnob(knob, localX, localY)) {
-                 OnPanStart.call(this, pointer);
-             }
-             break;
-
-         case TOUCH1:
-             if (IsLocalPointInKnob(knob, localX, localY)) {
-                 OnPan.call(this);
-             } else {
-                 OnPanEnd.call(this);
-             }
-             break;
-     }
- }
-
- var OnPointerUp = function (pointer, localX, localY) {
-     if ((!this.enable) || (this.panPointer !== pointer)) {
-         return;
-     }
-
-     OnPanEnd.call(this);
- }
-
- var OnPanStart = function (pointer) {
-     this.panPointer = pointer;
-     this.panState = TOUCH1;
- }
-
- var OnPanEnd = function () {
-     this.panPointer = undefined;
-     this.panState = TOUCH0;
- }
-
- var OnPan = function () {
-     var p0 = this.panPointer.prevPosition,
-         p1 = this.panPointer.position;
-     var knob = this.sizerChildren.knob;
-     var startAngle = GetAngle(knob.x, knob.y, p0.x, p0.y),
-         endAngle = GetAngle(knob.x, knob.y, p1.x, p1.y);
-     var deltaAngle = (knob.anticlockwise) ? (startAngle - endAngle) : (endAngle - startAngle);
-     var deltaValue = WrapAngle(deltaAngle) / (Math.PI * 2);
-
-     this.stopEaseValue();
-     this.value += deltaValue;
- }
-
- const TOUCH0 = 0;
- const TOUCH1 = 1;
-
- var InstallEvents = function () {
-     var knob = this.sizerChildren.knob;
-     knob
-         .on('pointerdown', OnPointerDown, this)
-         .on('pointermove', OnPointerMove, this)
-         .on('pointerup', OnPointerUp, this)
-         .setInteractive()
-
-     this.panPointer = undefined;
-     this.panState = TOUCH0;
- }
-
- export default InstallEvents;
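
Note: OnPan turns pointer motion around the knob centre into a value delta: the angle swept between the previous and current pointer positions is wrapped and divided by 2*pi, so one full revolution changes the value by 1.0. The same math rendered in Python for reference (coordinates are illustrative):

import math

def pan_delta(cx, cy, p0, p1, anticlockwise=False):
    # angle of each pointer position relative to the knob centre
    start = math.atan2(p0[1] - cy, p0[0] - cx)
    end = math.atan2(p1[1] - cy, p1[0] - cx)
    delta = (start - end) if anticlockwise else (end - start)
    # wrap into [-pi, pi) like Phaser's Angle.Wrap, then map a full turn to 1.0
    delta = (delta + math.pi) % (2 * math.pi) - math.pi
    return delta / (2 * math.pi)

print(pan_delta(0, 0, (1, 0), (0, 1)))  # quarter turn -> 0.25
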
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnySizer.js DELETED
@@ -1,30 +0,0 @@
- import MergeStyle from './MergeStyle.js';
- import ReplaceChildrenConfig from './ReplaceChildrenConfig.js';
-
- var CreateAnySizer = function (scene, data, view, styles, customBuilders, SizerClass) {
-     data = MergeStyle(data, styles);
-
-     var backgroundConfig = ReplaceChildrenConfig(scene, data.background, view, styles, customBuilders);
-     var childrenConfig = ReplaceChildrenConfig(scene, data.children, view, styles, customBuilders);
-
-     var gameObject = new SizerClass(scene, data);
-     scene.add.existing(gameObject);
-
-     if (backgroundConfig) {
-         for (var i = 0, cnt = backgroundConfig.length; i < cnt; i++) {
-             var childConfig = backgroundConfig[i];
-             gameObject.addBackground(childConfig.$child, childConfig.padding);
-         }
-     }
-
-     if (childrenConfig) {
-         for (var i = 0, cnt = childrenConfig.length; i < cnt; i++) {
-             var childConfig = childrenConfig[i];
-             gameObject.add(childConfig.$child, childConfig);
-         }
-     }
-
-     return gameObject;
- }
-
- export default CreateAnySizer;
spaces/Aki004/herta-so-vits/modules/crepe.py DELETED
@@ -1,331 +0,0 @@
- from typing import Optional, Union
- try:
-     from typing import Literal
- except Exception as e:
-     from typing_extensions import Literal
- import numpy as np
- import torch
- import torchcrepe
- from torch import nn
- from torch.nn import functional as F
- import scipy
-
- # from: https://github.com/fishaudio/fish-diffusion
-
- def repeat_expand(
-     content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"
- ):
-     """Repeat content to target length.
-     This is a wrapper of torch.nn.functional.interpolate.
-
-     Args:
-         content (torch.Tensor): tensor
-         target_len (int): target length
-         mode (str, optional): interpolation mode. Defaults to "nearest".
-
-     Returns:
-         torch.Tensor: tensor
-     """
-
-     ndim = content.ndim
-
-     if content.ndim == 1:
-         content = content[None, None]
-     elif content.ndim == 2:
-         content = content[None]
-
-     assert content.ndim == 3
-
-     is_np = isinstance(content, np.ndarray)
-     if is_np:
-         content = torch.from_numpy(content)
-
-     results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
-
-     if is_np:
-         results = results.numpy()
-
-     if ndim == 1:
-         return results[0, 0]
-     elif ndim == 2:
-         return results[0]
-
-
- class BasePitchExtractor:
-     def __init__(
-         self,
-         hop_length: int = 512,
-         f0_min: float = 50.0,
-         f0_max: float = 1100.0,
-         keep_zeros: bool = True,
-     ):
-         """Base pitch extractor.
-
-         Args:
-             hop_length (int, optional): Hop length. Defaults to 512.
-             f0_min (float, optional): Minimum f0. Defaults to 50.0.
-             f0_max (float, optional): Maximum f0. Defaults to 1100.0.
-             keep_zeros (bool, optional): Whether to keep zeros in pitch. Defaults to True.
-         """
-
-         self.hop_length = hop_length
-         self.f0_min = f0_min
-         self.f0_max = f0_max
-         self.keep_zeros = keep_zeros
-
-     def __call__(self, x, sampling_rate=44100, pad_to=None):
-         raise NotImplementedError("BasePitchExtractor is not callable.")
-
-     def post_process(self, x, sampling_rate, f0, pad_to):
-         if isinstance(f0, np.ndarray):
-             f0 = torch.from_numpy(f0).float().to(x.device)
-
-         if pad_to is None:
-             return f0
-
-         f0 = repeat_expand(f0, pad_to)
-
-         if self.keep_zeros:
-             return f0
-
-         vuv_vector = torch.zeros_like(f0)
-         vuv_vector[f0 > 0.0] = 1.0
-         vuv_vector[f0 <= 0.0] = 0.0
-
-         # Remove 0 frequency and apply linear interpolation
-         nzindex = torch.nonzero(f0).squeeze()
-         f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
-         time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
-         time_frame = np.arange(pad_to) * self.hop_length / sampling_rate
-
-         if f0.shape[0] <= 0:
-             return torch.zeros(pad_to, dtype=torch.float, device=x.device), torch.zeros(pad_to, dtype=torch.float, device=x.device)
-
-         if f0.shape[0] == 1:
-             return torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[0], torch.ones(pad_to, dtype=torch.float, device=x.device)
-
-         # Probably can be rewritten with torch?
-         f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
-         vuv_vector = vuv_vector.cpu().numpy()
-         vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector, pad_to / len(vuv_vector), order=0))
-
-         return f0, vuv_vector
-
-
- class MaskedAvgPool1d(nn.Module):
-     def __init__(
-         self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0
-     ):
-         """An implementation of mean pooling that supports masked values.
-
-         Args:
-             kernel_size (int): The size of the mean pooling window.
-             stride (int, optional): The stride of the mean pooling window. Defaults to None.
-             padding (int, optional): The padding of the mean pooling window. Defaults to 0.
-         """
-
-         super(MaskedAvgPool1d, self).__init__()
-         self.kernel_size = kernel_size
-         self.stride = stride or kernel_size
-         self.padding = padding
-
-     def forward(self, x, mask=None):
-         ndim = x.dim()
-         if ndim == 2:
-             x = x.unsqueeze(1)
-
-         assert (
-             x.dim() == 3
-         ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)"
-
-         # Apply the mask by setting masked elements to zero, or make NaNs zero
-         if mask is None:
-             mask = ~torch.isnan(x)
-
-         # Ensure mask has the same shape as the input tensor
-         assert x.shape == mask.shape, "Input tensor and mask must have the same shape"
-
-         masked_x = torch.where(mask, x, torch.zeros_like(x))
-         # Create a ones kernel with the same number of channels as the input tensor
-         ones_kernel = torch.ones(x.size(1), 1, self.kernel_size, device=x.device)
-
-         # Perform sum pooling
-         sum_pooled = nn.functional.conv1d(
-             masked_x,
-             ones_kernel,
-             stride=self.stride,
-             padding=self.padding,
-             groups=x.size(1),
-         )
-
-         # Count the non-masked (valid) elements in each pooling window
-         valid_count = nn.functional.conv1d(
-             mask.float(),
-             ones_kernel,
-             stride=self.stride,
-             padding=self.padding,
-             groups=x.size(1),
-         )
-         valid_count = valid_count.clamp(min=1)  # Avoid division by zero
-
-         # Perform masked average pooling
-         avg_pooled = sum_pooled / valid_count
-
-         # Fill zero values with NaNs
-         avg_pooled[avg_pooled == 0] = float("nan")
-
-         if ndim == 2:
-             return avg_pooled.squeeze(1)
-
-         return avg_pooled
-
-
- class MaskedMedianPool1d(nn.Module):
-     def __init__(
-         self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0
-     ):
-         """An implementation of median pooling that supports masked values.
-
-         This implementation is inspired by the median pooling implementation in
-         https://gist.github.com/rwightman/f2d3849281624be7c0f11c85c87c1598
-
-         Args:
-             kernel_size (int): The size of the median pooling window.
-             stride (int, optional): The stride of the median pooling window. Defaults to None.
-             padding (int, optional): The padding of the median pooling window. Defaults to 0.
-         """
-
-         super(MaskedMedianPool1d, self).__init__()
-         self.kernel_size = kernel_size
-         self.stride = stride or kernel_size
-         self.padding = padding
-
-     def forward(self, x, mask=None):
-         ndim = x.dim()
-         if ndim == 2:
-             x = x.unsqueeze(1)
-
-         assert (
-             x.dim() == 3
-         ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)"
-
-         if mask is None:
-             mask = ~torch.isnan(x)
-
-         assert x.shape == mask.shape, "Input tensor and mask must have the same shape"
-
-         masked_x = torch.where(mask, x, torch.zeros_like(x))
-
-         x = F.pad(masked_x, (self.padding, self.padding), mode="reflect")
-         mask = F.pad(
-             mask.float(), (self.padding, self.padding), mode="constant", value=0
-         )
-
-         x = x.unfold(2, self.kernel_size, self.stride)
-         mask = mask.unfold(2, self.kernel_size, self.stride)
-
-         x = x.contiguous().view(x.size()[:3] + (-1,))
-         mask = mask.contiguous().view(mask.size()[:3] + (-1,)).to(x.device)
-
-         # Combine the mask with the input tensor
-         # x_masked = torch.where(mask.bool(), x, torch.fill_(torch.zeros_like(x), float("inf")))
-         x_masked = torch.where(mask.bool(), x, torch.FloatTensor([float("inf")]).to(x.device))
-
-         # Sort the masked tensor along the last dimension
-         x_sorted, _ = torch.sort(x_masked, dim=-1)
-
-         # Compute the count of non-masked (valid) values
-         valid_count = mask.sum(dim=-1)
-
-         # Calculate the index of the median value for each pooling window
-         median_idx = (torch.div((valid_count - 1), 2, rounding_mode='trunc')).clamp(min=0)
-
-         # Gather the median values using the calculated indices
-         median_pooled = x_sorted.gather(-1, median_idx.unsqueeze(-1).long()).squeeze(-1)
-
-         # Fill infinite values with NaNs
-         median_pooled[torch.isinf(median_pooled)] = float("nan")
-
-         if ndim == 2:
-             return median_pooled.squeeze(1)
-
-         return median_pooled
-
-
- class CrepePitchExtractor(BasePitchExtractor):
-     def __init__(
-         self,
-         hop_length: int = 512,
-         f0_min: float = 50.0,
-         f0_max: float = 1100.0,
-         threshold: float = 0.05,
-         keep_zeros: bool = False,
-         device=None,
-         model: Literal["full", "tiny"] = "full",
-         use_fast_filters: bool = True,
-     ):
-         super().__init__(hop_length, f0_min, f0_max, keep_zeros)
-
-         self.threshold = threshold
-         self.model = model
-         self.use_fast_filters = use_fast_filters
-         self.hop_length = hop_length
-         if device is None:
-             self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-         else:
-             self.dev = torch.device(device)
-         if self.use_fast_filters:
-             self.median_filter = MaskedMedianPool1d(3, 1, 1).to(device)
-             self.mean_filter = MaskedAvgPool1d(3, 1, 1).to(device)
-
-     def __call__(self, x, sampling_rate=44100, pad_to=None):
-         """Extract pitch using crepe.
-
-         Args:
-             x (torch.Tensor): Audio signal, shape (1, T).
-             sampling_rate (int, optional): Sampling rate. Defaults to 44100.
-             pad_to (int, optional): Pad to length. Defaults to None.
-
-         Returns:
-             torch.Tensor: Pitch, shape (T // hop_length,).
-         """
-
-         assert x.ndim == 2, f"Expected 2D tensor, got {x.ndim}D tensor."
-         assert x.shape[0] == 1, f"Expected 1 channel, got {x.shape[0]} channels."
-
-         x = x.to(self.dev)
-         f0, pd = torchcrepe.predict(
-             x,
-             sampling_rate,
-             self.hop_length,
-             self.f0_min,
-             self.f0_max,
-             pad=True,
-             model=self.model,
-             batch_size=1024,
-             device=x.device,
-             return_periodicity=True,
-         )
-
-         # Filter, remove silence, apply the uv threshold; see the original repository's README
-         if self.use_fast_filters:
-             pd = self.median_filter(pd)
-         else:
-             pd = torchcrepe.filter.median(pd, 3)
-
-         pd = torchcrepe.threshold.Silence(-60.0)(pd, x, sampling_rate, 512)
-         f0 = torchcrepe.threshold.At(self.threshold)(f0, pd)
-
-         if self.use_fast_filters:
-             f0 = self.mean_filter(f0)
-         else:
-             f0 = torchcrepe.filter.mean(f0, 3)
-
-         f0 = torch.where(torch.isnan(f0), torch.full_like(f0, 0), f0)[0]
-
-         if torch.all(f0 == 0):
-             rtn = f0.cpu().numpy() if pad_to is None else np.zeros(pad_to)
-             return rtn, rtn
-
-         return self.post_process(x, sampling_rate, f0, pad_to)
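
Note: a minimal usage sketch for CrepePitchExtractor, assuming torchcrepe is installed and able to fetch its model weights; the audio tensor is random noise purely for shape illustration:

import torch

extractor = CrepePitchExtractor(hop_length=512, model="tiny", device="cpu")
audio = torch.randn(1, 44100)            # (1, T): one second of mono audio
n_frames = audio.shape[1] // 512
f0, uv = extractor(audio, sampling_rate=44100, pad_to=n_frames)
# f0 is the interpolated pitch track, uv the voiced/unvoiced flags per frame
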
spaces/Akshat231/super_space/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Super Space
- emoji: 🏃
- colorFrom: gray
- colorTo: gray
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlterM/Zaglyt2-transformer-test/app.py DELETED
@@ -1,14 +0,0 @@
- import gradio as gr
- import net
-
- def generate(text):
-     o = text
-     r = []
-     for i in range(5):
-         t = net.gen(o)
-         o += " " + t
-         r.append(t)
-     return text + " *" + ' '.join(r) + "*"
-
- iface = gr.Interface(fn=generate, inputs="text", outputs="text")
- iface.launch()
spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
- model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler'))))
spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py DELETED
@@ -1,11 +0,0 @@
1
- _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
2
- model = dict(
3
- backbone=dict(
4
- norm_cfg=dict(type='SyncBN', requires_grad=True),
5
- norm_eval=False,
6
- plugins=[
7
- dict(
8
- cfg=dict(type='ContextBlock', ratio=1. / 16),
9
- stages=(False, True, True, True),
10
- position='after_conv3')
11
- ]))
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py DELETED
@@ -1,65 +0,0 @@
1
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- rpn_head=dict(
4
- _delete_=True,
5
- type='GARPNHead',
6
- in_channels=256,
7
- feat_channels=256,
8
- approx_anchor_generator=dict(
9
- type='AnchorGenerator',
10
- octave_base_scale=8,
11
- scales_per_octave=3,
12
- ratios=[0.5, 1.0, 2.0],
13
- strides=[4, 8, 16, 32, 64]),
14
- square_anchor_generator=dict(
15
- type='AnchorGenerator',
16
- ratios=[1.0],
17
- scales=[8],
18
- strides=[4, 8, 16, 32, 64]),
19
- anchor_coder=dict(
20
- type='DeltaXYWHBBoxCoder',
21
- target_means=[.0, .0, .0, .0],
22
- target_stds=[0.07, 0.07, 0.14, 0.14]),
23
- bbox_coder=dict(
24
- type='DeltaXYWHBBoxCoder',
25
- target_means=[.0, .0, .0, .0],
26
- target_stds=[0.07, 0.07, 0.11, 0.11]),
27
- loc_filter_thr=0.01,
28
- loss_loc=dict(
29
- type='FocalLoss',
30
- use_sigmoid=True,
31
- gamma=2.0,
32
- alpha=0.25,
33
- loss_weight=1.0),
34
- loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
35
- loss_cls=dict(
36
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
37
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
38
- roi_head=dict(
39
- bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
40
- # model training and testing settings
41
- train_cfg=dict(
42
- rpn=dict(
43
- ga_assigner=dict(
44
- type='ApproxMaxIoUAssigner',
45
- pos_iou_thr=0.7,
46
- neg_iou_thr=0.3,
47
- min_pos_iou=0.3,
48
- ignore_iof_thr=-1),
49
- ga_sampler=dict(
50
- type='RandomSampler',
51
- num=256,
52
- pos_fraction=0.5,
53
- neg_pos_ub=-1,
54
- add_gt_as_proposals=False),
55
- allowed_border=-1,
56
- center_ratio=0.2,
57
- ignore_ratio=0.5),
58
- rpn_proposal=dict(nms_post=1000, max_per_img=300),
59
- rcnn=dict(
60
- assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
61
- sampler=dict(type='RandomSampler', num=256))),
62
- test_cfg=dict(
63
- rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
64
- optimizer_config = dict(
65
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py DELETED
@@ -1,97 +0,0 @@
1
- _base_ = [
2
- '../_base_/datasets/coco_detection.py',
3
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
4
- ]
5
-
6
- model = dict(
7
- type='NASFCOS',
8
- pretrained='open-mmlab://detectron2/resnet50_caffe',
9
- backbone=dict(
10
- type='ResNet',
11
- depth=50,
12
- num_stages=4,
13
- out_indices=(0, 1, 2, 3),
14
- frozen_stages=1,
15
- norm_cfg=dict(type='BN', requires_grad=False, eps=0),
16
- style='caffe'),
17
- neck=dict(
18
- type='NASFCOS_FPN',
19
- in_channels=[256, 512, 1024, 2048],
20
- out_channels=256,
21
- start_level=1,
22
- add_extra_convs=True,
23
- num_outs=5,
24
- norm_cfg=dict(type='BN'),
25
- conv_cfg=dict(type='DCNv2', deform_groups=2)),
26
- bbox_head=dict(
27
- type='NASFCOSHead',
28
- num_classes=80,
29
- in_channels=256,
30
- feat_channels=256,
31
- strides=[8, 16, 32, 64, 128],
32
- norm_cfg=dict(type='GN', num_groups=32),
33
- loss_cls=dict(
34
- type='FocalLoss',
35
- use_sigmoid=True,
36
- gamma=2.0,
37
- alpha=0.25,
38
- loss_weight=1.0),
39
- loss_bbox=dict(type='IoULoss', loss_weight=1.0),
40
- loss_centerness=dict(
41
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
42
- train_cfg=dict(
43
- assigner=dict(
44
- type='MaxIoUAssigner',
45
- pos_iou_thr=0.5,
46
- neg_iou_thr=0.4,
47
- min_pos_iou=0,
48
- ignore_iof_thr=-1),
49
- allowed_border=-1,
50
- pos_weight=-1,
51
- debug=False),
52
- test_cfg=dict(
53
- nms_pre=1000,
54
- min_bbox_size=0,
55
- score_thr=0.05,
56
- nms=dict(type='nms', iou_threshold=0.6),
57
- max_per_img=100))
58
-
59
- img_norm_cfg = dict(
60
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
61
-
62
- train_pipeline = [
63
- dict(type='LoadImageFromFile'),
64
- dict(type='LoadAnnotations', with_bbox=True),
65
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
66
- dict(type='RandomFlip', flip_ratio=0.5),
67
- dict(type='Normalize', **img_norm_cfg),
68
- dict(type='Pad', size_divisor=32),
69
- dict(type='DefaultFormatBundle'),
70
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
71
- ]
72
-
73
- test_pipeline = [
74
- dict(type='LoadImageFromFile'),
75
- dict(
76
- type='MultiScaleFlipAug',
77
- img_scale=(1333, 800),
78
- flip=False,
79
- transforms=[
80
- dict(type='Resize', keep_ratio=True),
81
- dict(type='RandomFlip'),
82
- dict(type='Normalize', **img_norm_cfg),
83
- dict(type='Pad', size_divisor=32),
84
- dict(type='ImageToTensor', keys=['img']),
85
- dict(type='Collect', keys=['img']),
86
- ])
87
- ]
88
-
89
- data = dict(
90
- samples_per_gpu=4,
91
- workers_per_gpu=2,
92
- train=dict(pipeline=train_pipeline),
93
- val=dict(pipeline=test_pipeline),
94
- test=dict(pipeline=test_pipeline))
95
-
96
- optimizer = dict(
97
- lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
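
These mmdet configs rely on _base_ inheritance; the sketch below shows how such a file is loaded and merged (the path assumes an mmdetection checkout, and mmcv.Config is the loader these configs were written for):

from mmcv import Config

# _base_ files are loaded first, then the keys defined in this file are
# merged on top; _delete_=True (as in the guided-anchoring config above)
# replaces the inherited value instead of merging into it.
cfg = Config.fromfile(
    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py')
print(cfg.model.type)            # 'NASFCOS'
print(cfg.data.samples_per_gpu)  # 4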
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r18-d8_769x769_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = './fcn_r50-d8_769x769_80k_cityscapes.py'
2
- model = dict(
3
- pretrained='open-mmlab://resnet18_v1c',
4
- backbone=dict(depth=18),
5
- decode_head=dict(
6
- in_channels=512,
7
- channels=128,
8
- ),
9
- auxiliary_head=dict(in_channels=256, channels=64))
 
 
 
spaces/AngoHF/ANGO-Leaderboard/components/__init__.py DELETED
File without changes
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/benchmark.py DELETED
@@ -1,72 +0,0 @@
1
- """
2
- This module implements a benchmark function to evaluate the performance of the embedding pipeline. It expects a configuration JSON file. It must have questions and expected retrieved text.
3
- For each question, it's essential to have variants of that question. Language is fluid and each person might have their own spin on how they may ask it.
4
-
5
- At the end, it will save the results inside a benchmark_{sysdate}.txt file in the main directory.
6
-
7
- The benchmark function will return the score as an integer.
8
- """
9
- import datetime
10
- import json
11
- import os
12
-
13
- from pathlib import Path
14
-
15
- from .data_processor import process_and_add_to_collector, preprocess_text
16
- from .parameters import get_chunk_count, get_max_token_count
17
- from .utils import create_metadata_source
18
-
19
- def benchmark(config_path, collector):
20
- # Get the current system date
21
- sysdate = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
22
- filename = f"benchmark_{sysdate}.txt"
23
-
24
- # Open the log file in append mode
25
- with open(filename, 'a') as log:
26
- with open(config_path, 'r') as f:
27
- data = json.load(f)
28
-
29
- total_points = 0
30
- max_points = 0
31
-
32
- for item in data:
33
- filepath = item["text"]
34
- corpus = ""
35
-
36
- # Check if the file exists
37
- if os.path.isfile(Path(filepath)):
38
- # Open the file and read its content
39
- with open(Path(filepath), 'r') as file:
40
- corpus = file.read()
41
- process_and_add_to_collector(corpus, collector, True, create_metadata_source('benchmark'))
42
- else:
43
- raise FileNotFoundError(f'Cannot find specified file {filepath}.')
44
-
45
- for question_group in item["questions"]:
46
- question_variants = question_group["question_variants"]
47
- criteria = question_group["criteria"]
48
-
49
- for q in question_variants:
50
- max_points += len(criteria)
51
- processed_text = preprocess_text(q)
52
-
53
- # Get the most similar chunks
54
- results = collector.get_sorted_by_dist(processed_text, n_results=get_chunk_count(), max_token_count=get_max_token_count())
55
-
56
- points = 0
57
-
58
- for c in criteria:
59
- for p in results:
60
- if c in p:
61
- points += 1
62
- total_points += 1
63
- break
64
-
65
- info = f"The question '{q}' scored {points}/{len(criteria)} points."
66
- print(info, file=log)
67
-
68
- print('\n---\n', file=log)
69
-
70
- print(f'##Total points:\n\n{total_points}/{max_points}', file=log)
71
-
72
- return total_points, max_points
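
The docstring above only loosely describes the config file; judging from the keys the code reads ("text", "questions", "question_variants", "criteria"), a minimal config could be generated like this (paths and wording are illustrative):

import json

config = [
    {
        "text": "docs/example_corpus.txt",  # hypothetical corpus path
        "questions": [
            {
                "question_variants": [
                    "What port does the server listen on?",
                    "Which port is the server using?",
                ],
                # substrings expected to appear in the retrieved chunks
                "criteria": ["8080"],
            }
        ],
    }
]

with open("benchmark_config.json", "w") as f:
    json.dump(config, f, indent=2)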
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/file_client.py DELETED
@@ -1,1148 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import inspect
3
- import os
4
- import os.path as osp
5
- import re
6
- import tempfile
7
- import warnings
8
- from abc import ABCMeta, abstractmethod
9
- from contextlib import contextmanager
10
- from pathlib import Path
11
- from typing import Iterable, Iterator, Optional, Tuple, Union
12
- from urllib.request import urlopen
13
-
14
- import annotator.uniformer.mmcv as mmcv
15
- from annotator.uniformer.mmcv.utils.misc import has_method
16
- from annotator.uniformer.mmcv.utils.path import is_filepath
17
-
18
-
19
- class BaseStorageBackend(metaclass=ABCMeta):
20
- """Abstract class of storage backends.
21
-
22
- All backends need to implement two apis: ``get()`` and ``get_text()``.
23
- ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
24
- as texts.
25
- """
26
-
27
- # a flag to indicate whether the backend can create a symlink for a file
28
- _allow_symlink = False
29
-
30
- @property
31
- def name(self):
32
- return self.__class__.__name__
33
-
34
- @property
35
- def allow_symlink(self):
36
- return self._allow_symlink
37
-
38
- @abstractmethod
39
- def get(self, filepath):
40
- pass
41
-
42
- @abstractmethod
43
- def get_text(self, filepath):
44
- pass
45
-
46
-
47
- class CephBackend(BaseStorageBackend):
48
- """Ceph storage backend (for internal use).
49
-
50
- Args:
51
- path_mapping (dict|None): path mapping dict from local path to Petrel
52
- path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``
53
- will be replaced by ``dst``. Default: None.
54
-
55
- .. warning::
56
- :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
57
- please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
58
- """
59
-
60
- def __init__(self, path_mapping=None):
61
- try:
62
- import ceph
63
- except ImportError:
64
- raise ImportError('Please install ceph to enable CephBackend.')
65
-
66
- warnings.warn(
67
- 'CephBackend will be deprecated, please use PetrelBackend instead')
68
- self._client = ceph.S3Client()
69
- assert isinstance(path_mapping, dict) or path_mapping is None
70
- self.path_mapping = path_mapping
71
-
72
- def get(self, filepath):
73
- filepath = str(filepath)
74
- if self.path_mapping is not None:
75
- for k, v in self.path_mapping.items():
76
- filepath = filepath.replace(k, v)
77
- value = self._client.Get(filepath)
78
- value_buf = memoryview(value)
79
- return value_buf
80
-
81
- def get_text(self, filepath, encoding=None):
82
- raise NotImplementedError
83
-
84
-
85
- class PetrelBackend(BaseStorageBackend):
86
- """Petrel storage backend (for internal use).
87
-
88
- PetrelBackend supports reading and writing data to multiple clusters.
89
- If the file path contains the cluster name, PetrelBackend will read data
90
- from specified cluster or write data to it. Otherwise, PetrelBackend will
91
- access the default cluster.
92
-
93
- Args:
94
- path_mapping (dict, optional): Path mapping dict from local path to
95
- Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in
96
- ``filepath`` will be replaced by ``dst``. Default: None.
97
- enable_mc (bool, optional): Whether to enable memcached support.
98
- Default: True.
99
-
100
- Examples:
101
- >>> filepath1 = 's3://path/of/file'
102
- >>> filepath2 = 'cluster-name:s3://path/of/file'
103
- >>> client = PetrelBackend()
104
- >>> client.get(filepath1) # get data from default cluster
105
- >>> client.get(filepath2) # get data from 'cluster-name' cluster
106
- """
107
-
108
- def __init__(self,
109
- path_mapping: Optional[dict] = None,
110
- enable_mc: bool = True):
111
- try:
112
- from petrel_client import client
113
- except ImportError:
114
- raise ImportError('Please install petrel_client to enable '
115
- 'PetrelBackend.')
116
-
117
- self._client = client.Client(enable_mc=enable_mc)
118
- assert isinstance(path_mapping, dict) or path_mapping is None
119
- self.path_mapping = path_mapping
120
-
121
- def _map_path(self, filepath: Union[str, Path]) -> str:
122
- """Map ``filepath`` to a string path whose prefix will be replaced by
123
- :attr:`self.path_mapping`.
124
-
125
- Args:
126
- filepath (str): Path to be mapped.
127
- """
128
- filepath = str(filepath)
129
- if self.path_mapping is not None:
130
- for k, v in self.path_mapping.items():
131
- filepath = filepath.replace(k, v)
132
- return filepath
133
-
134
- def _format_path(self, filepath: str) -> str:
135
- """Convert a ``filepath`` to standard format of petrel oss.
136
-
137
- If the ``filepath`` is concatenated by ``os.path.join``, in a Windows
138
- environment, the ``filepath`` will be the format of
139
- 's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the
140
- above ``filepath`` will be converted to 's3://bucket_name/image.jpg'.
141
-
142
- Args:
143
- filepath (str): Path to be formatted.
144
- """
145
- return re.sub(r'\\+', '/', filepath)
146
-
147
- def get(self, filepath: Union[str, Path]) -> memoryview:
148
- """Read data from a given ``filepath`` with 'rb' mode.
149
-
150
- Args:
151
- filepath (str or Path): Path to read data.
152
-
153
- Returns:
154
- memoryview: A memory view of expected bytes object to avoid
155
- copying. The memoryview object can be converted to bytes by
156
- ``value_buf.tobytes()``.
157
- """
158
- filepath = self._map_path(filepath)
159
- filepath = self._format_path(filepath)
160
- value = self._client.Get(filepath)
161
- value_buf = memoryview(value)
162
- return value_buf
163
-
164
- def get_text(self,
165
- filepath: Union[str, Path],
166
- encoding: str = 'utf-8') -> str:
167
- """Read data from a given ``filepath`` with 'r' mode.
168
-
169
- Args:
170
- filepath (str or Path): Path to read data.
171
- encoding (str): The encoding format used to open the ``filepath``.
172
- Default: 'utf-8'.
173
-
174
- Returns:
175
- str: Expected text reading from ``filepath``.
176
- """
177
- return str(self.get(filepath), encoding=encoding)
178
-
179
- def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
180
- """Save data to a given ``filepath``.
181
-
182
- Args:
183
- obj (bytes): Data to be saved.
184
- filepath (str or Path): Path to write data.
185
- """
186
- filepath = self._map_path(filepath)
187
- filepath = self._format_path(filepath)
188
- self._client.put(filepath, obj)
189
-
190
- def put_text(self,
191
- obj: str,
192
- filepath: Union[str, Path],
193
- encoding: str = 'utf-8') -> None:
194
- """Save data to a given ``filepath``.
195
-
196
- Args:
197
- obj (str): Data to be written.
198
- filepath (str or Path): Path to write data.
199
- encoding (str): The encoding format used to encode the ``obj``.
200
- Default: 'utf-8'.
201
- """
202
- self.put(bytes(obj, encoding=encoding), filepath)
203
-
204
- def remove(self, filepath: Union[str, Path]) -> None:
205
- """Remove a file.
206
-
207
- Args:
208
- filepath (str or Path): Path to be removed.
209
- """
210
- if not has_method(self._client, 'delete'):
211
- raise NotImplementedError(
212
- ('Current version of Petrel Python SDK has not supported '
213
- 'the `delete` method, please use a higher version or dev'
214
- ' branch instead.'))
215
-
216
- filepath = self._map_path(filepath)
217
- filepath = self._format_path(filepath)
218
- self._client.delete(filepath)
219
-
220
- def exists(self, filepath: Union[str, Path]) -> bool:
221
- """Check whether a file path exists.
222
-
223
- Args:
224
- filepath (str or Path): Path to be checked whether exists.
225
-
226
- Returns:
227
- bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
228
- """
229
- if not (has_method(self._client, 'contains')
230
- and has_method(self._client, 'isdir')):
231
- raise NotImplementedError(
232
- ('Current version of Petrel Python SDK has not supported '
233
- 'the `contains` and `isdir` methods, please use a higher'
234
- 'version or dev branch instead.'))
235
-
236
- filepath = self._map_path(filepath)
237
- filepath = self._format_path(filepath)
238
- return self._client.contains(filepath) or self._client.isdir(filepath)
239
-
240
- def isdir(self, filepath: Union[str, Path]) -> bool:
241
- """Check whether a file path is a directory.
242
-
243
- Args:
244
- filepath (str or Path): Path to be checked whether it is a
245
- directory.
246
-
247
- Returns:
248
- bool: Return ``True`` if ``filepath`` points to a directory,
249
- ``False`` otherwise.
250
- """
251
- if not has_method(self._client, 'isdir'):
252
- raise NotImplementedError(
253
- ('Current version of Petrel Python SDK has not supported '
254
- 'the `isdir` method, please use a higher version or dev'
255
- ' branch instead.'))
256
-
257
- filepath = self._map_path(filepath)
258
- filepath = self._format_path(filepath)
259
- return self._client.isdir(filepath)
260
-
261
- def isfile(self, filepath: Union[str, Path]) -> bool:
262
- """Check whether a file path is a file.
263
-
264
- Args:
265
- filepath (str or Path): Path to be checked whether it is a file.
266
-
267
- Returns:
268
- bool: Return ``True`` if ``filepath`` points to a file, ``False``
269
- otherwise.
270
- """
271
- if not has_method(self._client, 'contains'):
272
- raise NotImplementedError(
273
- ('Current version of Petrel Python SDK has not supported '
274
- 'the `contains` method, please use a higher version or '
275
- 'dev branch instead.'))
276
-
277
- filepath = self._map_path(filepath)
278
- filepath = self._format_path(filepath)
279
- return self._client.contains(filepath)
280
-
281
- def join_path(self, filepath: Union[str, Path],
282
- *filepaths: Union[str, Path]) -> str:
283
- """Concatenate all file paths.
284
-
285
- Args:
286
- filepath (str or Path): Path to be concatenated.
287
-
288
- Returns:
289
- str: The result after concatenation.
290
- """
291
- filepath = self._format_path(self._map_path(filepath))
292
- if filepath.endswith('/'):
293
- filepath = filepath[:-1]
294
- formatted_paths = [filepath]
295
- for path in filepaths:
296
- formatted_paths.append(self._format_path(self._map_path(path)))
297
- return '/'.join(formatted_paths)
298
-
299
- @contextmanager
300
- def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
301
- """Download a file from ``filepath`` and return a temporary path.
302
-
303
- ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`. It
304
- can be called with a ``with`` statement, and when exiting the
305
- ``with`` statement, the temporary path will be released.
306
-
307
- Args:
308
- filepath (str | Path): Download a file from ``filepath``.
309
-
310
- Examples:
311
- >>> client = PetrelBackend()
312
- >>> # After exiting from the ``with`` clause,
313
- >>> # the path will be removed
314
- >>> with client.get_local_path('s3://path/of/your/file') as path:
315
- ... # do something here
316
-
317
- Yields:
318
- Iterable[str]: Only yield one temporary path.
319
- """
320
- filepath = self._map_path(filepath)
321
- filepath = self._format_path(filepath)
322
- assert self.isfile(filepath)
323
- try:
324
- f = tempfile.NamedTemporaryFile(delete=False)
325
- f.write(self.get(filepath))
326
- f.close()
327
- yield f.name
328
- finally:
329
- os.remove(f.name)
330
-
331
- def list_dir_or_file(self,
332
- dir_path: Union[str, Path],
333
- list_dir: bool = True,
334
- list_file: bool = True,
335
- suffix: Optional[Union[str, Tuple[str]]] = None,
336
- recursive: bool = False) -> Iterator[str]:
337
- """Scan a directory to find the interested directories or files in
338
- arbitrary order.
339
-
340
- Note:
341
- Petrel has no concept of directories but it simulates the directory
342
- hierarchy in the filesystem through public prefixes. In addition,
343
- if the returned path ends with '/', it means the path is a public
344
- prefix which is a logical directory.
345
-
346
- Note:
347
- :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
348
- In addition, the returned directory path will not contain the
349
- suffix '/' which is consistent with other backends.
350
-
351
- Args:
352
- dir_path (str | Path): Path of the directory.
353
- list_dir (bool): List the directories. Default: True.
354
- list_file (bool): List the path of files. Default: True.
355
- suffix (str or tuple[str], optional): File suffix
356
- that we are interested in. Default: None.
357
- recursive (bool): If set to True, recursively scan the
358
- directory. Default: False.
359
-
360
- Yields:
361
- Iterable[str]: A relative path to ``dir_path``.
362
- """
363
- if not has_method(self._client, 'list'):
364
- raise NotImplementedError(
365
- ('Current version of Petrel Python SDK has not supported '
366
- 'the `list` method, please use a higher version or dev'
367
- ' branch instead.'))
368
-
369
- dir_path = self._map_path(dir_path)
370
- dir_path = self._format_path(dir_path)
371
- if list_dir and suffix is not None:
372
- raise TypeError(
373
- '`list_dir` should be False when `suffix` is not None')
374
-
375
- if (suffix is not None) and not isinstance(suffix, (str, tuple)):
376
- raise TypeError('`suffix` must be a string or tuple of strings')
377
-
378
- # Petrel's simulated directory hierarchy assumes that directory paths
379
- # should end with `/`
380
- if not dir_path.endswith('/'):
381
- dir_path += '/'
382
-
383
- root = dir_path
384
-
385
- def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
386
- recursive):
387
- for path in self._client.list(dir_path):
388
- # the `self.isdir` is not used here to determine whether path
389
- # is a directory, because `self.isdir` relies on
390
- # `self._client.list`
391
- if path.endswith('/'): # a directory path
392
- next_dir_path = self.join_path(dir_path, path)
393
- if list_dir:
394
- # get the relative path and exclude the last
395
- # character '/'
396
- rel_dir = next_dir_path[len(root):-1]
397
- yield rel_dir
398
- if recursive:
399
- yield from _list_dir_or_file(next_dir_path, list_dir,
400
- list_file, suffix,
401
- recursive)
402
- else: # a file path
403
- absolute_path = self.join_path(dir_path, path)
404
- rel_path = absolute_path[len(root):]
405
- if (suffix is None
406
- or rel_path.endswith(suffix)) and list_file:
407
- yield rel_path
408
-
409
- return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
410
- recursive)
411
-
412
-
413
- class MemcachedBackend(BaseStorageBackend):
414
- """Memcached storage backend.
415
-
416
- Attributes:
417
- server_list_cfg (str): Config file for memcached server list.
418
- client_cfg (str): Config file for memcached client.
419
- sys_path (str | None): Additional path to be appended to `sys.path`.
420
- Default: None.
421
- """
422
-
423
- def __init__(self, server_list_cfg, client_cfg, sys_path=None):
424
- if sys_path is not None:
425
- import sys
426
- sys.path.append(sys_path)
427
- try:
428
- import mc
429
- except ImportError:
430
- raise ImportError(
431
- 'Please install memcached to enable MemcachedBackend.')
432
-
433
- self.server_list_cfg = server_list_cfg
434
- self.client_cfg = client_cfg
435
- self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
436
- self.client_cfg)
437
- # mc.pyvector serves as a pointer to a memory cache
438
- self._mc_buffer = mc.pyvector()
439
-
440
- def get(self, filepath):
441
- filepath = str(filepath)
442
- import mc
443
- self._client.Get(filepath, self._mc_buffer)
444
- value_buf = mc.ConvertBuffer(self._mc_buffer)
445
- return value_buf
446
-
447
- def get_text(self, filepath, encoding=None):
448
- raise NotImplementedError
449
-
450
-
451
- class LmdbBackend(BaseStorageBackend):
452
- """Lmdb storage backend.
453
-
454
- Args:
455
- db_path (str): Lmdb database path.
456
- readonly (bool, optional): Lmdb environment parameter. If True,
457
- disallow any write operations. Default: True.
458
- lock (bool, optional): Lmdb environment parameter. If False, when
459
- concurrent access occurs, do not lock the database. Default: False.
460
- readahead (bool, optional): Lmdb environment parameter. If False,
461
- disable the OS filesystem readahead mechanism, which may improve
462
- random read performance when a database is larger than RAM.
463
- Default: False.
464
-
465
- Attributes:
466
- db_path (str): Lmdb database path.
467
- """
468
-
469
- def __init__(self,
470
- db_path,
471
- readonly=True,
472
- lock=False,
473
- readahead=False,
474
- **kwargs):
475
- try:
476
- import lmdb
477
- except ImportError:
478
- raise ImportError('Please install lmdb to enable LmdbBackend.')
479
-
480
- self.db_path = str(db_path)
481
- self._client = lmdb.open(
482
- self.db_path,
483
- readonly=readonly,
484
- lock=lock,
485
- readahead=readahead,
486
- **kwargs)
487
-
488
- def get(self, filepath):
489
- """Get values according to the filepath.
490
-
491
- Args:
492
- filepath (str | obj:`Path`): Here, filepath is the lmdb key.
493
- """
494
- filepath = str(filepath)
495
- with self._client.begin(write=False) as txn:
496
- value_buf = txn.get(filepath.encode('ascii'))
497
- return value_buf
498
-
499
- def get_text(self, filepath, encoding=None):
500
- raise NotImplementedError
501
-
502
-
503
- class HardDiskBackend(BaseStorageBackend):
504
- """Raw hard disks storage backend."""
505
-
506
- _allow_symlink = True
507
-
508
- def get(self, filepath: Union[str, Path]) -> bytes:
509
- """Read data from a given ``filepath`` with 'rb' mode.
510
-
511
- Args:
512
- filepath (str or Path): Path to read data.
513
-
514
- Returns:
515
- bytes: Expected bytes object.
516
- """
517
- with open(filepath, 'rb') as f:
518
- value_buf = f.read()
519
- return value_buf
520
-
521
- def get_text(self,
522
- filepath: Union[str, Path],
523
- encoding: str = 'utf-8') -> str:
524
- """Read data from a given ``filepath`` with 'r' mode.
525
-
526
- Args:
527
- filepath (str or Path): Path to read data.
528
- encoding (str): The encoding format used to open the ``filepath``.
529
- Default: 'utf-8'.
530
-
531
- Returns:
532
- str: Expected text reading from ``filepath``.
533
- """
534
- with open(filepath, 'r', encoding=encoding) as f:
535
- value_buf = f.read()
536
- return value_buf
537
-
538
- def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
539
- """Write data to a given ``filepath`` with 'wb' mode.
540
-
541
- Note:
542
- ``put`` will create a directory if the directory of ``filepath``
543
- does not exist.
544
-
545
- Args:
546
- obj (bytes): Data to be written.
547
- filepath (str or Path): Path to write data.
548
- """
549
- mmcv.mkdir_or_exist(osp.dirname(filepath))
550
- with open(filepath, 'wb') as f:
551
- f.write(obj)
552
-
553
- def put_text(self,
554
- obj: str,
555
- filepath: Union[str, Path],
556
- encoding: str = 'utf-8') -> None:
557
- """Write data to a given ``filepath`` with 'w' mode.
558
-
559
- Note:
560
- ``put_text`` will create a directory if the directory of
561
- ``filepath`` does not exist.
562
-
563
- Args:
564
- obj (str): Data to be written.
565
- filepath (str or Path): Path to write data.
566
- encoding (str): The encoding format used to open the ``filepath``.
567
- Default: 'utf-8'.
568
- """
569
- mmcv.mkdir_or_exist(osp.dirname(filepath))
570
- with open(filepath, 'w', encoding=encoding) as f:
571
- f.write(obj)
572
-
573
- def remove(self, filepath: Union[str, Path]) -> None:
574
- """Remove a file.
575
-
576
- Args:
577
- filepath (str or Path): Path to be removed.
578
- """
579
- os.remove(filepath)
580
-
581
- def exists(self, filepath: Union[str, Path]) -> bool:
582
- """Check whether a file path exists.
583
-
584
- Args:
585
- filepath (str or Path): Path to be checked whether exists.
586
-
587
- Returns:
588
- bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
589
- """
590
- return osp.exists(filepath)
591
-
592
- def isdir(self, filepath: Union[str, Path]) -> bool:
593
- """Check whether a file path is a directory.
594
-
595
- Args:
596
- filepath (str or Path): Path to be checked whether it is a
597
- directory.
598
-
599
- Returns:
600
- bool: Return ``True`` if ``filepath`` points to a directory,
601
- ``False`` otherwise.
602
- """
603
- return osp.isdir(filepath)
604
-
605
- def isfile(self, filepath: Union[str, Path]) -> bool:
606
- """Check whether a file path is a file.
607
-
608
- Args:
609
- filepath (str or Path): Path to be checked whether it is a file.
610
-
611
- Returns:
612
- bool: Return ``True`` if ``filepath`` points to a file, ``False``
613
- otherwise.
614
- """
615
- return osp.isfile(filepath)
616
-
617
- def join_path(self, filepath: Union[str, Path],
618
- *filepaths: Union[str, Path]) -> str:
619
- """Concatenate all file paths.
620
-
621
- Join one or more filepath components intelligently. The return value
622
- is the concatenation of filepath and any members of *filepaths.
623
-
624
- Args:
625
- filepath (str or Path): Path to be concatenated.
626
-
627
- Returns:
628
- str: The result of concatenation.
629
- """
630
- return osp.join(filepath, *filepaths)
631
-
632
- @contextmanager
633
- def get_local_path(
634
- self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
635
- """Only for unified API and do nothing."""
636
- yield filepath
637
-
638
- def list_dir_or_file(self,
639
- dir_path: Union[str, Path],
640
- list_dir: bool = True,
641
- list_file: bool = True,
642
- suffix: Optional[Union[str, Tuple[str]]] = None,
643
- recursive: bool = False) -> Iterator[str]:
644
- """Scan a directory to find the interested directories or files in
645
- arbitrary order.
646
-
647
- Note:
648
- :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
649
-
650
- Args:
651
- dir_path (str | Path): Path of the directory.
652
- list_dir (bool): List the directories. Default: True.
653
- list_file (bool): List the path of files. Default: True.
654
- suffix (str or tuple[str], optional): File suffix
655
- that we are interested in. Default: None.
656
- recursive (bool): If set to True, recursively scan the
657
- directory. Default: False.
658
-
659
- Yields:
660
- Iterable[str]: A relative path to ``dir_path``.
661
- """
662
- if list_dir and suffix is not None:
663
- raise TypeError('`suffix` should be None when `list_dir` is True')
664
-
665
- if (suffix is not None) and not isinstance(suffix, (str, tuple)):
666
- raise TypeError('`suffix` must be a string or tuple of strings')
667
-
668
- root = dir_path
669
-
670
- def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
671
- recursive):
672
- for entry in os.scandir(dir_path):
673
- if not entry.name.startswith('.') and entry.is_file():
674
- rel_path = osp.relpath(entry.path, root)
675
- if (suffix is None
676
- or rel_path.endswith(suffix)) and list_file:
677
- yield rel_path
678
- elif osp.isdir(entry.path):
679
- if list_dir:
680
- rel_dir = osp.relpath(entry.path, root)
681
- yield rel_dir
682
- if recursive:
683
- yield from _list_dir_or_file(entry.path, list_dir,
684
- list_file, suffix,
685
- recursive)
686
-
687
- return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
688
- recursive)
689
-
690
-
691
- class HTTPBackend(BaseStorageBackend):
692
- """HTTP and HTTPS storage bachend."""
693
-
694
- def get(self, filepath):
695
- value_buf = urlopen(filepath).read()
696
- return value_buf
697
-
698
- def get_text(self, filepath, encoding='utf-8'):
699
- value_buf = urlopen(filepath).read()
700
- return value_buf.decode(encoding)
701
-
702
- @contextmanager
703
- def get_local_path(self, filepath: str) -> Iterable[str]:
704
- """Download a file from ``filepath``.
705
-
706
- ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`. It
707
- can be called with a ``with`` statement, and when exiting the
708
- ``with`` statement, the temporary path will be released.
709
-
710
- Args:
711
- filepath (str): Download a file from ``filepath``.
712
-
713
- Examples:
714
- >>> client = HTTPBackend()
715
- >>> # After exiting from the ``with`` clause,
716
- >>> # the path will be removed
717
- >>> with client.get_local_path('http://path/of/your/file') as path:
718
- ... # do something here
719
- """
720
- try:
721
- f = tempfile.NamedTemporaryFile(delete=False)
722
- f.write(self.get(filepath))
723
- f.close()
724
- yield f.name
725
- finally:
726
- os.remove(f.name)
727
-
728
-
729
- class FileClient:
730
- """A general file client to access files in different backends.
731
-
732
- The client loads a file or text in a specified backend from its path
733
- and returns it as a binary or text file. There are two ways to choose a
734
- backend, the name of backend and the prefix of path. Although both of them
735
- can be used to choose a storage backend, ``backend`` has the higher priority;
736
- that is, if both are set, the storage backend will be chosen by the
737
- backend argument. If both are `None`, the disk backend will be chosen.
738
- Note that it can also register other backend accessors with a given name,
739
- prefixes, and backend class. In addition, we use the singleton pattern to
740
- avoid repeated object creation. If the arguments are the same, the same
741
- object will be returned.
742
-
743
- Args:
744
- backend (str, optional): The storage backend type. Options are "disk",
745
- "ceph", "memcached", "lmdb", "http" and "petrel". Default: None.
746
- prefix (str, optional): The prefix of the registered storage backend.
747
- Options are "s3", "http", "https". Default: None.
748
-
749
- Examples:
750
- >>> # only set backend
751
- >>> file_client = FileClient(backend='petrel')
752
- >>> # only set prefix
753
- >>> file_client = FileClient(prefix='s3')
754
- >>> # set both backend and prefix but use backend to choose client
755
- >>> file_client = FileClient(backend='petrel', prefix='s3')
756
- >>> # if the arguments are the same, the same object is returned
757
- >>> file_client1 = FileClient(backend='petrel')
758
- >>> file_client1 is file_client
759
- True
760
-
761
- Attributes:
762
- client (:obj:`BaseStorageBackend`): The backend object.
763
- """
764
-
765
- _backends = {
766
- 'disk': HardDiskBackend,
767
- 'ceph': CephBackend,
768
- 'memcached': MemcachedBackend,
769
- 'lmdb': LmdbBackend,
770
- 'petrel': PetrelBackend,
771
- 'http': HTTPBackend,
772
- }
773
- # This collection is used to record the overridden backends, and when a
774
- # backend appears in the collection, the singleton pattern is disabled for
775
- # that backend, because if the singleton pattern is used, then the object
776
- # returned will be the backend before overwriting
777
- _overridden_backends = set()
778
- _prefix_to_backends = {
779
- 's3': PetrelBackend,
780
- 'http': HTTPBackend,
781
- 'https': HTTPBackend,
782
- }
783
- _overridden_prefixes = set()
784
-
785
- _instances = {}
786
-
787
- def __new__(cls, backend=None, prefix=None, **kwargs):
788
- if backend is None and prefix is None:
789
- backend = 'disk'
790
- if backend is not None and backend not in cls._backends:
791
- raise ValueError(
792
- f'Backend {backend} is not supported. Currently supported ones'
793
- f' are {list(cls._backends.keys())}')
794
- if prefix is not None and prefix not in cls._prefix_to_backends:
795
- raise ValueError(
796
- f'prefix {prefix} is not supported. Currently supported ones '
797
- f'are {list(cls._prefix_to_backends.keys())}')
798
-
799
- # concatenate the arguments to a unique key for determining whether
800
- # objects with the same arguments were created
801
- arg_key = f'{backend}:{prefix}'
802
- for key, value in kwargs.items():
803
- arg_key += f':{key}:{value}'
804
-
805
- # if a backend was overridden, it will create a new object
806
- if (arg_key in cls._instances
807
- and backend not in cls._overridden_backends
808
- and prefix not in cls._overridden_prefixes):
809
- _instance = cls._instances[arg_key]
810
- else:
811
- # create a new object and put it to _instance
812
- _instance = super().__new__(cls)
813
- if backend is not None:
814
- _instance.client = cls._backends[backend](**kwargs)
815
- else:
816
- _instance.client = cls._prefix_to_backends[prefix](**kwargs)
817
-
818
- cls._instances[arg_key] = _instance
819
-
820
- return _instance
821
-
822
- @property
823
- def name(self):
824
- return self.client.name
825
-
826
- @property
827
- def allow_symlink(self):
828
- return self.client.allow_symlink
829
-
830
- @staticmethod
831
- def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]:
832
- """Parse the prefix of a uri.
833
-
834
- Args:
835
- uri (str | Path): Uri to be parsed that contains the file prefix.
836
-
837
- Examples:
838
- >>> FileClient.parse_uri_prefix('s3://path/of/your/file')
839
- 's3'
840
-
841
- Returns:
842
- str | None: Return the prefix of uri if the uri contains '://'
843
- else ``None``.
844
- """
845
- assert is_filepath(uri)
846
- uri = str(uri)
847
- if '://' not in uri:
848
- return None
849
- else:
850
- prefix, _ = uri.split('://')
851
- # In the case of PetrelBackend, the prefix may contains the cluster
852
- # name like clusterName:s3
853
- if ':' in prefix:
854
- _, prefix = prefix.split(':')
855
- return prefix
856
-
857
- @classmethod
858
- def infer_client(cls,
859
- file_client_args: Optional[dict] = None,
860
- uri: Optional[Union[str, Path]] = None) -> 'FileClient':
861
- """Infer a suitable file client based on the URI and arguments.
862
-
863
- Args:
864
- file_client_args (dict, optional): Arguments to instantiate a
865
- FileClient. Default: None.
866
- uri (str | Path, optional): Uri to be parsed that contains the file
867
- prefix. Default: None.
868
-
869
- Examples:
870
- >>> uri = 's3://path/of/your/file'
871
- >>> file_client = FileClient.infer_client(uri=uri)
872
- >>> file_client_args = {'backend': 'petrel'}
873
- >>> file_client = FileClient.infer_client(file_client_args)
874
-
875
- Returns:
876
- FileClient: Instantiated FileClient object.
877
- """
878
- assert file_client_args is not None or uri is not None
879
- if file_client_args is None:
880
- file_prefix = cls.parse_uri_prefix(uri) # type: ignore
881
- return cls(prefix=file_prefix)
882
- else:
883
- return cls(**file_client_args)
884
-
885
- @classmethod
886
- def _register_backend(cls, name, backend, force=False, prefixes=None):
887
- if not isinstance(name, str):
888
- raise TypeError('the backend name should be a string, '
889
- f'but got {type(name)}')
890
- if not inspect.isclass(backend):
891
- raise TypeError(
892
- f'backend should be a class but got {type(backend)}')
893
- if not issubclass(backend, BaseStorageBackend):
894
- raise TypeError(
895
- f'backend {backend} is not a subclass of BaseStorageBackend')
896
- if not force and name in cls._backends:
897
- raise KeyError(
898
- f'{name} is already registered as a storage backend, '
899
- 'add "force=True" if you want to override it')
900
-
901
- if name in cls._backends and force:
902
- cls._overridden_backends.add(name)
903
- cls._backends[name] = backend
904
-
905
- if prefixes is not None:
906
- if isinstance(prefixes, str):
907
- prefixes = [prefixes]
908
- else:
909
- assert isinstance(prefixes, (list, tuple))
910
- for prefix in prefixes:
911
- if prefix not in cls._prefix_to_backends:
912
- cls._prefix_to_backends[prefix] = backend
913
- elif (prefix in cls._prefix_to_backends) and force:
914
- cls._overridden_prefixes.add(prefix)
915
- cls._prefix_to_backends[prefix] = backend
916
- else:
917
- raise KeyError(
918
- f'{prefix} is already registered as a storage backend,'
919
- ' add "force=True" if you want to override it')
920
-
921
- @classmethod
922
- def register_backend(cls, name, backend=None, force=False, prefixes=None):
923
- """Register a backend to FileClient.
924
-
925
- This method can be used as a normal class method or a decorator.
926
-
927
- .. code-block:: python
928
-
929
- class NewBackend(BaseStorageBackend):
930
-
931
- def get(self, filepath):
932
- return filepath
933
-
934
- def get_text(self, filepath):
935
- return filepath
936
-
937
- FileClient.register_backend('new', NewBackend)
938
-
939
- or
940
-
941
- .. code-block:: python
942
-
943
- @FileClient.register_backend('new')
944
- class NewBackend(BaseStorageBackend):
945
-
946
- def get(self, filepath):
947
- return filepath
948
-
949
- def get_text(self, filepath):
950
- return filepath
951
-
952
- Args:
953
- name (str): The name of the registered backend.
954
- backend (class, optional): The backend class to be registered,
955
- which must be a subclass of :class:`BaseStorageBackend`.
956
- When this method is used as a decorator, backend is None.
957
- Defaults to None.
958
- force (bool, optional): Whether to override the backend if the name
959
- has already been registered. Defaults to False.
960
- prefixes (str or list[str] or tuple[str], optional): The prefixes
961
- of the registered storage backend. Default: None.
962
- `New in version 1.3.15.`
963
- """
964
- if backend is not None:
965
- cls._register_backend(
966
- name, backend, force=force, prefixes=prefixes)
967
- return
968
-
969
- def _register(backend_cls):
970
- cls._register_backend(
971
- name, backend_cls, force=force, prefixes=prefixes)
972
- return backend_cls
973
-
974
- return _register
975
-
976
- def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]:
977
- """Read data from a given ``filepath`` with 'rb' mode.
978
-
979
- Note:
980
- There are two types of return values for ``get``, one is ``bytes``
981
- and the other is ``memoryview``. The advantage of using memoryview
982
- is that you can avoid copying, and if you want to convert it to
983
- ``bytes``, you can use ``.tobytes()``.
984
-
985
- Args:
986
- filepath (str or Path): Path to read data.
987
-
988
- Returns:
989
- bytes | memoryview: Expected bytes object or a memory view of the
990
- bytes object.
991
- """
992
- return self.client.get(filepath)
993
-
994
- def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str:
995
- """Read data from a given ``filepath`` with 'r' mode.
996
-
997
- Args:
998
- filepath (str or Path): Path to read data.
999
- encoding (str): The encoding format used to open the ``filepath``.
1000
- Default: 'utf-8'.
1001
-
1002
- Returns:
1003
- str: Expected text reading from ``filepath``.
1004
- """
1005
- return self.client.get_text(filepath, encoding)
1006
-
1007
- def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
1008
- """Write data to a given ``filepath`` with 'wb' mode.
1009
-
1010
- Note:
1011
- ``put`` should create a directory if the directory of ``filepath``
1012
- does not exist.
1013
-
1014
- Args:
1015
- obj (bytes): Data to be written.
1016
- filepath (str or Path): Path to write data.
1017
- """
1018
- self.client.put(obj, filepath)
1019
-
1020
- def put_text(self, obj: str, filepath: Union[str, Path]) -> None:
1021
- """Write data to a given ``filepath`` with 'w' mode.
1022
-
1023
- Note:
1024
- ``put_text`` should create a directory if the directory of
1025
- ``filepath`` does not exist.
1026
-
1027
- Args:
1028
- obj (str): Data to be written.
1029
- filepath (str or Path): Path to write data.
1030
- encoding (str, optional): The encoding format used to open the
1031
- `filepath`. Default: 'utf-8'.
1032
- """
1033
- self.client.put_text(obj, filepath)
1034
-
1035
- def remove(self, filepath: Union[str, Path]) -> None:
1036
- """Remove a file.
1037
-
1038
- Args:
1039
- filepath (str, Path): Path to be removed.
1040
- """
1041
- self.client.remove(filepath)
1042
-
1043
- def exists(self, filepath: Union[str, Path]) -> bool:
1044
- """Check whether a file path exists.
1045
-
1046
- Args:
1047
- filepath (str or Path): Path to be checked whether exists.
1048
-
1049
- Returns:
1050
- bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
1051
- """
1052
- return self.client.exists(filepath)
1053
-
1054
- def isdir(self, filepath: Union[str, Path]) -> bool:
1055
- """Check whether a file path is a directory.
1056
-
1057
- Args:
1058
- filepath (str or Path): Path to be checked whether it is a
1059
- directory.
1060
-
1061
- Returns:
1062
- bool: Return ``True`` if ``filepath`` points to a directory,
1063
- ``False`` otherwise.
1064
- """
1065
- return self.client.isdir(filepath)
1066
-
1067
- def isfile(self, filepath: Union[str, Path]) -> bool:
1068
- """Check whether a file path is a file.
1069
-
1070
- Args:
1071
- filepath (str or Path): Path to be checked whether it is a file.
1072
-
1073
- Returns:
1074
- bool: Return ``True`` if ``filepath`` points to a file, ``False``
1075
- otherwise.
1076
- """
1077
- return self.client.isfile(filepath)
1078
-
1079
- def join_path(self, filepath: Union[str, Path],
1080
- *filepaths: Union[str, Path]) -> str:
1081
- """Concatenate all file paths.
1082
-
1083
- Join one or more filepath components intelligently. The return value
1084
- is the concatenation of filepath and any members of *filepaths.
1085
-
1086
- Args:
1087
- filepath (str or Path): Path to be concatenated.
1088
-
1089
- Returns:
1090
- str: The result of concatenation.
1091
- """
1092
- return self.client.join_path(filepath, *filepaths)
1093
-
1094
- @contextmanager
1095
- def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
1096
- """Download data from ``filepath`` and write the data to local path.
1097
-
1098
- ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`. It
1099
- can be called with a ``with`` statement, and when exiting the
1100
- ``with`` statement, the temporary path will be released.
1101
-
1102
- Note:
1103
- If the ``filepath`` is a local path, just return itself.
1104
-
1105
- .. warning::
1106
- ``get_local_path`` is an experimental interface that may change in
1107
- the future.
1108
-
1109
- Args:
1110
- filepath (str or Path): Path of the data to read.
1111
-
1112
- Examples:
1113
- >>> file_client = FileClient(prefix='s3')
1114
- >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path:
1115
- ... # do something here
1116
-
1117
- Yields:
1118
- Iterable[str]: Only yield one path.
1119
- """
1120
- with self.client.get_local_path(str(filepath)) as local_path:
1121
- yield local_path
1122
-
1123
- def list_dir_or_file(self,
1124
- dir_path: Union[str, Path],
1125
- list_dir: bool = True,
1126
- list_file: bool = True,
1127
- suffix: Optional[Union[str, Tuple[str]]] = None,
1128
- recursive: bool = False) -> Iterator[str]:
1129
- """Scan a directory to find the interested directories or files in
1130
- arbitrary order.
1131
-
1132
- Note:
1133
- :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
1134
-
1135
- Args:
1136
- dir_path (str | Path): Path of the directory.
1137
- list_dir (bool): List the directories. Default: True.
1138
- list_file (bool): List the path of files. Default: True.
1139
- suffix (str or tuple[str], optional): File suffix
1140
- that we are interested in. Default: None.
1141
- recursive (bool): If set to True, recursively scan the
1142
- directory. Default: False.
1143
-
1144
- Yields:
1145
- Iterable[str]: A relative path to ``dir_path``.
1146
- """
1147
- yield from self.client.list_dir_or_file(dir_path, list_dir, list_file,
1148
- suffix, recursive)
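
A short usage sketch of the client above, based on the docstrings and examples already in the file; the TmpBackend class is invented for illustration:

from pathlib import Path

# Register a custom backend under a new name and URI prefix, exactly as
# register_backend's docstring describes.
@FileClient.register_backend('tmp', prefixes='tmp')
class TmpBackend(BaseStorageBackend):

    def get(self, filepath):
        return Path(str(filepath).replace('tmp://', '/tmp/')).read_bytes()

    def get_text(self, filepath, encoding='utf-8'):
        return self.get(filepath).decode(encoding)

client = FileClient(backend='tmp')                    # chosen by name
client = FileClient.infer_client(uri='tmp://a.txt')   # or by URI prefix
data = client.get('tmp://a.txt')                      # bytes
text = client.get_text('tmp://a.txt')                 # str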
 
 
 
spaces/Anthos23/hummus/app.py DELETED
@@ -1,38 +0,0 @@
1
- import streamlit as st
2
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TextClassificationPipeline
3
- import operator
4
- import matplotlib.pyplot as plt
5
- import pandas as pd
6
-
7
- def get_sentiment(out):
8
- d = dict()
9
- for k in out:
10
- print(k)
11
- label = k['label']
12
- score = k['score']
13
- d[label] = score
14
-
15
- winning_lab = max(d.items(), key=operator.itemgetter(1))[0]
16
- winning_score = d[winning_lab]
17
-
18
- df = pd.DataFrame.from_dict(d, orient = 'index')
19
- return df #winning_lab, winning_score
20
-
21
- model_name = "mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis"
22
- model = AutoModelForSequenceClassification.from_pretrained(model_name)
23
- tokenizer = AutoTokenizer.from_pretrained(model_name)
24
-
25
- pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True)
26
- text = st.text_area(f'Ciao! This app uses {model_name}.\nEnter your text to test it ❤️')
27
-
28
-
29
- if text:
30
- out = pipe(text)
31
- df = get_sentiment(out[0])
32
- fig, ax = plt.subplots()
33
- c = ['#C34A36', '#FFC75F', '#008F7A']
34
- ax.bar(df.index, df[0], color=c, width=0.4)
35
-
36
- st.pyplot(fig)
37
-
38
- #st.json(get_sentiment(out[0][0]))
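
For reference, pipe(text) with return_all_scores=True returns one list of {'label', 'score'} dicts per input, which is why the code above indexes out[0]; the labels shown are the usual ones for this financial-sentiment model but should be treated as illustrative:

out = [[
    {'label': 'negative', 'score': 0.03},
    {'label': 'neutral', 'score': 0.12},
    {'label': 'positive', 'score': 0.85},
]]
df = get_sentiment(out[0])  # one row per label, indexed by label name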
 
 
 
spaces/AsakuraMizu/moe-tts/text/cantonese.py DELETED
@@ -1,59 +0,0 @@
1
- import re
2
- import cn2an
3
- import opencc
4
-
5
-
6
- converter = opencc.OpenCC('chinese_dialect_lexicons/jyutjyu')
7
-
8
- # List of (Latin alphabet, ipa) pairs:
9
- _latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
10
- ('A', 'ei˥'),
11
- ('B', 'biː˥'),
12
- ('C', 'siː˥'),
13
- ('D', 'tiː˥'),
14
- ('E', 'iː˥'),
15
- ('F', 'e˥fuː˨˩'),
16
- ('G', 'tsiː˥'),
17
- ('H', 'ɪk̚˥tsʰyː˨˩'),
18
- ('I', 'ɐi˥'),
19
- ('J', 'tsei˥'),
20
- ('K', 'kʰei˥'),
21
- ('L', 'e˥llou˨˩'),
22
- ('M', 'ɛːm˥'),
23
- ('N', 'ɛːn˥'),
24
- ('O', 'ou˥'),
25
- ('P', 'pʰiː˥'),
26
- ('Q', 'kʰiːu˥'),
27
- ('R', 'aː˥lou˨˩'),
28
- ('S', 'ɛː˥siː˨˩'),
29
- ('T', 'tʰiː˥'),
30
- ('U', 'juː˥'),
31
- ('V', 'wiː˥'),
32
- ('W', 'tʊk̚˥piː˥juː˥'),
33
- ('X', 'ɪk̚˥siː˨˩'),
34
- ('Y', 'waːi˥'),
35
- ('Z', 'iː˨sɛːt̚˥')
36
- ]]
37
-
38
-
39
- def number_to_cantonese(text):
40
- return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text)
41
-
42
-
43
- def latin_to_ipa(text):
44
- for regex, replacement in _latin_to_ipa:
45
- text = re.sub(regex, replacement, text)
46
- return text
47
-
48
-
49
- def cantonese_to_ipa(text):
50
- text = number_to_cantonese(text.upper())
51
- text = converter.convert(text).replace('-','').replace('$',' ')
52
- text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text)
53
- text = re.sub(r'[、;:]', ',', text)
54
- text = re.sub(r'\s*,\s*', ', ', text)
55
- text = re.sub(r'\s*。\s*', '. ', text)
56
- text = re.sub(r'\s*?\s*', '? ', text)
57
- text = re.sub(r'\s*!\s*', '! ', text)
58
- text = re.sub(r'\s*$', '', text)
59
- return text
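
A quick sanity check of the pipeline above; the exact IPA depends on the OpenCC jyutjyu lexicon, so the comment describes the shape of the result rather than guaranteed output:

# Numbers are spelled out first, then the OpenCC converter is applied,
# and Latin letters and punctuation are mapped at the end.
print(cantonese_to_ipa('你好,ABC 123。'))
# -> a string of Cantonese IPA with ', ' and '. ' punctuation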
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py DELETED
@@ -1,104 +0,0 @@
1
- """
2
- pygments.scanner
3
- ~~~~~~~~~~~~~~~~
4
-
5
- This library implements a regex based scanner. Some languages
6
- like Pascal are easy to parse but have some keywords that
7
- depend on the context. Because of this it's impossible to lex
8
- that just by using a regular expression lexer like the
9
- `RegexLexer`.
10
-
11
- Have a look at the `DelphiLexer` to get an idea of how to use
12
- this scanner.
13
-
14
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
15
- :license: BSD, see LICENSE for details.
16
- """
17
- import re
18
-
19
-
20
- class EndOfText(RuntimeError):
21
- """
22
- Raise if end of text is reached and the user
23
- tried to call a match function.
24
- """
25
-
26
-
27
- class Scanner:
28
- """
29
- Simple scanner
30
-
31
- All method patterns are regular expression strings (not
32
- compiled expressions!)
33
- """
34
-
35
- def __init__(self, text, flags=0):
36
- """
37
- :param text: The text which should be scanned
38
- :param flags: default regular expression flags
39
- """
40
- self.data = text
41
- self.data_length = len(text)
42
- self.start_pos = 0
43
- self.pos = 0
44
- self.flags = flags
45
- self.last = None
46
- self.match = None
47
- self._re_cache = {}
48
-
49
- def eos(self):
50
- """`True` if the scanner reached the end of text."""
51
- return self.pos >= self.data_length
52
- eos = property(eos, eos.__doc__)
53
-
54
- def check(self, pattern):
55
- """
56
- Apply `pattern` on the current position and return
57
- the match object. (Doesn't touch pos). Use this for
58
- lookahead.
59
- """
60
- if self.eos:
61
- raise EndOfText()
62
- if pattern not in self._re_cache:
63
- self._re_cache[pattern] = re.compile(pattern, self.flags)
64
- return self._re_cache[pattern].match(self.data, self.pos)
65
-
66
- def test(self, pattern):
67
- """Apply a pattern on the current position and check
68
- if it matches. Doesn't touch pos.
69
- """
70
- return self.check(pattern) is not None
71
-
72
- def scan(self, pattern):
73
- """
74
- Scan the text for the given pattern and update pos/match
75
- and related fields. The return value is a boolean that
76
- indicates if the pattern matched. The matched value is
77
- stored on the instance as ``match``, the last value is
78
- stored as ``last``. ``start_pos`` is the position of the
79
- pointer before the pattern was matched, ``pos`` is the
80
- end position.
81
- """
82
- if self.eos:
83
- raise EndOfText()
84
- if pattern not in self._re_cache:
85
- self._re_cache[pattern] = re.compile(pattern, self.flags)
86
- self.last = self.match
87
- m = self._re_cache[pattern].match(self.data, self.pos)
88
- if m is None:
89
- return False
90
- self.start_pos = m.start()
91
- self.pos = m.end()
92
- self.match = m.group()
93
- return True
94
-
95
- def get_char(self):
96
- """Scan exactly one char."""
97
- self.scan('.')
98
-
99
- def __repr__(self):
100
- return '<%s %d/%d>' % (
101
- self.__class__.__name__,
102
- self.pos,
103
- self.data_length
104
- )
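
A minimal sketch of the context-sensitive loop the docstring alludes to; the token names and patterns are illustrative, not taken from the DelphiLexer:

scanner = Scanner('let x = 42')
tokens = []
while not scanner.eos:
    if scanner.scan(r'\s+'):
        continue                        # skip whitespace
    elif scanner.scan(r'\d+'):
        tokens.append(('NUMBER', scanner.match))
    elif scanner.scan(r'[A-Za-z_]\w*'):
        tokens.append(('NAME', scanner.match))
    else:
        scanner.get_char()              # consume one char on no match
        tokens.append(('OP', scanner.match))
print(tokens)  # [('NAME', 'let'), ('NAME', 'x'), ('OP', '='), ('NUMBER', '42')]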
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/jupyter.py DELETED
@@ -1,101 +0,0 @@
- from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Sequence
-
- if TYPE_CHECKING:
-     from pip._vendor.rich.console import ConsoleRenderable
-
- from . import get_console
- from .segment import Segment
- from .terminal_theme import DEFAULT_TERMINAL_THEME
-
- if TYPE_CHECKING:
-     from pip._vendor.rich.console import ConsoleRenderable
-
- JUPYTER_HTML_FORMAT = """\
- <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
- """
-
-
- class JupyterRenderable:
-     """A shim to write html to Jupyter notebook."""
-
-     def __init__(self, html: str, text: str) -> None:
-         self.html = html
-         self.text = text
-
-     def _repr_mimebundle_(
-         self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any
-     ) -> Dict[str, str]:
-         data = {"text/plain": self.text, "text/html": self.html}
-         if include:
-             data = {k: v for (k, v) in data.items() if k in include}
-         if exclude:
-             data = {k: v for (k, v) in data.items() if k not in exclude}
-         return data
-
-
- class JupyterMixin:
-     """Add to a Rich renderable to make it render in Jupyter notebook."""
-
-     __slots__ = ()
-
-     def _repr_mimebundle_(
-         self: "ConsoleRenderable",
-         include: Sequence[str],
-         exclude: Sequence[str],
-         **kwargs: Any,
-     ) -> Dict[str, str]:
-         console = get_console()
-         segments = list(console.render(self, console.options))
-         html = _render_segments(segments)
-         text = console._render_buffer(segments)
-         data = {"text/plain": text, "text/html": html}
-         if include:
-             data = {k: v for (k, v) in data.items() if k in include}
-         if exclude:
-             data = {k: v for (k, v) in data.items() if k not in exclude}
-         return data
-
-
- def _render_segments(segments: Iterable[Segment]) -> str:
-     def escape(text: str) -> str:
-         """Escape html."""
-         return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
-
-     fragments: List[str] = []
-     append_fragment = fragments.append
-     theme = DEFAULT_TERMINAL_THEME
-     for text, style, control in Segment.simplify(segments):
-         if control:
-             continue
-         text = escape(text)
-         if style:
-             rule = style.get_html_style(theme)
-             text = f'<span style="{rule}">{text}</span>' if rule else text
-             if style.link:
-                 text = f'<a href="{style.link}" target="_blank">{text}</a>'
-         append_fragment(text)
-
-     code = "".join(fragments)
-     html = JUPYTER_HTML_FORMAT.format(code=code)
-
-     return html
-
-
- def display(segments: Iterable[Segment], text: str) -> None:
-     """Render segments to Jupyter."""
-     html = _render_segments(segments)
-     jupyter_renderable = JupyterRenderable(html, text)
-     try:
-         from IPython.display import display as ipython_display
-
-         ipython_display(jupyter_renderable)
-     except ModuleNotFoundError:
-         # Handle the case where the Console has force_jupyter=True,
-         # but IPython is not installed.
-         pass
-
-
- def print(*args: Any, **kwargs: Any) -> None:
-     """Proxy for Console print."""
-     console = get_console()
-     return console.print(*args, **kwargs)
 
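The shim above relies on IPython's `_repr_mimebundle_` display protocol. A minimal sketch of that protocol, independent of rich (the class and strings are invented for illustration):

    class Greeting:
        def _repr_mimebundle_(self, include=None, exclude=None, **kwargs):
            # Offer two representations; Jupyter picks the richest one it supports.
            return {
                "text/plain": "Hello, world!",
                "text/html": "<b>Hello, world!</b>",
            }

Evaluating `Greeting()` in a notebook cell renders the HTML form, while a plain terminal falls back to the text form, which is exactly how `JupyterRenderable` lets rich output degrade gracefully.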
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_adapters.py DELETED
@@ -1,170 +0,0 @@
- from contextlib import suppress
- from io import TextIOWrapper
-
- from . import abc
-
-
- class SpecLoaderAdapter:
-     """
-     Adapt a package spec to adapt the underlying loader.
-     """
-
-     def __init__(self, spec, adapter=lambda spec: spec.loader):
-         self.spec = spec
-         self.loader = adapter(spec)
-
-     def __getattr__(self, name):
-         return getattr(self.spec, name)
-
-
- class TraversableResourcesLoader:
-     """
-     Adapt a loader to provide TraversableResources.
-     """
-
-     def __init__(self, spec):
-         self.spec = spec
-
-     def get_resource_reader(self, name):
-         return CompatibilityFiles(self.spec)._native()
-
-
- def _io_wrapper(file, mode='r', *args, **kwargs):
-     if mode == 'r':
-         return TextIOWrapper(file, *args, **kwargs)
-     elif mode == 'rb':
-         return file
-     raise ValueError(
-         "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
-     )
-
-
- class CompatibilityFiles:
-     """
-     Adapter for an existing or non-existent resource reader
-     to provide a compatibility .files().
-     """
-
-     class SpecPath(abc.Traversable):
-         """
-         Path tied to a module spec.
-         Can be read and exposes the resource reader children.
-         """
-
-         def __init__(self, spec, reader):
-             self._spec = spec
-             self._reader = reader
-
-         def iterdir(self):
-             if not self._reader:
-                 return iter(())
-             return iter(
-                 CompatibilityFiles.ChildPath(self._reader, path)
-                 for path in self._reader.contents()
-             )
-
-         def is_file(self):
-             return False
-
-         is_dir = is_file
-
-         def joinpath(self, other):
-             if not self._reader:
-                 return CompatibilityFiles.OrphanPath(other)
-             return CompatibilityFiles.ChildPath(self._reader, other)
-
-         @property
-         def name(self):
-             return self._spec.name
-
-         def open(self, mode='r', *args, **kwargs):
-             return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
-
-     class ChildPath(abc.Traversable):
-         """
-         Path tied to a resource reader child.
-         Can be read but doesn't expose any meaningful children.
-         """
-
-         def __init__(self, reader, name):
-             self._reader = reader
-             self._name = name
-
-         def iterdir(self):
-             return iter(())
-
-         def is_file(self):
-             return self._reader.is_resource(self.name)
-
-         def is_dir(self):
-             return not self.is_file()
-
-         def joinpath(self, other):
-             return CompatibilityFiles.OrphanPath(self.name, other)
-
-         @property
-         def name(self):
-             return self._name
-
-         def open(self, mode='r', *args, **kwargs):
-             return _io_wrapper(
-                 self._reader.open_resource(self.name), mode, *args, **kwargs
-             )
-
-     class OrphanPath(abc.Traversable):
-         """
-         Orphan path, not tied to a module spec or resource reader.
-         Can't be read and doesn't expose any meaningful children.
-         """
-
-         def __init__(self, *path_parts):
-             if len(path_parts) < 1:
-                 raise ValueError('Need at least one path part to construct a path')
-             self._path = path_parts
-
-         def iterdir(self):
-             return iter(())
-
-         def is_file(self):
-             return False
-
-         is_dir = is_file
-
-         def joinpath(self, other):
-             return CompatibilityFiles.OrphanPath(*self._path, other)
-
-         @property
-         def name(self):
-             return self._path[-1]
-
-         def open(self, mode='r', *args, **kwargs):
-             raise FileNotFoundError("Can't open orphan path")
-
-     def __init__(self, spec):
-         self.spec = spec
-
-     @property
-     def _reader(self):
-         with suppress(AttributeError):
-             return self.spec.loader.get_resource_reader(self.spec.name)
-
-     def _native(self):
-         """
-         Return the native reader if it supports files().
-         """
-         reader = self._reader
-         return reader if hasattr(reader, 'files') else self
-
-     def __getattr__(self, attr):
-         return getattr(self._reader, attr)
-
-     def files(self):
-         return CompatibilityFiles.SpecPath(self.spec, self._reader)
-
-
- def wrap_spec(package):
-     """
-     Construct a package spec with traversable compatibility
-     on the spec/loader/reader.
-     """
-     return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
 
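A short sketch of how the adapter above is meant to be used: `wrap_spec()` wraps an imported package so a `files()`-style traversable view is available even when the underlying loader has no native one (the choice of `email` as the package is arbitrary, and importing the vendored module directly is for illustration only):

    import email
    from setuptools._vendor.importlib_resources._adapters import wrap_spec

    spec = wrap_spec(email)
    reader = spec.loader.get_resource_reader(spec.name)
    root = reader.files()        # a Traversable, usable like a pathlib.Path
    for entry in root.iterdir():
        print(entry.name)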
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/unicode.py DELETED
@@ -1,352 +0,0 @@
- # unicode.py
-
- import sys
- from itertools import filterfalse
- from typing import List, Tuple, Union
-
-
- class _lazyclassproperty:
-     def __init__(self, fn):
-         self.fn = fn
-         self.__doc__ = fn.__doc__
-         self.__name__ = fn.__name__
-
-     def __get__(self, obj, cls):
-         if cls is None:
-             cls = type(obj)
-         if not hasattr(cls, "_intern") or any(
-             cls._intern is getattr(superclass, "_intern", [])
-             for superclass in cls.__mro__[1:]
-         ):
-             cls._intern = {}
-         attrname = self.fn.__name__
-         if attrname not in cls._intern:
-             cls._intern[attrname] = self.fn(cls)
-         return cls._intern[attrname]
-
-
- UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
-
-
- class unicode_set:
-     """
-     A set of Unicode characters, for language-specific strings for
-     ``alphas``, ``nums``, ``alphanums``, and ``printables``.
-     A unicode_set is defined by a list of ranges in the Unicode character
-     set, in a class attribute ``_ranges``. Ranges can be specified using
-     2-tuples or a 1-tuple, such as::
-
-         _ranges = [
-             (0x0020, 0x007e),
-             (0x00a0, 0x00ff),
-             (0x0100,),
-         ]
-
-     Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
-
-     A unicode set can also be defined using multiple inheritance of other unicode sets::
-
-         class CJK(Chinese, Japanese, Korean):
-             pass
-     """
-
-     _ranges: UnicodeRangeList = []
-
-     @_lazyclassproperty
-     def _chars_for_ranges(cls):
-         ret = []
-         for cc in cls.__mro__:
-             if cc is unicode_set:
-                 break
-             for rr in getattr(cc, "_ranges", ()):
-                 ret.extend(range(rr[0], rr[-1] + 1))
-         return [chr(c) for c in sorted(set(ret))]
-
-     @_lazyclassproperty
-     def printables(cls):
-         "all non-whitespace characters in this range"
-         return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
-
-     @_lazyclassproperty
-     def alphas(cls):
-         "all alphabetic characters in this range"
-         return "".join(filter(str.isalpha, cls._chars_for_ranges))
-
-     @_lazyclassproperty
-     def nums(cls):
-         "all numeric digit characters in this range"
-         return "".join(filter(str.isdigit, cls._chars_for_ranges))
-
-     @_lazyclassproperty
-     def alphanums(cls):
-         "all alphanumeric characters in this range"
-         return cls.alphas + cls.nums
-
-     @_lazyclassproperty
-     def identchars(cls):
-         "all characters in this range that are valid identifier characters, plus underscore '_'"
-         return "".join(
-             sorted(
-                 set(
-                     "".join(filter(str.isidentifier, cls._chars_for_ranges))
-                     + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
-                     + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
-                     + "_"
-                 )
-             )
-         )
-
-     @_lazyclassproperty
-     def identbodychars(cls):
-         """
-         all characters in this range that are valid identifier body characters,
-         plus the digits 0-9
-         """
-         return "".join(
-             sorted(
-                 set(
-                     cls.identchars
-                     + "0123456789"
-                     + "".join(
-                         [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
-                     )
-                 )
-             )
-         )
-
-
- class pyparsing_unicode(unicode_set):
-     """
-     A namespace class for defining common language unicode_sets.
-     """
-
-     # fmt: off
-
-     # define ranges in language character sets
-     _ranges: UnicodeRangeList = [
-         (0x0020, sys.maxunicode),
-     ]
-
-     class BasicMultilingualPlane(unicode_set):
-         "Unicode set for the Basic Multilingual Plane"
-         _ranges: UnicodeRangeList = [
-             (0x0020, 0xFFFF),
-         ]
-
-     class Latin1(unicode_set):
-         "Unicode set for Latin-1 Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0020, 0x007E),
-             (0x00A0, 0x00FF),
-         ]
-
-     class LatinA(unicode_set):
-         "Unicode set for Latin-A Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0100, 0x017F),
-         ]
-
-     class LatinB(unicode_set):
-         "Unicode set for Latin-B Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0180, 0x024F),
-         ]
-
-     class Greek(unicode_set):
-         "Unicode set for Greek Unicode Character Ranges"
-         _ranges: UnicodeRangeList = [
-             (0x0342, 0x0345),
-             (0x0370, 0x0377),
-             (0x037A, 0x037F),
-             (0x0384, 0x038A),
-             (0x038C,),
-             (0x038E, 0x03A1),
-             (0x03A3, 0x03E1),
-             (0x03F0, 0x03FF),
-             (0x1D26, 0x1D2A),
-             (0x1D5E,),
-             (0x1D60,),
-             (0x1D66, 0x1D6A),
-             (0x1F00, 0x1F15),
-             (0x1F18, 0x1F1D),
-             (0x1F20, 0x1F45),
-             (0x1F48, 0x1F4D),
-             (0x1F50, 0x1F57),
-             (0x1F59,),
-             (0x1F5B,),
-             (0x1F5D,),
-             (0x1F5F, 0x1F7D),
-             (0x1F80, 0x1FB4),
-             (0x1FB6, 0x1FC4),
-             (0x1FC6, 0x1FD3),
-             (0x1FD6, 0x1FDB),
-             (0x1FDD, 0x1FEF),
-             (0x1FF2, 0x1FF4),
-             (0x1FF6, 0x1FFE),
-             (0x2129,),
-             (0x2719, 0x271A),
-             (0xAB65,),
-             (0x10140, 0x1018D),
-             (0x101A0,),
-             (0x1D200, 0x1D245),
-             (0x1F7A1, 0x1F7A7),
-         ]
-
-     class Cyrillic(unicode_set):
-         "Unicode set for Cyrillic Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0400, 0x052F),
-             (0x1C80, 0x1C88),
-             (0x1D2B,),
-             (0x1D78,),
-             (0x2DE0, 0x2DFF),
-             (0xA640, 0xA672),
-             (0xA674, 0xA69F),
-             (0xFE2E, 0xFE2F),
-         ]
-
-     class Chinese(unicode_set):
-         "Unicode set for Chinese Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x2E80, 0x2E99),
-             (0x2E9B, 0x2EF3),
-             (0x31C0, 0x31E3),
-             (0x3400, 0x4DB5),
-             (0x4E00, 0x9FEF),
-             (0xA700, 0xA707),
-             (0xF900, 0xFA6D),
-             (0xFA70, 0xFAD9),
-             (0x16FE2, 0x16FE3),
-             (0x1F210, 0x1F212),
-             (0x1F214, 0x1F23B),
-             (0x1F240, 0x1F248),
-             (0x20000, 0x2A6D6),
-             (0x2A700, 0x2B734),
-             (0x2B740, 0x2B81D),
-             (0x2B820, 0x2CEA1),
-             (0x2CEB0, 0x2EBE0),
-             (0x2F800, 0x2FA1D),
-         ]
-
-     class Japanese(unicode_set):
-         "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
-         _ranges: UnicodeRangeList = []
-
-         class Kanji(unicode_set):
-             "Unicode set for Kanji Unicode Character Range"
-             _ranges: UnicodeRangeList = [
-                 (0x4E00, 0x9FBF),
-                 (0x3000, 0x303F),
-             ]
-
-         class Hiragana(unicode_set):
-             "Unicode set for Hiragana Unicode Character Range"
-             _ranges: UnicodeRangeList = [
-                 (0x3041, 0x3096),
-                 (0x3099, 0x30A0),
-                 (0x30FC,),
-                 (0xFF70,),
-                 (0x1B001,),
-                 (0x1B150, 0x1B152),
-                 (0x1F200,),
-             ]
-
-         class Katakana(unicode_set):
-             "Unicode set for Katakana Unicode Character Range"
-             _ranges: UnicodeRangeList = [
-                 (0x3099, 0x309C),
-                 (0x30A0, 0x30FF),
-                 (0x31F0, 0x31FF),
-                 (0x32D0, 0x32FE),
-                 (0xFF65, 0xFF9F),
-                 (0x1B000,),
-                 (0x1B164, 0x1B167),
-                 (0x1F201, 0x1F202),
-                 (0x1F213,),
-             ]
-
-     class Hangul(unicode_set):
-         "Unicode set for Hangul (Korean) Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x1100, 0x11FF),
-             (0x302E, 0x302F),
-             (0x3131, 0x318E),
-             (0x3200, 0x321C),
-             (0x3260, 0x327B),
-             (0x327E,),
-             (0xA960, 0xA97C),
-             (0xAC00, 0xD7A3),
-             (0xD7B0, 0xD7C6),
-             (0xD7CB, 0xD7FB),
-             (0xFFA0, 0xFFBE),
-             (0xFFC2, 0xFFC7),
-             (0xFFCA, 0xFFCF),
-             (0xFFD2, 0xFFD7),
-             (0xFFDA, 0xFFDC),
-         ]
-
-     Korean = Hangul
-
-     class CJK(Chinese, Japanese, Hangul):
-         "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
-
-     class Thai(unicode_set):
-         "Unicode set for Thai Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0E01, 0x0E3A),
-             (0x0E3F, 0x0E5B)
-         ]
-
-     class Arabic(unicode_set):
-         "Unicode set for Arabic Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0600, 0x061B),
-             (0x061E, 0x06FF),
-             (0x0700, 0x077F),
-         ]
-
-     class Hebrew(unicode_set):
-         "Unicode set for Hebrew Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0591, 0x05C7),
-             (0x05D0, 0x05EA),
-             (0x05EF, 0x05F4),
-             (0xFB1D, 0xFB36),
-             (0xFB38, 0xFB3C),
-             (0xFB3E,),
-             (0xFB40, 0xFB41),
-             (0xFB43, 0xFB44),
-             (0xFB46, 0xFB4F),
-         ]
-
-     class Devanagari(unicode_set):
-         "Unicode set for Devanagari Unicode Character Range"
-         _ranges: UnicodeRangeList = [
-             (0x0900, 0x097F),
-             (0xA8E0, 0xA8FF)
-         ]
-
-     # fmt: on
-
-
- pyparsing_unicode.Japanese._ranges = (
-     pyparsing_unicode.Japanese.Kanji._ranges
-     + pyparsing_unicode.Japanese.Hiragana._ranges
-     + pyparsing_unicode.Japanese.Katakana._ranges
- )
-
- pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
-
- # add language identifiers using language Unicode
- pyparsing_unicode.العربية = pyparsing_unicode.Arabic
- pyparsing_unicode.中文 = pyparsing_unicode.Chinese
- pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
- pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
- pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
- pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
- pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
- pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
- pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
- pyparsing_unicode.한국어 = pyparsing_unicode.Korean
- pyparsing_unicode.ไทย = pyparsing_unicode.Thai
- pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
 
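Because every derived property (`alphas`, `nums`, `printables`, and so on) is computed lazily from `_ranges`, defining a new language set is just a matter of declaring ranges. A small sketch (the Runic block is an arbitrary example, not one of the sets shipped above):

    from setuptools._vendor.pyparsing.unicode import unicode_set

    class Runic(unicode_set):
        "Unicode set for the Runic block"
        _ranges = [(0x16A0, 0x16F8)]

    print(len(Runic.printables))   # count of non-whitespace runic characters
    print(Runic.alphas[:10])       # first few alphabetic characters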
 
spaces/Atualli/yoloxTeste/checkYoloxGPU.sh DELETED
@@ -1,16 +0,0 @@
- #!/bin/sh
- # put the user site-packages on Python's module search path
- export PYTHONPATH=/home/atualli/.local/lib/python3.8/site-packages:$PYTHONPATH
- cd ~/Projetos/huggingface/yoloxTeste_GPU
- SERVER=192.168.0.153
- PORT=8081
-
- if lsof -Pi :$PORT -sTCP:LISTEN -t >/dev/null ; then
-     echo "running"
- else
-     ./telegramCrise.sh "reiniciando_yolox_GPU_linux_192.168.0.153:8081"
-     pkill -f app1.py
-     python app1.py &
-     echo "not running"
- fi
-
-
 
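For readers porting the watchdog off the shell, a rough Python equivalent of the same check-then-restart loop (hypothetical sketch; it probes the port with a TCP connect instead of lsof, and omits the Telegram alert):

    import socket
    import subprocess

    PORT = 8081

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(2)
        running = sock.connect_ex(("127.0.0.1", PORT)) == 0

    if running:
        print("running")
    else:
        subprocess.run(["pkill", "-f", "app1.py"])   # stop any stale process
        subprocess.Popen(["python", "app1.py"])      # relaunch in the background
        print("not running")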
 
spaces/Awiny/Image2Paragraph/models/grit_src/image_dense_captions.py DELETED
@@ -1,69 +0,0 @@
- import argparse
- import multiprocessing as mp
- import os
- import time
- import cv2
- import tqdm
- import sys
-
- from detectron2.config import get_cfg
- from detectron2.data.detection_utils import read_image
- from detectron2.utils.logger import setup_logger
-
- sys.path.insert(0, 'models/grit_src/third_party/CenterNet2/projects/CenterNet2/')
- from centernet.config import add_centernet_config
- from models.grit_src.grit.config import add_grit_config
-
- from models.grit_src.grit.predictor import VisualizationDemo
- import json
- from utils.util import resize_long_edge_cv2
-
-
- # constants
- WINDOW_NAME = "GRiT"
-
-
- def dense_pred_to_caption(predictions):
-     boxes = predictions["instances"].pred_boxes if predictions["instances"].has("pred_boxes") else None
-     object_description = predictions["instances"].pred_object_descriptions.data
-     new_caption = ""
-     for i in range(len(object_description)):
-         new_caption += (object_description[i] + ": " + str([int(a) for a in boxes[i].tensor.cpu().detach().numpy()[0]])) + "; "
-     return new_caption
-
-
- def setup_cfg(args):
-     cfg = get_cfg()
-     if args["cpu"]:
-         cfg.MODEL.DEVICE = "cpu"
-     add_centernet_config(cfg)
-     add_grit_config(cfg)
-     cfg.merge_from_file(args["config_file"])
-     cfg.merge_from_list(args["opts"])
-     # Set score_threshold for builtin models
-     cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args["confidence_threshold"]
-     cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args["confidence_threshold"]
-     if args["test_task"]:
-         cfg.MODEL.TEST_TASK = args["test_task"]
-     cfg.MODEL.BEAM_SIZE = 1
-     cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False
-     cfg.USE_ACT_CHECKPOINT = False
-     cfg.freeze()
-     return cfg
-
-
- def get_parser(device):
-     arg_dict = {
-         'config_file': "models/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml",
-         'cpu': False,
-         'confidence_threshold': 0.5,
-         'test_task': 'DenseCap',
-         'opts': ["MODEL.WEIGHTS", "pretrained_models/grit_b_densecap_objectdet.pth"],
-     }
-     if device == "cpu":
-         arg_dict["cpu"] = True
-     return arg_dict
-
-
- def image_caption_api(image_src, device):
-     args2 = get_parser(device)
-     cfg = setup_cfg(args2)
-     demo = VisualizationDemo(cfg)
-     if image_src:
-         img = read_image(image_src, format="BGR")
-         img = resize_long_edge_cv2(img, 384)
-         predictions, visualized_output = demo.run_on_image(img)
-         new_caption = dense_pred_to_caption(predictions)
-     return new_caption
 
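The module's public entry point is `image_caption_api`. A call sketch (the image path is made up, and the GRiT config plus the `pretrained_models/grit_b_densecap_objectdet.pth` weights referenced in `get_parser()` must exist locally):

    from models.grit_src.image_dense_captions import image_caption_api

    caption = image_caption_api("examples/street.jpg", device="cuda")
    print(caption)
    # Illustrative output shape:
    # "a red car: [34, 120, 210, 260]; a man walking: [301, 88, 352, 240]; "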
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp DELETED
@@ -1,507 +0,0 @@
- // Copyright (c) Facebook, Inc. and its affiliates.
- #include "cocoeval.h"
- #include <time.h>
- #include <algorithm>
- #include <cstdint>
- #include <numeric>
-
- using namespace pybind11::literals;
-
- namespace detectron2 {
-
- namespace COCOeval {
-
- // Sort detections from highest score to lowest, such that
- // detection_instances[detection_sorted_indices[t]] >=
- // detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match
- // original COCO API
- void SortInstancesByDetectionScore(
-     const std::vector<InstanceAnnotation>& detection_instances,
-     std::vector<uint64_t>* detection_sorted_indices) {
-   detection_sorted_indices->resize(detection_instances.size());
-   std::iota(
-       detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
-   std::stable_sort(
-       detection_sorted_indices->begin(),
-       detection_sorted_indices->end(),
-       [&detection_instances](size_t j1, size_t j2) {
-         return detection_instances[j1].score > detection_instances[j2].score;
-       });
- }
-
- // Partition the ground truth objects based on whether or not to ignore them
- // based on area
- void SortInstancesByIgnore(
-     const std::array<double, 2>& area_range,
-     const std::vector<InstanceAnnotation>& ground_truth_instances,
-     std::vector<uint64_t>* ground_truth_sorted_indices,
-     std::vector<bool>* ignores) {
-   ignores->clear();
-   ignores->reserve(ground_truth_instances.size());
-   for (auto o : ground_truth_instances) {
-     ignores->push_back(
-         o.ignore || o.area < area_range[0] || o.area > area_range[1]);
-   }
-
-   ground_truth_sorted_indices->resize(ground_truth_instances.size());
-   std::iota(
-       ground_truth_sorted_indices->begin(),
-       ground_truth_sorted_indices->end(),
-       0);
-   std::stable_sort(
-       ground_truth_sorted_indices->begin(),
-       ground_truth_sorted_indices->end(),
-       [&ignores](size_t j1, size_t j2) {
-         return (int)(*ignores)[j1] < (int)(*ignores)[j2];
-       });
- }
-
- // For each IOU threshold, greedily match each detected instance to a ground
- // truth instance (if possible) and store the results
- void MatchDetectionsToGroundTruth(
-     const std::vector<InstanceAnnotation>& detection_instances,
-     const std::vector<uint64_t>& detection_sorted_indices,
-     const std::vector<InstanceAnnotation>& ground_truth_instances,
-     const std::vector<uint64_t>& ground_truth_sorted_indices,
-     const std::vector<bool>& ignores,
-     const std::vector<std::vector<double>>& ious,
-     const std::vector<double>& iou_thresholds,
-     const std::array<double, 2>& area_range,
-     ImageEvaluation* results) {
-   // Initialize memory to store return data matches and ignore
-   const int num_iou_thresholds = iou_thresholds.size();
-   const int num_ground_truth = ground_truth_sorted_indices.size();
-   const int num_detections = detection_sorted_indices.size();
-   std::vector<uint64_t> ground_truth_matches(
-       num_iou_thresholds * num_ground_truth, 0);
-   std::vector<uint64_t>& detection_matches = results->detection_matches;
-   std::vector<bool>& detection_ignores = results->detection_ignores;
-   std::vector<bool>& ground_truth_ignores = results->ground_truth_ignores;
-   detection_matches.resize(num_iou_thresholds * num_detections, 0);
-   detection_ignores.resize(num_iou_thresholds * num_detections, false);
-   ground_truth_ignores.resize(num_ground_truth);
-   for (auto g = 0; g < num_ground_truth; ++g) {
-     ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];
-   }
-
-   for (auto t = 0; t < num_iou_thresholds; ++t) {
-     for (auto d = 0; d < num_detections; ++d) {
-       // information about best match so far (match=-1 -> unmatched)
-       double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);
-       int match = -1;
-       for (auto g = 0; g < num_ground_truth; ++g) {
-         // if this ground truth instance is already matched and not a
-         // crowd, it cannot be matched to another detection
-         if (ground_truth_matches[t * num_ground_truth + g] > 0 &&
-             !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {
-           continue;
-         }
-
-         // if detected instance matched to a regular ground truth
-         // instance, we can break on the first ground truth instance
-         // tagged as ignore (because they are sorted by the ignore tag)
-         if (match >= 0 && !ground_truth_ignores[match] &&
-             ground_truth_ignores[g]) {
-           break;
-         }
-
-         // if IOU overlap is the best so far, store the match appropriately
-         if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {
-           best_iou = ious[d][ground_truth_sorted_indices[g]];
-           match = g;
-         }
-       }
-       // if match was made, store id of match for both detection and
-       // ground truth
-       if (match >= 0) {
-         detection_ignores[t * num_detections + d] = ground_truth_ignores[match];
-         detection_matches[t * num_detections + d] =
-             ground_truth_instances[ground_truth_sorted_indices[match]].id;
-         ground_truth_matches[t * num_ground_truth + match] =
-             detection_instances[detection_sorted_indices[d]].id;
-       }
-
-       // set unmatched detections outside of area range to ignore
-       const InstanceAnnotation& detection =
-           detection_instances[detection_sorted_indices[d]];
-       detection_ignores[t * num_detections + d] =
-           detection_ignores[t * num_detections + d] ||
-           (detection_matches[t * num_detections + d] == 0 &&
-            (detection.area < area_range[0] || detection.area > area_range[1]));
-     }
-   }
-
-   // store detection score results
-   results->detection_scores.resize(detection_sorted_indices.size());
-   for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {
-     results->detection_scores[d] =
-         detection_instances[detection_sorted_indices[d]].score;
-   }
- }
-
- std::vector<ImageEvaluation> EvaluateImages(
-     const std::vector<std::array<double, 2>>& area_ranges,
-     int max_detections,
-     const std::vector<double>& iou_thresholds,
-     const ImageCategoryInstances<std::vector<double>>& image_category_ious,
-     const ImageCategoryInstances<InstanceAnnotation>&
-         image_category_ground_truth_instances,
-     const ImageCategoryInstances<InstanceAnnotation>&
-         image_category_detection_instances) {
-   const int num_area_ranges = area_ranges.size();
-   const int num_images = image_category_ground_truth_instances.size();
-   const int num_categories =
-       image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;
-   std::vector<uint64_t> detection_sorted_indices;
-   std::vector<uint64_t> ground_truth_sorted_indices;
-   std::vector<bool> ignores;
-   std::vector<ImageEvaluation> results_all(
-       num_images * num_area_ranges * num_categories);
-
-   // Store results for each image, category, and area range combination. Results
-   // for each IOU threshold are packed into the same ImageEvaluation object
-   for (auto i = 0; i < num_images; ++i) {
-     for (auto c = 0; c < num_categories; ++c) {
-       const std::vector<InstanceAnnotation>& ground_truth_instances =
-           image_category_ground_truth_instances[i][c];
-       const std::vector<InstanceAnnotation>& detection_instances =
-           image_category_detection_instances[i][c];
-
-       SortInstancesByDetectionScore(
-           detection_instances, &detection_sorted_indices);
-       if ((int)detection_sorted_indices.size() > max_detections) {
-         detection_sorted_indices.resize(max_detections);
-       }
-
-       for (size_t a = 0; a < area_ranges.size(); ++a) {
-         SortInstancesByIgnore(
-             area_ranges[a],
-             ground_truth_instances,
-             &ground_truth_sorted_indices,
-             &ignores);
-
-         MatchDetectionsToGroundTruth(
-             detection_instances,
-             detection_sorted_indices,
-             ground_truth_instances,
-             ground_truth_sorted_indices,
-             ignores,
-             image_category_ious[i][c],
-             iou_thresholds,
-             area_ranges[a],
-             &results_all
-                 [c * num_area_ranges * num_images + a * num_images + i]);
-       }
-     }
-   }
-
-   return results_all;
- }
-
- // Convert a python list to a vector
- template <typename T>
- std::vector<T> list_to_vec(const py::list& l) {
-   std::vector<T> v(py::len(l));
-   for (int i = 0; i < (int)py::len(l); ++i) {
-     v[i] = l[i].cast<T>();
-   }
-   return v;
- }
-
- // Helper function to Accumulate()
- // Considers the evaluation results applicable to a particular category, area
- // range, and max_detections parameter setting, which begin at
- // evaluations[evaluation_index]. Extracts a sorted list of length n of all
- // applicable detection instances concatenated across all images in the dataset,
- // which are represented by the outputs evaluation_indices, detection_scores,
- // image_detection_indices, and detection_sorted_indices--all of which are
- // length n. evaluation_indices[i] stores the applicable index into
- // evaluations[] for instance i, which has detection score detection_score[i],
- // and is the image_detection_indices[i]'th of the list of detections
- // for the image containing i. detection_sorted_indices[] defines a sorted
- // permutation of the 3 other outputs
- int BuildSortedDetectionList(
-     const std::vector<ImageEvaluation>& evaluations,
-     const int64_t evaluation_index,
-     const int64_t num_images,
-     const int max_detections,
-     std::vector<uint64_t>* evaluation_indices,
-     std::vector<double>* detection_scores,
-     std::vector<uint64_t>* detection_sorted_indices,
-     std::vector<uint64_t>* image_detection_indices) {
-   assert(evaluations.size() >= evaluation_index + num_images);
-
-   // Extract a list of object instances of the applicable category, area
-   // range, and max detections requirements such that they can be sorted
-   image_detection_indices->clear();
-   evaluation_indices->clear();
-   detection_scores->clear();
-   image_detection_indices->reserve(num_images * max_detections);
-   evaluation_indices->reserve(num_images * max_detections);
-   detection_scores->reserve(num_images * max_detections);
-   int num_valid_ground_truth = 0;
-   for (auto i = 0; i < num_images; ++i) {
-     const ImageEvaluation& evaluation = evaluations[evaluation_index + i];
-
-     for (int d = 0;
-          d < (int)evaluation.detection_scores.size() && d < max_detections;
-          ++d) { // detected instances
-       evaluation_indices->push_back(evaluation_index + i);
-       image_detection_indices->push_back(d);
-       detection_scores->push_back(evaluation.detection_scores[d]);
-     }
-     for (auto ground_truth_ignore : evaluation.ground_truth_ignores) {
-       if (!ground_truth_ignore) {
-         ++num_valid_ground_truth;
-       }
-     }
-   }
-
-   // Sort detections by decreasing score, using stable sort to match
-   // python implementation
-   detection_sorted_indices->resize(detection_scores->size());
-   std::iota(
-       detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
-   std::stable_sort(
-       detection_sorted_indices->begin(),
-       detection_sorted_indices->end(),
-       [&detection_scores](size_t j1, size_t j2) {
-         return (*detection_scores)[j1] > (*detection_scores)[j2];
-       });
-
-   return num_valid_ground_truth;
- }
-
- // Helper function to Accumulate()
- // Compute a precision recall curve given a sorted list of detected instances
- // encoded in evaluations, evaluation_indices, detection_scores,
- // detection_sorted_indices, image_detection_indices (see
- // BuildSortedDetectionList()). Using vectors precisions and recalls
- // and temporary storage, output the results into precisions_out, recalls_out,
- // and scores_out, which are large buffers containing many precision/recall curves
- // for all possible parameter settings, with precisions_out_index and
- // recalls_out_index defining the applicable indices to store results.
- void ComputePrecisionRecallCurve(
-     const int64_t precisions_out_index,
-     const int64_t precisions_out_stride,
-     const int64_t recalls_out_index,
-     const std::vector<double>& recall_thresholds,
-     const int iou_threshold_index,
-     const int num_iou_thresholds,
-     const int num_valid_ground_truth,
-     const std::vector<ImageEvaluation>& evaluations,
-     const std::vector<uint64_t>& evaluation_indices,
-     const std::vector<double>& detection_scores,
-     const std::vector<uint64_t>& detection_sorted_indices,
-     const std::vector<uint64_t>& image_detection_indices,
-     std::vector<double>* precisions,
-     std::vector<double>* recalls,
-     std::vector<double>* precisions_out,
-     std::vector<double>* scores_out,
-     std::vector<double>* recalls_out) {
-   assert(recalls_out->size() > recalls_out_index);
-
-   // Compute precision/recall for each instance in the sorted list of detections
-   int64_t true_positives_sum = 0, false_positives_sum = 0;
-   precisions->clear();
-   recalls->clear();
-   precisions->reserve(detection_sorted_indices.size());
-   recalls->reserve(detection_sorted_indices.size());
-   assert(!evaluations.empty() || detection_sorted_indices.empty());
-   for (auto detection_sorted_index : detection_sorted_indices) {
-     const ImageEvaluation& evaluation =
-         evaluations[evaluation_indices[detection_sorted_index]];
-     const auto num_detections =
-         evaluation.detection_matches.size() / num_iou_thresholds;
-     const auto detection_index = iou_threshold_index * num_detections +
-         image_detection_indices[detection_sorted_index];
-     assert(evaluation.detection_matches.size() > detection_index);
-     assert(evaluation.detection_ignores.size() > detection_index);
-     const int64_t detection_match =
-         evaluation.detection_matches[detection_index];
-     const bool detection_ignores =
-         evaluation.detection_ignores[detection_index];
-     const auto true_positive = detection_match > 0 && !detection_ignores;
-     const auto false_positive = detection_match == 0 && !detection_ignores;
-     if (true_positive) {
-       ++true_positives_sum;
-     }
-     if (false_positive) {
-       ++false_positives_sum;
-     }
-
-     const double recall =
-         static_cast<double>(true_positives_sum) / num_valid_ground_truth;
-     recalls->push_back(recall);
-     const int64_t num_valid_detections =
-         true_positives_sum + false_positives_sum;
-     const double precision = num_valid_detections > 0
-         ? static_cast<double>(true_positives_sum) / num_valid_detections
-         : 0.0;
-     precisions->push_back(precision);
-   }
-
-   (*recalls_out)[recalls_out_index] = !recalls->empty() ? recalls->back() : 0;
-
-   for (int64_t i = static_cast<int64_t>(precisions->size()) - 1; i > 0; --i) {
-     if ((*precisions)[i] > (*precisions)[i - 1]) {
-       (*precisions)[i - 1] = (*precisions)[i];
-     }
-   }
-
-   // Sample the per instance precision/recall list at each recall threshold
-   for (size_t r = 0; r < recall_thresholds.size(); ++r) {
-     // first index in recalls >= recall_thresholds[r]
-     std::vector<double>::iterator low = std::lower_bound(
-         recalls->begin(), recalls->end(), recall_thresholds[r]);
-     size_t precisions_index = low - recalls->begin();
-
-     const auto results_ind = precisions_out_index + r * precisions_out_stride;
-     assert(results_ind < precisions_out->size());
-     assert(results_ind < scores_out->size());
-     if (precisions_index < precisions->size()) {
-       (*precisions_out)[results_ind] = (*precisions)[precisions_index];
-       (*scores_out)[results_ind] =
-           detection_scores[detection_sorted_indices[precisions_index]];
-     } else {
-       (*precisions_out)[results_ind] = 0;
-       (*scores_out)[results_ind] = 0;
-     }
-   }
- }
-
- py::dict Accumulate(
-     const py::object& params,
-     const std::vector<ImageEvaluation>& evaluations) {
-   const std::vector<double> recall_thresholds =
-       list_to_vec<double>(params.attr("recThrs"));
-   const std::vector<int> max_detections =
-       list_to_vec<int>(params.attr("maxDets"));
-   const int num_iou_thresholds = py::len(params.attr("iouThrs"));
-   const int num_recall_thresholds = py::len(params.attr("recThrs"));
-   const int num_categories = params.attr("useCats").cast<int>() == 1
-       ? py::len(params.attr("catIds"))
-       : 1;
-   const int num_area_ranges = py::len(params.attr("areaRng"));
-   const int num_max_detections = py::len(params.attr("maxDets"));
-   const int num_images = py::len(params.attr("imgIds"));
-
-   std::vector<double> precisions_out(
-       num_iou_thresholds * num_recall_thresholds * num_categories *
-           num_area_ranges * num_max_detections,
-       -1);
-   std::vector<double> recalls_out(
-       num_iou_thresholds * num_categories * num_area_ranges *
-           num_max_detections,
-       -1);
-   std::vector<double> scores_out(
-       num_iou_thresholds * num_recall_thresholds * num_categories *
-           num_area_ranges * num_max_detections,
-       -1);
-
-   // Consider the list of all detected instances in the entire dataset in one
-   // large list. evaluation_indices, detection_scores,
-   // image_detection_indices, and detection_sorted_indices all have the same
-   // length as this list, such that each entry corresponds to one detected
-   // instance
-   std::vector<uint64_t> evaluation_indices; // indices into evaluations[]
-   std::vector<double> detection_scores; // detection scores of each instance
-   std::vector<uint64_t> detection_sorted_indices; // sorted indices of all
-                                                   // instances in the dataset
-   std::vector<uint64_t>
-       image_detection_indices; // indices into the list of detected instances in
-                                // the same image as each instance
-   std::vector<double> precisions, recalls;
-
-   for (auto c = 0; c < num_categories; ++c) {
-     for (auto a = 0; a < num_area_ranges; ++a) {
-       for (auto m = 0; m < num_max_detections; ++m) {
-         // The COCO PythonAPI assumes evaluations[] (the return value of
-         // COCOeval::EvaluateImages() is one long list storing results for each
-         // combination of category, area range, and image id, with categories in
-         // the outermost loop and images in the innermost loop.
-         const int64_t evaluations_index =
-             c * num_area_ranges * num_images + a * num_images;
-         int num_valid_ground_truth = BuildSortedDetectionList(
-             evaluations,
-             evaluations_index,
-             num_images,
-             max_detections[m],
-             &evaluation_indices,
-             &detection_scores,
-             &detection_sorted_indices,
-             &image_detection_indices);
-
-         if (num_valid_ground_truth == 0) {
-           continue;
-         }
-
-         for (auto t = 0; t < num_iou_thresholds; ++t) {
-           // recalls_out is a flattened vectors representing a
-           // num_iou_thresholds X num_categories X num_area_ranges X
-           // num_max_detections matrix
-           const int64_t recalls_out_index =
-               t * num_categories * num_area_ranges * num_max_detections +
-               c * num_area_ranges * num_max_detections +
-               a * num_max_detections + m;
-
-           // precisions_out and scores_out are flattened vectors
-           // representing a num_iou_thresholds X num_recall_thresholds X
-           // num_categories X num_area_ranges X num_max_detections matrix
-           const int64_t precisions_out_stride =
-               num_categories * num_area_ranges * num_max_detections;
-           const int64_t precisions_out_index = t * num_recall_thresholds *
-                   num_categories * num_area_ranges * num_max_detections +
-               c * num_area_ranges * num_max_detections +
-               a * num_max_detections + m;
-
-           ComputePrecisionRecallCurve(
-               precisions_out_index,
-               precisions_out_stride,
-               recalls_out_index,
-               recall_thresholds,
-               t,
-               num_iou_thresholds,
-               num_valid_ground_truth,
-               evaluations,
-               evaluation_indices,
-               detection_scores,
-               detection_sorted_indices,
-               image_detection_indices,
-               &precisions,
-               &recalls,
-               &precisions_out,
-               &scores_out,
-               &recalls_out);
-         }
-       }
-     }
-   }
-
-   time_t rawtime;
-   struct tm local_time;
-   std::array<char, 200> buffer;
-   time(&rawtime);
- #ifdef _WIN32
-   localtime_s(&local_time, &rawtime);
- #else
-   localtime_r(&rawtime, &local_time);
- #endif
-   strftime(
-       buffer.data(), 200, "%Y-%m-%d %H:%M:%S", &local_time);
-   return py::dict(
-       "params"_a = params,
-       "counts"_a = std::vector<int64_t>(
-           {num_iou_thresholds,
-            num_recall_thresholds,
-            num_categories,
-            num_area_ranges,
-            num_max_detections}),
-       "date"_a = buffer,
-       "precision"_a = precisions_out,
-       "recall"_a = recalls_out,
-       "scores"_a = scores_out);
- }
-
- } // namespace COCOeval
-
- } // namespace detectron2
 
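The core of `ComputePrecisionRecallCurve` above is a running true/false-positive count over score-sorted detections, followed by a backward pass that makes precision monotonically non-increasing. An illustrative Python rendering for a single IOU threshold (the match flags and ground-truth count are made-up inputs):

    matches = [1, 0, 1, 1, 0]      # per detection, sorted by descending score
    num_valid_ground_truth = 4

    tp = fp = 0
    precisions, recalls = [], []
    for m in matches:
        tp += m > 0
        fp += m == 0
        precisions.append(tp / (tp + fp))
        recalls.append(tp / num_valid_ground_truth)

    # Same backward pass as the C++ loop: precision becomes non-increasing.
    for i in range(len(precisions) - 1, 0, -1):
        precisions[i - 1] = max(precisions[i - 1], precisions[i])

    print(list(zip(recalls, precisions)))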
 
spaces/Benson/text-generation/Examples/Blackjack 21 Blackjackist Descargar.md DELETED
@@ -1,93 +0,0 @@
1
-
2
- <h1>Blackjack 21 Blackjackist: Una revisión del popular juego de casino</h1>
3
- <p>Si estás buscando una forma divertida y emocionante de jugar al blackjack online, deberías echar un vistazo a Blackjack 21 Blackjackist. Este es un juego de casino gratuito que le ofrece la oportunidad de jugar al blackjack con millones de jugadores de todo el mundo. Puedes disfrutar de gráficos realistas en 3D, chatear con otros jugadores, obtener fichas gratis todos los días y aprender a jugar y ganar en el blackjack. En este artículo, revisaremos las características, beneficios, reglas y estrategias de Blackjack 21 Blackjackist. También te mostraremos cómo descargar y jugar el juego en tu dispositivo. Si usted es un principiante o un profesional, usted encontrará algo para amar acerca de este juego. </p>
4
- <h2>blackjack 21 blackjackist descargar</h2><br /><p><b><b>Download Zip</b> &#9675;&#9675;&#9675; <a href="https://bltlly.com/2v6Kvx">https://bltlly.com/2v6Kvx</a></b></p><br /><br />
5
- <h2>¿Qué es Blackjack 21 Blackjackist? </h2>
6
- <p>Blackjack 21 Blackjackist es un juego de casino desarrollado por KamaGames, un operador de casino social líder. El juego está disponible para Android, iOS, Windows, Mac y Facebook. Puedes descargarlo gratis desde la Google Play Store, la App Store o el sitio web oficial. También puedes reproducirlo en Facebook o en tu navegador. El juego tiene más de 10 millones de descargas y una calificación de 4.5 estrellas tanto en Google Play y App Store.</p>
7
- <h3>Las características y beneficios del juego</h3>
8
- <p>Blackjack 21 Blackjackist tiene muchas características y beneficios que lo convierten en uno de los mejores juegos de blackjack en línea. Estos son algunos de ellos:</p>
9
- <ul>
10
- <li><strong>Fichas gratis</strong>: Puedes jugar el juego todos los días y obtener fichas gratis. También puedes obtener más fichas completando misiones diarias, desbloqueando logros, invitando a amigos o comprándolos con dinero real. </li>
11
- <li><strong>Recompensas</strong>: Usted puede subir las apuestas, ganar juegos contra el distribuidor, tomar riesgos para llegar a 21, y ganar recompensas. También puedes participar en torneos y ganar trofeos y premios. </li>
12
-
13
- <li><strong>Chatea con otros jugadores</strong>: Puedes divertirte aún más en las mesas del casino con la práctica mensajería instantánea en el juego y chatear con otros jugadores de blackjack. Puedes hacer nuevos amigos, compartir consejos o retarlos a un juego. </li>
14
- <li><strong>Fair hand dealing guaranteed</strong>: El juego utiliza un generador de números aleatorios (RNG) certificado que le brinda la mejor y más justa experiencia de blackjack. No tienes que preocuparte por hacer trampa o resultados amañados. </li>
15
- <li><strong>Aprende a jugar</strong>: Si eres nuevo en el blackjack pero siempre has querido probarlo, puedes usar el modo tutorial sencillo que te ayudará a dar los primeros pasos. Puedes aprender rápidamente todo lo que necesitas saber sobre el blackjack, desde las reglas del juego hasta las combinaciones ganadoras. </li>
16
- <li><strong>Gráficos 3D</strong>: El juego tiene gráficos 3D increíblemente realistas que crean una atmósfera de casino inmersiva. Puede elegir entre diferentes temas de mesa, barajas de cartas y fondos. </li>
17
- <li><strong>No hay registro</strong>: Puedes entrar directamente en la acción sin registrarte. Puede elegir el modo invitado para usar la aplicación de casino gratuita sin registrarse. </li>
18
- <li><strong>Cuenta individual</strong>: Puedes empezar a jugar blackjack gratis en tu smartphone, luego continuar en tu tablet sin perder progreso. Puede utilizar su cuenta para jugar a cualquiera de los otros juegos de casino en una aplicación. </li>
19
- <li><strong>Más que blackjack</strong>: Si quieres más que blackjack, puedes probar otros juegos para una experiencia 3D inolvidable. Usted puede jugar Texas Hold'em Poker, Ranuras , Ruleta, Baccarat, Dados, y más. Usted puede cambiar entre los juegos fácilmente y tener una explosión. </li>
20
- </ul>
21
- <h3>Las reglas y estrategias del blackjack</h3>
22
- <p>El blackjack es un juego de cartas en el que intentas vencer al crupier consiguiendo un valor de mano lo más cercano posible a 21, sin pasarte. El juego se juega con una o más barajas estándar de 52 cartas. Las cartas tienen los siguientes valores:</p>
23
- <ul>
24
-
25
- <li>Las cartas de la cara (Jack, Queen, King) valen 10. </li>
26
- <li>Las tarjetas numéricas valen su valor. </li>
27
- </ul>
28
- <p>El juego comienza con el repartidor repartiendo dos cartas a cada jugador y a sí mismos. Una de las cartas del repartidor está boca arriba y la otra boca abajo. Los jugadores pueden ver sus propias cartas y la carta boca arriba del repartidor. Los jugadores tienen que decidir qué hacer con sus manos. Tienen las siguientes opciones:</p>
29
- <p></p>
30
- <ul>
31
- <li><strong>Hit</strong>: Toma otra carta de la baraja. Puedes golpear tantas veces como quieras, pero si pasas de 21, te revientas y pierdes tu apuesta. </li>
32
- <li><strong>Stand</strong>: Mantén tu mano actual y termina tu turno. A continuaci��n, compara tu mano con la del dealer para ver quién gana. </li>
33
- <li><strong>Dobla hacia abajo</strong>: Dobla tu apuesta inicial y toma una carta más. Luego te paras con tu mano final. </li>
34
- <li><strong>Split</strong>: Si tienes dos cartas del mismo valor, puedes dividirlas en dos manos separadas y jugarlas independientemente. Tienes que hacer otra apuesta igual a tu apuesta original para la segunda mano. Puedes golpear o pararte en cada mano como siempre. </li>
35
- <li><strong>Rendirse</strong>: Si crees que tienes una mala mano, puedes rendirte y renunciar a la mitad de tu apuesta. A continuación, terminar su turno y perder la otra mitad de su apuesta. </li>
36
- <li><strong>Seguro</strong>: Si la carta boca arriba del repartidor es un as, puedes tomar un seguro, que es una apuesta lateral que paga 2:1 si el repartidor tiene un blackjack (una tarjeta de 10 valores y un as). Puedes apostar hasta la mitad de tu apuesta original al seguro. Si el crupier tiene blackjack, ganas la apuesta del seguro pero pierdes tu apuesta original. Si el dealer no tiene blackjack, pierdes la apuesta del seguro y continúas el juego como de costumbre. </li>
37
- </ul>
38
- <p>Después de que todos los jugadores hayan terminado sus turnos, el repartidor revela su carta boca abajo y juega su mano de acuerdo con las siguientes reglas:</p>
39
- <ul>
40
- <li> El distribuidor debe golpear hasta que su valor de la mano es 17 o superior. </li>
41
-
42
- <li>El distribuidor no debe dividir o doblar. </li>
43
- </ul>
44
- <p>El resultado del juego se determina comparando los valores finales de las manos de los jugadores y del repartidor. Los posibles resultados son:</p>
45
- <ul>
46
- <li>Si el jugador tiene un blackjack y el repartidor no, el jugador gana y se le paga 3:2 en su apuesta. </li>
47
- <li>Si tanto el jugador como el dealer tienen un blackjack, es un push y el jugador obtiene su apuesta de vuelta. </li>
48
- <li>Si el jugador tiene un valor de mano más alto que el repartidor sin pasar de 21, el jugador gana y se le paga 1:1 en su apuesta. </li>
49
- <li> Si tanto el jugador como el repartidor tienen el mismo valor de mano, es un push y el jugador obtiene su apuesta de nuevo. </li>
50
- <li>Si el jugador tiene un valor de mano más bajo que el del repartidor sin pasar de 21, o si el jugador pierde, el jugador pierde y pierde su apuesta. </li>
51
- <li> Si tanto el jugador como el crupier revientan, el jugador pierde y pierde su apuesta. </li>
52
- </ul>
53
- <p>Para aumentar tus posibilidades de ganar en el blackjack, necesitas usar algunas estrategias básicas que te digan qué hacer en diferentes situaciones. Por ejemplo, siempre debes dividir ases y ochos, nunca dividir dieces o cincos, doblar en 11 o 10 cuando el repartidor tiene una carta baja, golpear en 17 suave o más bajo, pararse en 17 duro o más alto, etc. Puede encontrar gráficos de estrategia más detallados en línea que le muestran cómo jugar cada mano posible contra cada carta de repartidor posible. </p>
54
- <h2>¿Cómo descargar y jugar Blackjack 21 Blackjackist? </h2>
55
- <p>Descargar y jugar Blackjack 21 Blackjackist es fácil y rápido. Solo tienes que seguir estos sencillos pasos:</p>
56
- <h3>Los pasos para descargar el juego en diferentes dispositivos</h3>
57
- <p>Dependiendo del dispositivo que quieras usar, puedes descargar el juego desde diferentes fuentes. Aquí están los enlaces e instrucciones para cada dispositivo:</p>
58
- <ul>
59
-
60
- <li><strong>iOS</strong>: Puede descargar el juego desde la App Store buscando "Blackjack 21 Blackjackist" o haciendo clic en este enlace. También puede escanear este código QR con su dispositivo para ir directamente a la página de descarga. Una vez que hayas descargado el juego, puedes abrirlo y empezar a jugar. </li>
61
- <li><strong>Windows</strong>: Puede descargar el juego desde el sitio web oficial haciendo clic en el botón "Descargar para Windows" o haciendo clic en este enlace. También puede escanear este código QR con su dispositivo para ir directamente a la página de descarga. Una vez que haya descargado el juego, puede ejecutar el instalador y seguir las instrucciones. Una vez completada la instalación, puede abrir el juego y comenzar a jugar. </li>
62
- <li><strong>Mac</strong>: Puede descargar el juego desde el sitio web oficial haciendo clic en el botón "Descargar para Mac" o haciendo clic en este enlace. También puede escanear este código QR con su dispositivo para ir directamente a la página de descarga. Una vez que haya descargado el juego, puede ejecutar el instalador y seguir las instrucciones. Una vez completada la instalación, puede abrir el juego y comenzar a jugar. </li>
63
- <li><strong>Facebook</strong>: Puedes jugar el juego en Facebook buscando "Blackjack 21 Blackjackist" o haciendo clic en este enlace. También puede escanear este código QR con su dispositivo para ir directamente a la página del juego. Una vez que hayas abierto el juego, puedes iniciar sesión con tu cuenta de Facebook y comenzar a jugar. </li>
64
- <li><strong>Browser</strong>: Puedes jugar el juego en tu navegador haciendo clic en el botón "Play Now" o haciendo clic en este enlace. También puede escanear este código QR con su dispositivo para ir directamente a la página del juego. Una vez que haya abierto el juego, puede iniciar sesión con su correo electrónico o cuenta de redes sociales y comenzar a jugar. </li>
65
- </ul>
66
- <h3>Los consejos y trucos para mejorar tus habilidades y ganar más fichas</h3>
67
-
68
- <ul>
69
- <li><strong>Practice</strong>: The best way to get better at blackjack is to practice as much as you can. You can play in different modes, such as single player, multiplayer, or tournament, and try out different strategies and bets. You can also use the tutorial mode to learn the basics of blackjack and test your knowledge. </li>
70
- <li><strong>Use a strategy chart</strong>: As mentioned before, using a strategy chart will help you make the best decision in every situation. You can find a strategy chart online or in the game itself. You can also build your own strategy chart to suit your preferences and style. </li>
71
- <li><strong>Manage your bankroll</strong>: One of the most important skills in blackjack is managing your bankroll wisely. Never bet more than you can afford to lose, and always set a limit for yourself. You should also vary your bets according to the situation and your edge: bet more when your chance of winning is high, such as when you hold a blackjack or a strong hand, and bet less when it is low, such as when the dealer shows a high card or an ace. Avoid betting much on insurance, as it is usually a bad bet (see the sketch after this list). </li>
72
- <li><strong>Learn from other players</strong>: One of the advantages of playing Blackjack 21 Blackjackist is that you can chat and interact with other players. You can learn from their moves, mistakes, and tips. You can also ask them questions, share your experiences, or challenge them to a game. You can make new friends and have fun while improving your skills. </li>
73
- <li><strong>Have fun</strong>: The most important tip of all is to have fun playing blackjack. Blackjack is a game of skill, luck, and strategy, but it is also a game of entertainment. Do not take it too seriously or get frustrated when you lose. Remember that it is just a game and the main goal is to enjoy yourself. </li>
74
- </ul>
75
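To see why insurance is usually a losing bet, here is a minimal expected-value sketch in Python. It assumes a single deck, a dealer ace showing, two non-ten cards in your own hand, and the standard 2:1 insurance payout; these are textbook blackjack numbers, not figures published by the game itself.

ten_valued = 16                # 10/J/Q/K still unseen in a single deck
unseen = 52 - 3                # the deck minus the dealer's ace and your two cards
p_win = ten_valued / unseen    # chance the dealer's hole card is ten-valued

# Insurance pays 2:1 when it wins; otherwise the 1-unit stake is lost.
ev = p_win * 2 + (1 - p_win) * (-1)
print(f"EV per unit staked: {ev:+.4f}")   # about -0.0204, a ~2% average loss

Only a player who knows the remaining deck is unusually rich in ten-valued cards (for example, by counting) can make this bet profitable, which is why the tip above recommends avoiding it.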
- <h2>Conclusion</h2>
76
- <p>Blackjack 21 Blackjackist is a great casino game that lets you play blackjack online with millions of players from around the world. You can enjoy realistic 3D graphics, chat with other players, collect free chips every day, and learn how to play and win at blackjack. You can download and play the game on your device for free from several sources. You can also use a few tips and tricks to improve your skills and win more chips. Blackjack 21 Blackjackist is a game that will keep you entertained and engaged for hours. </p>
77
- <p>If you are ready to join the blackjack community and have some fun, download Blackjack 21 Blackjackist today and start playing. You won't regret it! </p>
78
- <h3>Frequently asked questions</h3>
79
- <p>Here are some frequently asked questions about Blackjack 21 Blackjackist:</p>
80
- <ul>
81
- <li><strong>Q: How can I get more chips? </strong></li>
82
- <li><strong>A: You can get more chips by playing the game every day and collecting the free daily chips. You can also earn more by completing daily missions, unlocking achievements, or inviting friends, or buy them with real money. </strong></li>
83
- <li><strong>Q: How can I play with my friends? </strong></li>
84
- <li><strong>A: You can play with your friends by inviting them to the game through Facebook or email. You can also join their tables or create your own private tables. </strong></li>
85
- <li><strong>Q: How can I change my avatar or nickname? </strong></li>
86
- <li><strong>A: You can change your avatar or nickname by going to your profile page and tapping the edit button. You can choose from different avatars or upload your own photo. You can also change your nickname by typing a new one. </strong></li>
87
- <li><strong>Q: How do I contact customer support? </strong></li>
88
-
89
- <li><strong>Q: How can I delete my account? </strong></li>
90
- <li><strong>A: You can delete your account by going to the settings menu and tapping the delete account button. You will then be asked to confirm your decision. Once you delete your account, you will lose all of your progress, chips, and rewards in the game. </strong></li>
91
- </ul>
92
 
spaces/Benson/text-generation/Examples/Clash Royale Bluestacks Apk.md DELETED
@@ -1,35 +0,0 @@
1
-
2
- <h1>Clash Royale Bluestacks APK: How to Play Clash Royale on PC</h1>
3
- <p>Do you love playing Clash Royale, Supercell's popular real-time strategy game? Would you like to play it on a bigger screen with better graphics and performance? If so, you are in luck. In this article, we will show you how to download and install the Clash Royale Bluestacks APK on your PC, and how to enjoy the best gaming experience with Bluestacks, the world's most popular mobile gaming platform for Windows and Mac.</p>
4
- <h2>clash royale bluestacks apk</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash; <a href="https://bltlly.com/2v6KY0">https://bltlly.com/2v6KY0</a></b></p><br /><br />
5
- <h2>What is Clash Royale?</h2>
6
- <p>Clash Royale is an online multiplayer game where you face other players in fast-paced duels. You can choose from a variety of characters from the Clash of Clans universe, such as Giants, Barbarian Kings, Wall Breakers, Archers, and many more. You can also collect and upgrade cards, build your own decks, and join clans to share cards and take part in clan wars.</p>
7
- <p>Clash Royale combines strategy, skill, and luck. You have to deploy your troops wisely, use your spells effectively, and manage your elixir efficiently. You also have to adapt to different scenarios, modes, and challenges. Clash Royale never gets boring, as there is always something new to discover and enjoy.</p>
8
- <h2>What is Bluestacks?</h2>
9
- <p>Bluestacks is a mobile gaming platform that lets you play Android games on your PC or Mac. It is 100% safe and free to use. With Bluestacks, you can access millions of games across many genres, such as RPG, strategy, action, puzzle, casual, and more. You can also play online or offline, depending on your preference.</p>
10
-
11
- <h2>How to download and install the Clash Royale Bluestacks APK on PC</h2>
12
- <p>If you want to play Clash Royale on your PC with Bluestacks, just follow these simple steps:</p>
13
- <p></p>
14
- <h3>Step 1: Download Bluestacks from the official website</h3>
15
- <p>Go to <a href="( 1 )">the official Bluestacks website</a> and click the "Download" button. This will start downloading the installation file for Bluestacks 10 or Bluestacks 5, depending on your choice. Both versions are compatible with Windows 7 or later and Mac OS X 10.12 or later.</p>
16
- <h3>Step 2: Install Bluestacks on your PC</h3>
17
- <p>Once the download is complete, open the installation file and follow the on-screen instructions. The installation may take a few minutes, depending on your system specifications. Afterwards, you will see a Bluestacks shortcut icon on your desktop or in the start menu.</p>
18
- <h3>Step 3: Launch Bluestacks and sign in with your Google account</h3>
19
- <p>Double-click the Bluestacks icon to launch the app player. You will be asked to sign in with your Google account, which is required to access the Google Play Store and other Google services. If you do not have a Google account, you can create one for free. You can also skip this step if you want to use other app stores or APK files.</p>
20
- <h3>Step 4: Search for Clash Royale in the Bluestacks app store, or download the APK from Uptodown</h3>
21
- <p>There are two ways to get Clash Royale on Bluestacks. One is to search for it in the Bluestacks app store, which is powered by the Google Play Store: type "Clash Royale" in the search bar and click the "Install" button. The other is to download the APK file from a third-party website such as Uptodown: go to <a href="">the Uptodown website</a>, search for "Clash Royale", then click the "Download" button and save the file to your PC.</p>
22
- <h3>Step 5: Install and open Clash Royale in Bluestacks</h3>
23
-
24
- <h2>How to play Clash Royale on PC with Bluestacks</h2>
25
- <p>Now that you have Clash Royale on your PC, you can start playing with Bluestacks. Here are some tips and tricks to improve your gaming experience:</p>
26
- <h3>Customize your keyboard and mouse controls for optimal gameplay</h3>
27
- <p>One of the best features of Bluestacks is that it lets you customize your keyboard and mouse controls for any game. You can access this feature by clicking the "Keyboard" icon in the bottom-right corner of the Bluestacks window. This opens a menu where you can map keys or mouse buttons to different actions, such as deploying troops, casting spells, or zooming. You can use predefined keymaps or create your own, save your settings, and switch between them at any time.</p>
28
- <h3>Enjoy full HD graphics and smooth performance with Bluestacks</h3>
29
- <p>Another great feature of Bluestacks is that it delivers full HD graphics and smooth performance for any game. You can adjust the graphics settings by clicking the "Settings" icon in the top-right corner of the Bluestacks window. This opens a menu where you can change the resolution, frame rate, display mode, DPI, and so on. You can also enable or disable features such as high frame rates, smart controls, and game notifications, and check system requirements and compatibility by clicking the "System Info" icon in the same menu.</p>
30
- <h3>Access exclusive Bluestacks features and rewards</h3>
31
-
32
- <h2>Conclusion</h2>
33
- <p>In conclusion, playing Clash Royale on PC with Bluestacks is a great way to enjoy this amazing game on a bigger screen with better graphics and performance. You can also customize your controls, access exclusive features and rewards, and have even more fun with Bluestacks. All you need to do is download and install the Clash Royale Bluestacks APK on your PC by following the simple steps above. So what are you waiting for? Start playing Clash Royale on PC with Bluestacks today!</p>
 
spaces/BirdL/DONOTUSEDemo/app.py DELETED
@@ -1,34 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- from random import randint
4
- import os
5
- import huggingface_hub
6
-
7
- tok = os.getenv('HF_TOKEN')
8
- huggingface_hub.login(tok)
9
-
10
- from huggingface_hub import HfApi
11
- from peft import PeftModel, PeftConfig
12
- from transformers import AutoModelForCausalLM, AutoTokenizer
13
-
14
- config = PeftConfig.from_pretrained("BirdL/DONOTUSEV5")
15
- model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t", token=tok, trust_remote_code=True)
16
- model = PeftModel.from_pretrained(model, "BirdL/DONOTUSEV5")
17
- tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t", token=tok)
18
-
19
- def response(message, history):
20
- batch = tokenizer(message, return_tensors='pt')
21
-
22
- with torch.cuda.amp.autocast():
23
- output_tokens = model.generate(**batch, max_new_tokens=20)
24
- output_tokens = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
25
- filename = (("file" + str(randint(0, 1000000)) + ".txt"))
26
- api = HfApi()
27
- api.upload_file(
28
- path_or_fileobj=("|Question:" + message + " |RespV2: " + output_tokens).encode('ascii') ,
29
- path_in_repo=(filename),
30
- repo_id="BirdL/Data",
31
- )
32
-
33
- return output_tokens
34
- gr.ChatInterface(response).launch()
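The deleted app.py above chains a PEFT adapter ("BirdL/DONOTUSEV5") onto stabilityai/stablelm-3b-4e1t and serves it through gr.ChatInterface, uploading every question/answer pair to the BirdL/Data repo. One fragile spot is the final .encode('ascii'), which raises UnicodeEncodeError the moment the model emits an accented character or an emoji. Below is a minimal sketch of a more robust logging helper; the repo id, record format, and filename scheme are taken from the file, while the helper's name and the UTF-8 choice are editorial assumptions.

from random import randint

from huggingface_hub import HfApi


def log_exchange(question: str, answer: str, repo_id: str = "BirdL/Data") -> None:
    # UTF-8 with errors="replace" never raises, unlike encode('ascii'),
    # which crashes on any non-ASCII model output.
    record = f"|Question:{question} |RespV2: {answer}"
    HfApi().upload_file(
        path_or_fileobj=record.encode("utf-8", errors="replace"),
        path_in_repo=f"file{randint(0, 1000000)}.txt",
        repo_id=repo_id,
    )

Swapping this helper into response() in place of the inline upload keeps the Space from crashing mid-chat while writing the same record layout to the same repo.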
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/vector.h DELETED
@@ -1,69 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file thrust/system/cpp/vector.h
18
- * \brief A dynamically-sizable array of elements which reside in memory available to
19
- * Thrust's standard C++ system.
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/system/cpp/memory.h>
26
- #include <thrust/detail/vector_base.h>
27
- #include <vector>
28
-
29
- namespace thrust
30
- {
31
-
32
- // forward declaration of host_vector
33
- template<typename T, typename Allocator> class host_vector;
34
-
35
- namespace system
36
- {
37
- namespace cpp
38
- {
39
-
40
- /*! \p cpp::vector is a container that supports random access to elements,
41
- * constant time removal of elements at the end, and linear time insertion
42
- * and removal of elements at the beginning or in the middle. The number of
43
- * elements in a \p cpp::vector may vary dynamically; memory management is
44
- * automatic. The elements contained in a \p cpp::vector reside in memory
45
- * available to the \p cpp system.
46
- *
47
- * \tparam T The element type of the \p cpp::vector.
48
- * \tparam Allocator The allocator type of the \p cpp::vector. Defaults to \p cpp::allocator.
49
- *
50
- * \see http://www.sgi.com/tech/stl/Vector.html
51
- * \see host_vector For the documentation of the complete interface which is
52
- * shared by \p cpp::vector
53
- * \see device_vector
54
- */
55
- template<typename T, typename Allocator = allocator<T> >
56
- using vector = thrust::detail::vector_base<T, Allocator>;
57
-
58
- } // end cpp
59
- } // end system
60
-
61
- // alias system::cpp names at top-level
62
- namespace cpp
63
- {
64
-
65
- using thrust::system::cpp::vector;
66
-
67
- } // end cpp
68
-
69
- } // end thrust
 
spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/transforms.py DELETED
@@ -1,1955 +0,0 @@
1
- import math
2
- import numbers
3
- import random
4
- import warnings
5
- from collections.abc import Sequence
6
- from typing import Tuple, List, Optional
7
-
8
- import torch
9
- from torch import Tensor
10
-
11
- try:
12
- import accimage
13
- except ImportError:
14
- accimage = None
15
-
16
- from . import functional as F
17
- from .functional import InterpolationMode, _interpolation_modes_from_int
18
-
19
-
20
- __all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale",
21
- "CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop",
22
- "RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop",
23
- "LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
24
- "RandomPerspective", "RandomErasing", "GaussianBlur", "InterpolationMode", "RandomInvert", "RandomPosterize",
25
- "RandomSolarize", "RandomAdjustSharpness", "RandomAutocontrast", "RandomEqualize"]
26
-
27
-
28
- class Compose:
29
- """Composes several transforms together. This transform does not support torchscript.
30
- Please, see the note below.
31
-
32
- Args:
33
- transforms (list of ``Transform`` objects): list of transforms to compose.
34
-
35
- Example:
36
- >>> transforms.Compose([
37
- >>> transforms.CenterCrop(10),
38
- >>> transforms.ToTensor(),
39
- >>> ])
40
-
41
- .. note::
42
- In order to script the transformations, please use ``torch.nn.Sequential`` as below.
43
-
44
- >>> transforms = torch.nn.Sequential(
45
- >>> transforms.CenterCrop(10),
46
- >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
47
- >>> )
48
- >>> scripted_transforms = torch.jit.script(transforms)
49
-
50
- Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require
51
- `lambda` functions or ``PIL.Image``.
52
-
53
- """
54
-
55
- def __init__(self, transforms):
56
- self.transforms = transforms
57
-
58
- def __call__(self, img):
59
- for t in self.transforms:
60
- img = t(img)
61
- return img
62
-
63
- def __repr__(self):
64
- format_string = self.__class__.__name__ + '('
65
- for t in self.transforms:
66
- format_string += '\n'
67
- format_string += ' {0}'.format(t)
68
- format_string += '\n)'
69
- return format_string
70
-
71
-
72
- class ToTensor:
73
- """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.
74
-
75
- Converts a PIL Image or numpy.ndarray (H x W x C) in the range
76
- [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
77
- if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
78
- or if the numpy.ndarray has dtype = np.uint8
79
-
80
- In the other cases, tensors are returned without scaling.
81
-
82
- .. note::
83
- Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
84
- transforming target image masks. See the `references`_ for implementing the transforms for image masks.
85
-
86
- .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation
87
- """
88
-
89
- def __call__(self, pic):
90
- """
91
- Args:
92
- pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
93
-
94
- Returns:
95
- Tensor: Converted image.
96
- """
97
- return F.to_tensor(pic)
98
-
99
- def __repr__(self):
100
- return self.__class__.__name__ + '()'
101
-
102
-
103
- class PILToTensor:
104
- """Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript.
105
-
106
- Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
107
- """
108
-
109
- def __call__(self, pic):
110
- """
111
- Args:
112
- pic (PIL Image): Image to be converted to tensor.
113
-
114
- Returns:
115
- Tensor: Converted image.
116
- """
117
- return F.pil_to_tensor(pic)
118
-
119
- def __repr__(self):
120
- return self.__class__.__name__ + '()'
121
-
122
-
123
- class ConvertImageDtype(torch.nn.Module):
124
- """Convert a tensor image to the given ``dtype`` and scale the values accordingly
125
- This function does not support PIL Image.
126
-
127
- Args:
128
- dtype (torch.dtype): Desired data type of the output
129
-
130
- .. note::
131
-
132
- When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
133
- If converted back and forth, this mismatch has no effect.
134
-
135
- Raises:
136
- RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
137
- well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
138
- overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
139
- of the integer ``dtype``.
140
- """
141
-
142
- def __init__(self, dtype: torch.dtype) -> None:
143
- super().__init__()
144
- self.dtype = dtype
145
-
146
- def forward(self, image):
147
- return F.convert_image_dtype(image, self.dtype)
148
-
149
-
150
- class ToPILImage:
151
- """Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.
152
-
153
- Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
154
- H x W x C to a PIL Image while preserving the value range.
155
-
156
- Args:
157
- mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
158
- If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
159
- - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
160
- - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
161
- - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
162
- - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
163
- ``short``).
164
-
165
- .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
166
- """
167
- def __init__(self, mode=None):
168
- self.mode = mode
169
-
170
- def __call__(self, pic):
171
- """
172
- Args:
173
- pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
174
-
175
- Returns:
176
- PIL Image: Image converted to PIL Image.
177
-
178
- """
179
- return F.to_pil_image(pic, self.mode)
180
-
181
- def __repr__(self):
182
- format_string = self.__class__.__name__ + '('
183
- if self.mode is not None:
184
- format_string += 'mode={0}'.format(self.mode)
185
- format_string += ')'
186
- return format_string
187
-
188
-
189
- class Normalize(torch.nn.Module):
190
- """Normalize a tensor image with mean and standard deviation.
191
- This transform does not support PIL Image.
192
- Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
193
- channels, this transform will normalize each channel of the input
194
- ``torch.*Tensor`` i.e.,
195
- ``output[channel] = (input[channel] - mean[channel]) / std[channel]``
196
-
197
- .. note::
198
- This transform acts out of place, i.e., it does not mutate the input tensor.
199
-
200
- Args:
201
- mean (sequence): Sequence of means for each channel.
202
- std (sequence): Sequence of standard deviations for each channel.
203
- inplace(bool,optional): Bool to make this operation in-place.
204
-
205
- """
206
-
207
- def __init__(self, mean, std, inplace=False):
208
- super().__init__()
209
- self.mean = mean
210
- self.std = std
211
- self.inplace = inplace
212
-
213
- def forward(self, tensor: Tensor) -> Tensor:
214
- """
215
- Args:
216
- tensor (Tensor): Tensor image to be normalized.
217
-
218
- Returns:
219
- Tensor: Normalized Tensor image.
220
- """
221
- return F.normalize(tensor, self.mean, self.std, self.inplace)
222
-
223
- def __repr__(self):
224
- return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
225
-
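# Unlike Compose, RandomApply, or FiveCrop above, Normalize ships without a usage
# example in its docstring, so here is a minimal sketch. The mean/std values are
# the common ImageNet statistics, used purely as an illustrative assumption:
#
# >>> img = torch.rand(3, 224, 224)  # a dummy CHW image in [0, 1]
# >>> norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# >>> out = norm(img)                # computes (img - mean[c]) / std[c] per channel c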
226
-
227
- class Resize(torch.nn.Module):
228
- """Resize the input image to the given size.
229
- If the image is torch Tensor, it is expected
230
- to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
231
-
232
- .. warning::
233
- The output image might be different depending on its type: when downsampling, the interpolation of PIL images
234
- and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
235
- in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
236
- types. See also below the ``antialias`` parameter, which can help making the output of PIL images and tensors
237
- closer.
238
-
239
- Args:
240
- size (sequence or int): Desired output size. If size is a sequence like
241
- (h, w), output size will be matched to this. If size is an int,
242
- smaller edge of the image will be matched to this number.
243
- i.e, if height > width, then image will be rescaled to
244
- (size * height / width, size).
245
-
246
- .. note::
247
- In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
248
- interpolation (InterpolationMode): Desired interpolation enum defined by
249
- :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
250
- If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
251
- ``InterpolationMode.BICUBIC`` are supported.
252
- For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
253
- max_size (int, optional): The maximum allowed for the longer edge of
254
- the resized image: if the longer edge of the image is greater
255
- than ``max_size`` after being resized according to ``size``, then
256
- the image is resized again so that the longer edge is equal to
257
- ``max_size``. As a result, ``size`` might be overruled, i.e the
258
- smaller edge may be shorter than ``size``. This is only supported
259
- if ``size`` is an int (or a sequence of length 1 in torchscript
260
- mode).
261
- antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
262
- is always used. If ``img`` is Tensor, the flag is False by default and can be set to True for
263
- ``InterpolationMode.BILINEAR`` only mode. This can help making the output for PIL images and tensors
264
- closer.
265
-
266
- .. warning::
267
- There is no autodiff support for ``antialias=True`` option with input ``img`` as Tensor.
268
-
269
- """
270
-
271
- def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None):
272
- super().__init__()
273
- if not isinstance(size, (int, Sequence)):
274
- raise TypeError("Size should be int or sequence. Got {}".format(type(size)))
275
- if isinstance(size, Sequence) and len(size) not in (1, 2):
276
- raise ValueError("If size is a sequence, it should have 1 or 2 values")
277
- self.size = size
278
- self.max_size = max_size
279
-
280
- # Backward compatibility with integer value
281
- if isinstance(interpolation, int):
282
- warnings.warn(
283
- "Argument interpolation should be of type InterpolationMode instead of int. "
284
- "Please, use InterpolationMode enum."
285
- )
286
- interpolation = _interpolation_modes_from_int(interpolation)
287
-
288
- self.interpolation = interpolation
289
- self.antialias = antialias
290
-
291
- def forward(self, img):
292
- """
293
- Args:
294
- img (PIL Image or Tensor): Image to be scaled.
295
-
296
- Returns:
297
- PIL Image or Tensor: Rescaled image.
298
- """
299
- return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
300
-
301
- def __repr__(self):
302
- interpolate_str = self.interpolation.value
303
- return self.__class__.__name__ + '(size={0}, interpolation={1}, max_size={2}, antialias={3})'.format(
304
- self.size, interpolate_str, self.max_size, self.antialias)
305
-
306
-
307
- class Scale(Resize):
308
- """
309
- Note: This transform is deprecated in favor of Resize.
310
- """
311
- def __init__(self, *args, **kwargs):
312
- warnings.warn("The use of the transforms.Scale transform is deprecated, " +
313
- "please use transforms.Resize instead.")
314
- super(Scale, self).__init__(*args, **kwargs)
315
-
316
-
317
- class CenterCrop(torch.nn.Module):
318
- """Crops the given image at the center.
319
- If the image is torch Tensor, it is expected
320
- to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
321
- If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
322
-
323
- Args:
324
- size (sequence or int): Desired output size of the crop. If size is an
325
- int instead of sequence like (h, w), a square crop (size, size) is
326
- made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
327
- """
328
-
329
- def __init__(self, size):
330
- super().__init__()
331
- self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
332
-
333
- def forward(self, img):
334
- """
335
- Args:
336
- img (PIL Image or Tensor): Image to be cropped.
337
-
338
- Returns:
339
- PIL Image or Tensor: Cropped image.
340
- """
341
- return F.center_crop(img, self.size)
342
-
343
- def __repr__(self):
344
- return self.__class__.__name__ + '(size={0})'.format(self.size)
345
-
346
-
347
- class Pad(torch.nn.Module):
348
- """Pad the given image on all sides with the given "pad" value.
349
- If the image is torch Tensor, it is expected
350
- to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
351
- at most 3 leading dimensions for mode edge,
352
- and an arbitrary number of leading dimensions for mode constant
353
-
354
- Args:
355
- padding (int or sequence): Padding on each border. If a single int is provided this
356
- is used to pad all borders. If sequence of length 2 is provided this is the padding
357
- on left/right and top/bottom respectively. If a sequence of length 4 is provided
358
- this is the padding for the left, top, right and bottom borders respectively.
359
-
360
- .. note::
361
- In torchscript mode padding as single int is not supported, use a sequence of
362
- length 1: ``[padding, ]``.
363
- fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
364
- length 3, it is used to fill R, G, B channels respectively.
365
- This value is only used when the padding_mode is constant.
366
- Only number is supported for torch Tensor.
367
- Only int or str or tuple value is supported for PIL Image.
368
- padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
369
- Default is constant.
370
-
371
- - constant: pads with a constant value, this value is specified with fill
372
-
373
- - edge: pads with the last value at the edge of the image.
374
- If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
375
-
376
- - reflect: pads with reflection of image without repeating the last value on the edge.
377
- For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
378
- will result in [3, 2, 1, 2, 3, 4, 3, 2]
379
-
380
- - symmetric: pads with reflection of image repeating the last value on the edge.
381
- For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
382
- will result in [2, 1, 1, 2, 3, 4, 4, 3]
383
- """
384
-
385
- def __init__(self, padding, fill=0, padding_mode="constant"):
386
- super().__init__()
387
- if not isinstance(padding, (numbers.Number, tuple, list)):
388
- raise TypeError("Got inappropriate padding arg")
389
-
390
- if not isinstance(fill, (numbers.Number, str, tuple)):
391
- raise TypeError("Got inappropriate fill arg")
392
-
393
- if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
394
- raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
395
-
396
- if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
397
- raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
398
- "{} element tuple".format(len(padding)))
399
-
400
- self.padding = padding
401
- self.fill = fill
402
- self.padding_mode = padding_mode
403
-
404
- def forward(self, img):
405
- """
406
- Args:
407
- img (PIL Image or Tensor): Image to be padded.
408
-
409
- Returns:
410
- PIL Image or Tensor: Padded image.
411
- """
412
- return F.pad(img, self.padding, self.fill, self.padding_mode)
413
-
414
- def __repr__(self):
415
- return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
416
- format(self.padding, self.fill, self.padding_mode)
417
-
418
-
419
- class Lambda:
420
- """Apply a user-defined lambda as a transform. This transform does not support torchscript.
421
-
422
- Args:
423
- lambd (function): Lambda/function to be used for transform.
424
- """
425
-
426
- def __init__(self, lambd):
427
- if not callable(lambd):
428
- raise TypeError("Argument lambd should be callable, got {}".format(repr(type(lambd).__name__)))
429
- self.lambd = lambd
430
-
431
- def __call__(self, img):
432
- return self.lambd(img)
433
-
434
- def __repr__(self):
435
- return self.__class__.__name__ + '()'
436
-
437
-
438
- class RandomTransforms:
439
- """Base class for a list of transformations with randomness
440
-
441
- Args:
442
- transforms (sequence): list of transformations
443
- """
444
-
445
- def __init__(self, transforms):
446
- if not isinstance(transforms, Sequence):
447
- raise TypeError("Argument transforms should be a sequence")
448
- self.transforms = transforms
449
-
450
- def __call__(self, *args, **kwargs):
451
- raise NotImplementedError()
452
-
453
- def __repr__(self):
454
- format_string = self.__class__.__name__ + '('
455
- for t in self.transforms:
456
- format_string += '\n'
457
- format_string += ' {0}'.format(t)
458
- format_string += '\n)'
459
- return format_string
460
-
461
-
462
- class RandomApply(torch.nn.Module):
463
- """Apply randomly a list of transformations with a given probability.
464
-
465
- .. note::
466
- In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of
467
- transforms as shown below:
468
-
469
- >>> transforms = transforms.RandomApply(torch.nn.ModuleList([
470
- >>> transforms.ColorJitter(),
471
- >>> ]), p=0.3)
472
- >>> scripted_transforms = torch.jit.script(transforms)
473
-
474
- Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require
475
- `lambda` functions or ``PIL.Image``.
476
-
477
- Args:
478
- transforms (sequence or torch.nn.Module): list of transformations
479
- p (float): probability
480
- """
481
-
482
- def __init__(self, transforms, p=0.5):
483
- super().__init__()
484
- self.transforms = transforms
485
- self.p = p
486
-
487
- def forward(self, img):
488
- if self.p < torch.rand(1):
489
- return img
490
- for t in self.transforms:
491
- img = t(img)
492
- return img
493
-
494
- def __repr__(self):
495
- format_string = self.__class__.__name__ + '('
496
- format_string += '\n p={}'.format(self.p)
497
- for t in self.transforms:
498
- format_string += '\n'
499
- format_string += ' {0}'.format(t)
500
- format_string += '\n)'
501
- return format_string
502
-
503
-
504
- class RandomOrder(RandomTransforms):
505
- """Apply a list of transformations in a random order. This transform does not support torchscript.
506
- """
507
- def __call__(self, img):
508
- order = list(range(len(self.transforms)))
509
- random.shuffle(order)
510
- for i in order:
511
- img = self.transforms[i](img)
512
- return img
513
-
514
-
515
- class RandomChoice(RandomTransforms):
516
- """Apply single transformation randomly picked from a list. This transform does not support torchscript.
517
- """
518
- def __call__(self, img):
519
- t = random.choice(self.transforms)
520
- return t(img)
521
-
522
-
523
- class RandomCrop(torch.nn.Module):
524
- """Crop the given image at a random location.
525
- If the image is torch Tensor, it is expected
526
- to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,
527
- but if non-constant padding is used, the input is expected to have at most 2 leading dimensions
528
-
529
- Args:
530
- size (sequence or int): Desired output size of the crop. If size is an
531
- int instead of sequence like (h, w), a square crop (size, size) is
532
- made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
533
- padding (int or sequence, optional): Optional padding on each border
534
- of the image. Default is None. If a single int is provided this
535
- is used to pad all borders. If sequence of length 2 is provided this is the padding
536
- on left/right and top/bottom respectively. If a sequence of length 4 is provided
537
- this is the padding for the left, top, right and bottom borders respectively.
538
-
539
- .. note::
540
- In torchscript mode padding as single int is not supported, use a sequence of
541
- length 1: ``[padding, ]``.
542
- pad_if_needed (boolean): It will pad the image if smaller than the
543
- desired size to avoid raising an exception. Since cropping is done
544
- after padding, the padding seems to be done at a random offset.
545
- fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
546
- length 3, it is used to fill R, G, B channels respectively.
547
- This value is only used when the padding_mode is constant.
548
- Only number is supported for torch Tensor.
549
- Only int or str or tuple value is supported for PIL Image.
550
- padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
551
- Default is constant.
552
-
553
- - constant: pads with a constant value, this value is specified with fill
554
-
555
- - edge: pads with the last value at the edge of the image.
556
- If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
557
-
558
- - reflect: pads with reflection of image without repeating the last value on the edge.
559
- For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
560
- will result in [3, 2, 1, 2, 3, 4, 3, 2]
561
-
562
- - symmetric: pads with reflection of image repeating the last value on the edge.
563
- For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
564
- will result in [2, 1, 1, 2, 3, 4, 4, 3]
565
- """
566
-
567
- @staticmethod
568
- def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
569
- """Get parameters for ``crop`` for a random crop.
570
-
571
- Args:
572
- img (PIL Image or Tensor): Image to be cropped.
573
- output_size (tuple): Expected output size of the crop.
574
-
575
- Returns:
576
- tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
577
- """
578
- w, h = F._get_image_size(img)
579
- th, tw = output_size
580
-
581
- if h + 1 < th or w + 1 < tw:
582
- raise ValueError(
583
- "Required crop size {} is larger then input image size {}".format((th, tw), (h, w))
584
- )
585
-
586
- if w == tw and h == th:
587
- return 0, 0, h, w
588
-
589
- i = torch.randint(0, h - th + 1, size=(1, )).item()
590
- j = torch.randint(0, w - tw + 1, size=(1, )).item()
591
- return i, j, th, tw
592
-
593
- def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
594
- super().__init__()
595
-
596
- self.size = tuple(_setup_size(
597
- size, error_msg="Please provide only two dimensions (h, w) for size."
598
- ))
599
-
600
- self.padding = padding
601
- self.pad_if_needed = pad_if_needed
602
- self.fill = fill
603
- self.padding_mode = padding_mode
604
-
605
- def forward(self, img):
606
- """
607
- Args:
608
- img (PIL Image or Tensor): Image to be cropped.
609
-
610
- Returns:
611
- PIL Image or Tensor: Cropped image.
612
- """
613
- if self.padding is not None:
614
- img = F.pad(img, self.padding, self.fill, self.padding_mode)
615
-
616
- width, height = F._get_image_size(img)
617
- # pad the width if needed
618
- if self.pad_if_needed and width < self.size[1]:
619
- padding = [self.size[1] - width, 0]
620
- img = F.pad(img, padding, self.fill, self.padding_mode)
621
- # pad the height if needed
622
- if self.pad_if_needed and height < self.size[0]:
623
- padding = [0, self.size[0] - height]
624
- img = F.pad(img, padding, self.fill, self.padding_mode)
625
-
626
- i, j, h, w = self.get_params(img, self.size)
627
-
628
- return F.crop(img, i, j, h, w)
629
-
630
- def __repr__(self):
631
- return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding)
632
-
633
-
634
- class RandomHorizontalFlip(torch.nn.Module):
635
- """Horizontally flip the given image randomly with a given probability.
636
- If the image is torch Tensor, it is expected
637
- to have [..., H, W] shape, where ... means an arbitrary number of leading
638
- dimensions
639
-
640
- Args:
641
- p (float): probability of the image being flipped. Default value is 0.5
642
- """
643
-
644
- def __init__(self, p=0.5):
645
- super().__init__()
646
- self.p = p
647
-
648
- def forward(self, img):
649
- """
650
- Args:
651
- img (PIL Image or Tensor): Image to be flipped.
652
-
653
- Returns:
654
- PIL Image or Tensor: Randomly flipped image.
655
- """
656
- if torch.rand(1) < self.p:
657
- return F.hflip(img)
658
- return img
659
-
660
- def __repr__(self):
661
- return self.__class__.__name__ + '(p={})'.format(self.p)
662
-
663
-
664
- class RandomVerticalFlip(torch.nn.Module):
665
- """Vertically flip the given image randomly with a given probability.
666
- If the image is torch Tensor, it is expected
667
- to have [..., H, W] shape, where ... means an arbitrary number of leading
668
- dimensions
669
-
670
- Args:
671
- p (float): probability of the image being flipped. Default value is 0.5
672
- """
673
-
674
- def __init__(self, p=0.5):
675
- super().__init__()
676
- self.p = p
677
-
678
- def forward(self, img):
679
- """
680
- Args:
681
- img (PIL Image or Tensor): Image to be flipped.
682
-
683
- Returns:
684
- PIL Image or Tensor: Randomly flipped image.
685
- """
686
- if torch.rand(1) < self.p:
687
- return F.vflip(img)
688
- return img
689
-
690
- def __repr__(self):
691
- return self.__class__.__name__ + '(p={})'.format(self.p)
692
-
693
-
694
- class RandomPerspective(torch.nn.Module):
695
- """Performs a random perspective transformation of the given image with a given probability.
696
- If the image is torch Tensor, it is expected
697
- to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
698
-
699
- Args:
700
- distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
701
- Default is 0.5.
702
- p (float): probability of the image being transformed. Default is 0.5.
703
- interpolation (InterpolationMode): Desired interpolation enum defined by
704
- :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
705
- If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
706
- For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
707
- fill (sequence or number): Pixel fill value for the area outside the transformed
708
- image. Default is ``0``. If given a number, the value is used for all bands respectively.
709
- """
710
-
711
- def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):
712
- super().__init__()
713
- self.p = p
714
-
715
- # Backward compatibility with integer value
716
- if isinstance(interpolation, int):
717
- warnings.warn(
718
- "Argument interpolation should be of type InterpolationMode instead of int. "
719
- "Please, use InterpolationMode enum."
720
- )
721
- interpolation = _interpolation_modes_from_int(interpolation)
722
-
723
- self.interpolation = interpolation
724
- self.distortion_scale = distortion_scale
725
-
726
- if fill is None:
727
- fill = 0
728
- elif not isinstance(fill, (Sequence, numbers.Number)):
729
- raise TypeError("Fill should be either a sequence or a number.")
730
-
731
- self.fill = fill
732
-
733
- def forward(self, img):
734
- """
735
- Args:
736
- img (PIL Image or Tensor): Image to be Perspectively transformed.
737
-
738
- Returns:
739
- PIL Image or Tensor: Randomly transformed image.
740
- """
741
-
742
- fill = self.fill
743
- if isinstance(img, Tensor):
744
- if isinstance(fill, (int, float)):
745
- fill = [float(fill)] * F._get_image_num_channels(img)
746
- else:
747
- fill = [float(f) for f in fill]
748
-
749
- if torch.rand(1) < self.p:
750
- width, height = F._get_image_size(img)
751
- startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
752
- return F.perspective(img, startpoints, endpoints, self.interpolation, fill)
753
- return img
754
-
755
- @staticmethod
756
- def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:
757
- """Get parameters for ``perspective`` for a random perspective transform.
758
-
759
- Args:
760
- width (int): width of the image.
761
- height (int): height of the image.
762
- distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
763
-
764
- Returns:
765
- List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
766
- List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
767
- """
768
- half_height = height // 2
769
- half_width = width // 2
770
- topleft = [
771
- int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),
772
- int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
773
- ]
774
- topright = [
775
- int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),
776
- int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
777
- ]
778
- botright = [
779
- int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),
780
- int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
781
- ]
782
- botleft = [
783
- int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),
784
- int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
785
- ]
786
- startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
787
- endpoints = [topleft, topright, botright, botleft]
788
- return startpoints, endpoints
789
-
790
- def __repr__(self):
791
- return self.__class__.__name__ + '(p={})'.format(self.p)
792
-
793
-
794
- class RandomResizedCrop(torch.nn.Module):
795
- """Crop a random portion of image and resize it to a given size.
796
-
797
- If the image is torch Tensor, it is expected
798
- to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
799
-
800
- A crop of the original image is made: the crop has a random area (H * W)
801
- and a random aspect ratio. This crop is finally resized to the given
802
- size. This is popularly used to train the Inception networks.
803
-
804
- Args:
805
- size (int or sequence): expected output size of the crop, for each edge. If size is an
806
- int instead of sequence like (h, w), a square output size ``(size, size)`` is
807
- made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
808
-
809
- .. note::
810
- In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
811
- scale (tuple of float): Specifies the lower and upper bounds for the random area of the crop,
812
- before resizing. The scale is defined with respect to the area of the original image.
813
- ratio (tuple of float): lower and upper bounds for the random aspect ratio of the crop, before
814
- resizing.
815
- interpolation (InterpolationMode): Desired interpolation enum defined by
816
- :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
817
- If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
818
- ``InterpolationMode.BICUBIC`` are supported.
819
- For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
820
-
821
- """
822
-
823
- def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR):
824
- super().__init__()
825
- self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
826
-
827
- if not isinstance(scale, Sequence):
828
- raise TypeError("Scale should be a sequence")
829
- if not isinstance(ratio, Sequence):
830
- raise TypeError("Ratio should be a sequence")
831
- if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
832
- warnings.warn("Scale and ratio should be of kind (min, max)")
833
-
834
- # Backward compatibility with integer value
835
- if isinstance(interpolation, int):
836
- warnings.warn(
837
- "Argument interpolation should be of type InterpolationMode instead of int. "
838
- "Please, use InterpolationMode enum."
839
- )
840
- interpolation = _interpolation_modes_from_int(interpolation)
841
-
842
- self.interpolation = interpolation
843
- self.scale = scale
844
- self.ratio = ratio
845
-
846
- @staticmethod
847
- def get_params(
848
- img: Tensor, scale: List[float], ratio: List[float]
849
- ) -> Tuple[int, int, int, int]:
850
- """Get parameters for ``crop`` for a random sized crop.
851
-
852
- Args:
853
- img (PIL Image or Tensor): Input image.
854
- scale (list): range of scale of the origin size cropped
855
- ratio (list): range of aspect ratio of the origin aspect ratio cropped
856
-
857
- Returns:
858
- tuple: params (i, j, h, w) to be passed to ``crop`` for a random
859
- sized crop.
860
- """
861
- width, height = F._get_image_size(img)
862
- area = height * width
863
-
864
- log_ratio = torch.log(torch.tensor(ratio))
865
- for _ in range(10):
866
- target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
867
- aspect_ratio = torch.exp(
868
- torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
869
- ).item()
870
-
871
- w = int(round(math.sqrt(target_area * aspect_ratio)))
872
- h = int(round(math.sqrt(target_area / aspect_ratio)))
873
-
874
- if 0 < w <= width and 0 < h <= height:
875
- i = torch.randint(0, height - h + 1, size=(1,)).item()
876
- j = torch.randint(0, width - w + 1, size=(1,)).item()
877
- return i, j, h, w
878
-
879
- # Fallback to central crop
880
- in_ratio = float(width) / float(height)
881
- if in_ratio < min(ratio):
882
- w = width
883
- h = int(round(w / min(ratio)))
884
- elif in_ratio > max(ratio):
885
- h = height
886
- w = int(round(h * max(ratio)))
887
- else: # whole image
888
- w = width
889
- h = height
890
- i = (height - h) // 2
891
- j = (width - w) // 2
892
- return i, j, h, w
893
-
894
- def forward(self, img):
895
- """
896
- Args:
897
- img (PIL Image or Tensor): Image to be cropped and resized.
898
-
899
- Returns:
900
- PIL Image or Tensor: Randomly cropped and resized image.
901
- """
902
- i, j, h, w = self.get_params(img, self.scale, self.ratio)
903
- return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
904
-
905
- def __repr__(self):
906
- interpolate_str = self.interpolation.value
907
- format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
908
- format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
909
- format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
910
- format_string += ', interpolation={0})'.format(interpolate_str)
911
- return format_string
912
-
913
-
914
- class RandomSizedCrop(RandomResizedCrop):
915
- """
916
- Note: This transform is deprecated in favor of RandomResizedCrop.
917
- """
918
- def __init__(self, *args, **kwargs):
919
- warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
920
- "please use transforms.RandomResizedCrop instead.")
921
- super(RandomSizedCrop, self).__init__(*args, **kwargs)
922
-
923
-
924
- class FiveCrop(torch.nn.Module):
925
- """Crop the given image into four corners and the central crop.
926
- If the image is torch Tensor, it is expected
927
- to have [..., H, W] shape, where ... means an arbitrary number of leading
928
- dimensions
929
-
930
- .. Note::
931
- This transform returns a tuple of images and there may be a mismatch in the number of
932
- inputs and targets your Dataset returns. See below for an example of how to deal with
933
- this.
934
-
935
- Args:
936
- size (sequence or int): Desired output size of the crop. If size is an ``int``
937
- instead of sequence like (h, w), a square crop of size (size, size) is made.
938
- If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
939
-
940
- Example:
941
- >>> transform = Compose([
942
- >>> FiveCrop(size), # this is a list of PIL Images
943
- >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
944
- >>> ])
945
- >>> #In your test loop you can do the following:
946
- >>> input, target = batch # input is a 5d tensor, target is 2d
947
- >>> bs, ncrops, c, h, w = input.size()
948
- >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
949
- >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
950
- """
951
-
952
- def __init__(self, size):
953
- super().__init__()
954
- self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
955
-
956
- def forward(self, img):
957
- """
958
- Args:
959
- img (PIL Image or Tensor): Image to be cropped.
960
-
961
- Returns:
962
- tuple of 5 images. Image can be PIL Image or Tensor
963
- """
964
- return F.five_crop(img, self.size)
965
-
966
- def __repr__(self):
967
- return self.__class__.__name__ + '(size={0})'.format(self.size)
968
-
969
-
970
- class TenCrop(torch.nn.Module):
971
- """Crop the given image into four corners and the central crop plus the flipped version of
972
- these (horizontal flipping is used by default).
973
- If the image is torch Tensor, it is expected
974
- to have [..., H, W] shape, where ... means an arbitrary number of leading
975
- dimensions
976
-
977
- .. Note::
978
- This transform returns a tuple of images and there may be a mismatch in the number of
979
- inputs and targets your Dataset returns. See below for an example of how to deal with
980
- this.
981
-
982
- Args:
983
- size (sequence or int): Desired output size of the crop. If size is an
984
- int instead of sequence like (h, w), a square crop (size, size) is
985
- made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
986
- vertical_flip (bool): Use vertical flipping instead of horizontal
987
-
988
- Example:
989
- >>> transform = Compose([
990
- >>> TenCrop(size), # this is a list of PIL Images
991
- >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
992
- >>> ])
993
- >>> #In your test loop you can do the following:
994
- >>> input, target = batch # input is a 5d tensor, target is 2d
995
- >>> bs, ncrops, c, h, w = input.size()
996
- >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
997
- >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
998
- """
999
-
1000
- def __init__(self, size, vertical_flip=False):
1001
- super().__init__()
1002
- self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
1003
- self.vertical_flip = vertical_flip
1004
-
1005
- def forward(self, img):
1006
- """
1007
- Args:
1008
- img (PIL Image or Tensor): Image to be cropped.
1009
-
1010
- Returns:
1011
- tuple of 10 images. Image can be PIL Image or Tensor
1012
- """
1013
- return F.ten_crop(img, self.size, self.vertical_flip)
1014
-
1015
- def __repr__(self):
1016
- return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
1017
-
1018
-
1019
- class LinearTransformation(torch.nn.Module):
1020
- """Transform a tensor image with a square transformation matrix and a mean_vector computed
1021
- offline.
1022
- This transform does not support PIL Image.
1023
- Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
1024
- subtract mean_vector from it which is then followed by computing the dot
1025
- product with the transformation matrix and then reshaping the tensor to its
1026
- original shape.
1027
-
1028
- Applications:
1029
- whitening transformation: Suppose X is a column vector zero-centered data.
1030
- Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
1031
- perform SVD on this matrix and pass it as transformation_matrix.
1032
-
1033
- Args:
1034
- transformation_matrix (Tensor): tensor [D x D], D = C x H x W
1035
- mean_vector (Tensor): tensor [D], D = C x H x W
1036
- """
1037
-
1038
- def __init__(self, transformation_matrix, mean_vector):
1039
- super().__init__()
1040
- if transformation_matrix.size(0) != transformation_matrix.size(1):
1041
- raise ValueError("transformation_matrix should be square. Got " +
1042
- "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
1043
-
1044
- if mean_vector.size(0) != transformation_matrix.size(0):
1045
- raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
1046
- " as any one of the dimensions of the transformation_matrix [{}]"
1047
- .format(tuple(transformation_matrix.size())))
1048
-
1049
- if transformation_matrix.device != mean_vector.device:
1050
- raise ValueError("Input tensors should be on the same device. Got {} and {}"
1051
- .format(transformation_matrix.device, mean_vector.device))
1052
-
1053
- self.transformation_matrix = transformation_matrix
1054
- self.mean_vector = mean_vector
1055
-
1056
- def forward(self, tensor: Tensor) -> Tensor:
1057
- """
1058
- Args:
1059
- tensor (Tensor): Tensor image to be whitened.
1060
-
1061
- Returns:
1062
- Tensor: Transformed image.
1063
- """
1064
- shape = tensor.shape
1065
- n = shape[-3] * shape[-2] * shape[-1]
1066
- if n != self.transformation_matrix.shape[0]:
1067
- raise ValueError("Input tensor and transformation matrix have incompatible shape." +
1068
- "[{} x {} x {}] != ".format(shape[-3], shape[-2], shape[-1]) +
1069
- "{}".format(self.transformation_matrix.shape[0]))
1070
-
1071
- if tensor.device.type != self.mean_vector.device.type:
1072
- raise ValueError("Input tensor should be on the same device as transformation matrix and mean vector. "
1073
- "Got {} vs {}".format(tensor.device, self.mean_vector.device))
1074
-
1075
- flat_tensor = tensor.view(-1, n) - self.mean_vector
1076
- transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
1077
- tensor = transformed_tensor.view(shape)
1078
- return tensor
1079
-
1080
- def __repr__(self):
1081
- format_string = self.__class__.__name__ + '(transformation_matrix='
1082
- format_string += (str(self.transformation_matrix.tolist()) + ')')
1083
- format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')')
1084
- return format_string
1085
-
1086
-
1087
- class ColorJitter(torch.nn.Module):
-     """Randomly change the brightness, contrast, saturation and hue of an image.
-     If the image is torch Tensor, it is expected
-     to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-     If img is PIL Image, mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported.
-
-     Args:
-         brightness (float or tuple of float (min, max)): How much to jitter brightness.
-             brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
-             or the given [min, max]. Should be non-negative numbers.
-         contrast (float or tuple of float (min, max)): How much to jitter contrast.
-             contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
-             or the given [min, max]. Should be non-negative numbers.
-         saturation (float or tuple of float (min, max)): How much to jitter saturation.
-             saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
-             or the given [min, max]. Should be non-negative numbers.
-         hue (float or tuple of float (min, max)): How much to jitter hue.
-             hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
-             Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
-     """
-
-     def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
-         super().__init__()
-         self.brightness = self._check_input(brightness, 'brightness')
-         self.contrast = self._check_input(contrast, 'contrast')
-         self.saturation = self._check_input(saturation, 'saturation')
-         self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
-                                      clip_first_on_zero=False)
-
-     @torch.jit.unused
-     def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
-         if isinstance(value, numbers.Number):
-             if value < 0:
-                 raise ValueError("If {} is a single number, it must be non-negative.".format(name))
-             value = [center - float(value), center + float(value)]
-             if clip_first_on_zero:
-                 value[0] = max(value[0], 0.0)
-         elif isinstance(value, (tuple, list)) and len(value) == 2:
-             if not bound[0] <= value[0] <= value[1] <= bound[1]:
-                 raise ValueError("{} values should be between {}".format(name, bound))
-         else:
-             raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
-
-         # if value is 0 or (1., 1.) for brightness/contrast/saturation
-         # or (0., 0.) for hue, do nothing
-         if value[0] == value[1] == center:
-             value = None
-         return value
-
-     @staticmethod
-     def get_params(brightness: Optional[List[float]],
-                    contrast: Optional[List[float]],
-                    saturation: Optional[List[float]],
-                    hue: Optional[List[float]]
-                    ) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:
-         """Get the parameters for the randomized transform to be applied on image.
-
-         Args:
-             brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen
-                 uniformly. Pass None to turn off the transformation.
-             contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen
-                 uniformly. Pass None to turn off the transformation.
-             saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen
-                 uniformly. Pass None to turn off the transformation.
-             hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.
-                 Pass None to turn off the transformation.
-
-         Returns:
-             tuple: The parameters used to apply the randomized transform
-             along with their random order.
-         """
-         fn_idx = torch.randperm(4)
-
-         b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))
-         c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))
-         s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))
-         h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))
-
-         return fn_idx, b, c, s, h
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Input image.
-
-         Returns:
-             PIL Image or Tensor: Color jittered image.
-         """
-         fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \
-             self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
-
-         for fn_id in fn_idx:
-             if fn_id == 0 and brightness_factor is not None:
-                 img = F.adjust_brightness(img, brightness_factor)
-             elif fn_id == 1 and contrast_factor is not None:
-                 img = F.adjust_contrast(img, contrast_factor)
-             elif fn_id == 2 and saturation_factor is not None:
-                 img = F.adjust_saturation(img, saturation_factor)
-             elif fn_id == 3 and hue_factor is not None:
-                 img = F.adjust_hue(img, hue_factor)
-
-         return img
-
-     def __repr__(self):
-         format_string = self.__class__.__name__ + '('
-         format_string += 'brightness={0}'.format(self.brightness)
-         format_string += ', contrast={0}'.format(self.contrast)
-         format_string += ', saturation={0}'.format(self.saturation)
-         format_string += ', hue={0})'.format(self.hue)
-         return format_string
-
-
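A minimal usage sketch for ColorJitter as defined above; the jitter ranges are arbitrary examples. As _check_input shows, a single number v expands to [max(0, 1 - v), 1 + v] (hue to [-v, v]), while a (min, max) pair is taken as-is.

import torch
from torchvision import transforms

jitter = transforms.ColorJitter(brightness=0.4, contrast=0.4,
                                saturation=(0.5, 1.5), hue=0.1)
img = torch.rand(3, 224, 224)   # random float RGB tensor standing in for a real image
out = jitter(img)               # factors are resampled, in random order, on every call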
- class RandomRotation(torch.nn.Module):
-     """Rotate the image by angle.
-     If the image is torch Tensor, it is expected
-     to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
-
-     Args:
-         degrees (sequence or number): Range of degrees to select from.
-             If degrees is a number instead of sequence like (min, max), the range of degrees
-             will be (-degrees, +degrees).
-         interpolation (InterpolationMode): Desired interpolation enum defined by
-             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
-             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-             For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
-         expand (bool, optional): Optional expansion flag.
-             If true, expands the output to make it large enough to hold the entire rotated image.
-             If false or omitted, make the output image the same size as the input image.
-             Note that the expand flag assumes rotation around the center and no translation.
-         center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
-             Default is the center of the image.
-         fill (sequence or number): Pixel fill value for the area outside the rotated
-             image. Default is ``0``. If given a number, the value is used for all bands.
-         resample (int, optional): deprecated argument and will be removed since v0.10.0.
-             Please use the ``interpolation`` parameter instead.
-
-     .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
-
-     """
-
-     def __init__(
-         self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None
-     ):
-         super().__init__()
-         if resample is not None:
-             warnings.warn(
-                 "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
-             )
-             interpolation = _interpolation_modes_from_int(resample)
-
-         # Backward compatibility with integer value
-         if isinstance(interpolation, int):
-             warnings.warn(
-                 "Argument interpolation should be of type InterpolationMode instead of int. "
-                 "Please, use InterpolationMode enum."
-             )
-             interpolation = _interpolation_modes_from_int(interpolation)
-
-         self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
-
-         if center is not None:
-             _check_sequence_input(center, "center", req_sizes=(2, ))
-
-         self.center = center
-
-         self.resample = self.interpolation = interpolation
-         self.expand = expand
-
-         if fill is None:
-             fill = 0
-         elif not isinstance(fill, (Sequence, numbers.Number)):
-             raise TypeError("Fill should be either a sequence or a number.")
-
-         self.fill = fill
-
-     @staticmethod
-     def get_params(degrees: List[float]) -> float:
-         """Get parameters for ``rotate`` for a random rotation.
-
-         Returns:
-             float: angle parameter to be passed to ``rotate`` for random rotation.
-         """
-         angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
-         return angle
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be rotated.
-
-         Returns:
-             PIL Image or Tensor: Rotated image.
-         """
-         fill = self.fill
-         if isinstance(img, Tensor):
-             if isinstance(fill, (int, float)):
-                 fill = [float(fill)] * F._get_image_num_channels(img)
-             else:
-                 fill = [float(f) for f in fill]
-         angle = self.get_params(self.degrees)
-
-         return F.rotate(img, angle, self.resample, self.expand, self.center, fill)
-
-     def __repr__(self):
-         interpolate_str = self.interpolation.value
-         format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
-         format_string += ', interpolation={0}'.format(interpolate_str)
-         format_string += ', expand={0}'.format(self.expand)
-         if self.center is not None:
-             format_string += ', center={0}'.format(self.center)
-         if self.fill is not None:
-             format_string += ', fill={0}'.format(self.fill)
-         format_string += ')'
-         return format_string
-
-
- class RandomAffine(torch.nn.Module):
-     """Random affine transformation of the image keeping center invariant.
-     If the image is torch Tensor, it is expected
-     to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
-
-     Args:
-         degrees (sequence or number): Range of degrees to select from.
-             If degrees is a number instead of sequence like (min, max), the range of degrees
-             will be (-degrees, +degrees). Set to 0 to deactivate rotations.
-         translate (tuple, optional): tuple of maximum absolute fraction for horizontal
-             and vertical translations. For example translate=(a, b), then horizontal shift
-             is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
-             randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
-         scale (tuple, optional): scaling factor interval, e.g. (a, b), then scale is
-             randomly sampled from the range a <= scale <= b. Will keep original scale by default.
-         shear (sequence or number, optional): Range of degrees to select from.
-             If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
-             will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the
-             range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,
-             an x-axis shear in (shear[0], shear[1]) and a y-axis shear in (shear[2], shear[3]) will be applied.
-             Will not apply shear by default.
-         interpolation (InterpolationMode): Desired interpolation enum defined by
-             :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
-             If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-             For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
-         fill (sequence or number): Pixel fill value for the area outside the transformed
-             image. Default is ``0``. If given a number, the value is used for all bands.
-         fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0.
-             Please use the ``fill`` parameter instead.
-         resample (int, optional): deprecated argument and will be removed since v0.10.0.
-             Please use the ``interpolation`` parameter instead.
-
-     .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
-
-     """
-
-     def __init__(
-         self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0,
-         fillcolor=None, resample=None
-     ):
-         super().__init__()
-         if resample is not None:
-             warnings.warn(
-                 "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
-             )
-             interpolation = _interpolation_modes_from_int(resample)
-
-         # Backward compatibility with integer value
-         if isinstance(interpolation, int):
-             warnings.warn(
-                 "Argument interpolation should be of type InterpolationMode instead of int. "
-                 "Please, use InterpolationMode enum."
-             )
-             interpolation = _interpolation_modes_from_int(interpolation)
-
-         if fillcolor is not None:
-             warnings.warn(
-                 "Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead"
-             )
-             fill = fillcolor
-
-         self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
-
-         if translate is not None:
-             _check_sequence_input(translate, "translate", req_sizes=(2, ))
-             for t in translate:
-                 if not (0.0 <= t <= 1.0):
-                     raise ValueError("translation values should be between 0 and 1")
-         self.translate = translate
-
-         if scale is not None:
-             _check_sequence_input(scale, "scale", req_sizes=(2, ))
-             for s in scale:
-                 if s <= 0:
-                     raise ValueError("scale values should be positive")
-         self.scale = scale
-
-         if shear is not None:
-             self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4))
-         else:
-             self.shear = shear
-
-         self.resample = self.interpolation = interpolation
-
-         if fill is None:
-             fill = 0
-         elif not isinstance(fill, (Sequence, numbers.Number)):
-             raise TypeError("Fill should be either a sequence or a number.")
-
-         self.fillcolor = self.fill = fill
-
-     @staticmethod
-     def get_params(
-             degrees: List[float],
-             translate: Optional[List[float]],
-             scale_ranges: Optional[List[float]],
-             shears: Optional[List[float]],
-             img_size: List[int]
-     ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:
-         """Get parameters for affine transformation
-
-         Returns:
-             params to be passed to the affine transformation
-         """
-         angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
-         if translate is not None:
-             max_dx = float(translate[0] * img_size[0])
-             max_dy = float(translate[1] * img_size[1])
-             tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))
-             ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))
-             translations = (tx, ty)
-         else:
-             translations = (0, 0)
-
-         if scale_ranges is not None:
-             scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())
-         else:
-             scale = 1.0
-
-         shear_x = shear_y = 0.0
-         if shears is not None:
-             shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())
-             if len(shears) == 4:
-                 shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())
-
-         shear = (shear_x, shear_y)
-
-         return angle, translations, scale, shear
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be transformed.
-
-         Returns:
-             PIL Image or Tensor: Affine transformed image.
-         """
-         fill = self.fill
-         if isinstance(img, Tensor):
-             if isinstance(fill, (int, float)):
-                 fill = [float(fill)] * F._get_image_num_channels(img)
-             else:
-                 fill = [float(f) for f in fill]
-
-         img_size = F._get_image_size(img)
-
-         ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)
-
-         return F.affine(img, *ret, interpolation=self.interpolation, fill=fill)
-
-     def __repr__(self):
-         s = '{name}(degrees={degrees}'
-         if self.translate is not None:
-             s += ', translate={translate}'
-         if self.scale is not None:
-             s += ', scale={scale}'
-         if self.shear is not None:
-             s += ', shear={shear}'
-         if self.interpolation != InterpolationMode.NEAREST:
-             s += ', interpolation={interpolation}'
-         if self.fill != 0:
-             s += ', fill={fill}'
-         s += ')'
-         d = dict(self.__dict__)
-         d['interpolation'] = self.interpolation.value
-         return s.format(name=self.__class__.__name__, **d)
-
-
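An illustrative call into the RandomAffine class above, with arbitrary magnitudes; translate fractions are relative to image width and height, and shear values are in degrees, matching the Args block.

import torch
from torchvision import transforms
from torchvision.transforms import InterpolationMode

affine = transforms.RandomAffine(
    degrees=15,                # rotation angle drawn from (-15, +15)
    translate=(0.1, 0.1),      # up to 10% shift along x and y
    scale=(0.8, 1.2),          # uniform zoom factor
    shear=(-5, 5),             # x-axis shear range in degrees
    interpolation=InterpolationMode.BILINEAR,
)
out = affine(torch.rand(3, 64, 64))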
- class Grayscale(torch.nn.Module):
-     """Convert image to grayscale.
-     If the image is torch Tensor, it is expected
-     to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
-
-     Args:
-         num_output_channels (int): (1 or 3) number of channels desired for output image
-
-     Returns:
-         PIL Image: Grayscale version of the input.
-
-         - If ``num_output_channels == 1`` : returned image is single channel
-         - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
-
-     """
-
-     def __init__(self, num_output_channels=1):
-         super().__init__()
-         self.num_output_channels = num_output_channels
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be converted to grayscale.
-
-         Returns:
-             PIL Image or Tensor: Grayscaled image.
-         """
-         return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
-
-
- class RandomGrayscale(torch.nn.Module):
-     """Randomly convert image to grayscale with a probability of p (default 0.1).
-     If the image is torch Tensor, it is expected
-     to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
-
-     Args:
-         p (float): probability that image should be converted to grayscale.
-
-     Returns:
-         PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged
-         with probability (1-p).
-         - If input image is 1 channel: grayscale version is 1 channel
-         - If input image is 3 channel: grayscale version is 3 channel with r == g == b
-
-     """
-
-     def __init__(self, p=0.1):
-         super().__init__()
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be converted to grayscale.
-
-         Returns:
-             PIL Image or Tensor: Randomly grayscaled image.
-         """
-         num_output_channels = F._get_image_num_channels(img)
-         if torch.rand(1) < self.p:
-             return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(p={0})'.format(self.p)
-
-
- class RandomErasing(torch.nn.Module):
-     """Randomly selects a rectangle region in a torch Tensor image and erases its pixels.
-     This transform does not support PIL Image.
-     'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896
-
-     Args:
-         p: probability that the random erasing operation will be performed.
-         scale: range of proportion of erased area against input image.
-         ratio: range of aspect ratio of erased area.
-         value: erasing value. Default is 0. If a single int, it is used to
-             erase all pixels. If a tuple of length 3, it is used to erase
-             R, G, B channels respectively.
-             If the str 'random', each pixel is erased with random values.
-         inplace: boolean to make this transform inplace. Default is False.
-
-     Returns:
-         Erased Image.
-
-     Example:
-         >>> transform = transforms.Compose([
-         >>>     transforms.RandomHorizontalFlip(),
-         >>>     transforms.ToTensor(),
-         >>>     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
-         >>>     transforms.RandomErasing(),
-         >>> ])
-     """
-
-     def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
-         super().__init__()
-         if not isinstance(value, (numbers.Number, str, tuple, list)):
-             raise TypeError("Argument value should be either a number or str or a sequence")
-         if isinstance(value, str) and value != "random":
-             raise ValueError("If value is str, it should be 'random'")
-         if not isinstance(scale, (tuple, list)):
-             raise TypeError("Scale should be a sequence")
-         if not isinstance(ratio, (tuple, list)):
-             raise TypeError("Ratio should be a sequence")
-         if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
-             warnings.warn("Scale and ratio should be of kind (min, max)")
-         if scale[0] < 0 or scale[1] > 1:
-             raise ValueError("Scale should be between 0 and 1")
-         if p < 0 or p > 1:
-             raise ValueError("Random erasing probability should be between 0 and 1")
-
-         self.p = p
-         self.scale = scale
-         self.ratio = ratio
-         self.value = value
-         self.inplace = inplace
-
-     @staticmethod
-     def get_params(
-             img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None
-     ) -> Tuple[int, int, int, int, Tensor]:
-         """Get parameters for ``erase`` for a random erasing.
-
-         Args:
-             img (Tensor): Tensor image to be erased.
-             scale (sequence): range of proportion of erased area against input image.
-             ratio (sequence): range of aspect ratio of erased area.
-             value (list, optional): erasing value. If None, it is interpreted as "random"
-                 (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,
-                 i.e. ``value[0]``.
-
-         Returns:
-             tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
-         """
-         img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]
-         area = img_h * img_w
-
-         log_ratio = torch.log(torch.tensor(ratio))
-         for _ in range(10):
-             erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
-             aspect_ratio = torch.exp(
-                 torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
-             ).item()
-
-             h = int(round(math.sqrt(erase_area * aspect_ratio)))
-             w = int(round(math.sqrt(erase_area / aspect_ratio)))
-             if not (h < img_h and w < img_w):
-                 continue
-
-             if value is None:
-                 v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
-             else:
-                 v = torch.tensor(value)[:, None, None]
-
-             i = torch.randint(0, img_h - h + 1, size=(1, )).item()
-             j = torch.randint(0, img_w - w + 1, size=(1, )).item()
-             return i, j, h, w, v
-
-         # Return original image
-         return 0, 0, img_h, img_w, img
-
-     def forward(self, img):
-         """
-         Args:
-             img (Tensor): Tensor image to be erased.
-
-         Returns:
-             img (Tensor): Erased Tensor image.
-         """
-         if torch.rand(1) < self.p:
-
-             # cast self.value to script acceptable type
-             if isinstance(self.value, (int, float)):
-                 value = [self.value, ]
-             elif isinstance(self.value, str):
-                 value = None
-             elif isinstance(self.value, tuple):
-                 value = list(self.value)
-             else:
-                 value = self.value
-
-             if value is not None and not (len(value) in (1, img.shape[-3])):
-                 raise ValueError(
-                     "If value is a sequence, it should have either a single value or "
-                     "{} (number of input channels)".format(img.shape[-3])
-                 )
-
-             x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)
-             return F.erase(img, x, y, h, w, v, self.inplace)
-         return img
-
-     def __repr__(self):
-         s = '(p={}, '.format(self.p)
-         s += 'scale={}, '.format(self.scale)
-         s += 'ratio={}, '.format(self.ratio)
-         s += 'value={}, '.format(self.value)
-         s += 'inplace={})'.format(self.inplace)
-         return self.__class__.__name__ + s
-
-
- class GaussianBlur(torch.nn.Module):
-     """Blurs image with randomly chosen Gaussian blur.
-     If the image is torch Tensor, it is expected
-     to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.
-
-     Args:
-         kernel_size (int or sequence): Size of the Gaussian kernel.
-         sigma (float or tuple of float (min, max)): Standard deviation to be used for
-             creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
-             of float (min, max), sigma is chosen uniformly at random to lie in the
-             given range.
-
-     Returns:
-         PIL Image or Tensor: Gaussian blurred version of the input image.
-
-     """
-
-     def __init__(self, kernel_size, sigma=(0.1, 2.0)):
-         super().__init__()
-         self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
-         for ks in self.kernel_size:
-             if ks <= 0 or ks % 2 == 0:
-                 raise ValueError("Kernel size value should be an odd and positive number.")
-
-         if isinstance(sigma, numbers.Number):
-             if sigma <= 0:
-                 raise ValueError("If sigma is a single number, it must be positive.")
-             sigma = (sigma, sigma)
-         elif isinstance(sigma, Sequence) and len(sigma) == 2:
-             if not 0. < sigma[0] <= sigma[1]:
-                 raise ValueError("sigma values should be positive and of the form (min, max).")
-         else:
-             raise ValueError("sigma should be a single number or a list/tuple with length 2.")
-
-         self.sigma = sigma
-
-     @staticmethod
-     def get_params(sigma_min: float, sigma_max: float) -> float:
-         """Choose sigma for random Gaussian blurring.
-
-         Args:
-             sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
-             sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.
-
-         Returns:
-             float: Standard deviation to be passed to calculate kernel for Gaussian blurring.
-         """
-         return torch.empty(1).uniform_(sigma_min, sigma_max).item()
-
-     def forward(self, img: Tensor) -> Tensor:
-         """
-         Args:
-             img (PIL Image or Tensor): image to be blurred.
-
-         Returns:
-             PIL Image or Tensor: Gaussian blurred image
-         """
-         sigma = self.get_params(self.sigma[0], self.sigma[1])
-         return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])
-
-     def __repr__(self):
-         s = '(kernel_size={}, '.format(self.kernel_size)
-         s += 'sigma={})'.format(self.sigma)
-         return self.__class__.__name__ + s
-
-
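A short sketch for GaussianBlur as listed above: kernel_size must be odd and positive, and giving sigma as a (min, max) pair means a fresh standard deviation is drawn on every call.

import torch
from torchvision import transforms

blur = transforms.GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))
out = blur(torch.rand(3, 32, 32))   # sigma ~ U(0.1, 2.0), resampled each call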
- def _setup_size(size, error_msg):
-     if isinstance(size, numbers.Number):
-         return int(size), int(size)
-
-     if isinstance(size, Sequence) and len(size) == 1:
-         return size[0], size[0]
-
-     if len(size) != 2:
-         raise ValueError(error_msg)
-
-     return size
-
-
- def _check_sequence_input(x, name, req_sizes):
-     msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes])
-     if not isinstance(x, Sequence):
-         raise TypeError("{} should be a sequence of length {}.".format(name, msg))
-     if len(x) not in req_sizes:
-         raise ValueError("{} should be a sequence of length {}.".format(name, msg))
-
-
- def _setup_angle(x, name, req_sizes=(2, )):
-     if isinstance(x, numbers.Number):
-         if x < 0:
-             raise ValueError("If {} is a single number, it must be positive.".format(name))
-         x = [-x, x]
-     else:
-         _check_sequence_input(x, name, req_sizes)
-
-     return [float(d) for d in x]
-
-
- class RandomInvert(torch.nn.Module):
-     """Inverts the colors of the given image randomly with a given probability.
-     If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
-     where ... means it can have an arbitrary number of leading dimensions.
-     If img is PIL Image, it is expected to be in mode "L" or "RGB".
-
-     Args:
-         p (float): probability of the image being color inverted. Default value is 0.5
-     """
-
-     def __init__(self, p=0.5):
-         super().__init__()
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be inverted.
-
-         Returns:
-             PIL Image or Tensor: Randomly color inverted image.
-         """
-         if torch.rand(1).item() < self.p:
-             return F.invert(img)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(p={})'.format(self.p)
-
-
- class RandomPosterize(torch.nn.Module):
-     """Posterize the image randomly with a given probability by reducing the
-     number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,
-     and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-     If img is PIL Image, it is expected to be in mode "L" or "RGB".
-
-     Args:
-         bits (int): number of bits to keep for each channel (0-8)
-         p (float): probability of the image being posterized. Default value is 0.5
-     """
-
-     def __init__(self, bits, p=0.5):
-         super().__init__()
-         self.bits = bits
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be posterized.
-
-         Returns:
-             PIL Image or Tensor: Randomly posterized image.
-         """
-         if torch.rand(1).item() < self.p:
-             return F.posterize(img, self.bits)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(bits={}, p={})'.format(self.bits, self.p)
-
-
- class RandomSolarize(torch.nn.Module):
-     """Solarize the image randomly with a given probability by inverting all pixel
-     values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
-     where ... means it can have an arbitrary number of leading dimensions.
-     If img is PIL Image, it is expected to be in mode "L" or "RGB".
-
-     Args:
-         threshold (float): all pixels equal or above this value are inverted.
-         p (float): probability of the image being solarized. Default value is 0.5
-     """
-
-     def __init__(self, threshold, p=0.5):
-         super().__init__()
-         self.threshold = threshold
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be solarized.
-
-         Returns:
-             PIL Image or Tensor: Randomly solarized image.
-         """
-         if torch.rand(1).item() < self.p:
-             return F.solarize(img, self.threshold)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(threshold={}, p={})'.format(self.threshold, self.p)
-
-
- class RandomAdjustSharpness(torch.nn.Module):
-     """Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,
-     it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-
-     Args:
-         sharpness_factor (float): How much to adjust the sharpness. Can be
-             any non-negative number. 0 gives a blurred image, 1 gives the
-             original image while 2 increases the sharpness by a factor of 2.
-         p (float): probability of the image being sharpness adjusted. Default value is 0.5
-     """
-
-     def __init__(self, sharpness_factor, p=0.5):
-         super().__init__()
-         self.sharpness_factor = sharpness_factor
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be sharpened.
-
-         Returns:
-             PIL Image or Tensor: Randomly sharpened image.
-         """
-         if torch.rand(1).item() < self.p:
-             return F.adjust_sharpness(img, self.sharpness_factor)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(sharpness_factor={}, p={})'.format(self.sharpness_factor, self.p)
-
-
- class RandomAutocontrast(torch.nn.Module):
-     """Autocontrast the pixels of the given image randomly with a given probability.
-     If the image is torch Tensor, it is expected
-     to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-     If img is PIL Image, it is expected to be in mode "L" or "RGB".
-
-     Args:
-         p (float): probability of the image being autocontrasted. Default value is 0.5
-     """
-
-     def __init__(self, p=0.5):
-         super().__init__()
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be autocontrasted.
-
-         Returns:
-             PIL Image or Tensor: Randomly autocontrasted image.
-         """
-         if torch.rand(1).item() < self.p:
-             return F.autocontrast(img)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(p={})'.format(self.p)
-
-
- class RandomEqualize(torch.nn.Module):
-     """Equalize the histogram of the given image randomly with a given probability.
-     If the image is torch Tensor, it is expected
-     to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-     If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
-
-     Args:
-         p (float): probability of the image being equalized. Default value is 0.5
-     """
-
-     def __init__(self, p=0.5):
-         super().__init__()
-         self.p = p
-
-     def forward(self, img):
-         """
-         Args:
-             img (PIL Image or Tensor): Image to be equalized.
-
-         Returns:
-             PIL Image or Tensor: Randomly equalized image.
-         """
-         if torch.rand(1).item() < self.p:
-             return F.equalize(img)
-         return img
-
-     def __repr__(self):
-         return self.__class__.__name__ + '(p={0})'.format(self.p)
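To tie the deleted classes together, here is a hedged end-to-end sketch composing the probabilistic transforms defined in this file (Compose appears earlier in the same module). Probabilities and magnitudes are arbitrary choices; the uint8 input is used because posterize and equalize operate on 8-bit pixel values.

import torch
from torchvision import transforms

augment = transforms.Compose([
    transforms.RandomInvert(p=0.25),
    transforms.RandomPosterize(bits=4, p=0.25),              # expects torch.uint8 tensors
    transforms.RandomAdjustSharpness(sharpness_factor=2, p=0.25),
    transforms.RandomAutocontrast(p=0.25),
    transforms.RandomEqualize(p=0.25),
])
img = (torch.rand(3, 128, 128) * 255).to(torch.uint8)        # stand-in for a real photo
out = augment(img)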
spaces/ChandraMohanNayal/AutoGPT/tests/test_config.py DELETED
@@ -1,84 +0,0 @@
- from unittest import TestCase
-
- from autogpt.config import Config
-
-
- class TestConfig(TestCase):
-     """
-     Test cases for the Config class, which handles the configuration settings
-     for the AI and ensures it behaves as a singleton.
-     """
-
-     def setUp(self):
-         """
-         Set up the test environment by creating an instance of the Config class.
-         """
-         self.config = Config()
-
-     def test_singleton(self):
-         """
-         Test if the Config class behaves as a singleton by ensuring that two instances are the same.
-         """
-         config2 = Config()
-         self.assertIs(self.config, config2)
-
-     def test_initial_values(self):
-         """
-         Test if the initial values of the Config class attributes are set correctly.
-         """
-         self.assertFalse(self.config.debug_mode)
-         self.assertFalse(self.config.continuous_mode)
-         self.assertFalse(self.config.speak_mode)
-         self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo")
-         self.assertEqual(self.config.smart_llm_model, "gpt-4")
-         self.assertEqual(self.config.fast_token_limit, 4000)
-         self.assertEqual(self.config.smart_token_limit, 8000)
-
-     def test_set_continuous_mode(self):
-         """
-         Test if the set_continuous_mode() method updates the continuous_mode attribute.
-         """
-         self.config.set_continuous_mode(True)
-         self.assertTrue(self.config.continuous_mode)
-
-     def test_set_speak_mode(self):
-         """
-         Test if the set_speak_mode() method updates the speak_mode attribute.
-         """
-         self.config.set_speak_mode(True)
-         self.assertTrue(self.config.speak_mode)
-
-     def test_set_fast_llm_model(self):
-         """
-         Test if the set_fast_llm_model() method updates the fast_llm_model attribute.
-         """
-         self.config.set_fast_llm_model("gpt-3.5-turbo-test")
-         self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test")
-
-     def test_set_smart_llm_model(self):
-         """
-         Test if the set_smart_llm_model() method updates the smart_llm_model attribute.
-         """
-         self.config.set_smart_llm_model("gpt-4-test")
-         self.assertEqual(self.config.smart_llm_model, "gpt-4-test")
-
-     def test_set_fast_token_limit(self):
-         """
-         Test if the set_fast_token_limit() method updates the fast_token_limit attribute.
-         """
-         self.config.set_fast_token_limit(5000)
-         self.assertEqual(self.config.fast_token_limit, 5000)
-
-     def test_set_smart_token_limit(self):
-         """
-         Test if the set_smart_token_limit() method updates the smart_token_limit attribute.
-         """
-         self.config.set_smart_token_limit(9000)
-         self.assertEqual(self.config.smart_token_limit, 9000)
-
-     def test_set_debug_mode(self):
-         """
-         Test if the set_debug_mode() method updates the debug_mode attribute.
-         """
-         self.config.set_debug_mode(True)
-         self.assertTrue(self.config.debug_mode)
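The deleted suite boils down to one behavior worth remembering: Config is a singleton, so every instantiation returns the same mutable object. A hedged, self-contained sketch of that behavior follows; it assumes autogpt is installed and that Config() can be constructed outside the test harness (e.g. with any environment variables it requires already set).

from autogpt.config import Config

a, b = Config(), Config()
assert a is b                  # both names point at the same instance
a.set_debug_mode(True)
assert b.debug_mode            # a mutation through one reference is visible via the other
# the original file ran under unittest: python -m unittest tests.test_config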
spaces/CofAI/chat.b4/client/js/sidebar-toggler.js DELETED
@@ -1,34 +0,0 @@
- const sidebar = document.querySelector(".sidebar");
- const menuButton = document.querySelector(".menu-button");
-
- function toggleSidebar(event) {
-     if (sidebar.classList.contains("shown")) {
-         hideSidebar(event.target);
-     } else {
-         showSidebar(event.target);
-     }
-     window.scrollTo(0, 0);
- }
-
- function showSidebar(target) {
-     sidebar.classList.add("shown");
-     target.classList.add("rotated");
-     document.body.style.overflow = "hidden";
- }
-
- function hideSidebar(target) {
-     sidebar.classList.remove("shown");
-     target.classList.remove("rotated");
-     document.body.style.overflow = "auto";
- }
-
- menuButton.addEventListener("click", toggleSidebar);
-
- document.body.addEventListener('click', function(event) {
-     if (event.target.matches('.conversation-title')) {
-         const menuButtonStyle = window.getComputedStyle(menuButton);
-         if (menuButtonStyle.display !== 'none') {
-             hideSidebar(menuButton);
-         }
-     }
- });