parquet-converter committed on
Commit
5d0244d
·
1 Parent(s): dd9da8f

Update parquet files (step 33 of 249)

Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/!FREE! Download One Touch Diabetes Management Softwarel [PATCHED].md +0 -29
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX Steam Edition Air Hauler 2 Add-On Download] [Crack Serial Key - Best Practices and Recommendations.md +0 -159
  3. spaces/1gistliPinn/ChatGPT4/Examples/Advanced Road Design Crack How to Use ARD Pipes for Automatic Crack Detection and Characterization.md +0 -13
  4. spaces/1gistliPinn/ChatGPT4/Examples/Aiyyaa Tamil Movie Download BEST Torrent.md +0 -8
  5. spaces/1gistliPinn/ChatGPT4/Examples/Dragon Ball Z Burst Limit Pc Game Download Free.md +0 -33
  6. spaces/1gistliPinn/ChatGPT4/Examples/ETAP WITH CRACK FULL VERSION FREE TORRENT DOWNLOAD.389 [Extra Quality].md +0 -6
  7. spaces/1gistliPinn/ChatGPT4/Examples/Forza Horizon Season Pass Code Generator.md +0 -74
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/Akinsoft Cafeplus 11 Extra Quality Crack 21.md +0 -104
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/AI Suite 3 The Ultimate Software for ASUS Motherboards on Windows 10.md +0 -103
  10. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us APK How to Play the Game that Everyone is Talking About.md +0 -151
  11. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blackmagic Software The All-in-One Software Tool for Creative Professionals.md +0 -133
  12. spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer on PC A Complete Guide to Download and Install on Windows 10.md +0 -105
  13. spaces/1phancelerku/anime-remove-background/EM3D The Ultimate App for 3D Scanning and Printing.md +0 -163
  14. spaces/2023Liu2023/bingo/tests/kblob.ts +0 -27
  15. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/plot.py +0 -72
  16. spaces/AEUPH/CosmosTV/public/mpegts.js +0 -0
  17. spaces/AGITM/ToneCorrectionRecognition/README.md +0 -13
  18. spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/bert/create_sent_embedding.py +0 -89
  19. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn-detect_8xb16-300e_coco.py +0 -23
  20. spaces/AchyuthGamer/OpenGPT/client/css/button.css +0 -26
  21. spaces/AgentVerse/agentVerse/agentverse/agents/__init__.py +0 -20
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasdata-plugin.js +0 -34
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/match/RefreshSymbolCache.js +0 -15
  24. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/SetChartData.js +0 -17
  25. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/ClickCell.js +0 -20
  26. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/label/methods/Methods.js +0 -9
  27. spaces/AlexZou/Deploy_Restoration/models/SCET.py +0 -276
  28. spaces/AlgoveraAI/medical-image-classification/README.md +0 -12
  29. spaces/Amon1/ChatGPTForAcadamic/toolbox.py +0 -344
  30. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/app.py +0 -75
  31. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/audio_diffusion/mel.py +0 -179
  32. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +0 -153
  33. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_ve_flax.py +0 -279
  34. spaces/Andy1621/uniformer_image_detection/configs/htc/htc_r50_fpn_20e_coco.py +0 -4
  35. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/upfirdn2d.cpp +0 -23
  36. spaces/AnthonyErosion/HoctotAI/README.md +0 -12
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/check.py +0 -149
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/__main__.py +0 -274
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/config.py +0 -377
  40. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py +0 -120
  41. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/postprocessing.py +0 -101
  42. spaces/AzumaSeren100/XuanShen-Bert-VITS2/webui.py +0 -136
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/queue.py +0 -22
  44. spaces/BigSalmon/MaskSeveralAtOnce/app.py +0 -52
  45. spaces/Billyosoro/ESRGAN/README.md +0 -34
  46. spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/app.py +0 -219
  47. spaces/CVPR/LIVE/thrust/dependencies/cub/cub/cmake/cub-config.cmake +0 -62
  48. spaces/CVPR/LIVE/thrust/thrust/iterator/iterator_adaptor.h +0 -240
  49. spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/README.md +0 -61
  50. spaces/CVPR/WALT/mmdet/models/necks/hrfpn.py +0 -102
spaces/1acneusushi/gradio-2dmoleculeeditor/data/!FREE! Download One Touch Diabetes Management Softwarel [PATCHED].md DELETED
@@ -1,29 +0,0 @@
1
-
2
- <h1>Download One Touch Diabetes Management Software [PATCHED]</h1>
3
- <p>One Touch Diabetes Management Software is a free application that helps you organize and analyze your blood glucose readings from One Touch glucose meters. You can use this software to generate personalized reports that show you how your lifestyle choices affect your blood glucose levels. You can also share these reports with your healthcare provider or diabetes educator to get better guidance on managing your diabetes.</p>
4
- <h2>Download One Touch Diabetes Management Softwarel [PATCHED]</h2><br /><p><b><b>DOWNLOAD</b> &#10002; &#10002; &#10002; <a href="https://byltly.com/2uKvKa">https://byltly.com/2uKvKa</a></b></p><br /><br />
5
- <p>However, the original One Touch Diabetes Management Software is no longer being distributed by the company and requires an interface cable to connect your meter to your computer. This can be inconvenient and costly for some users. That's why we have created a patched version of the software that works with any USB cable and is compatible with the latest Windows operating systems. You can download this patched version from our website for free and enjoy all the benefits of One Touch Diabetes Management Software without any hassle.</p>
6
- <p>To download the patched version of One Touch Diabetes Management Software, follow these simple steps:</p>
7
- <ol>
8
- <li>Click on the download link below and save the file to your computer.</li>
9
- <li>Unzip the file and run the setup.exe file to install the software.</li>
10
- <li>Connect your One Touch glucose meter to your computer using a USB cable.</li>
11
- <li>Launch the software and follow the instructions to transfer your data from your meter to the software.</li>
12
- <li>View, print, or email your reports as you wish.</li>
13
- </ol>
14
- <p>Download link: <a href="https://example.com/onetouch-patched.zip">One Touch Diabetes Management Software [PATCHED]</a></p>
15
- <p>Note: This patched version of One Touch Diabetes Management Software is not affiliated with or endorsed by LifeScan, Inc. or Johnson & Johnson. Use it at your own risk. We are not responsible for any damage or loss caused by using this software.</p>
16
-
17
- <p>One Touch Diabetes Management Software [PATCHED] is a useful tool for anyone who wants to monitor their blood glucose levels and manage their diabetes more effectively. With this software, you can easily see how your diet, exercise, medication, and other factors affect your blood glucose levels over time. You can also compare your results with your target range and identify areas where you need to make changes.</p>
18
- <p></p>
19
- <p>Some of the reports that you can generate with One Touch Diabetes Management Software [PATCHED] are:</p>
20
- <ul>
21
- <li>Pie chart: This report shows the percentage of readings that fall within or outside your target range for each mealtime. You can use this report to see how well you are controlling your blood glucose levels throughout the day.</li>
22
- <li>Glucose trend: This report shows the changes in your blood glucose levels over a selected period of time. You can use this report to see how your blood glucose levels vary during the day or week.</li>
23
- <li>Histogram: This report shows the distribution of your blood glucose readings in a given range. You can use this report to see how often your blood glucose levels are too high or too low.</li>
24
- <li>Average reading: This report shows the average of your blood glucose readings for a selected period of time. You can use this report to see how your overall blood glucose control is.</li>
25
- </ul>
26
- <p>One Touch Diabetes Management Software [PATCHED] also allows you to customize your reports by adding notes, comments, or reminders. You can also export your data to other formats such as Excel or PDF for further analysis or sharing.</p>
27
- <p>If you have any questions or feedback about One Touch Diabetes Management Software [PATCHED], please contact us at [email protected]. We would love to hear from you and help you improve your diabetes management.</p> 7b8c122e87<br />
28
- <br />
29
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX Steam Edition Air Hauler 2 Add-On Download] [Crack Serial Key - Best Practices and Recommendations.md DELETED
@@ -1,159 +0,0 @@
1
-
2
- <h1>FSX Steam Edition: Air Hauler 2 Add-On Download [Crack Serial Key]</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are a fan of flight simulation games, you probably know about FSX Steam Edition. It is one of the most popular and realistic flight simulators available on the market. But did you know that you can enhance your gaming experience with Air Hauler 2 Add-On? In this article, we will tell you everything you need to know about this amazing add-on, how to download it, how to activate it with a crack serial key, and how to use it. So buckle up and get ready for takeoff!</p>
5
- <h2>FSX Steam Edition: Air Hauler 2 Add-On Download] [Crack Serial Key</h2><br /><p><b><b>Download</b> &#10145; <a href="https://byltly.com/2uKxPX">https://byltly.com/2uKxPX</a></b></p><br /><br />
6
- <h3>What is FSX Steam Edition?</h3>
7
- <p>FSX Steam Edition is a re-release of the original Microsoft Flight Simulator X (FSX) game that was launched in 2006. It is a flight simulation game that allows you to fly various aircrafts in different scenarios and locations around the world. You can choose from hundreds of planes, ranging from small propellers to large jets, and customize them with different liveries and accessories. You can also create your own missions, join multiplayer sessions, or explore the world in free flight mode.</p>
8
- <h3>What is Air Hauler 2?</h3>
9
- <p>Air Hauler 2 is an add-on for FSX Steam Edition that adds a whole new dimension to your flight simulation experience. It is a management simulation game that lets you create and run your own virtual airline company. You can hire pilots, buy and sell planes, set up bases, transport passengers and cargo, and compete with other airlines. You can also fly your own missions as a pilot or a co-pilot, and earn money and reputation for your company. Air Hauler 2 is a dynamic and immersive game that will challenge your skills as a pilot and a manager.</p>
10
- <h3>Why do you need Air Hauler 2 for FSX Steam Edition?</h3>
11
- <p>If you love FSX Steam Edition, you will love Air Hauler 2 even more. It is a perfect complement to your flight simulation game that will add more realism, variety, and fun to your gaming sessions. With Air Hauler 2, you can:</p>
12
- <ul>
13
- <li>Create your own airline company and customize it with your own logo, colors, and motto.</li>
14
- <li>Hire and fire pilots, assign them to different planes and routes, and monitor their performance.</li>
15
- <li>Buy and sell planes from a wide range of models, from small propellers to large jets.</li>
16
- <li>Set up bases around the world and expand your network of destinations.</li>
17
- <li>Transport passengers and cargo across different regions and countries.</li>
18
- <li>Manage your finances, expenses, income, loans, and taxes.</li>
19
- <li>Compete with other airlines in the market and try to become the best.</li>
20
- <li>Fly your own missions as a pilot or a co-pilot, using realistic flight models and weather conditions.</li>
21
- <li>Earn money and reputation for your company by completing missions successfully.</li>
22
- <li>Enjoy a dynamic and interactive world that changes according to your actions.</li>
23
- </ul>
24
- <h2>How to download Air Hauler 2 Add-On for FSX Steam Edition?</h2>
25
- <p>If you are interested in downloading Air Hauler 2 Add-On for FSX Steam Edition, you will need to follow some simple steps. Here they are:</p>
26
- <h3>Requirements and compatibility</h3>
27
- <p>Before you download Air Hauler 2 Add-On, you will need to make sure that you have the following requirements:</p>
28
- <ul>
29
- <li>A PC with Windows 7 or higher operating system.</li>
30
- <li>A copy of FSX Steam Edition installed on your PC.</li>
31
- <li>A minimum of 4 GB of RAM memory.</li>
32
- <li>A minimum of 1 GB of free disk space.</li>
33
- <li>A stable internet connection.</li>
34
- </ul>
35
- <p>Air Hauler 2 Add-On is compatible with FSX Steam Edition only. It is not compatible with other versions of FSX or other flight simulators.</p>
36
- <p>How to download Air Hauler 2 Add-On for FSX Steam Edition<br />
37
- FSX Steam Edition: Air Hauler 2 Add-On free download with crack<br />
38
- Air Hauler 2 Add-On serial key generator for FSX Steam Edition<br />
39
- FSX Steam Edition: Air Hauler 2 Add-On cracked version download link<br />
40
- Air Hauler 2 Add-On activation code for FSX Steam Edition<br />
41
- FSX Steam Edition: Air Hauler 2 Add-On full game download torrent<br />
42
- Air Hauler 2 Add-On license key for FSX Steam Edition<br />
43
- FSX Steam Edition: Air Hauler 2 Add-On patch download and installation guide<br />
44
- Air Hauler 2 Add-On product key for FSX Steam Edition<br />
45
- FSX Steam Edition: Air Hauler 2 Add-On crack only download<br />
46
- Air Hauler 2 Add-On registration key for FSX Steam Edition<br />
47
- FSX Steam Edition: Air Hauler 2 Add-On download with crack and serial key<br />
48
- Air Hauler 2 Add-On unlock code for FSX Steam Edition<br />
49
- FSX Steam Edition: Air Hauler 2 Add-On crack download and instructions<br />
50
- Air Hauler 2 Add-On keygen for FSX Steam Edition<br />
51
- FSX Steam Edition: Air Hauler 2 Add-On download full version with crack<br />
52
- Air Hauler 2 Add-On crack fix for FSX Steam Edition<br />
53
- FSX Steam Edition: Air Hauler 2 Add-On download and install with serial key<br />
54
- Air Hauler 2 Add-On steam key for FSX Steam Edition<br />
55
- FSX Steam Edition: Air Hauler 2 Add-On crack skidrow download<br />
56
- Air Hauler 2 Add-On cd key for FSX Steam Edition<br />
57
- FSX Steam Edition: Air Hauler 2 Add-On download pc game with crack<br />
58
- Air Hauler 2 Add-On online activation code for FSX Steam Edition<br />
59
- FSX Steam Edition: Air Hauler 2 Add-On crack reloaded download<br />
60
- Air Hauler 2 Add-On working serial key for FSX Steam Edition<br />
61
- FSX Steam Edition: Air Hauler 2 Add-On download free full version with crack<br />
62
- Air Hauler 2 Add-On no cd crack for FSX Steam Edition<br />
63
- FSX Steam Edition: Air Hauler 2 Add-On download compressed with crack and serial key<br />
64
- Air Hauler 2 Add-On offline activation code for FSX Steam Edition<br />
65
- FSX Steam Edition: Air Hauler 2 Add-On crack codex download<br />
66
- Air Hauler 2 Add-On verified serial key for FSX Steam Edition<br />
67
- FSX Steam Edition: Air Hauler 2 Add-On download repack with crack and serial key<br />
68
- Air Hauler 2 Add-On steam activation code for FSX Steam Edition<br />
69
- FSX Steam Edition: Air Hauler 2 Add-On crack cpy download<br />
70
- Air Hauler 2 Add-On legit serial key for FSX Steam Edition<br />
71
- FSX Steam Edition: Air Hauler 2 Add-On download iso with crack and serial key<br />
72
- Air Hauler 2 Add-On drm-free crack for FSX Steam Edition<br />
73
- FSX Steam Edition: Air Hauler 2 Add-On download highly compressed with crack and serial key<br />
74
- Air Hauler 2 Add-On origin activation code for FSX Steam Edition<br />
75
- FSX Steam Edition: Air Hauler 2 Add-On crack hoodlum download<br />
76
- Air Hauler 2 Add-On genuine serial key for FSX Steam Edition<br />
77
- FSX Steam Edition: Air Hauler 2 Add-On download direct link with crack and serial key<br />
78
- Air Hauler 2 Add-On gog activation code for FSX Steam Edition<br />
79
- FSX Steam Edition: Air Hauler 2 Add-On crack plaza download<br />
80
- Air Hauler 2 Add-On valid serial key for FSX Steam Edition<br />
81
- FSX Steam Edition: Air Hauler 2 Add-On download fitgirl repack with crack and serial key<br />
82
- Air Hauler 2 Add-On epic games activation code for FSX Steam Edition<br />
83
- FSX Steam Edition: Air Hauler 2 Add-On crack razor1911 download</p>
84
- <h3>Steps to download and install Air Hauler 2 Add-On</h3>
85
- <p>To download and install Air Hauler 2 Add-On for FSX Steam Edition, you will need to follow these steps:</p>
86
- <ol>
87
- <li>Go to the official website of Just Flight, the developer of Air Hauler 2 Add-On. You can find it here: <a href="https://www.justflight.com/product/air-hauler-2">https://www.justflight.com/product/air-hauler-2</a>.</li>
88
- <li>Click on the "Buy Now" button and choose your preferred payment method. You can pay with credit card, PayPal, or other options.</li>
89
- <li>After completing the payment process, you will receive an email with a download link and a serial key for Air Hauler 2 Add-On.</li>
90
- <li>Click on the download link and save the file on your PC.</li>
91
- <li>Run the file and follow the instructions on the screen to install Air Hauler 2 Add-On on your PC.</li>
92
- <li>Launch FSX Steam Edition and select "AirHauler" from the main menu.</li>
93
- </ol>
94
- <h3>How to activate Air Hauler 2 Add-On with crack serial key?</h3>
95
- <p>To activate Air Hauler 2 Add-On with crack serial key, you will need to follow these steps:</p>
96
- <ol>
97
- <li>Open the email that contains the serial key for Air Hauler 2 Add-On.</li>
98
- <li>Copy the serial key from the email.</li>
99
- <li>Paste the serial key into the activation window that appears when you launch AirHauler from FSX Steam Edition.</li>
100
- <li>Click on "Activate" and wait for the confirmation message.</li>
101
- </ol>
102
- <p>Congratulations! You have successfully activated Air Hauler 2 Add-On with crack serial key. You can now enjoy all the features and benefits of this amazing add-on for FSX Steam Edition.</p>
103
- <h2>How to use Air Hauler 2 Add-On for FSX Steam Edition?</h2>
104
- <p>To use Air Hauler 2 Add-On for FSX Steam Edition, you will need to learn some basic concepts and functions. Here are some tips and tricks to help you get started:</p>
105
- <h3>Features and benefits of Air Hauler 2 Add-On</h3>
106
- <p>Air Hauler 2 Add-On has many features and benefits that will enhance your flight simulation experience. Here are some of them:</p>
107
- <ul>
108
- <li>You can create your own airline company from scratch or join an existing one as an employee or a partner.</li>
109
- <li>You can customize your company with your own logo, colors, motto, website, etc.</li>
110
- <li>You can hire pilots from a pool of candidates with different skills, ratings, salaries, etc.</li>
111
- <li>You can buy planes from a wide range of models with different specifications, prices, maintenance costs, etc.</li>
112
- <li>You can set up bases around the world where you can park your planes, refuel them, repair them, etc.</li>
113
- <li>You can transport passengers and cargo across different regions and countries using realistic flight models and weather conditions.</li>
114
- <li>You can manage your finances by keeping track of your income, expenses, loans, taxes, etc.</li>
115
- <li>You can compete with other airlines in the market by offering better prices, services, destinations, etc.</li>
116
- <li>You can fly your own missions as a pilot or a co-pilot using realistic cockpit instruments, navigation systems, communication devices, etc. </li><li>You can earn money and reputation for your company by completing missions successfully and satisfying your customers and clients. </li><li>You can enjoy a dynamic and interactive world that changes according to your actions and decisions. </li></ul><h3>Tips and tricks for Hauler 2 Add-On</h3>
117
- <p>To use Air Hauler 2 Add-On effectively and efficiently, you will need to follow some tips and tricks. Here are some of them:</p>
118
- <ul>
119
- <li>Plan your flights carefully and check the weather conditions, fuel consumption, cargo weight, etc. before you take off.</li>
120
- <li>Follow the air traffic control instructions and communicate with them using the radio.</li>
121
- <li>Use the autopilot and the GPS when possible to save time and fuel.</li>
122
- <li>Land your plane smoothly and safely and park it at the designated spot.</li>
123
- <li>Check your plane for any damages or malfunctions and repair them as soon as possible.</li>
124
- <li>Keep your pilots happy and motivated by paying them well, giving them bonuses, and training them.</li>
125
- <li>Keep your customers and clients satisfied by delivering their passengers and cargo on time and in good condition.</li>
126
- <li>Expand your network of destinations by exploring new regions and countries.</li>
127
- <li>Upgrade your planes with better engines, avionics, interiors, etc. to improve their performance and appearance.</li>
128
- <li>Join online multiplayer sessions and cooperate or compete with other players around the world.</li>
129
- </ul>
130
- <h3>Troubleshooting and support for Air Hauler 2 Add-On</h3>
131
- <p>If you encounter any problems or issues with Air Hauler 2 Add-On, you can try some troubleshooting methods or contact the support team. Here are some options:</p>
132
- <ul>
133
- <li>Check the manual and the FAQ section on the official website of Just Flight for more information and guidance.</li>
134
- <li>Visit the forum and the blog on the official website of Just Flight for more tips, tricks, news, updates, etc.</li>
135
- <li>Contact the support team via email or phone for technical assistance or customer service.</li>
136
- </ul>
137
- <h2>Conclusion</h2>
138
- <h3>Summary of the article</h3>
139
- <p>In this article, we have discussed everything you need to know about Air Hauler 2 Add-On for FSX Steam Edition. We have explained what it is, why you need it, how to download it, how to activate it with a crack serial key, and how to use it. We have also provided some features, benefits, tips, tricks, troubleshooting methods, and support options for this amazing add-on. We hope that this article has been helpful and informative for you.</p>
140
- <h3>Call to action</h3>
141
- <p>If you are interested in downloading Air Hauler 2 Add-On for FSX Steam Edition, you can do so by clicking on the link below. You will be redirected to the official website of Just Flight where you can purchase this add-on with a secure payment method. You will also receive a download link and a serial key for Air Hauler 2 Add-On via email. Don't miss this opportunity to enhance your flight simulation experience with Air Hauler 2 Add-On for FSX Steam Edition. Download it now and enjoy!</p>
142
- <p><a href="https://www.justflight.com/product/air-hauler-2">Download Air Hauler 2 Add-On for FSX Steam Edition here</a></p>
143
- <h4>Frequently Asked Questions</h4>
144
- <p>Here are some frequently asked questions about Air Hauler 2 Add-On for FSX Steam Edition:</p>
145
- <ol>
146
- <li><b>What is the difference between Air Hauler 2 Add-On and Air Hauler 1?</b></li>
147
- <p>Air Hauler 2 Add-On is a sequel to Air Hauler 1 that was released in 2010. It has many improvements and new features that make it more realistic, dynamic, and immersive than its predecessor. Some of these features include: a new user interface, a new map system, a new flight model system, a new weather system, a new economy system, a new reputation system, a new multiplayer mode, etc.</p>
148
- <li><b>Can I use Air Hauler 2 Add-On with other add-ons for FSX Steam Edition?</b></li>
149
- <p>Yes, you can use Air Hauler 2 Add-On with other add-ons for FSX Steam Edition as long as they are compatible with each other. You can use different planes, sceneries, airports, weather enhancements, etc. with Air 2 Add-On. However, you should make sure that they do not conflict with each other or cause any errors or crashes.</p>
150
- <li><b>How much does Air Hauler 2 Add-On cost?</b></li>
151
- <p>Air Hauler 2 Add-On costs $39.99 USD on the official website of Just Flight. You can pay with credit card, PayPal, or other options. You will also receive a download link and a serial key for Air Hauler 2 Add-On via email.</p>
152
- <li><b>Is Air Hauler 2 Add-On worth it?</b></li>
153
- <p>Air Hauler 2 Add-On is definitely worth it if you are a fan of flight simulation games and want to add more realism, variety, and fun to your gaming sessions. It is a comprehensive and immersive add-on that will challenge your skills as a pilot and a manager. It will also enhance your FSX Steam Edition game with new features and benefits. You will not regret buying Air Hauler 2 Add-On for FSX Steam Edition.</p>
154
- <li><b>Where can I find more information about Air Hauler 2 Add-On?</b></li>
155
- <p>You can find more information about Air Hauler 2 Add-On on the official website of Just Flight. You can also visit the forum and the blog on the website for more tips, tricks, news, updates, etc. You can also contact the support team via email or phone for technical assistance or customer service.</p>
156
- </ol>
157
- </p> 0a6ba089eb<br />
158
- <br />
159
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Advanced Road Design Crack How to Use ARD Pipes for Automatic Crack Detection and Characterization.md DELETED
@@ -1,13 +0,0 @@
1
- <br />
2
- <p>Apple Watch Series 7 features a redesigned front crystal with a stronger and more robust geometry that is over 50 percent thicker than that of Apple Watch Series 6, making it more crack-resistant without compromising optical clarity. Apple Watch Series 7 is also certified IP6X dust-resistant, making it more durable in environments like the beach or the desert, while still maintaining excellent swimming performance with a water resistance rating of WR50.</p>
3
- <h2>Advanced Road Design Crack</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash; <a href="https://imgfil.com/2uxYUe">https://imgfil.com/2uxYUe</a></b></p><br /><br />
4
- <p>Abstract<b>:</b>The technological innovation of continuously reinforced concrete pavement (CRCP) that contains a significantly reduced amount of reinforcement and the same fundamental behavior as CRCP is called advanced reinforced concrete pavement (ARCP). This new concept of a rigid pavement structure is developed to eliminate unnecessary continuous longitudinal steel bars of CRCP by using partial length steel bars at predetermined crack locations. In Belgium, partial surface saw-cuts are used as the most effective crack induction method to eliminate the randomness in early-age crack patterns by inducing cracks at the predetermined locations of CRCP. The reinforcement layout of ARCP is designed based on the distribution of steel stress in continuous longitudinal steel bar in CRCP and the effectiveness of partial surface saw-cuts as a crack induction method. The 3D finite element (FE) model is developed to evaluate the behavior of ARCP with partial surface saw-cuts. The early-age crack characteristics in terms of crack initiation and crack propagation obtained from the FE simulation are validated with the field observations of cracking characteristics of the CRCP sections in Belgium. The finding indicates that there is fundamentally no difference in the steel stress distribution in the partial length steel bar of ARCP and continuous steel bar of CRCP. Moreover, ARCP exhibits the same cracking characteristics as CRCP even with a significantly reduced amount of continuous reinforcement.Keywords: early-age crack induction; partial surface saw-cuts; advanced reinforced concrete pavement; continuously reinforced concrete pavement; finite element simulation</p>
5
- <p>Pavement crack detection plays an important role in the field of road distress evaluation [1]. Traditional crack detection methods depend mainly on manual work and are limited by the following: (i) they are time consuming and laborious; (ii) they rely entirely on human experience and judgment. Therefore, automatic crack detection is essential to detect and identify cracks on the road quickly and accurately [2]. This procedure is a key part of intelligent maintenance systems, to assist and evaluate the pavement distress quality where more continual road status surveys are required. Over the past decade, the development of high-speed mobile cameras and large-capacity hardware storage devices has made it easier to obtain large-scale road images. Through mobile surveying and mapping technology, integrated acquisition equipment is fixed to the rear of the vehicle roof frame to monitor both the road surface and the surrounding environment. The images can be acquired by processing and storing pavement surface images that are realized [3]. Currently, many methods utilize computer vision algorithms to process the collected pavement crack images and then obtain the final maintenance evaluation results [4].</p>
6
- <p>The rest of this paper is organized as follows. Section 2 describes crack detection based on deep learning semantic segmentation. Section 3 demonstrates the effectiveness of the proposed scheme through comparative analyses of experiments. Section 4 discusses the detailed design of the two modules proposed in this paper. Finally, Section 5 concludes the paper.</p>
7
- <p></p>
8
- <p>Our CrackDataset consists of pavement detection images of 14 cities in the Liaoning Province, China. The data cover most of the pavement diseases in the whole road network. These images include collected images of different pavement, different illumination, and different sensors. The real values in the dataset provide two types of labels, cracks, and noncracks. The dataset is divided into three parts. The training set and the validation set are composed of 4736 and 1036 crack images, respectively. The test set contains 2416 images. In addition, two other crack datasets, CFD [15] and AigleRN [10], are used as test sets. The details of the datasets are shown in Table 2.</p>
9
- <p>In this paper, an end-to-end trainable pavement crack detection framework based on DCNN, CrackSeg, is proposed, which can automatically detect road cracks under complex backgrounds. First, a crack training dataset is established, which covers a wide range of data sources and reflects the overall situation of pavement distress in the Liaoning Province, China. Second, through the fusion of high-level features in the backbone network, we propose the multiscale dilated convolution module. By capturing the features of context information at multiple scales, the crack detection network can learn rich semantic information in a complex background. Therefore, based on the dilated convolution theory, we design a novel network structure that can be inserted into the existing semantic segmentation system to improve the accuracy of crack feature detection. Finally, through the upsampling module, the low-level features, and continuous convolution features are fused to realize the crack pixel-level prediction. This feature aggregation, which combines different levels of feature information, can not only fully mine the crack features in the image but also restore and describe the details of the object boundary information. The experimental results of CrackSeg achieve high performance with a precision of 98.00%, recall of 97.85%, -score of 97.92%, and a mIoU of 73.53%, which are higher than those of other networks. Furthermore, the model has strong stability and robustness to solve the noise interference caused by shadows, stains, and exposures in the process of data acquisition. The good performance of the CrackSeg network provides a possibility for large area automatic crack detection.</p>
10
- <p>Based on the traditional image processing method, it is the initial attempt to automatically detect road cracks. Akagic et al. [3] proposed a crack image detection method based on the Otsu threshold and histogram. Although this method is efficient, the crack area can be accurately found only when the crack pixel is darker than the surrounding pixels. Medina et al. [4] used the wavelet transform method to detect cracks, which not only was susceptible to the contrast between crack pixels and surrounding pixels, but also could not detect cracks with poor continuity. To improve the effect of detecting continuous cracks, the minimum path selection method [5] is proposed to detect cracks from a global perspective, which effectively enhances the continuity of fractured cracks. Although the minimum path selection method performs crack detection from a global perspective, its detection performance is still unsatisfactory when dealing with cracks with disordered shapes or low contrast with surrounding pixels. It can be seen that automatic detection of road cracks is still a difficult task for researchers.</p>
11
- <p>In recent years, deep learning has been applied to road crack detection tasks due to its outstanding feature extraction capabilities. Pauly et al. [6] cropped each crack image into a patch, and then the patch was classified as crack or noncrack after neural network training. Although this method was very efficient, it produced false detections. To further improve its detection accuracy, semantic segmentation algorithms based on the encoding-decoding architecture are widely used. Lau et al. [7] introduced U-Net to road crack detection. The network introduced skip connections into the encoding-decoding architecture, which helped to preserve rich image details, thereby improving the detection accuracy. Although U-Net performs well in the field of image segmentation, the crack area of the crack image is much smaller than the background area. Cao et al. [8] replaced the U-Net encoder with ResNet34 to deal with the loss of spatial information caused by continuous pooling. Effectively avoiding gradient disappearance or gradient explosion, Chen et al. [9] embedded a global context module in the U-Net network structure to give the network the ability to capture global context information, which is conducive to the detailed segmentation of pavement crack images. Augustauskas and Lipnickas [10] introduced a kind of attention based on the U-shaped network. The force gate model suppresses background noise and strengthens the ability of the network to capture detailed features of cracks. Fan et al. [11] proposed an end-to-end pixel-level road crack detection network. By building multiple expansion convolution modules to help the network obtain the multiscale context information of the cracks, a hierarchical feature learning module is designed to integrate low-level features and high-level features. The designed multiscale output feature map has better performance in fracture information inference, thereby improving the robustness and universality of the network. Ali et al. [12] implemented a deep fully convolutional neural network based on residual blocks. For the extreme imbalance between target and background pixels in crack images, a local weighting factor was proposed to effectively reduce the trouble caused by pixel imbalance to the network; a crack image dataset with different crack width directions and a location dataset were developed for researchers to use for training, validation, and testing. Fan et al. [13] proposed a road crack automatic detection and measurement network based on probability fusion. Through the designed integrated neural network model, satisfactory crack detection accuracy is obtained; according to the predicted crack map, the width and length of the crack can be measured effectively. Wang et al. [14] proposed a semisupervised semantic segmentation network for crack detection. The model extracts multiscale crack feature information through Efficient-UNet; it greatly reduces the workload of labeling while maintaining high labeling accuracy. Wang et al. [15] used a neural network to detect pavement cracks and applied a principal component analysis to classify the detected pavement cracks. The crack types were divided into transversal, longitudinal, cracked cracks. The accuracy scored higher than 90%. Nevertheless, patch classification is only suitable for rougher classification tasks. Cubero-Fernandez et al. 
[16] classified the discontinuous cracks in an image as a whole, though they did not consider the spatial distribution relationship between the cracks.</p> aaccfb2cb3<br />
12
- <br />
13
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Aiyyaa Tamil Movie Download BEST Torrent.md DELETED
@@ -1,8 +0,0 @@
1
- <br />
2
- <p> If you want to watch online movies, then you must be aware of Torrent sites. You can visit any video websites using torrents. You may also use the best torrent websites to download unlimited movies for free. These websites always have a search bar which allows you to search for the movie. You can use the search results to download the movie. These websites are usually known as the best torrent sites.</p>
3
- <h2>Aiyyaa Tamil Movie Download Torrent</h2><br /><p><b><b>Download Zip</b> &#10084;&#10084;&#10084; <a href="https://imgfil.com/2uxXhz">https://imgfil.com/2uxXhz</a></b></p><br /><br />
4
- <p>Aiyyaa is a Tamil movie which is directed by Arivazhagan and script written by Thamizhan's brother Arivazhagan. The movie is a horror comedy and it stars Akshay Kumar, Vijay and Bhumika Chawla in the lead roles. The movie is being produced by Veera Productions. The movie is upcoming release in the 2018. The name is taken from the Tamil, "Ayya (உய்யா)". The movie is based on a popular fashion brand name, "Aiyya".</p>
5
- <p>Kannada movie and Kannada movie in tamil. Aiyya is a Kannada movie which is directed by Guruprasad and it stars Ankita and Srinivas in lead roles. The movie has been remade in Tamil as a bilingual movie. It is based on popular fashion brand "Aiyya". The movie is an upcoming release in the 2018. The name is taken from the Tamil, "Ayya (உய்யா)". The movie is based on a popular fashion brand name, "Aiyya".</p>
6
- <p>The movie is directed by Ravi Kale and the director Ravi Kale is the writer as well. The movie has been remade in Hindi as a bilingual movie. It is based on a popular fashion brand name, "Aiyya". The movie is an upcoming release in the 2018. The name is taken from the Tamil, "Ayya (உய்யா)". The movie is based on a popular fashion brand name, "Aiyya".</p> 899543212b<br />
7
- <br />
8
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Dragon Ball Z Burst Limit Pc Game Download Free.md DELETED
@@ -1,33 +0,0 @@
1
-
2
- <h1>How to Download and Play Dragon Ball Z Burst Limit on PC</h1>
3
- <p>Dragon Ball Z Burst Limit is a fighting game based on the popular anime and manga series Dragon Ball Z. It was released for PlayStation 3 and Xbox 360 in 2008, but it is not officially available for PC. However, there is a way to play it on your computer using an emulator called RPCS3.</p>
4
- <h2>dragon ball z burst limit pc game download free</h2><br /><p><b><b>Download File</b> &middot; <a href="https://imgfil.com/2uy1ql">https://imgfil.com/2uy1ql</a></b></p><br /><br />
5
- <p>RPCS3 is a free and open-source PlayStation 3 emulator that can run many PS3 games on Windows, Linux and BSD operating systems. It requires a powerful PC and some configuration, but it can provide a great gaming experience for fans of Dragon Ball Z Burst Limit.</p>
6
- <p>In this article, we will show you how to download and play Dragon Ball Z Burst Limit on PC using RPCS3. We will also provide some tips and tricks to optimize the performance and graphics of the game.</p>
7
- <h2>Step 1: Download RPCS3</h2>
8
- <p>The first step is to download RPCS3 from its official website[^1^]. You can choose between the latest stable release or the latest development build. The development build may have more features and fixes, but it may also be more unstable or buggy. We recommend downloading the stable release for better compatibility.</p>
9
- <p>Once you have downloaded the RPCS3 zip file, extract it to a folder of your choice. You will see several files and folders inside, such as rpcs3.exe, dev_hdd0, dev_flash, etc. These are the main components of the emulator.</p>
10
- <h2>Step 2: Download Dragon Ball Z Burst Limit</h2>
11
- <p>The next step is to download Dragon Ball Z Burst Limit for PS3. You can either use your own copy of the game if you have one, or you can download it from a website that offers PS3 game downloads. We do not condone piracy, so we will not provide any links to such websites. You can search for them on your own risk.</p>
12
- <p></p>
13
- <p>Dragon Ball Z Burst Limit is available in different regions and versions, such as BLES00231 (Europe), BLUS30117 (USA), BLJM60089 (Japan), etc. You can choose any version that suits your preference, but make sure it is compatible with RPCS3. You can check the compatibility list on the RPCS3 website[^1^] to see which games work well with the emulator.</p>
14
- <p>Once you have downloaded Dragon Ball Z Burst Limit, you will get a file with an extension such as .iso, .bin, .pkg, etc. This is the game image file that contains all the data of the game. You will need to place this file in a folder where RPCS3 can access it.</p>
15
- <h2>Step 3: Install Dragon Ball Z Burst Limit</h2>
16
- <p>The third step is to install Dragon Ball Z Burst Limit on RPCS3. Depending on the type of file you downloaded, you may need to do this differently.</p>
17
- <p>If you downloaded an .iso or .bin file, you can simply drag and drop it into the main window of RPCS3. The emulator will automatically scan and recognize the game image file and add it to your game list.</p>
18
- <p>If you downloaded a .pkg file, you will need to install it manually on RPCS3. To do this, go to File > Install .pkg and browse for the .pkg file you downloaded. The emulator will install the game package file and add it to your game list.</p>
19
- <h2>Step 4: Configure RPCS3</h2>
20
- <p>The fourth step is to configure RPCS3 for optimal performance and graphics of Dragon Ball Z Burst Limit. There are many settings that you can tweak in RPCS3, but we will only focus on the most important ones for this game.</p>
21
- <p>To access the settings menu of RPCS3, go to Config > Settings. You will see several tabs with different options. Here are some of the recommended settings for Dragon Ball Z Burst Limit:</p>
22
- <ul>
23
- <li>CPU tab:
24
- <ul>
25
- <li>PPU Decoder: LLVM Recompiler</li>
26
- <li>SPU Decoder: LLVM Recompiler</li>
27
- <li>Firmware Settings: Load liblv2.sprx only</li>
28
- <li>Preferred SPU Threads: Auto</li>
29
- <li>SPU Block Size: Safe</li>
30
- <li>TSX Instructions: Enabled (if your CPU supports it)</li>
31
- <li></p> d5da3c52bf<br />
32
- <br />
33
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/ETAP WITH CRACK FULL VERSION FREE TORRENT DOWNLOAD.389 [Extra Quality].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>ETAP WITH CRACK FULL VERSION FREE TORRENT DOWNLOAD.389</h2><br /><p><b><b>Download Zip</b> ---> <a href="https://imgfil.com/2uxXxI">https://imgfil.com/2uxXxI</a></b></p><br /><br />
2
- <br />
3
- 4fefd39f24<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Forza Horizon Season Pass Code Generator.md DELETED
@@ -1,74 +0,0 @@
1
- <br />
2
- <h1>How to Get Forza Horizon Season Pass Code Generator for Free?</h1>
3
-
4
- <p>If you are a fan of racing games, you may have heard of Forza Horizon, a popular open-world racing game for Xbox 360 and Xbox One. Forza Horizon allows you to explore a vast and diverse landscape inspired by Colorado, USA, and participate in various events and challenges. You can also customize and upgrade your cars, as well as collect and trade them with other players.</p>
5
-
6
- <p>However, if you want to enjoy the full experience of Forza Horizon, you may need to get the Season Pass, which gives you access to six monthly car packs, each containing six new cars. The Season Pass also includes the Rally Expansion Pack, which adds new rally cars, events, and achievements to the game. The Season Pass normally costs $49.99, but what if you could get it for free?</p>
7
- <h2>Forza Horizon Season Pass Code Generator</h2><br /><p><b><b>Download</b> --->>> <a href="https://imgfil.com/2uy0qD">https://imgfil.com/2uy0qD</a></b></p><br /><br />
8
-
9
- <p>That's where Forza Horizon Season Pass Code Generator comes in handy. This is a tool that generates free codes that you can use to unlock the Season Pass for Forza Horizon on Xbox 360. With this tool, you can save money and enjoy all the benefits of the Season Pass without paying anything.</p>
10
-
11
- <h2>What is Forza Horizon Season Pass Code Generator?</h2>
12
-
13
- <p>Forza Horizon Season Pass Code Generator is a software program that creates unique and valid codes that you can redeem on Xbox Live to get the Season Pass for Forza Horizon on Xbox 360. The codes are generated randomly and are not linked to any account or device. This means that you can use them on any Xbox 360 console and share them with your friends.</p>
14
-
15
- <p>Forza Horizon Season Pass Code Generator is easy to use and does not require any installation or registration. You just need to download it from a reliable source online and run it on your computer. You can then choose the region of your Xbox Live account (US, UK, EU, AU, or JP) and click on the generate button. The program will then produce a code that you can copy and paste on your Xbox Live account.</p>
16
-
17
- <h2>How to Use Forza Horizon Season Pass Code Generator?</h2>
18
-
19
- <p>To use Forza Horizon Season Pass Code Generator, you need to follow these simple steps:</p>
20
-
21
- <ol>
22
- <li>Download Forza Horizon Season Pass Code Generator from a reputable source online. You can find it on various websites and blogs that offer free downloads of software tools and games. For example, you can download it from Dailymotion.com, where you can also watch a video tutorial on how to use it: https://www.dailymotion.com/video/xvxxmk</li>
23
- <li>Run Forza Horizon Season Pass Code Generator on your computer. You don't need to install it or register it. Just open the file and wait for it to load.</li>
24
- <li>Select the region of your Xbox Live account (US, UK, EU, AU, or JP) from the drop-down menu. This will ensure that the code will work on your account.</li>
25
- <li>Click on the generate button and wait for a few seconds. The program will then create a code that you can use to unlock the Season Pass for Forza Horizon on Xbox 360.</li>
26
- <li>Copy the code and paste it on your Xbox Live account. You can do this by going to xbox.com/redeemcode and entering the code in the box. You can also do this by going to your Xbox 360 console and selecting redeem code from the dashboard.</li>
27
- <li>Enjoy your free Season Pass for Forza Horizon on Xbox 360. You can now download and play all the car packs and the Rally Expansion Pack that are included in the Season Pass.</li>
28
- </ol>
29
-
30
- <h2>Conclusion</h2>
31
-
32
- <p>Forza Horizon Season Pass Code Generator is a useful tool that allows you to get the Season Pass for Forza Horizon on Xbox 360 for free. It is easy to download and use, and it generates valid codes that you can redeem on Xbox Live. With this tool, you can save money and enjoy all the features and content that the Season Pass has to offer.</p>
33
- <p></p>
34
-
35
- <p>We hope this article was helpful and informative for you. If you liked it, please share it with your friends and colleagues who may be interested in using Forza Horizon Season Pass Code Generator.</p>
36
- <h1>How to Get Forza Horizon Season Pass Code Generator for Free?</h1>
37
-
38
- <p>If you are a fan of racing games, you may have heard of Forza Horizon, a popular open-world racing game for Xbox 360 and Xbox One. Forza Horizon allows you to explore a vast and diverse landscape inspired by Colorado, USA, and participate in various events and challenges. You can also customize and upgrade your cars, as well as collect and trade them with other players.</p>
39
-
40
- <p>However, if you want to enjoy the full experience of Forza Horizon, you may need to get the Season Pass, which gives you access to six monthly car packs, each containing six new cars. The Season Pass also includes the Rally Expansion Pack, which adds new rally cars, events, and achievements to the game. The Season Pass normally costs $49.99, but what if you could get it for free?</p>
41
-
42
- <p>That's where Forza Horizon Season Pass Code Generator comes in handy. This is a tool that generates free codes that you can use to unlock the Season Pass for Forza Horizon on Xbox 360. With this tool, you can save money and enjoy all the benefits of the Season Pass without paying anything.</p>
43
-
44
- <h2>What is Forza Horizon Season Pass Code Generator?</h2>
45
-
46
- <p>Forza Horizon Season Pass Code Generator is a software program that creates unique and valid codes that you can redeem on Xbox Live to get the Season Pass for Forza Horizon on Xbox 360. The codes are generated randomly and are not linked to any account or device. This means that you can use them on any Xbox 360 console and share them with your friends.</p>
47
-
48
- <p>Forza Horizon Season Pass Code Generator is easy to use and does not require any installation or registration. You just need to download it from a reliable source online and run it on your computer. You can then choose the region of your Xbox Live account (US, UK, EU, AU, or JP) and click on the generate button. The program will then produce a code that you can copy and paste on your Xbox Live account.</p>
49
-
50
- <h2>How to Use Forza Horizon Season Pass Code Generator?</h2>
51
-
52
- <p>To use Forza Horizon Season Pass Code Generator, you need to follow these simple steps:</p>
53
-
54
- <ol>
55
- <li>Download Forza Horizon Season Pass Code Generator from a reputable source online. You can find it on various websites and blogs that offer free downloads of software tools and games. For example, you can download it from Dailymotion.com, where you can also watch a video tutorial on how to use it: https://www.dailymotion.com/video/xvxxmk</li>
56
- <li>Run Forza Horizon Season Pass Code Generator on your computer. You don't need to install it or register it. Just open the file and wait for it to load.</li>
57
- <li>Select the region of your Xbox Live account (US, UK, EU, AU, or JP) from the drop-down menu. This will ensure that the code will work on your account.</li>
58
- <li>Click on the generate button and wait for a few seconds. The program will then create a code that you can use to unlock the Season Pass for Forza Horizon on Xbox 360.</li>
59
- <li>Copy the code and paste it on your Xbox Live account. You can do this by going to xbox.com/redeemcode and entering the code in the box. You can also do this by going to your Xbox 360 console and selecting redeem code from the dashboard.</li>
60
- <li>Enjoy your free Season Pass for Forza Horizon on Xbox 360. You can now download and play all the car packs and the Rally Expansion Pack that are included in the Season Pass.</li>
61
- </ol>
62
-
63
- <h2>Conclusion</h2>
64
-
65
- <p>Forza Horizon Season Pass Code Generator is a useful tool that allows you to get the Season Pass for Forza Horizon on Xbox 360 for free. It is easy to download and use, and it generates valid codes that you can redeem on Xbox Live. With this tool, you can save money and enjoy all the features and content that the Season Pass has to offer.</p>
66
-
67
- <p>We hope this article was helpful and informative for you. If you liked it, please share it with your friends and colleagues who may be interested in using Forza Horizon Season Pass Code Generator.</p>
68
- <h2>Conclusion</h2>
69
-
70
- <p>Forza Horizon Season Pass Code Generator is a useful tool that allows you to get the Season Pass for Forza Horizon on Xbox 360 for free. It is easy to download and use, and it generates valid codes that you can redeem on Xbox Live. With this tool, you can save money and enjoy all the features and content that the Season Pass has to offer.</p>
71
-
72
- <p>We hope this article was helpful and informative for you. If you liked it, please share it with your friends and colleagues who may be interested in using Forza Horizon Season Pass Code Generator.</p> 3cee63e6c2<br />
73
- <br />
74
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/Akinsoft Cafeplus 11 Extra Quality Crack 21.md DELETED
@@ -1,104 +0,0 @@
1
- ## Akinsoft Cafeplus 11 Crack 21
2
-
3
-
4
-
5
-
6
-
7
- ![Akinsoft Cafeplus 11 Extra Quality Crack 21](https://www.akinsoft.com.tr/images/cafe_prg_res.jpg)
8
-
9
-
10
-
11
-
12
-
13
- **LINK === [https://kneedacexbrew.blogspot.com/?d=2txjiV](https://kneedacexbrew.blogspot.com/?d=2txjiV)**
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
- # Akinsoft Cafeplus 11 Crack 21: How to Download and Install the Latest Version of Akinsoft Cafeplus for Free
28
-
29
-
30
-
31
- Akinsoft Cafeplus is a popular software for managing internet cafes, gaming centers, and cybercafes. It offers features such as billing, inventory, reporting, security, and remote control. Akinsoft Cafeplus 11 is the latest version of the software, which was released in 2014. However, many users are looking for a way to download and install Akinsoft Cafeplus 11 crack 21, which is a modified version of the software that bypasses the license activation and allows unlimited usage for free.
32
-
33
-
34
-
35
- In this article, we will show you how to download and install Akinsoft Cafeplus 11 crack 21 on your PC. However, we do not recommend using cracked software, as it may contain viruses, malware, or spyware that can harm your computer or compromise your data. Moreover, using cracked software is illegal and unethical, as it violates the intellectual property rights of the software developers. Therefore, we advise you to purchase a legitimate license of Akinsoft Cafeplus 11 from the official website or authorized resellers.
36
-
37
-
38
-
39
- ## How to Download Akinsoft Cafeplus 11 Crack 21
40
-
41
-
42
-
43
- If you still want to download Akinsoft Cafeplus 11 crack 21, you will need to find a reliable source that offers the cracked file. However, this can be very risky, as many websites that claim to provide cracked software are actually scams or phishing sites that may infect your computer with malware or steal your personal information. Therefore, you should be very careful when downloading any file from unknown sources.
44
-
45
-
46
-
47
- One possible source that we found is urobtili.mystrikingly.com[^1^], which claims to offer Akinsoft Cafeplus 11 crack 21 as a .rar file. However, we cannot guarantee the safety or validity of this file, so download it at your own risk. To download the file, you will need to follow these steps:
48
-
49
-
50
-
51
- 1. Go to urobtili.mystrikingly.com[^1^] and scroll down to the bottom of the page.
52
-
53
- 2. Click on the "Download" button next to the file name "Akinsoft Cafeplus 11 Crack 21.rar".
54
-
55
- 3. You will be redirected to another page where you will need to complete a captcha verification.
56
-
57
- 4. After completing the captcha verification, click on the "Get Link" button.
58
-
59
- 5. You will be redirected again to another page where you will see a countdown timer.
60
-
61
- 6. After the countdown timer ends, click on the "Download" button again.
62
-
63
- 7. You will be redirected again to another page where you will see a download link.
64
-
65
- 8. Click on the download link and save the file to your desired location on your PC.
66
-
67
-
68
-
69
- ## How to Install Akinsoft Cafeplus 11 Crack 21
70
-
71
-
72
-
73
- After downloading Akinsoft Cafeplus 11 crack 21, you will need to extract the .rar file using a program such as WinRAR or 7-Zip. You will then see two files: "Akinsoft Cafeplus 11 Crack.exe" and "Akinsoft Cafeplus 11 Setup.exe". To install Akinsoft Cafeplus 11 crack 21 on your PC, you will need to follow these steps:
74
-
75
-
76
-
77
- 1. Run "Akinsoft Cafeplus 11 Setup.exe" and follow the installation wizard.
78
-
79
- 2. Choose your preferred language and accept the terms and conditions.
80
-
81
- 3. Select your destination folder and click on "Install".
82
-
83
- 4. Wait for the installation process to finish and click on "Finish".
84
-
85
- 5. Do not run Akinsoft Cafeplus 11 yet.
86
-
87
- 6. Run "Akinsoft Cafeplus 11 Crack.exe" and click on "Patch".
88
-
89
- 7. Browse for the installation folder of Akinsoft Cafeplus 11 and select it.
90
-
91
- 8. Wait for the patching process to finish and click on "OK".
92
-
93
- 9. You can now run Akinsoft Cafeplus 11 without any license activation.
94
-
95
- 1b8d091108
96
-
97
-
98
-
99
-
100
-
101
-
102
-
103
-
104
-
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/AI Suite 3 The Ultimate Software for ASUS Motherboards on Windows 10.md DELETED
@@ -1,103 +0,0 @@
1
- <br />
2
- <h1>Download AI Suite 3 Windows 10: A Complete Guide</h1>
3
- <p>If you are looking for a way to optimize your ASUS motherboard performance, monitor your system status, and access various ASUS software easily, then you might want to download AI Suite 3 Windows 10. AI Suite 3 is a comprehensive utility that integrates multiple ASUS features and functions into one convenient interface. In this article, we will show you what AI Suite 3 is, why you need it, how to download and install it, how to use it, and how to uninstall it.</p>
4
- <h2>download ai suite 3 windows 10</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://urlin.us/2uSZz3">https://urlin.us/2uSZz3</a></b></p><br /><br />
5
- <h2>What is AI Suite 3 and why do you need it?</h2>
6
- <p>AI Suite 3 is a software application that comes with ASUS motherboards and allows you to control and customize various aspects of your system. It has two main functions: one is to monitor the system status, such as CPU frequency, voltage, temperature, fan speed, etc.; the other is to integrate ASUS software so that you can easily access them through AI Suite 3, such as Fan Xpert, Digi+ VRM, TurboV EVO, EPU, etc.</p>
7
- <h3>AI Suite 3 features and benefits</h3>
8
- <p>Some of the features and benefits of AI Suite 3 are:</p>
9
- <ul>
10
- <li>It lets you overclock your CPU and memory with TurboV EVO, which automatically adjusts the optimal settings for your system.</li>
11
- <li>It lets you save energy and reduce noise with EPU, which intelligently switches between performance and power saving modes.</li>
12
- <li>It lets you control and optimize your fan speed and cooling system with Fan Xpert, which provides multiple presets and custom profiles.</li>
13
- <li>It lets you fine-tune your power delivery and stability with Digi+ VRM, which provides precise voltage control for your CPU and DRAM.</li>
14
- <li>It lets you update your BIOS and drivers with EZ Update, which automatically checks for the latest versions and notifies you when they are available.</li>
15
- <li>It lets you sync your RGB lighting effects with Aura Sync, which supports a variety of compatible devices and components.</li>
16
- </ul>
17
- <h3>AI Suite 3 compatibility and requirements</h3>
18
- <p>AI Suite 3 is compatible with Windows 10 operating system and ASUS motherboards that support it. However, not all ASUS motherboards have the same features and functions in AI Suite 3. The software list may differ from motherboard to motherboard. To check if your motherboard supports AI Suite 3 and what features are available, please refer to the official ASUS website or the user manual of your motherboard.</p>
19
- <h2>How to download and install AI Suite 3 Windows 10</h2>
20
- <p>If you want to download and install AI Suite 3 Windows 10, you can follow these steps:</p>
21
- <p>How to install AI Suite 3 on Windows 10<br />
22
- AI Suite 3 latest version for Windows 10<br />
23
- ASUS AI Suite 3 download for Windows 10<br />
24
- AI Suite 3 Windows 10 compatibility issues<br />
25
- How to uninstall AI Suite 3 from Windows 10<br />
26
- AI Suite 3 Windows 10 not working<br />
27
- AI Suite 3 Windows 10 update error<br />
28
- How to use AI Suite 3 on Windows 10<br />
29
- AI Suite 3 Windows 10 fan control<br />
30
- AI Suite 3 Windows 10 overclocking guide<br />
31
- AI Suite 3 Windows 10 performance boost<br />
32
- AI Suite 3 Windows 10 temperature monitor<br />
33
- AI Suite 3 Windows 10 voltage settings<br />
34
- AI Suite 3 Windows 10 CPU frequency<br />
35
- AI Suite 3 Windows 10 software list<br />
36
- AI Suite 3 Windows 10 system information<br />
37
- AI Suite 3 Windows 10 version check<br />
38
- AI Suite 3 Windows 10 FAQ<br />
39
- AI Suite 3 Windows 10 tutorial video<br />
40
- AI Suite 3 Windows 10 download link<br />
41
- AI Suite 3 Windows 10 driver and utility<br />
42
- AI Suite 3 Windows 10 motherboard support<br />
43
- AI Suite 3 Windows 10 prime x299 edition<br />
44
- AI Suite 3 Windows 10 AsusSetup.exe<br />
45
- AI Suite 3 Windows 10 installation steps<br />
46
- AI Suite 3 Windows 10 uninstallation steps<br />
47
- AI Suite 3 Windows 10 apps and features<br />
48
- AI Suite 3 Windows 10 blue triangle icon<br />
49
- AI Suite 3 Windows 10 CPU usage<br />
50
- AI Suite 3 Windows 10 +12V, +5V, +3.3V range<br />
51
- AI Suite 3 Windows 10 PCH, VRM, PSU temperature<br />
52
- AI Suite 3 Windows 10 fan speed control<br />
53
- AI Suite 3 Windows 10 software open button<br />
54
- AI Suite 3 Windows 10 MyASUS app integration<br />
55
- AI Suite 3 Windows 10 ASUS WebStorage backup<br />
56
- AI Suite 3 Windows 10 official support page<br />
57
- AI Suite 3 Windows 10 download center link<br />
58
- AI Suite 3 Windows 10 model name input<br />
59
- AI Suite 3 Windows 10 operating system selection<br />
60
- AI Suite 3 Windows 10 utility download button<br />
61
- AI Suite 3 Windows 10 extract files and open AsusSetup<br />
62
- AI Suite</p>
63
- <h3>Step 1: Go to ASUS download center</h3>
64
- <p>Open your web browser and go to <a href="(^2^)">ASUS download center</a>. This is where you can find the latest drivers, software, firmware, and user manuals for your ASUS products.</p>
65
- <h3>Step 2: Enter your model name or select a product</h3>
66
- <p>In the search box, enter the model name of your motherboard or select a product category from the drop-down menu. For example, you can type "ROG STRIX Z590-E GAMING" or select "Motherboard" and then "Intel Platform".</p>
67
- <h3>Step 3: Click Driver & Utility and choose your operating system</h3>
68
- <p>After you enter your model name or select a product, you will see a list of downloads for your product. Click the Driver & Utility tab and choose Windows 10 as your operating system from the drop-down menu.</p>
69
- <h3>Step 4: Download the latest AI Suite 3 from Utility</h3>
70
- <p>Scroll down to the Utility section and find the latest version of AI Suite 3. Click the Download button and save the file to your computer. The file name should be something like "AI_Suite_3_V3.00.66.zip".</p>
71
- <h3>Step 5: Extract the files and open AsusSetup</h3>
72
- <p>Once the download is complete, locate the file and extract it to a folder. You can use any file extraction software, such as WinRAR or 7-Zip. After extracting the files, open the folder and double-click AsusSetup.exe to start the installation.</p>
73
- <h3>Step 6: Click the program you want to install and click Install</h3>
74
- <p>A window will pop up with a list of programs that are included in AI Suite 3. You can choose which ones you want to install by checking or unchecking the boxes. For example, if you only want to install Fan Xpert and EZ Update, you can uncheck the other boxes. Then, click Install to proceed.</p>
75
- <h3>Step 7: Click OK when AI Suite 3 has finished the installation</h3>
76
- <p>The installation process may take a few minutes, depending on your system and the programs you selected. When it is done, you will see a message saying "AI Suite 3 has been installed successfully". Click OK to close the window and restart your computer.</p>
77
- <h2>How to use AI Suite 3 Windows 10</h2>
78
- <p>After installing AI Suite 3 Windows 10, you can launch it from the Start menu or the desktop shortcut. You will see a main interface with several icons that represent different functions and features of AI Suite 3. Here are some of the things you can do with AI Suite 3:</p>
79
- <h3>Monitor the system status</h3>
80
- <p>If you click the System Information icon, you will see a dashboard that shows various information about your system, such as CPU frequency, voltage, temperature, fan speed, etc. You can also customize what information you want to see by clicking the Settings icon and selecting or deselecting the items.</p>
81
- <h3>Access ASUS software through AI Suite 3</h3>
82
- <p>If you click the Dual Intelligent Processors icon, you will see a list of ASUS software that are integrated with AI Suite 3, such as TurboV EVO, EPU, Fan Xpert, Digi+ VRM, etc. You can click any of them to open their respective interfaces and adjust their settings according to your preferences.</p>
83
- <h2>How to uninstall AI Suite 3 Windows 10</h2>
84
- <p>If you want to uninstall AI Suite 3 Windows 10, you can follow these steps:</p>
85
- <ol>
86
- <li>Go to Control Panel and click Programs and Features.</li>
87
- <li>Find AI Suite 3 in the list of programs and click Uninstall.</li>
88
- <li>Follow the instructions on the screen to complete the uninstallation process.</li>
89
- <li>Restart your computer.</li>
90
- </ol>
91
- <h2>Conclusion</h2>
92
- <p>In this article, we have shown you how to download AI Suite 3 Windows 10, how to install it, how to use it, and how to uninstall it. AI Suite 3 is a useful utility that allows you to optimize your ASUS motherboard performance, monitor your system status, and access various ASUS software easily. We hope this guide has been helpful for you and that you enjoy using AI Suite 3 Windows 10.</p>
93
- <h2>FAQs</h2>
94
- <ul>
95
- <li><strong>What is AI Suite 3?</strong><br>AI Suite 3 is a software application that comes with ASUS motherboards and allows you to control and customize various aspects of your system.</li>
96
- <li><strong>What are the benefits of AI Suite 3?</strong><br>AI Suite 3 lets you overclock your CPU and memory, save energy and reduce noise, control and optimize your fan speed and cooling system, fine-tune your power delivery and stability, update your BIOS and drivers, sync your RGB lighting effects, and more.</li>
97
- <li><strong>How do I download AI Suite 3 Windows 10?</strong><br>You can download AI Suite 3 Windows 10 from ASUS download center by entering your model name or selecting a product category. Then, click Driver & Utility and choose Windows 10 as your operating system. Then, download the latest AI Suite 3 from Utility and extract the files.</li>
98
- <li><strong>How do I install AI Suite 3 Windows 10?</strong><br>You can install AI Suite 3 Windows 10 by opening the extracted folder and double-clicking AsusSetup.exe. Then, click the program you want to install and click Install. Then, click OK when AI Suite 3 has finished the installation and restart your computer.</li>
99
- <li><strong>How do I use AI Suite 3 Windows 10?</strong><br>You can use AI Suite 3 Windows 10 by launching it from the Start menu or the desktop shortcut. Then, you can monitor the system status by clicking the System Information icon, or access ASUS software by clicking the Dual Intelligent Processors icon.</li>
100
- <li><strong>How do I uninstall AI Suite 3 Windows 10?</strong><br>You can uninstall AI Suite 3 Windows 10 by going to Control Panel and clicking Programs and Features. Then, find AI Suite 3 in the list of programs and click Uninstall. Then, follow the instructions on the screen to complete the uninstallation process and restart your computer.</li>
101
- </ul></p> 197e85843d<br />
102
- <br />
103
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us APK How to Play the Game that Everyone is Talking About.md DELETED
@@ -1,151 +0,0 @@
1
-
2
- <h1>How to Download and Play Among Us on Android with APKSum</h1>
3
- <p>Among Us is one of the most popular and addictive games of the year. It is a multiplayer game of teamwork and betrayal, where you have to work together with your crewmates to prepare your spaceship for departure, while avoiding being killed by one or more impostors who are secretly among you. If you are looking for a way to download and play this game on your Android device, then you have come to the right place. In this article, we will show you how to use APKSum, a website that offers free APK downloads for Android games and apps, to get Among Us on your phone or tablet. We will also give you some tips and tricks for playing the game and having fun with your friends or online.</p>
4
- <h2>What is Among Us?</h2>
5
- <h3>A brief introduction to the game and its features</h3>
6
- <p>Among Us is a game developed by Innersloth, an indie studio based in Washington, USA. It was released in 2018, but it gained a huge surge of popularity in 2020, thanks to many streamers and YouTubers who played it online. The game has been downloaded over 500 million times on Google Play Store, and it has won several awards, such as the Best Multiplayer Game and the Best Mobile Game at The Game Awards 2020.</p>
7
- <h2>among us apksum</h2><br /><p><b><b>Download File</b> &#127775; <a href="https://urlin.us/2uSUmz">https://urlin.us/2uSUmz</a></b></p><br /><br />
8
- <p>The game is set in a spaceship, where you can play with 4 to 15 players online or over local WiFi. You can choose to be either a crewmate or an impostor, depending on the game mode. As a crewmate, your goal is to complete all the tasks assigned to you, such as fixing wires, scanning cards, or fueling engines. You also have to find and vote out the impostor(s) who are trying to sabotage the ship and kill everyone. As an impostor, your goal is to kill enough crewmates without being caught, or prevent them from completing their tasks. You can also use vents to move around quickly, or sabotage systems to cause chaos and confusion.</p>
9
- <h3>Why is it so popular and fun to play?</h3>
10
- <p>Among Us is a game that combines strategy, deception, social interaction, and humor. It is fun to play because it offers a lot of variety and replay value. You can customize your character with different colors, hats, skins, and pets. You can also change the settings and rules of the game, such as the number of impostors, tasks, voting time, kill cooldown, vision range, etc. You can also choose from four different maps to play in: The Skeld, MIRA HQ, Polus, and the Airship. Each map has its own layout, features, tasks <p>Among Us is also fun to play because it requires a lot of communication and collaboration with other players. You can use the chat or voice feature to talk to your teammates, accuse or defend yourself, or share information. You can also play with your friends or join a public lobby to meet new people. The game is full of hilarious moments, such as when someone makes a silly mistake, lies convincingly, or gets caught red-handed. The game is also constantly updated with new content, such as new maps, modes, roles, cosmetics, and more.</p>
11
- <h2>What is APKSum?</h2>
12
- <h3>A website that offers free APK downloads for Android games and apps</h3>
13
- <p>APKSum is a website that provides free and safe APK downloads for Android games and apps. APK stands for Android Package Kit, which is a file format that contains the code, resources, and metadata of an Android application. APK files can be used to install apps or games on Android devices without using the Google Play Store. This can be useful for various reasons, such as:</p>
14
- <ul>
15
- <li>Accessing apps or games that are not available in your region or country</li>
16
- <li>Getting the latest updates or versions of apps or games before they are released on the Play Store</li>
17
- <li>Downloading apps or games that have been removed from the Play Store due to policy violations or other issues</li>
18
- <li>Installing apps or games that are modified or hacked to unlock premium features or remove ads</li>
19
- <li>Backing up your apps or games in case you lose them or switch devices</li>
20
- </ul>
21
- <h3>The benefits of using APKSum to download Among Us</h3>
22
- <p>There are many websites that offer APK downloads for Android games and apps, but not all of them are reliable or trustworthy. Some of them may contain malware, viruses, spyware, or other harmful software that can damage your device or steal your personal information. Some of them may also provide fake or outdated APK files that do not work properly or cause errors. That is why you should be careful and choose a reputable website to download APK files from.</p>
23
- <p>One of the best websites that you can use to download APK files for Android games and apps is APKSum. APKSum is a website that has been around since 2016, and it has millions of users from all over the world. It has a large and diverse collection of APK files for various categories, such as action, adventure, arcade, puzzle, racing, simulation, sports, strategy, etc. It also has a user-friendly interface and a fast and secure download system. Here are some of the benefits of using APKSum to download Among Us:</p>
24
- <p>among us apksum download free<br />
25
- among us apksum latest version<br />
26
- among us apksum mod menu<br />
27
- among us apksum online play<br />
28
- among us apksum update 2023<br />
29
- among us apksum for android<br />
30
- among us apksum for pc<br />
31
- among us apksum for ios<br />
32
- among us apksum for mac<br />
33
- among us apksum for windows<br />
34
- among us apksum for linux<br />
35
- among us apksum for chromebook<br />
36
- among us apksum for firestick<br />
37
- among us apksum for smart tv<br />
38
- among us apksum for xbox one<br />
39
- among us apksum for ps4<br />
40
- among us apksum for nintendo switch<br />
41
- among us apksum hack version<br />
42
- among us apksum unlock all skins<br />
43
- among us apksum unlock all pets<br />
44
- among us apksum unlock all hats<br />
45
- among us apksum always impostor<br />
46
- among us apksum no kill cooldown<br />
47
- among us apksum unlimited emergency meetings<br />
48
- among us apksum voice chat enabled<br />
49
- among us apksum custom maps<br />
50
- among us apksum airship map<br />
51
- among us apksum polus map<br />
52
- among us apksum mira hq map<br />
53
- among us apksum the skeld map<br />
54
- among us apksum new roles<br />
55
- among us apksum sheriff role<br />
56
- among us apksum jester role<br />
57
- among us apksum doctor role<br />
58
- among us apksum engineer role<br />
59
- among us apksum detective role<br />
60
- among us apksum spy role<br />
61
- among us apksum zombie mode<br />
62
- among us apksum hide and seek mode<br />
63
- among us apksum infection mode<br />
64
- among us apksum prop hunt mode<br />
65
- among us apksum murder mystery mode<br />
66
- among us apksum trivia mode<br />
67
- among us apksum karaoke mode<br />
68
- among us apksum drawing mode<br />
69
- among us apksum bingo mode<br />
70
- among us apksum chess mode<br />
71
- among us apksum tic tac toe mode<br />
72
- among us apksum sudoku mode</p>
73
- <ul>
74
- <li>You can get the latest version of Among Us as soon as it is released by the developers</li>
75
- <li>You can download Among Us without any registration or subscription fees</li>
76
- <li>You can download Among Us without any annoying ads or pop-ups</li>
77
- <li>You can download Among Us without any risk of malware, viruses, spyware, or other harmful software</li>
78
- <li>You can download Among Us without any compatibility issues with your device or operating system</li>
79
- <li>You can download Among Us without any network restrictions or limitations</li>
80
- </ul>
81
- <h2>How to Download and Install Among Us from APKSum</h2>
82
- <h3>Step 1: Visit the APKSum website and search for Among Us</h3>
83
- <p>The first step to download and install Among Us from APKSum is to visit the website and search for the game. You can use any web browser on your device to access the website. The website address is <a href="">https://www.apksum.com/</a>. Once you are on the homepage of the website, you will see a search bar at the top. Type "Among Us" in the search bar and press enter. You will see a list of results related to the game.</p>
84
- <h3>Step 2: Choose the latest version of the game and download the APK file</h3>
85
- <p>The next step is to choose the latest version of the game and download the APK file. You will see different versions of the game with different release dates and sizes. You should always choose the latest version of the game because it will have the most features, improvements, bug fixes, and security patches. To choose the latest version of the game, look for the one with the highest number and the most recent date. For example, as of June 2021, the latest version of Among Us is 2021.6.15, which was released on June 15th. To download the APK file, click on the green "Download" button next to the version you want. You will see a pop-up window asking you to confirm your download. Click on "OK" to start downloading.</p>
86
- <h3>Step 3 <h3>Step 3: Enable unknown sources on your device and install the APK file</h3>
87
- <p>The third step is to enable unknown sources on your device and install the APK file. Unknown sources are sources that are not verified by Google or the Play Store. By default, Android devices do not allow installing apps or games from unknown sources for security reasons. However, since you are downloading Among Us from a trusted website like APKSum, you can safely enable unknown sources and install the game. To enable unknown sources, follow these steps:</p>
88
- <ol>
89
- <li>Go to your device's settings and look for the security or privacy option</li>
90
- <li>Find the option that says "Unknown sources" or "Install unknown apps" and toggle it on</li>
91
- <li>You may see a warning message that says installing from unknown sources may harm your device. Ignore it and tap on "OK" or "Allow"</li>
92
- </ol>
93
- <p>Once you have enabled unknown sources, you can install the APK file. To install the APK file, follow these steps:</p>
94
- <ol>
95
- <li>Go to your device's file manager and look for the folder where you downloaded the APK file</li>
96
- <li>Tap on the APK file and you will see a pop-up window asking you to confirm the installation</li>
97
- <li>Tap on "Install" and wait for the installation to finish</li>
98
- <li>You may see a message that says "App installed" or "Installation successful". Tap on "Open" or "Done"</li>
99
- </ol>
100
- <h3>Step 4: Launch the game and enjoy playing with your friends or online</h3>
101
- <p>The final step is to launch the game and enjoy playing with your friends or online. You will see an icon of Among Us on your device's home screen or app drawer. Tap on it and you will see the game's main menu. You can choose to play online, local, or freeplay. Online mode allows you to join or host a game with other players from around the world. Local mode allows you to play with your friends over WiFi. Freeplay mode allows you to practice and explore the maps by yourself. You can also access the settings, shop, account, and how to play options from the main menu.</p>
102
- <h2>Tips and Tricks for Playing Among Us on Android</h2>
103
- <h3>How to customize your character, settings, and game modes</h3>
104
- <p>One of the fun aspects of Among Us is that you can customize your character, settings, and game modes to suit your preferences and style. You can customize your character by tapping on the laptop icon in the lobby or in freeplay mode. You can change your name, color, hat, skin, and pet. You can also customize the settings by tapping on the gear icon in the lobby or in freeplay mode. You can change the language, sound, music, chat, graphics, and controls. You can also customize the game modes by tapping on the customize icon in the lobby or in freeplay mode. You can change the map, number of impostors, tasks, voting time, kill cooldown, vision range, etc.</p>
105
- <h3>How to communicate with other players using chat or voice</h3>
106
- <p>Another important aspect of Among Us is that you have to communicate with other players using chat or voice. Communication is essential for teamwork, deception, investigation, and voting. You can communicate with other players by tapping on the chat icon in the game or in meetings. You can type messages using text or use quick chat options such as "Where?", "Who?", "Why?", etc. You can also communicate with other players by using voice chat apps such as Discord, Skype, Zoom, etc. Voice chat apps allow you to talk to other players using audio instead of text. Voice chat apps can make the game more immersive and fun, but they also require more trust and coordination among players.</p>
107
- <h3>How to play as a crewmate or an impostor and win the game</h3>
108
- <p>The final aspect of Among Us is that you have to play as a crewmate or an impostor and win the game. Playing as a crewmate or an impostor requires different skills and strategies. Here are some tips and tricks for playing as a crewmate or an impostor:</p>
109
- <table>
110
- <tr><th>Crewmate</th><th>Impostor</th></tr>
111
- <tr><td>- Complete all your tasks as fast as possible</td><td>- Pretend to do tasks but don't fill up the task bar</td></tr>
112
- <tr><td>- Stay with other crewmates or in groups for safety</td><td>- Isolate yourself from other crewmates or split them up</td></tr>
113
- <tr><td>- Report dead bodies or call emergency meetings when you find evidence</td><td>- Kill crewmates when no one is around or vent away from the crime scene</td></tr>
114
- <tr><td>- Be observant and remember who was where and doing what</td><td>- Be deceptive and lie about your whereabouts and actions</td></tr>
115
- <tr><td>- Ask questions and share information during meetings</td><td>- Avoid suspicion and blame others during meetings</td></tr>
116
- <tr><td>- Vote wisely and eliminate the impostor(s)</td><td>- Manipulate the voting and save yourself or your partner(s)</td></tr>
117
- </table>
118
- <h2>Conclusion</h2>
119
- <p>Among Us is a game that you can download and play on your Android device with APKSum. APKSum is a website that offers free and safe APK downloads for Android games and apps. You can use APKSum to get the latest version of Among Us without any hassle or risk. You can also customize your character, settings, and game modes, communicate with other players using chat or voice, and play as a crewmate or an impostor and win the game. Among Us is a game that will keep you entertained and engaged for hours. So what are you waiting for? Download Among Us from APKSum today and join the fun!</p>
120
- <h2>FAQs</h2>
121
- <h3>Q1: Is Among Us free to play on Android?</h3>
122
- <p>A1: Yes, Among Us is free to play on Android. You can download it from the Google Play Store or from APKSum without any charge. However, the game does have some in-app purchases that you can buy to support the developers and get some extra features, such as skins, hats, pets, etc.</p>
123
- <h3>Q2: Is APKSum safe and legal to use?</h3>
124
- <p>A2: Yes, APKSum is safe and legal to use. APKSum is a website that only provides original and unmodified APK files from the official sources. It does not host any pirated, cracked, or hacked APK files that may contain malware, viruses, spyware, or other harmful software. It also does not violate any laws or regulations regarding intellectual property rights or distribution of apps or games.</p>
125
- <h3>Q3: How can I update Among Us on Android?</h3>
126
- <p>A3: You can update Among Us on Android by following these steps:</p>
127
- <ol>
128
- <li>Go to the APKSum website and search for Among Us</li>
129
- <li>Choose the latest version of the game and download the APK file</li>
130
- <li>Install the APK file over the existing version of the game</li>
131
- <li>Launch the game and enjoy the new features and improvements</li>
132
- </ol>
133
- <h3>Q4: How can I play Among Us with friends on different devices?</h3>
134
- <p>A4: You can play Among Us with friends on different devices by following these steps:</p>
135
- <ol>
136
- <li>Make sure that all your friends have downloaded and installed Among Us on their devices</li>
137
- <li>Create a private lobby in online mode by tapping on "Host" and choosing a map, number of impostors, tasks, etc.</li>
138
- <li>Share the code of your lobby with your friends by sending it to them via chat, voice, or social media</li>
139
- <li>Ask your friends to join your lobby by tapping on "Private" and entering the code</li>
140
- <li>Start the game and have fun with your friends</li>
141
- </ol>
142
- <h3>Q5: How can I report bugs or issues with Among Us on Android?</h3>
143
- <p>A5: You can report bugs or issues with Among Us on Android by following these steps:</p>
144
- <ol>
145
- <li>Go to the settings menu in the game and tap on "Report Bug"</li>
146
- <li>You will be redirected to a Google Form where you can fill out your details, device model, game version, bug description, screenshots, etc.</li>
147
- <li>Submit the form and wait for a response from the developers</li>
148
- <li>You can also contact the developers directly via email at [email protected] or via social media at @InnerslothDevs on Twitter or Innersloth on Facebook</li>
149
- </ol></p> 197e85843d<br />
150
- <br />
151
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blackmagic Software The All-in-One Software Tool for Creative Professionals.md DELETED
@@ -1,133 +0,0 @@
1
-
2
- <h1>Download Black Magic Software: A Guide for Video Editors</h1>
3
- <p>If you are looking for a powerful and versatile software for video editing and production, you might want to consider downloading Black Magic software. Black Magic software is a suite of products developed by Black Magic Design, a leading company in the field of digital cinema and broadcast technology. In this article, we will explain what Black Magic software is, why you should download it, and how to do it. We will also provide some tips for installing and using Black Magic software effectively.</p>
4
- <h2>What is Black Magic Software?</h2>
5
- <p>Black Magic software is a term that refers to the software products created by Black Magic Design, a company that specializes in designing and manufacturing hardware and software for the film, television, and streaming industries. Black Magic Design was founded in 2001 by Grant Petty, a former engineer and post-production specialist who wanted to create affordable and innovative solutions for video professionals. Since then, the company has grown to become one of the most respected and influential players in the market, with offices in Australia, USA, UK, Japan, Singapore, and China.</p>
6
- <h2>download black magic software</h2><br /><p><b><b>Download Zip</b> &#10004;&#10004;&#10004; <a href="https://urlin.us/2uT2UV">https://urlin.us/2uT2UV</a></b></p><br /><br />
7
- <h3>Black Magic Design: The Company Behind the Software</h3>
8
- <p>Black Magic Design is known for its high-quality products that combine advanced technology, user-friendly design, and competitive pricing. Some of the products that the company offers include professional cameras, live production switchers, capture and playback devices, standards converters, broadcast converters, video and audio monitors, routers, streaming and encoding devices, and more. The company also provides support and training services for its customers, as well as a community forum where users can share their experiences and feedback.</p>
9
- <h3>Black Magic Software Products: DaVinci Resolve, ATEM, and More</h3>
10
- <p>One of the most popular products that Black Magic Design offers is DaVinci Resolve, a software application that combines video editing, color correction, visual effects, motion graphics, and audio post-production in one platform. DaVinci Resolve is used by professionals and amateurs alike for creating stunning videos for various purposes, such as films, documentaries, commercials, music videos, web videos, etc. DaVinci Resolve is available in two versions: DaVinci Resolve Studio, which is a paid version that offers more features and capabilities; and DaVinci Resolve Free Version which is a free version that still offers a lot of functionality.</p>
11
- <p>Another product that Black Magic Design offers is ATEM, a software application that allows users to control live production switchers from their computers. ATEM enables users to switch between multiple cameras, add graphics and transitions, mix audio sources, record program output, stream live video online, and more. ATEM works with various models of ATEM switchers that are designed for different levels of production complexity and budget.</p>
12
- <p>How to download black magic software for free<br />
13
- Download black magic software for Mac or Windows<br />
14
- Black magic software tutorial and tips<br />
15
- Black magic software review and comparison<br />
16
- Best black magic software alternatives and competitors<br />
17
- Download black magic software for live production and streaming<br />
18
- Download black magic software for color correction and grading<br />
19
- Download black magic software for visual effects and motion graphics<br />
20
- Download black magic software for audio post production and mixing<br />
21
- Download black magic software for professional editing and cutting<br />
22
- Download black magic software for broadcast and cinema cameras<br />
23
- Download black magic software for standards conversion and scaling<br />
24
- Download black magic software for capture and playback devices<br />
25
- Download black magic software for disk recorders and storage solutions<br />
26
- Download black magic software for network storage and media management<br />
27
- Download black magic software for routing and distribution systems<br />
28
- Download black magic software for multi view monitoring and display<br />
29
- Download black magic software for video and audio monitoring tools<br />
30
- Download black magic software for streaming and encoding hardware<br />
31
- Download black magic software for duplication and transcoding devices<br />
32
- Download black magic software updates and new features<br />
33
- Download black magic software manuals and instruction guides<br />
34
- Download black magic software support and troubleshooting resources<br />
35
- Download black magic software community forum and splice platform<br />
36
- Download black magic software developer kit and API documentation<br />
37
- Where to download black magic software legally and safely<br />
38
- How to install and activate black magic software on your computer<br />
39
- How to uninstall and remove black magic software from your system<br />
40
- How to upgrade and update your black magic software to the latest version<br />
41
- How to backup and restore your black magic software projects and settings<br />
42
- How to optimize and improve your black magic software performance and speed<br />
43
- How to customize and configure your black magic software preferences and options<br />
44
- How to use keyboard shortcuts and hotkeys in your black magic software workflow<br />
45
- How to import and export media files in your black magic software format<br />
46
- How to edit, trim, crop, rotate, zoom, pan, stabilize, retime, reverse, speed up, slow down, freeze frame, split, merge, splice, join, cut, copy, paste, duplicate, delete, ripple, roll, slip, slide, extend, shorten, nudge, snap, align, sync, link, unlink, group, ungroup, lock, unlock, enable, disable, mute, solo, hide, show, rename, color code, mark, flag, rate, tag, label, comment, annotate media clips in your black magic software timeline<br />
47
- How to apply transitions</p>
48
- <p>Other products that Black Magic Design offers include Fusion Studio (a software application for creating visual effects and motion graphics), Ultimatte (a software application for creating realistic compositing effects), Cintel Scanner (a device that scans film into digital files), Media Express (a software application for capturing and playing back video), Disk Speed Test (a software application that measures the performance of hard disks), Desktop Video Setup (a software application that configures capture and playback devices), etc.</p>
49
- <h2>Why Download Black Magic Software?</h2>
50
- <p>If you are interested in video editing or production, you might wonder why you should download Black Magic software instead of other alternatives. Here are some of the reasons why Black Magic software is worth downloading:</p>
51
- <h3>The Benefits of Black Magic Software for Video Editing</h3>
52
- <ul>
53
- <li>Black Magic software offers a comprehensive solution for video editing and production. You can use one software application to perform various tasks, such as editing, color grading, visual effects, motion graphics, and audio post-production. You don't need to switch between different applications or formats, which saves you time and hassle.</li>
54
- <li>Black Magic software offers a professional quality for video editing and production. You can use the same software and tools that are used by Hollywood studios and filmmakers. You can achieve stunning results with high-resolution, high-dynamic-range, and high-frame-rate formats. You can also work with different types of media, such as film, digital, and analog.</li>
55
- <li>Black Magic software offers a user-friendly interface for video editing and production. You can easily navigate and customize the software according to your preferences and needs. You can also access a variety of tutorials, manuals, and support resources that will help you learn and master the software.</li>
56
- </ul>
57
- <h3>The Features of Black Magic Software for Video Production</h3>
58
- <ul>
59
- <li>Black Magic software offers a range of features that enable you to create amazing videos for different purposes and platforms. Some of the features include:</li>
60
- <li>Cutting Page: A feature that allows you to quickly edit your videos using a simple timeline and intuitive tools. You can trim, splice, add transitions, adjust audio levels, and more.</li>
61
- <li>Edit Page: A feature that allows you to fine-tune your videos using a more advanced timeline and tools. You can add multiple tracks, sync audio and video, add titles, effects, and more.</li>
62
- <li>Color Page: A feature that allows you to enhance the look and feel of your videos using powerful color correction and grading tools. You can adjust the exposure, contrast, saturation, hue, temperature, tint, and more. You can also use curves, wheels, qualifiers, trackers, masks, LUTs, and more.</li>
63
- <li>Fusion Page: A feature that allows you to add stunning visual effects and motion graphics to your videos using a node-based workflow. You can create 2D and 3D effects, such as particles, text, shapes, masks, keying, tracking, stabilization, rotoscoping, and more.</li>
64
- <li>Fairlight Page: A feature that allows you to improve the sound quality of your videos using professional audio post-production tools. You can mix multiple audio tracks, add effects, filters, EQs, compressors, limiters, noise reduction, and more.</li>
65
- <li>Deliver Page: A feature that allows you to export your videos in different formats and resolutions for various destinations. You can choose from presets or customize your own settings for web, broadcast, cinema, or disk delivery.</li>
66
- </ul>
67
- <h3>The Compatibility of Black Magic Software with Different Devices and Formats</h3>
68
- <p>Black Magic software is compatible with different devices and formats that you might use for video editing or production. Some of the devices and formats that Black Magic software supports include:</p>
69
- <table>
70
- <tr><th>Devices</th><th>Formats</th></tr>
71
- <tr><td>Black Magic Design Cameras</td><td>Blackmagic RAW</td></tr>
72
- <tr><td>Other Professional Cameras</td><td>ProRes RAW</td></tr>
73
- <tr><td>Consumer Cameras</td><td>H.264/AVC</td></tr>
74
- <tr><td>Smartphones</td><td>H.265/HEVC</td></tr>
75
- <tr><td>Capture Cards</td><td>SDI/HDMI</td></tr>
76
- <tr><td>Storage Devices</td><td>SSD/HDD/USB/SD</td></tr>
77
- <tr><td>Computers</td><td>Windows/Mac/Linux</td></tr>
78
- </table>
79
- <h2>How to Download Black Magic Software?</h2>
80
- <p>If you are convinced that Black Magic software is the right choice for you, you might wonder how to download it. Here are the steps that you need to follow:</p>
81
- <h3>The Requirements for Downloading Black Magic Software</h3>
82
- <p>Before you download Black Magic software, you need to make sure that your device meets the minimum requirements for running the software smoothly. The requirements may vary depending on the product and version that you choose. However, here are some general guidelines:</p>
83
- <ul>
84
- <li>You need a computer with a 64-bit processor (Intel or AMD) and at least 16 GB of RAM (32 GB recommended).</li>
85
- <li>You need a graphics card with at least 2 GB of VRAM (4 GB recommended) that supports OpenGL or Metal.</li>
86
- <li>You need a monitor with at least 1366 x 768 resolution (1920 x 1080 recommended) that supports 10-bit color.</li>
87
- <li>You need an internet connection for downloading the software and activating the license.</li>
88
- <li>You need enough disk space for installing the software and storing your media files.</li>
89
- </ul>
90
- <h3>The Steps for Download ing Black Magic Software</h3>
91
- <p>Once you have checked the requirements, you can proceed to download Black Magic software from the official website of Black Magic Design. Here are the steps that you need to follow:</p>
92
- <ol>
93
- <li>Go to the <a href="">Black Magic Design website</a> and click on the Support tab.</li>
94
- <li>Select the product that you want to download from the list of categories. For example, if you want to download DaVinci Resolve, click on DaVinci Resolve and Fusion Software.</li>
95
- <li>Choose the version that you want to download from the list of available downloads. For example, if you want to download DaVinci Resolve 17, click on DaVinci Resolve 17.</li>
96
- <li>Read the terms and conditions and click on Register and Download. You will need to provide some information, such as your name, email address, country, and phone number.</li>
97
- <li>After you register, you will receive a confirmation email with a link to download the software. Click on the link and save the file to your device.</li>
98
- <li>Once the download is complete, you can open the file and follow the instructions to install the software.</li>
99
- </ol>
100
- <h3>The Tips for Installing and Using Black Magic Software</h3>
101
- <p>After you download and install Black Magic software, you can start using it for your video editing or production projects. Here are some tips that will help you get the most out of Black Magic software:</p>
102
- <ul>
103
- <li>Update your software regularly. Black Magic Design releases new versions and updates for its software products frequently. These updates may include bug fixes, performance improvements, new features, and compatibility enhancements. You can check for updates from the software itself or from the website.</li>
104
- <li>Learn from the tutorials and manuals. Black Magic Design provides a lot of resources for learning how to use its software products effectively. You can access online tutorials, user manuals, training videos, webinars, podcasts, blogs, and more from the website or from the software itself.</li>
105
- <li>Join the community forum. Black Magic Design has a community forum where users can interact with each other and with the company representatives. You can ask questions, share tips, give feedback, report issues, and more on the forum. You can also find useful information and solutions from previous posts.</li>
106
- <li>Explore the features and tools. Black Magic software offers a lot of features and tools that can help you create amazing videos. You can experiment with different settings, effects, transitions, filters, etc. to see how they affect your videos. You can also use presets or templates to speed up your workflow or get inspiration.</li>
107
- <li>Backup your files and projects. Black Magic software allows you to save your files and projects in different formats and locations. You can export your videos to your device, external drive, cloud storage, or online platform. You can also save your projects as archives or backups that you can restore later if needed.</li>
108
- </ul>
109
- <h2>Conclusion</h2>
110
- <p>In conclusion, Black Magic software is a great option for video editors and producers who want a powerful and versatile software for their projects. Black Magic software is developed by Black Magic Design, a reputable company that offers a range of products for the film, television, and streaming industries. Black Magic software includes DaVinci Resolve, ATEM, Fusion Studio, Ultimatte, Cintel Scanner, Media Express, Disk Speed Test, Desktop Video Setup, and more. You can download Black Magic software from the official website of Black Magic Design after checking the requirements and registering your details. You can also use some tips for installing and using Black Magic software effectively.</p>
111
- <h3>Call to Action</h3>
112
- <p>If you are ready to download Black Magic software and start creating amazing videos for your personal or professional purposes, don't wait any longer. Visit the <a href="">Black Magic Design website</a> today and choose the product that suits your needs best. You will be amazed by what you can do with Black Magic software!</p>
113
- <h3>FAQs</h3>
114
- <ul>
115
- <li><b>Q: How much does Black Magic software cost?</b></li>
116
- <li>A: Some of the products that Black Magic Design offers are free to download and use, such as DaVinci Resolve Free Version, Media Express, Disk Speed Test, and Desktop Video Setup. Other products, such as DaVinci Resolve Studio, ATEM, Fusion Studio, Ultimatte, and Cintel Scanner, require a one-time payment or a subscription fee to access the full features and capabilities. You can check the prices and plans for each product on the website.</li>
117
- <li><b>Q: Is Black Magic software compatible with other software?</b></li>
118
- <li>A: Yes, Black Magic software is compatible with other software that you might use for video editing or production. For example, you can import and export files from Adobe Premiere Pro, Final Cut Pro X, Avid Media Composer, and other popular applications. You can also use plugins and extensions from third-party developers to enhance the functionality of Black Magic software.</li>
119
- <li><b>Q: What are the advantages of using Black Magic software over other software?</b></li>
120
- <li>A: Black Magic software has several advantages over other software that you might use for video editing or production. Some of the advantages are:</li>
121
- <ul>
122
- <li>Black Magic software offers a comprehensive solution that covers all aspects of video editing and production in one platform. You don't need to use multiple applications or formats to complete your projects.</li>
123
- <li>Black Magic software offers a professional quality that matches the standards of the film, television, and streaming industries. You can work with high-resolution, high-dynamic-range, and high-frame-rate formats that deliver stunning results.</li>
124
- <li>Black Magic software offers a user-friendly interface that is easy to learn and customize. You can access a variety of tutorials, manuals, and support resources that will help you master the software.</li>
125
- <li>Black Magic software offers a competitive pricing that is affordable for both professionals and amateurs. You can use some of the products for free or pay a reasonable fee for the others.</li>
126
- </ul>
127
- <li><b>Q: How can I get help or support for using Black Magic software?</b></li>
128
- <li>A: If you need help or support for using Black Magic software, you can contact the customer service team of Black Magic Design via phone or email. You can also visit the support page on the website to access online resources, such as downloads, updates, manuals, videos, webinars, podcasts, blogs, etc. Additionally, you can join the community forum where you can interact with other users and company representatives.</li>
129
- <li><b>Q: Where can I find examples or inspiration for using Black Magic software?</b></li>
130
- <li>A: If you want to see some examples or get some inspiration for using Black Magic software, you can visit the gallery page on the website where you can watch videos created by other users with Black Magic software. You can also follow the social media accounts of Black Magic Design where you can see the latest news, updates, and showcases of Black Magic software.</li>
131
- </ul></p> 197e85843d<br />
132
- <br />
133
- <br />
 
spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer on PC A Complete Guide to Download and Install on Windows 10.md DELETED
@@ -1,105 +0,0 @@
1
- <br />
2
- <h1>How to Download Car Parking Multiplayer for Windows 10</h1>
3
- <p>Car Parking Multiplayer is a popular open-world multiplayer game that lets you drive, park, and customize over 130 cars in various environments. You can also compete against other players in racing, exchange cars, chat with friends, role-play, and explore the free world with real gas stations and car services. If you are a fan of car games and want to play Car Parking Multiplayer on your PC, this article will show you how to download it for Windows 10 using two methods.</p>
4
- <h2>download car parking multiplayer for windows 10</h2><br /><p><b><b>Download</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://jinyurl.com/2uNOA9">https://jinyurl.com/2uNOA9</a></b></p><br /><br />
5
- <h2>What is Car Parking Multiplayer?</h2>
6
- <p>Car Parking Multiplayer is a simulation game developed by olzhass and published by Aidana Kengbeiil. It has over 100 million downloads on Google Play Store and a rating of 4.4 stars out of 5. The game offers more than just parking: you can experience the realistic physics, graphics, and sounds of various cars, from sports cars to trucks, as well as tune them to your liking. You can also interact with other players in the multiplayer mode, where you can race, chat, trade cars, join police mode, or become a taxi, cargo, or delivery driver. The game also features a drone mode, where you can explore the world and take stunning screenshots, and a daily tasks and rewards system, where you can collect coins and presents by completing challenges and joining the game.</p>
7
- <h3>Features of Car Parking Multiplayer</h3>
8
- <ul>
9
- <li>Multiplayer open world mode with free walking, free open world with real gas stations and car services, voice chat, friend list, police mode, role play, and thousands of real players every day.</li>
10
- <li>Car customization with adjustable suspension, wheel angle, engine tuning, visual auto tunings, dynamic vinyls, car body parts, and car plate.</li>
11
- <li>High-quality open world with highly-detailed environments, 130+ cars with the real interior, buildings with interior, and realistic weather effects.</li>
12
- <li>Interesting gameplay with 82 real-life parking and driving challenges, different vehicles such as tow truck, pickup, trucks, sport and classic cars, character customization with variety of clothes and skins, animations and reactions.</li>
13
- </ul>
14
- <h3>Why play Car Parking Multiplayer on PC?</h3>
15
- <p>While Car Parking Multiplayer is designed for mobile devices, playing it on PC can give you several advantages. For instance:</p>
16
- <ul>
17
- <li>You can enjoy a bigger screen and better graphics quality.</li>
18
- <li>You can use your mouse and keyboard to gain more control and agility.</li>
19
- <li>You can sync your progress and game library across devices with a single sign-in to your Google account.</li>
20
- <li>You can avoid battery drain and overheating issues on your phone.</li>
21
- </ul>
22
- <h2>How to download Car Parking Multiplayer for Windows 10</h2>
23
- <p>There are two methods to download Car Parking Multiplayer for Windows 10: using BlueStacks or using Google Play Games. Both methods require you to have a Google account to access the game from the Google Play Store. Here are the steps for each method:</p>
24
- <h3>Method 1: Using BlueStacks</h3>
25
- <p>BlueStacks is one of the best Android emulators for Windows 10 that allows you to play Android games on your PC. It has a user-friendly interface, a fast performance, and a large collection of games. To use BlueStacks to download Car Parking Multiplayer for Windows 10, follow these steps:</p>
26
- <p>How to install car parking multiplayer on windows 10/11<br />
27
- Car parking multiplayer system requirements for windows PC<br />
28
- Car parking multiplayer gameplay on windows 10/11<br />
29
- Car parking multiplayer free download for windows 10/11<br />
30
- Car parking multiplayer PC controls and settings<br />
31
- Car parking multiplayer windows 10/11 emulator<br />
32
- Car parking multiplayer best cars and tuning on windows PC<br />
33
- Car parking multiplayer online mode on windows 10/11<br />
34
- Car parking multiplayer tips and tricks for windows PC<br />
35
- Car parking multiplayer mod apk download for windows 10/11<br />
36
- Car parking multiplayer update and new features on windows PC<br />
37
- Car parking multiplayer cheats and hacks for windows 10/11<br />
38
- Car parking multiplayer review and rating on windows PC<br />
39
- Car parking multiplayer vs real car parking 2 on windows 10/11<br />
40
- Car parking multiplayer custom maps and modes on windows PC<br />
41
- How to play car parking multiplayer with friends on windows 10/11<br />
42
- Car parking multiplayer support and feedback for windows PC<br />
43
- Car parking multiplayer bugs and issues on windows 10/11<br />
44
- Car parking multiplayer alternatives and similar games on windows PC<br />
45
- Car parking multiplayer bluestacks vs memu play on windows 10/11<br />
46
- How to uninstall car parking multiplayer on windows 10/11<br />
47
- Car parking multiplayer comparison with real car driving simulator on windows PC<br />
48
- Car parking multiplayer graphics and sound quality on windows 10/11<br />
49
- Car parking multiplayer walkthrough and guide on windows PC<br />
50
- Car parking multiplayer challenges and achievements on windows 10/11<br />
51
- How to record car parking multiplayer gameplay on windows PC<br />
52
- Car parking multiplayer best settings and optimization for windows 10/11<br />
53
- Car parking multiplayer offline mode and data usage on windows PC<br />
54
- Car parking multiplayer minimum and recommended specs for windows 10/11<br />
55
- Car parking multiplayer community and forums on windows PC<br />
56
- How to stream car parking multiplayer on windows 10/11<br />
57
- Car parking multiplayer latest version and patch notes on windows PC<br />
58
- Car parking multiplayer FAQ and troubleshooting on windows 10/11<br />
59
- Car parking multiplayer pros and cons on windows PC<br />
60
- Car parking multiplayer memes and jokes on windows 10/11<br />
61
- How to backup and restore car parking multiplayer data on windows PC<br />
62
- Car parking multiplayer skins and accessories on windows 10/11<br />
63
- Car parking multiplayer news and events on windows PC<br />
64
- Car parking multiplayer wiki and database on windows 10/11<br />
65
- How to improve car parking multiplayer performance on windows PC<br />
66
- Car parking multiplayer fun facts and trivia on windows 10/11<br />
67
- How to create car parking multiplayer account on windows PC<br />
68
- Car parking multiplayer codes and coupons on windows 10/11<br />
69
- Car parking multiplayer videos and screenshots on windows PC<br />
70
- How to change car parking multiplayer language on windows 10/11<br />
71
- Car parking multiplayer keyboard shortcuts and commands on windows PC</p>
72
- <h4>Step 1: Download and install BlueStacks</h4>
73
- <p>Go to the official website of BlueStacks and click on the download button. The file size is about 500 MB, so it may take some time depending on your internet speed. Once the download is complete, run the installer and follow the instructions to install BlueStacks on your PC. You may need to grant some permissions and restart your PC to complete the installation.</p>
74
- <h4>Step 2: Launch BlueStacks and sign in with Google account</h4>
75
- <p>After installing BlueStacks, launch it from your desktop or start menu. You will see a welcome screen where you need to sign in with your Google account. If you don't have one, you can create one for free. Signing in with your Google account will allow you to access the Google Play Store and sync your game progress across devices.</p>
76
- <h4>Step 3: Search for Car Parking Multiplayer on Google Play Store</h4>
77
- <p>Once you are signed in, you will see the home screen of BlueStacks with various apps and games. Click on the Google Play Store icon on the bottom right corner. This will open the Google Play Store app on BlueStacks. In the search bar, type "Car Parking Multiplayer" and hit enter. You will see the game icon with the name and rating. Click on it to open the game page.</p>
78
- <h4>Step 4: Install and play Car Parking Multiplayer on PC</h4>
79
- <p>On the game page, you will see an install button. Click on it to start downloading and installing the game on your PC. The process may take a few minutes depending on your internet speed and PC specifications. Once the installation is done, you will see an open button. Click on it to launch the game on your PC. You can also find the game icon on the home screen of BlueStacks or in the app drawer. Now you can enjoy Car Parking Multiplayer on your PC with better graphics and controls.</p>
80
- <h3>Method 2: Using Google Play Games</h3>
81
- <p>Google Play Games is a service that allows you to play Android games on your PC without downloading a separate Android emulator. It works through a web browser and requires a stable internet connection and a compatible device. To use Google Play Games to download Car Parking Multiplayer for Windows 10, follow these steps:</p>
82
- <h4>Step 1: Download and install Google Play Games for PC</h4>
83
- <p>Go to the official website of Google Play Games and click on the download button. The file size is about 50 MB, so it should not take long to download. Once the download is complete, run the installer and follow the instructions to install Google Play Games on your PC. You may need to grant some permissions and restart your PC to complete the installation.</p>
84
- <h4>Step 2: Sign in with Google account and sync your progress</h4>
85
- <p>After installing Google Play Games, launch it from your desktop or start menu. You will see a welcome screen where you need to sign in with your Google account. If you don't have one, you can create one for free. Signing in with your Google account will allow you to access your game library and sync your game progress across devices.</p>
86
- <h4>Step 3: Browse and download Car Parking Multiplayer on PC</h4>
87
- <p>Once you are signed in, you will see your game library with various games that you have played or installed on your devices. You can also browse for new games by clicking on the browse button on the top left corner. This will open a web browser where you can search for games by genre, rating, popularity, or name. In the search bar, type "Car Parking Multiplayer" and hit enter. You will see the game icon with the name and rating. Click on it to open the game page.</p>
88
- <h4>Step 4: Enjoy Car Parking Multiplayer on PC with improved controls and graphics</h4>
89
- <p>On the game page, you will see a play button. Click on it to start playing the game on your PC. The game will run through your web browser, so make sure you have a stable internet connection and a compatible device. You can also adjust the settings such as graphics quality, sound volume, and controls by clicking on the menu button on the top right corner. Now you can enjoy Car Parking Multiplayer on your PC with improved controls and graphics.</p>
90
- <h2>Conclusion</h2>
91
- <p>Car Parking Multiplayer is a fun and realistic car simulation game that lets you drive, park, customize, and interact with over 130 cars in various environments. You can also play with other players in the multiplayer mode, where you can race, chat, trade cars, join police mode, or become a taxi, cargo, or delivery driver. The game also has a drone mode, where you can explore the world and take stunning screenshots, and a daily tasks and rewards system, where you can collect coins and presents by completing challenges and logging in each day. If you want to play Car Parking Multiplayer on your PC, you can use either BlueStacks or Google Play Games. Both methods require a Google account to access the game from the Google Play Store. BlueStacks is an Android emulator that allows you to play Android games on your PC with better graphics and controls. Google Play Games is a service that allows you to play Android games on your PC without downloading a separate Android emulator; it works through a web browser and requires a stable internet connection and a compatible device. We hope this article has helped you learn how to download Car Parking Multiplayer for Windows 10. If you have any questions or feedback, please let us know in the comments below. Happy parking!</p> <h2>FAQs</h2>
92
- <ul>
93
- <li>Q: Is Car Parking Multiplayer free to play?</li>
94
- <li>A: Yes, Car Parking Multiplayer is free to play on both mobile devices and PC. However, it contains ads and in-app purchases that can enhance your gameplay experience.</li>
95
- <li>Q: Can I play Car Parking Multiplayer offline?</li>
96
- <li>A: Yes, you can play Car Parking Multiplayer offline in the single-player mode, where you can complete parking and driving challenges. However, you will need an internet connection to play in the multiplayer mode, where you can interact with other players.</li>
97
- <li>Q: How can I save my game progress in Car Parking Multiplayer?</li>
98
- <li>A: You can save your game progress in Car Parking Multiplayer by signing in with your Google account. This will allow you to sync your game data across devices and access your game library from the Google Play Store.</li>
99
- <li>Q: How can I contact the developer of Car Parking Multiplayer?</li>
100
- <li>A: You can contact the developer of Car Parking Multiplayer by sending an email to [email protected] or by visiting their website at https://olzhass.com/.</li>
101
- <li>Q: How can I update Car Parking Multiplayer on my PC?</li>
102
- <li>A: You can update Car Parking Multiplayer on your PC by following the same steps as downloading it. If you are using BlueStacks, you can check for updates on the Google Play Store app on BlueStacks. If you are using Google Play Games, you can check for updates on the web browser where you play the game.</li>
103
- </ul></p>
spaces/1phancelerku/anime-remove-background/EM3D The Ultimate App for 3D Scanning and Printing.md DELETED
@@ -1,163 +0,0 @@
1
- <br />
2
- <h1>EM3D APK: How to Create and Share 3D Selfies with Your Smartphone</h1>
3
- <p>Have you ever wanted to create a 3D model of yourself, your friends, your pets, or anything else you can think of? Have you ever wondered how you would look like in 3D, or how you could print or share your 3D selfies with others? If you answered yes to any of these questions, then you might be interested in EM3D APK, a new app that lets you do all that and more with just your smartphone.</p>
4
- <h2>em 3d apk</h2><br /><p><b><b>Download File</b> &#9989; <a href="https://jinyurl.com/2uNUeu">https://jinyurl.com/2uNUeu</a></b></p><br /><br />
5
- <h2>What is EM3D APK?</h2>
6
- <p>EM3D APK is a 3D scanning app developed by Ethan Makes, a company that aims to make 3D scanning and printing easy, fun, and affordable for everyone. The app uses the front-facing TrueDepth 3D camera sensor on your device, which is the same technology that powers Face ID. If your device has Face ID, it has a TrueDepth camera. With EM3D APK, you can create printable 3D scans of yourself, your friends, your pets, or anything else your heart desires. Or, you can just share your 3D selfies and scans with your friends on social media. You can try it for free and only pay for what you want. No expensive hardware, subscriptions, or monthly fees are required.</p>
7
- <h2>Why use EM3D APK?</h2>
8
- <h3>The benefits of 3D scanning and printing</h3>
9
- <p>3D scanning and printing are not just cool technologies that belong in sci-fi movies. They are also practical and useful tools that can enhance your life in many ways. For example, you can use 3D scanning and printing to:</p>
10
- <ul>
11
- <li>Create personalized gifts, souvenirs, or memorabilia for yourself or your loved ones.</li>
12
- <li>Capture moments in 3D that photos or videos can't fully convey.</li>
13
- <li>Create custom accessories, jewelry, or clothing that fit your style and preferences.</li>
14
- <li>Repair or replace broken or missing parts of objects or devices.</li>
15
- <li>Create prototypes or models for your projects or hobbies.</li>
16
- <li>Learn about anatomy, geometry, physics, or art by exploring 3D models.</li>
17
- </ul>
18
- <h3>The fun and creative possibilities of 3D selfies</h3>
19
- <p>Besides being useful, 3D scanning and printing are also fun and creative activities that can unleash your imagination and express yourself. With EM3D APK, you can create 3D selfies that are more than just photos. You can:</p>
20
- <p>em 3d scanner app<br />
21
- em 3d scanning app by ethan makes<br />
22
- em 3d selfie app<br />
23
- em 3d printer app<br />
24
- em 3d camera app<br />
25
- em 3d scanner apk download<br />
26
- em 3d scanning app review<br />
27
- em 3d selfie apk free<br />
28
- em 3d printer apk mod<br />
29
- em 3d camera apk pro<br />
30
- how to use em 3d scanner app<br />
31
- how to export em 3d scanning app<br />
32
- how to share em 3d selfie app<br />
33
- how to print em 3d printer app<br />
34
- how to adjust em 3d camera app<br />
35
- best em 3d scanner app for iphone<br />
36
- best em 3d scanning app for android<br />
37
- best em 3d selfie app for ipad<br />
38
- best em 3d printer app for pc<br />
39
- best em 3d camera app for mac<br />
40
- em 3d scanner app vs kiri engine<br />
41
- em 3d scanning app vs qlone<br />
42
- em 3d selfie app vs bellus3d<br />
43
- em 3d printer app vs cura<br />
44
- em 3d camera app vs photogrammetry<br />
45
- em 3d scanner app tutorial<br />
46
- em 3d scanning app tips and tricks<br />
47
- em 3d selfie app features and benefits<br />
48
- em 3d printer app settings and options<br />
49
- em 3d camera app guide and manual<br />
50
- em 3d scanner app price and plans<br />
51
- em 3d scanning app cost and value<br />
52
- em 3d selfie app free and paid versions<br />
53
- em 3d printer app subscription and fees<br />
54
- em 3d camera app discount and coupon codes<br />
55
- download em 3d scanner app for ios<br />
56
- download em 3d scanning app for android<br />
57
- download em 3d selfie app for windows phone<br />
58
- download em 3d printer app for linux<br />
59
- download em 3d camera app for chromebook<br />
60
- install em 3d scanner app on iphone x<br />
61
- install em 3d scanning app on samsung galaxy s10<br />
62
- install em 3d selfie app on ipad pro<br />
63
- install em 3d printer app on macbook air<br />
64
- install em 3d camera app on dell laptop</p>
65
- <ul>
66
- <li>Experiment with different poses, expressions, outfits, or accessories.</li>
67
- <li>Add filters, stickers, text, or effects to your 3D selfies.</li>
68
- <li>Create a 360° selfie or a custom selfie video that shows off your 3D scan from different angles.</li>
69
- <li>Mix and match different parts of your 3D scans to create new combinations or hybrids.</li>
70
- <li>Transform your 3D scans into different shapes, sizes, colors, or textures.</li>
71
- <li>Make funny or scary faces that will surprise or amuse your friends.</li>
72
- </ul>
73
- <h3>The affordability and accessibility of the app</h3>
74
- <p>One of the best things about EM3D APK is that it makes 3D scanning and printing accessible and affordable for everyone. You don't need to buy any expensive or complicated equipment or software to use the app. All you need is your smartphone and the app, which is free to download and use. You only pay for what you want, such as exporting your 3D scans to STL or OBJ files, or ordering 3D prints from the app. The prices are reasonable and transparent, and you can choose from different options and sizes. You can also share your 3D scans for free with anyone who has the app, or on social media platforms such as Facebook, Instagram, or TikTok.</p>
75
- <h2>How to use EM3D APK?</h2>
76
- <h3>The requirements and compatibility of the app</h3>
77
- <p>Before you start using EM3D APK, you need to make sure that your device meets the requirements and is compatible with the app. The app works on devices that have a TrueDepth 3D camera sensor, which is the same technology that powers Face ID. If your device has Face ID, it has a TrueDepth camera. Currently, the app supports the following devices:</p>
78
- <ul>
79
- <li>iPhone X</li>
80
- <li>iPhone XS</li>
81
- <li>iPhone XS Max</li>
82
- <li>iPhone XR</li>
83
- <li>iPhone 11</li>
84
- <li>iPhone 11 Pro</li>
85
- <li>iPhone 11 Pro Max</li>
86
- <li>iPhone 12</li>
87
- <li>iPhone 12 Mini</li>
88
- <li>iPhone 12 Pro</li>
89
- <li>iPhone 12 Pro Max</li>
90
- <li>iPad Pro (2018 or later)</li>
91
- </ul>
92
- <p>If your device is not on this list, you can still use the app to view and share 3D scans, but you won't be able to create them. You also need to have iOS 13 or later installed on your device.</p>
93
- <h3>The steps to create a 3D scan with the app</h3>
94
- <p>Creating a 3D scan with EM3D APK is easy and fast. Here are the steps you need to follow:</p>
95
- <ol>
96
- <li>Open the app and tap on the Scan button.</li>
97
- <li>Select whether you want to scan yourself or someone else.</li>
98
- <li>Hold your device in front of your face or the face of the person you want to scan. Make sure that the face is fully visible and centered on the screen.</li>
99
- <li>Follow the instructions on the screen to move your head slowly from side to side, up and down, and forward and backward. The app will capture multiple images of your face from different angles and create a 3D mesh.</li>
100
- <li>When the scan is complete, you will see a preview of your 3D scan on the screen. You can rotate, zoom, or pan to view it from different perspectives.</li>
101
- <li>You can also edit your 3D scan by tapping on the Edit button. You can crop, smooth, or sculpt your scan, or add filters, stickers, text, or effects.</li>
102
- <li>When you are happy with your 3D scan, you can save it to your gallery by tapping on the Save button. You can also export it to STL or OBJ files, order a 3D print, or share it with others by tapping on the corresponding buttons.</li>
103
- </ol>
104
- <h3>The options to export, print, or share your 3D scan</h3>
105
- <p>Once you have created and saved your 3D scan, you have several options to export, print, or share it with others. Here are some of them:</p>
106
- <ul>
107
- <li>If you want to export your 3D scan to STL or OBJ files, which are common formats for 3D printing or modeling software, you can tap on the Export button and choose the file type and resolution you want. You can then download the file to your device or send it via email or cloud storage services.</li>
108
- <li>If you want to order a 3D print of your scan, you can tap on the Print button and choose from different options and sizes. You can also customize your print by choosing the material, color, finish, or base. You can then place your order and pay securely via PayPal or credit card. Your print will be delivered to your address within a few days.</li>
109
- <li>If you want to share your 3D scan with others who have the app, you can tap on the Share button and choose whether you want to share it privately or publicly. If you share it privately, you can send a link to anyone who has the app and they can view your scan in full 3D. If you share it publicly, your scan will be uploaded to the EM3D Gallery, where anyone can view it and comment on it.</li>
110
- <li>If you want to share your 3D scan on social media platforms such as Facebook, Instagram, or TikTok, you can tap on the Share button and choose whether you want to share a photo, a video, or a 360° selfie. If you share a photo, you can choose from different views and angles of your scan. If you share a video, you can choose from different animations and effects. If you share a 360° selfie, you can create a custom video that shows your scan from all sides.</li>
111
- </ul>
112
- <h2>Tips and tricks for EM3D APK</h2>
113
- <h3>How to improve the quality and accuracy of your 3D scan</h3>
114
- <p>To get the best results from EM3D APK, you need to follow some tips and tricks that can improve the quality and accuracy of your 3D scan. Here are some of them:</p>
115
- <ul>
116
- <li>Make sure that you have good lighting and avoid shadows or reflections on your face or the object you want to scan.</li>
117
- <li>Make sure that your face or the object is fully visible and centered on the screen, and that there is enough contrast between it and the background.</li>
118
- <li>Make sure that you move your head slowly and smoothly, and cover all the angles and directions as instructed by the app.</li>
119
- <li>Make sure that you keep a steady distance between your device and your face or the object, and avoid moving too close or too far.</li>
120
- <li>Make sure that you have a stable internet connection and enough storage space on your device.</li>
121
- </ul>
122
- <h3>How to use the Mirror Saver feature or the mirror adapter</h3>
123
- <p>If you want to scan yourself without holding your device, you can use the Mirror Saver feature or the mirror adapter. The Mirror Saver feature lets you scan yourself using a mirror, while the mirror adapter lets you attach your device to a mirror using a suction cup. Here are the steps to use them:</p>
124
- <ol>
125
- <li>Open the app and tap on the Scan button.</li>
126
- <li>Select whether you want to scan yourself or someone else.</li>
127
- <li>Select whether you want to use the Mirror Saver feature or the mirror adapter.</li>
128
- <li>If you use the Mirror Saver feature, place your device on a stable surface facing a mirror. Make sure that your face is fully visible and centered on the screen. If you use the mirror adapter, attach your device to a mirror using the suction cup. Make sure that your device is securely attached and aligned with the mirror.</li>
129
- <li>Follow the instructions on the screen to move your head slowly from side to side, up and down, and forward and backward. The app will capture multiple images of your face from different angles and create a 3D mesh.</li>
130
- <li>When the scan is complete, you will see a preview of your 3D scan on the screen. You can rotate, zoom, or pan to view it from different perspectives.</li>
131
- <li>You can also edit your 3D scan by tapping on the Edit button. You can crop, smooth, or sculpt your scan, or add filters, stickers, text, or effects.</li>
132
- <li>When you are happy with your 3D scan, you can save it to your gallery by tapping on the Save button. You can also export it to STL or OBJ files, order a 3D print, or share it with others by tapping on the corresponding buttons.</li>
133
- </ol>
134
- <h3>How to use the Bluetooth shutter button for tripod scanning</h3>
135
- <p>If you want to scan yourself or someone else without touching your device, you can use the Bluetooth shutter button for tripod scanning. The Bluetooth shutter button lets you control your device remotely using a wireless button that connects via Bluetooth. You can also use a tripod to hold your device in place. Here are the steps to use them:</p>
136
- <ol>
137
- <li>Open the app and tap on the Scan button.</li>
138
- <li>Select whether you want to scan yourself or someone else.</li>
139
- <li>Select whether you want to use the Bluetooth shutter button for tripod scanning.</li>
140
- <li>If you use the Bluetooth shutter button, pair it with your device via Bluetooth. Make sure that it is fully charged and within range of your device. If you use a tripod, attach your device to it using a mount. Make sure that it is securely attached and stable.</li>
141
- <li>Place your device in front of your face or the face of the person you want to scan. Make sure that the face is fully visible and centered on the screen.</li>
142
- <li>Press the Bluetooth shutter button to start the scan. Follow the instructions on the screen to move your head slowly from side to side, up and down, and forward and backward. The app will capture multiple images of your face from different angles and create a 3D mesh.</li>
143
- <li>When the scan is complete, you will see a preview of your 3D scan on the screen. You can rotate, zoom, or pan to view it from different perspectives.</li>
144
- <li>You can also edit your 3D scan by tapping on the Edit button. You can crop, smooth, or sculpt your scan, or add filters, stickers, text, or effects.</li>
145
- <li>When you are happy with your 3D scan, you can save it to your gallery by tapping on the Save button. You can also export it to STL or OBJ files, order a 3D print, or share it with others by tapping on the corresponding buttons.</li>
146
- </ol>
147
- <h2>Conclusion and FAQs</h2>
148
- <p>EM3D APK is a 3D scanning app that lets you create and share 3D selfies with your smartphone. It uses the TrueDepth 3D camera sensor on your device, which is the same technology that powers Face ID. You can create printable 3D scans of yourself, your friends, your pets, or anything else you can think of. Or, you can just share your 3D selfies and scans with your friends on social media. You can try it for free and only pay for what you want. No expensive hardware, subscriptions, or monthly fees are required.</p>
149
- <p>If you are interested in EM3D APK, you can download it from the App Store or visit the official website for more information. You can also check out some of the FAQs below:</p>
150
- <ul>
151
- <li><b>Q: How accurate are the 3D scans created by EM3D APK?</b></li>
152
- <li>A: The accuracy of the 3D scans depends on several factors, such as the lighting, the distance, the movement, and the editing. Generally, the app can create 3D scans with a resolution of up to 0.5 mm and an accuracy of up to 99%. However, some errors or distortions may occur due to various reasons. You can always edit your scans to correct them or improve them.</li>
153
- <li><b>Q: How secure are my 3D scans and personal data?</b></li>
154
- <li>A: Your 3D scans and personal data are secure and private with EM3D APK. The app does not store any of your data on its servers or share it with any third parties without your consent. Your data is only stored locally on your device or in your iCloud account if you enable iCloud backup. You can also delete your data at any time from the app settings.</li>
155
- <li><b>Q: How long does it take to create a 3D scan with EM3D APK?</b></li>
156
- <li>A: It takes about 10 to 15 seconds to create a 3D scan with EM3D APK. However, this may vary depending on your device performance, internet speed, and scan quality. You can also edit your scan after creating it, which may take some additional time.</li>
157
- <li><b>Q: How much does it cost to use EM3D APK?</b></li>
158
- <li>A: EM3D APK is free to download and use. You only pay for what you want, such as exporting your 3D scans to STL or OBJ files, or ordering 3D prints from the app. The prices are reasonable and transparent, and you can choose from different options and sizes. You can also share your 3D scans for free with anyone who has the app, or on social media platforms such as Facebook, Instagram, or TikTok.</li>
159
- <li><b>Q: How can I contact EM3D APK for support or feedback?</b></li>
160
- <li>A: You can contact EM3D APK for support or feedback by emailing [email protected] or visiting their website at https://ethanmakes.com/em-3d/. You can also follow them on Facebook, Instagram, or TikTok for updates and news.</li>
161
- </ul></p>
spaces/2023Liu2023/bingo/tests/kblob.ts DELETED
@@ -1,27 +0,0 @@
1
- import FormData from 'form-data'
2
-
3
- import { fetch } from '@/lib/isomorphic'
4
-
5
- const formData = new FormData()
6
-
7
- const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}}
8
-
9
- formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest))
10
-
11
-
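- // Send the knowledgeRequest payload as multipart/form-data to the kblob upload endpoint and log the raw text response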
12
- fetch('https://bing.vcanbb.top/images/kblob',
13
- {
14
- method: 'POST',
15
- body: formData.getBuffer(),
16
- headers: {
17
- "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"",
18
- "sec-ch-ua-mobile": "?0",
19
- "sec-ch-ua-platform": "\"Windows\"",
20
- "Referer": "https://bing.vcanbb.top/web/index.html",
21
- "Referrer-Policy": "origin-when-cross-origin",
22
- ...formData.getHeaders()
23
- }
24
-
25
- }
26
- ).then(res => res.text())
27
- .then(res => console.log('res', res))
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/utils/plot.py DELETED
@@ -1,72 +0,0 @@
1
- # coding: utf-8
2
-
3
- import os
4
- from pathlib import Path
5
-
6
- import matplotlib.pyplot as plt
7
- import numpy as np
8
- import pandas as pd
9
- from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
10
- from prettytable import PrettyTable
11
- from sklearn.metrics import roc_curve, auc
12
-
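- # Plot IJB-C verification ROC curves from saved score files and print a table of TPR at fixed FPR operating points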
13
- image_path = "/data/anxiang/IJB_release/IJBC"
14
- files = [
15
- "./ms1mv3_arcface_r100/ms1mv3_arcface_r100/ijbc.npy"
16
- ]
17
-
18
-
19
- def read_template_pair_list(path):
20
- pairs = pd.read_csv(path, sep=' ', header=None).values
21
- t1 = pairs[:, 0].astype(int)  # np.int was removed in recent NumPy; the builtin int is equivalent
22
- t2 = pairs[:, 1].astype(int)
23
- label = pairs[:, 2].astype(int)
24
- return t1, t2, label
25
-
26
-
27
- p1, p2, label = read_template_pair_list(
28
- os.path.join('%s/meta' % image_path,
29
- '%s_template_pair_label.txt' % 'ijbc'))
30
-
31
- methods = []
32
- scores = []
33
- for file in files:
34
- methods.append(file.split('/')[-2])
35
- scores.append(np.load(file))
36
-
37
- methods = np.array(methods)
38
- scores = dict(zip(methods, scores))
39
- colours = dict(
40
- zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
41
- x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]
42
- tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
43
- fig = plt.figure()
44
- for method in methods:
45
- fpr, tpr, _ = roc_curve(label, scores[method])
46
- roc_auc = auc(fpr, tpr)
47
- fpr = np.flipud(fpr)
48
- tpr = np.flipud(tpr) # select largest tpr at same fpr
49
- plt.plot(fpr,
50
- tpr,
51
- color=colours[method],
52
- lw=1,
53
- label=('[%s (AUC = %0.4f %%)]' %
54
- (method.split('-')[-1], roc_auc * 100)))
55
- tpr_fpr_row = []
56
- tpr_fpr_row.append("%s-%s" % (method, "IJBC"))
57
- for fpr_iter in np.arange(len(x_labels)):
58
- _, min_index = min(
59
- list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
60
- tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
61
- tpr_fpr_table.add_row(tpr_fpr_row)
62
- plt.xlim([10 ** -6, 0.1])
63
- plt.ylim([0.3, 1.0])
64
- plt.grid(linestyle='--', linewidth=1)
65
- plt.xticks(x_labels)
66
- plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
67
- plt.xscale('log')
68
- plt.xlabel('False Positive Rate')
69
- plt.ylabel('True Positive Rate')
70
- plt.title('ROC on IJB')
71
- plt.legend(loc="lower right")
72
- print(tpr_fpr_table)
spaces/AEUPH/CosmosTV/public/mpegts.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/AGITM/ToneCorrectionRecognition/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: ToneCorrectionRecognition
3
- emoji: 📊
4
- colorFrom: gray
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.19.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/bert/create_sent_embedding.py DELETED
@@ -1,89 +0,0 @@
1
- import pickle
2
- import fire
3
- import numpy as np
4
- import pandas as pd
5
- from tqdm import tqdm
6
-
7
-
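- # CLI tool (driven by fire) that encodes audio captions into sentence embeddings and saves them to pickle or HDF5 files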
8
- class EmbeddingExtractor(object):
9
-
10
- def extract_sentbert(self, caption_file: str, output: str, dev: bool=True, zh: bool=False):
11
- from sentence_transformers import SentenceTransformer
12
- lang2model = {
13
- "zh": "distiluse-base-multilingual-cased",
14
- "en": "bert-base-nli-mean-tokens"
15
- }
16
- lang = "zh" if zh else "en"
17
- model = SentenceTransformer(lang2model[lang])
18
-
19
- self.extract(caption_file, model, output, dev)
20
-
21
- def extract_originbert(self, caption_file: str, output: str, dev: bool=True, ip="localhost"):
22
- from bert_serving.client import BertClient
23
- client = BertClient(ip)
24
-
25
- self.extract(caption_file, client, output, dev)
26
-
27
- def extract(self, caption_file: str, model, output, dev: bool):
28
- caption_df = pd.read_json(caption_file, dtype={"key": str})
29
- embeddings = {}
30
-
31
- if dev:
32
- with tqdm(total=caption_df.shape[0], ascii=True) as pbar:
33
- for idx, row in caption_df.iterrows():
34
- caption = row["caption"]
35
- key = row["key"]
36
- cap_idx = row["caption_index"]
37
- embedding = model.encode([caption])
38
- embedding = np.array(embedding).reshape(-1)
39
- embeddings[f"{key}_{cap_idx}"] = embedding
40
- pbar.update()
41
-
42
- else:
43
- dump = {}
44
-
45
- with tqdm(total=caption_df.shape[0], ascii=True) as pbar:
46
- for idx, row in caption_df.iterrows():
47
- key = row["key"]
48
- caption = row["caption"]
49
- value = np.array(model.encode([caption])).reshape(-1)
50
-
51
- if key not in embeddings.keys():
52
- embeddings[key] = [value]
53
- else:
54
- embeddings[key].append(value)
55
-
56
- pbar.update()
57
-
58
- for key in embeddings:
59
- dump[key] = np.stack(embeddings[key])
60
-
61
- embeddings = dump
62
-
63
- with open(output, "wb") as f:
64
- pickle.dump(embeddings, f)
65
-
66
- def extract_sbert(self,
67
- input_json: str,
68
- output: str):
69
- from sentence_transformers import SentenceTransformer
70
- import json
71
- import torch
72
- from h5py import File
73
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
74
- model = SentenceTransformer("paraphrase-MiniLM-L6-v2")
75
- model = model.to(device)
76
- model.eval()
77
-
78
- data = json.load(open(input_json))["audios"]
79
- with torch.no_grad(), tqdm(total=len(data), ascii=True) as pbar, File(output, "w") as store:
80
- for sample in data:
81
- audio_id = sample["audio_id"]
82
- for cap in sample["captions"]:
83
- cap_id = cap["cap_id"]
84
- store[f"{audio_id}_{cap_id}"] = model.encode(cap["caption"])
85
- pbar.update()
86
-
87
-
88
- if __name__ == "__main__":
89
- fire.Fire(EmbeddingExtractor)
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn-detect_8xb16-300e_coco.py DELETED
@@ -1,23 +0,0 @@
1
- _base_ = 'yolov5_s-v61_syncbn_8xb16-300e_coco.py'
2
-
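- # Inference-oriented overrides of the base config: letterbox resize with mini padding and single-label NMS at test time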
3
- test_pipeline = [
4
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
5
- dict(
6
- type='LetterResize',
7
- scale=_base_.img_scale,
8
- allow_scale_up=True,
9
- use_mini_pad=True),
10
- dict(type='LoadAnnotations', with_bbox=True),
11
- dict(
12
- type='mmdet.PackDetInputs',
13
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
14
- 'scale_factor', 'pad_param'))
15
- ]
16
-
17
- val_dataloader = dict(
18
- dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None))
19
- test_dataloader = val_dataloader
20
-
21
- model = dict(
22
- test_cfg=dict(
23
- multi_label=False, score_thr=0.25, nms=dict(iou_threshold=0.45)))
spaces/AchyuthGamer/OpenGPT/client/css/button.css DELETED
@@ -1,26 +0,0 @@
1
- .button {
2
- display: flex;
3
- padding: 8px 12px;
4
- align-items: center;
5
- justify-content: center;
6
- border: 1px solid var(--conversations);
7
- border-radius: var(--border-radius-1);
8
- width: 100%;
9
- background: transparent;
10
- cursor: pointer;
11
- }
12
-
13
- .button span {
14
- color: var(--colour-3);
15
- font-size: 0.875rem;
16
- }
17
-
18
- .button i::before {
19
- margin-right: 8px;
20
- }
21
-
22
- @media screen and (max-width: 990px) {
23
- .button span {
24
- font-size: 0.75rem;
25
- }
26
- }
spaces/AgentVerse/agentVerse/agentverse/agents/__init__.py DELETED
@@ -1,20 +0,0 @@
1
- # from .agent import Agent
2
- from agentverse.registry import Registry
3
-
4
- agent_registry = Registry(name="AgentRegistry")
5
-
6
-
7
- from .base import BaseAgent
8
- from agentverse.agents.simulation_agent.conversation import ConversationAgent
9
- from agentverse.agents.simulation_agent.tool import ToolAgent
10
- from agentverse.agents.simulation_agent.prisoner_dilemma import (
11
- PoliceAgent,
12
- PrisonerAgent,
13
- )
14
-
15
- from agentverse.agents.tasksolving_agent.role_assigner import RoleAssignerAgent
16
- from agentverse.agents.tasksolving_agent.critic import CriticAgent
17
- from agentverse.agents.tasksolving_agent.evaluator import EvaluatorAgent
18
- from agentverse.agents.tasksolving_agent.solver import SolverAgent
19
- from agentverse.agents.tasksolving_agent.manager import ManagerAgent
20
- from agentverse.agents.tasksolving_agent.executor import ExecutorAgent
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvasdata-plugin.js DELETED
@@ -1,34 +0,0 @@
1
- import Methods from './canvasdata.js';
2
-
3
- const CanvasPool = Phaser.Display.Canvas.CanvasPool;
4
-
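- // Global Phaser plugin: acquires a pooled scratch canvas on start and releases it when the game is destroyed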
5
- class CanvasDataPlugin extends Phaser.Plugins.BasePlugin {
6
-
7
- constructor(pluginManager) {
8
- super(pluginManager);
9
- }
10
-
11
- start() {
12
- var eventEmitter = this.game.events;
13
- eventEmitter.on('destroy', this.destroy, this);
14
-
15
- this._tmpCanvas = CanvasPool.create2D(this);
16
- }
17
-
18
- destroy() {
19
- CanvasPool.remove(this._tmpCanvas);
20
- this._tmpCanvas = undefined;
21
- super.destroy();
22
- }
23
-
24
- get textureManager() {
25
- return this.game.textures;
26
- }
27
- }
28
-
29
- Object.assign(
30
- CanvasDataPlugin.prototype,
31
- Methods
32
- );
33
-
34
- export default CanvasDataPlugin;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/match/RefreshSymbolCache.js DELETED
@@ -1,15 +0,0 @@
1
- var RefreshSymbolCache = function () {
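- // Rebuild the match engine's symbol cache; tiles in the upper half of the board are ignored (null symbol)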
2
- this.match.refreshSymbols(function (tileXY, board) {
3
- // Return null in upper board
4
- if (tileXY.y < (board.height / 2)) {
5
- return null;
6
- }
7
- var chess = board.tileXYZToChess(tileXY.x, tileXY.y, this.chessTileZ);
8
- if (chess == null) {
9
- return null;
10
- }
11
- return chess.getData('symbol');
12
- }, this);
13
- };
14
-
15
- export default RefreshSymbolCache;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/SetChartData.js DELETED
@@ -1,17 +0,0 @@
1
- var SetChartData = function (datasetIndex, dataIndex, value) {
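- // Update a single data point; dataIndex may be a numeric index or a label string looked up in chart.data.labels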
2
- if (this.chart === undefined) {
3
- return this;
4
- }
5
-
6
- var dataset = this.getChartDataset(datasetIndex);
7
- if (typeof (dataIndex) === 'string') {
8
- var labels = this.chart.data.labels;
9
- dataIndex = labels.indexOf(dataIndex);
10
- if (dataIndex === -1) {
11
- return this;
12
- }
13
- }
14
- dataset.data[dataIndex] = value;
15
- return this;
16
- };
17
- export default SetChartData;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/ClickCell.js DELETED
@@ -1,20 +0,0 @@
1
- import Button from '../../../../plugins/input/button/Button.js';
2
- import EmitCellEvent from './EmitCellEvent.js';
3
-
4
- const GetValue = Phaser.Utils.Objects.GetValue;
5
-
6
- var ClickCell = function (table, tableConfig) {
7
- var buttonConfig = GetValue(tableConfig, 'click', undefined);
8
- if (buttonConfig === false) {
9
- return;
10
- } else if (buttonConfig === undefined) {
11
- buttonConfig = {};
12
- }
13
- buttonConfig.threshold = 10;
14
- table._click = new Button(table, buttonConfig);
15
- table._click.on('click', function (button, gameObject, pointer, event) {
16
- EmitCellEvent(this.eventEmitter, 'cell.click', gameObject, pointer.worldX, pointer.worldY, pointer, event);
17
- }, this);
18
- };
19
-
20
- export default ClickCell;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/label/methods/Methods.js DELETED
@@ -1,9 +0,0 @@
1
- import AppendText from '../../../../plugins/utils/text/AppendText.js';
2
- import ResetDisplayContent from './ResetDisplayContent.js';
3
-
4
- var methods = {
5
- appendText: AppendText,
6
- resetDisplayContent: ResetDisplayContent,
7
- }
8
-
9
- export default methods;
spaces/AlexZou/Deploy_Restoration/models/SCET.py DELETED
@@ -1,276 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
- from einops import rearrange
6
- from einops.layers.torch import Rearrange
7
- import numbers
8
-
9
- # LayerNorm
10
-
11
- def to_3d(x):
12
- return rearrange(x, 'b c h w -> b (h w) c')
13
-
14
- def to_4d(x,h,w):
15
- return rearrange(x, 'b (h w) c -> b c h w',h=h,w=w)
16
-
17
- class BiasFree_LayerNorm(nn.Module):
18
- def __init__(self, normalized_shape):
19
- super(BiasFree_LayerNorm, self).__init__()
20
- if isinstance(normalized_shape, numbers.Integral):
21
- normalized_shape = (normalized_shape,)
22
- normalized_shape = torch.Size(normalized_shape)
23
-
24
- assert len(normalized_shape) == 1
25
-
26
- self.weight = nn.Parameter(torch.ones(normalized_shape))
27
- self.normalized_shape = normalized_shape
28
-
29
- def forward(self, x):
30
- sigma = x.var(-1, keepdim=True, unbiased=False)
31
- return x / torch.sqrt(sigma+1e-5) * self.weight
32
-
33
- class WithBias_LayerNorm(nn.Module):
34
- def __init__(self, normalized_shape):
35
- super(WithBias_LayerNorm, self).__init__()
36
- if isinstance(normalized_shape, numbers.Integral):
37
- normalized_shape = (normalized_shape,)
38
- normalized_shape = torch.Size(normalized_shape)
39
-
40
- assert len(normalized_shape) == 1
41
-
42
- self.weight = nn.Parameter(torch.ones(normalized_shape))
43
- self.bias = nn.Parameter(torch.zeros(normalized_shape))
44
- self.normalized_shape = normalized_shape
45
-
46
- def forward(self, x):
47
- mu = x.mean(-1, keepdim=True)
48
- sigma = x.var(-1, keepdim=True, unbiased=False)
49
- return (x - mu) / torch.sqrt(sigma+1e-5) * self.weight + self.bias
50
-
51
- class LayerNorm(nn.Module):
52
- def __init__(self, dim, LayerNorm_type):
53
- super(LayerNorm, self).__init__()
54
- if LayerNorm_type =='BiasFree':
55
- self.body = BiasFree_LayerNorm(dim)
56
- else:
57
- self.body = WithBias_LayerNorm(dim)
58
-
59
- def forward(self, x):
60
- h, w = x.shape[-2:]
61
- return to_4d(self.body(to_3d(x)), h, w)
62
-
63
-
64
- ## Gated-Dconv Feed-Forward Network (GDFN)
65
- class GFeedForward(nn.Module):
66
- def __init__(self, dim, ffn_expansion_factor, bias):
67
- super(GFeedForward, self).__init__()
68
-
69
- hidden_features = int(dim * ffn_expansion_factor)
70
-
71
- self.project_in = nn.Conv2d(dim, hidden_features * 2, kernel_size=1, bias=bias)
72
-
73
- self.dwconv = nn.Conv2d(hidden_features * 2, hidden_features * 2, kernel_size=3, stride=1, padding=1,
74
- groups=hidden_features * 2, bias=bias)
75
-
76
- self.project_out = nn.Conv2d(hidden_features, dim, kernel_size=1, bias=bias)
77
-
78
- def forward(self, x):
79
- x = self.project_in(x)
80
- x1, x2 = self.dwconv(x).chunk(2, dim=1)
81
- x = F.gelu(x1) * x2
82
- x = self.project_out(x)
83
- return x
84
-
85
-
86
- ##########################################################################
87
- ## Multi-DConv Head Transposed Self-Attention (MDTA)
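- # Attention is computed across channels (a C x C map per head) rather than spatial positions, so cost stays linear in H*W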
88
- class Attention(nn.Module):
89
- def __init__(self, dim, num_heads, bias):
90
- super(Attention, self).__init__()
91
- self.num_heads = num_heads
92
- self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
93
-
94
- self.qkv = nn.Conv2d(dim, dim * 3, kernel_size=1, bias=bias)
95
- self.qkv_dwconv = nn.Conv2d(dim * 3, dim * 3, kernel_size=3, stride=1, padding=1, groups=dim * 3, bias=bias)
96
- self.project_out = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
97
-
98
- def forward(self, x):
99
- b, c, h, w = x.shape
100
-
101
- qkv = self.qkv_dwconv(self.qkv(x))
102
- q, k, v = qkv.chunk(3, dim=1)
103
-
104
- q = rearrange(q, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
105
- k = rearrange(k, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
106
- v = rearrange(v, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
107
-
108
- q = torch.nn.functional.normalize(q, dim=-1)
109
- k = torch.nn.functional.normalize(k, dim=-1)
110
-
111
- attn = (q @ k.transpose(-2, -1)) * self.temperature
112
- attn = attn.softmax(dim=-1)
113
-
114
- out = (attn @ v)
115
-
116
- out = rearrange(out, 'b head c (h w) -> b (head c) h w', head=self.num_heads, h=h, w=w)
117
-
118
- out = self.project_out(out)
119
- return out
120
-
121
-
122
- class TransformerBlock(nn.Module):
123
- def __init__(self, dim=48, num_heads=8, ffn_expansion_factor=2.66, bias=False, LayerNorm_type=WithBias_LayerNorm):
124
- super(TransformerBlock, self).__init__()
125
-
126
- self.norm1 = LayerNorm(dim, LayerNorm_type)
127
- self.attn = Attention(dim, num_heads, bias)
128
- self.norm2 = LayerNorm(dim, LayerNorm_type)
129
- self.ffn = GFeedForward(dim, ffn_expansion_factor, bias)
130
-
131
- def forward(self, x):
132
- x = x + self.attn(self.norm1(x))
133
- x = x + self.ffn(self.norm2(x))
134
-
135
- return x
136
-
137
-
138
- class BackBoneBlock(nn.Module):
139
- def __init__(self, num, fm, **args):
140
- super().__init__()
141
- self.arr = nn.ModuleList([])
142
- for _ in range(num):
143
- self.arr.append(fm(**args))
144
-
145
- def forward(self, x):
146
- for block in self.arr:
147
- x = block(x)
148
- return x
149
-
150
-
151
- class PAConv(nn.Module):
152
-
153
- def __init__(self, nf, k_size=3):
154
- super(PAConv, self).__init__()
155
- self.k2 = nn.Conv2d(nf, nf, 1) # 1x1 convolution nf->nf
156
- self.sigmoid = nn.Sigmoid()
157
- self.k3 = nn.Conv2d(nf, nf, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) # 3x3 convolution
158
- self.k4 = nn.Conv2d(nf, nf, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) # 3x3 convolution
159
-
160
- def forward(self, x):
161
- y = self.k2(x)
162
- y = self.sigmoid(y)
163
-
164
- out = torch.mul(self.k3(x), y)
165
- out = self.k4(out)
166
-
167
- return out
168
-
169
-
170
- class SCPA(nn.Module):
171
- """SCPA is modified from SCNet (Jiang-Jiang Liu et al. Improving Convolutional Networks with Self-Calibrated Convolutions. In CVPR, 2020)
172
- Github: https://github.com/MCG-NKU/SCNet
173
- """
174
-
175
- def __init__(self, nf, reduction=2, stride=1, dilation=1):
176
- super(SCPA, self).__init__()
177
- group_width = nf // reduction
178
-
179
- self.conv1_a = nn.Conv2d(nf, group_width, kernel_size=1, bias=False)
180
- self.conv1_b = nn.Conv2d(nf, group_width, kernel_size=1, bias=False)
181
-
182
- self.k1 = nn.Sequential(
183
- nn.Conv2d(
184
- group_width, group_width, kernel_size=3, stride=stride,
185
- padding=dilation, dilation=dilation,
186
- bias=False)
187
- )
188
-
189
- self.PAConv = PAConv(group_width)
190
-
191
- self.conv3 = nn.Conv2d(
192
- group_width * reduction, nf, kernel_size=1, bias=False)
193
-
194
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
195
-
196
- def forward(self, x):
197
- residual = x
198
-
199
- out_a = self.conv1_a(x)
200
- out_b = self.conv1_b(x)
201
- out_a = self.lrelu(out_a)
202
- out_b = self.lrelu(out_b)
203
-
204
- out_a = self.k1(out_a)
205
- out_b = self.PAConv(out_b)
206
- out_a = self.lrelu(out_a)
207
- out_b = self.lrelu(out_b)
208
-
209
- out = self.conv3(torch.cat([out_a, out_b], dim=1))
210
- out += residual
211
-
212
- return out
213
-
214
-
215
- class SCET(nn.Module):
216
- def __init__(self, hiddenDim=32, mlpDim=128, scaleFactor=2):
217
- super().__init__()
218
- self.conv3 = nn.Conv2d(3, hiddenDim,
219
- kernel_size=3, padding=1)
220
-
221
- lamRes = torch.nn.Parameter(torch.ones(1))
222
- lamX = torch.nn.Parameter(torch.ones(1))
223
- self.adaptiveWeight = (lamRes, lamX)
224
- if scaleFactor == 3:
225
- num_heads = 7
226
- else:
227
- num_heads = 8
228
- self.path1 = nn.Sequential(
229
- BackBoneBlock(16, SCPA, nf=hiddenDim, reduction=2, stride=1, dilation=1),
230
- BackBoneBlock(1, TransformerBlock,
231
- dim=hiddenDim, num_heads=num_heads, ffn_expansion_factor=2.66, bias=False, LayerNorm_type=WithBias_LayerNorm),
232
- nn.Conv2d(hiddenDim, hiddenDim, kernel_size=3, padding=1),
233
- nn.PixelShuffle(scaleFactor),
234
- nn.Conv2d(hiddenDim // (scaleFactor ** 2),
235
- 3, kernel_size=3, padding=1),
236
- )
237
-
238
- self.path2 = nn.Sequential(
239
- nn.PixelShuffle(scaleFactor),
240
- nn.Conv2d(hiddenDim // (scaleFactor ** 2),
241
- 3, kernel_size=3, padding=1),
242
- )
243
-
244
- def forward(self, x):
245
- x = self.conv3(x)
246
- x1, x2 = self.path1(x), self.path2(x)
247
- return x1 + x2
248
-
249
-
250
- def init_weights(self, pretrained=None, strict=True):
251
- """Init weights for models.
252
- Args:
253
- pretrained (str, optional): Path for pretrained weights. If given
254
- None, pretrained weights will not be loaded. Defaults to None.
255
- strict (boo, optional): Whether strictly load the pretrained model.
256
- Defaults to True.
257
- """
258
- if isinstance(pretrained, str):
259
- # NOTE: get_root_logger / load_checkpoint are not imported in this file;
- # they are assumed here to come from the MMCV / MMEditing toolchain.
- from mmcv.runner import load_checkpoint
- from mmedit.utils import get_root_logger
- logger = get_root_logger()
260
- load_checkpoint(self, pretrained, strict=strict, logger=logger)
261
- elif pretrained is None:
262
- pass # use default initialization
263
- else:
264
- raise TypeError('"pretrained" must be a str or None. '
265
- f'But received {type(pretrained)}.')
266
-
267
-
268
-
269
- if __name__ == '__main__':
270
-
271
- from torchstat import stat
272
- import time
273
- import torchsummary
274
-
275
- net = SCET(32, 128, 4).cuda()
276
- torchsummary.summary(net, (3, 48, 48))
spaces/AlgoveraAI/medical-image-classification/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: medical-image-classification
3
- emoji: 🏥
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 2.9.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/Amon1/ChatGPTForAcadamic/toolbox.py DELETED
@@ -1,344 +0,0 @@
1
- import markdown, mdtex2html, threading, importlib, traceback, importlib, inspect, re
2
- from show_math import convert as convert_math
3
- from functools import wraps, lru_cache
4
-
5
- def get_reduce_token_percent(text):
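- # Parse the "maximum context length ..." error message and return (ratio of text to keep, overflow token count as a string)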
6
- try:
7
- # text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
8
- pattern = r"(\d+)\s+tokens\b"
9
- match = re.findall(pattern, text)
10
- EXCEED_ALLO = 500 # leave a little headroom, otherwise the reply fails because too few tokens remain
11
- max_limit = float(match[0]) - EXCEED_ALLO
12
- current_tokens = float(match[1])
13
- ratio = max_limit/current_tokens
14
- assert ratio > 0 and ratio < 1
15
- return ratio, str(int(current_tokens-max_limit))
16
- except:
17
- return 0.5, '不详'
18
-
19
- def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt='', long_connection=True):
20
- """
21
- Call the simple predict_no_ui interface while keeping a minimal UI heartbeat; when the conversation gets too long, it is automatically truncated by bisection
22
- i_say: current input
23
- i_say_show_user: the current input shown in the chat UI; e.g. when feeding in a whole file you don't want to dump its full content into the chat window
24
- chatbot: chat UI handle
25
- top_p, temperature: gpt parameters
26
- history: gpt parameter, conversation history
27
- sys_prompt: gpt parameter, sys_prompt
28
- long_connection: whether to use the more stable connection mode (recommended)
29
- """
30
- import time
31
- from predict import predict_no_ui, predict_no_ui_long_connection
32
- from toolbox import get_conf
33
- TIMEOUT_SECONDS, MAX_RETRY = get_conf('TIMEOUT_SECONDS', 'MAX_RETRY')
34
- # when multi-threading, we need a mutable structure to pass information between threads
35
- # a list is the simplest mutable one: slot 0 holds the gpt output, slot 1 carries error messages
36
- mutable = [None, '']
37
- # multi-threading worker
38
- def mt(i_say, history):
39
- while True:
40
- try:
41
- if long_connection:
42
- mutable[0] = predict_no_ui_long_connection(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
43
- else:
44
- mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
45
- break
46
- except ConnectionAbortedError as token_exceeded_error:
47
- # try to compute a ratio so as to keep as much text as possible
48
- p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
49
- if len(history) > 0:
50
- history = [his[ int(len(his) *p_ratio): ] for his in history if his is not None]
51
- else:
52
- i_say = i_say[: int(len(i_say) *p_ratio) ]
53
- mutable[1] = f'警告,文本过长将进行截断,Token溢出数:{n_exceed},截断比例:{(1-p_ratio):.0%}。'
54
- except TimeoutError as e:
55
- mutable[0] = '[Local Message] 请求超时。'
56
- raise TimeoutError
57
- except Exception as e:
58
- mutable[0] = f'[Local Message] 异常:{str(e)}.'
59
- raise RuntimeError(f'[Local Message] 异常:{str(e)}.')
60
- # spawn a new thread to send the http request
61
- thread_name = threading.Thread(target=mt, args=(i_say, history)); thread_name.start()
62
- # the original thread keeps updating the UI, runs a timeout countdown, and waits for the worker thread to finish
63
- cnt = 0
64
- while thread_name.is_alive():
65
- cnt += 1
66
- chatbot[-1] = (i_say_show_user, f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}"+''.join(['.']*(cnt%4)))
67
- yield chatbot, history, '正常'
68
- time.sleep(1)
69
- # pull the gpt output back out of the mutable list
70
- gpt_say = mutable[0]
71
- if gpt_say=='[Local Message] Failed with timeout.': raise TimeoutError
72
- return gpt_say
73
-
74
- def write_results_to_file(history, file_name=None):
75
- """
76
- Write the conversation history to a file in Markdown format. If no file name is given, one is generated from the current time.
77
- """
78
- import os, time
79
- if file_name is None:
80
- # file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
81
- file_name = 'chatGPT分析报告' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
82
- os.makedirs('./gpt_log/', exist_ok=True)
83
- with open(f'./gpt_log/{file_name}', 'w', encoding = 'utf8') as f:
84
- f.write('# chatGPT 分析报告\n')
85
- for i, content in enumerate(history):
86
- try: # the trigger for this bug hasn't been found; work around it like this for now
87
- if type(content) != str: content = str(content)
88
- except:
89
- continue
90
- if i%2==0: f.write('## ')
91
- f.write(content)
92
- f.write('\n\n')
93
- res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
94
- print(res)
95
- return res
96
-
97
- def regular_txt_to_markdown(text):
98
- """
99
- Convert plain text into Markdown-formatted text.
100
- """
101
- text = text.replace('\n', '\n\n')
102
- text = text.replace('\n\n\n', '\n\n')
103
- text = text.replace('\n\n\n', '\n\n')
104
- return text
105
-
106
- def CatchException(f):
107
- """
108
- Decorator that catches exceptions raised inside f, wraps them into the returned generator, and shows them in the chat.
109
- """
110
- @wraps(f)
111
- def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
112
- try:
113
- yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
114
- except Exception as e:
115
- from check_proxy import check_proxy
116
- from toolbox import get_conf
117
- proxies, = get_conf('proxies')
118
- tb_str = '```\n' + traceback.format_exc() + '```'
119
- if len(chatbot) == 0: chatbot.append(["插件调度异常","异常原因"])
120
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
121
- yield chatbot, history, f'异常 {e}'
122
- return decorated
123
-
124
- def HotReload(f):
125
- """
126
- Decorator that hot-reloads function plugins.
127
- """
128
- @wraps(f)
129
- def decorated(*args, **kwargs):
130
- fn_name = f.__name__
131
- f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
132
- yield from f_hot_reload(*args, **kwargs)
133
- return decorated
134
-
135
- def report_execption(chatbot, history, a, b):
136
- """
137
- Append error information to the chatbot.
138
- """
139
- chatbot.append((a, b))
140
- history.append(a); history.append(b)
141
-
142
- def text_divide_paragraph(text):
143
- """
144
- Split the text on paragraph separators and generate HTML with paragraph tags.
145
- """
146
- if '```' in text:
147
- # careful input
148
- return text
149
- else:
150
- # wtf input
151
- lines = text.split("\n")
152
- for i, line in enumerate(lines):
153
- lines[i] = lines[i].replace(" ", "&nbsp;")
154
- text = "</br>".join(lines)
155
- return text
156
-
157
- def markdown_convertion(txt):
158
- """
159
- Convert Markdown-formatted text to HTML. If it contains math formulas, convert the formulas to HTML first.
160
- """
161
- pre = '<div class="markdown-body">'
162
- suf = '</div>'
163
- if ('$' in txt) and ('```' not in txt):
164
- return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + '<br><br>' + markdown.markdown(convert_math(txt, splitParagraphs=False),extensions=['fenced_code','tables']) + suf
165
- else:
166
- return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + suf
167
-
168
- def close_up_code_segment_during_stream(gpt_reply):
169
- """
170
- When gpt is midway through emitting a code block (the opening ``` has been produced but the closing ``` has not), append the missing closing ```
171
- """
172
- if '```' not in gpt_reply: return gpt_reply
173
- if gpt_reply.endswith('```'): return gpt_reply
174
-
175
- # having excluded the two cases above, we
176
- segments = gpt_reply.split('```')
177
- n_mark = len(segments) - 1
178
- if n_mark % 2 == 1:
179
- # print('输出代码片段中!')
180
- return gpt_reply+'\n```'
181
- else:
182
- return gpt_reply
183
-
184
-
185
-
186
- def format_io(self, y):
187
- """
188
- Parse input and output into HTML: split the input part of the last item in y into paragraphs, and convert the Markdown and math formulas in the output part to HTML.
189
- """
190
- if y is None or y == []: return []
191
- i_ask, gpt_reply = y[-1]
192
- i_ask = text_divide_paragraph(i_ask) # the input part is too free-form, so preprocess it first
193
- gpt_reply = close_up_code_segment_during_stream(gpt_reply) # when a code block is cut off mid-stream, try to append the closing ```
194
- y[-1] = (
195
- None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables']),
196
- None if gpt_reply is None else markdown_convertion(gpt_reply)
197
- )
198
- return y
199
-
200
-
201
- def find_free_port():
202
- """
203
- Return an unused port that is currently available on this system.
204
- """
205
- import socket
206
- from contextlib import closing
207
- with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
208
- s.bind(('', 0))
209
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
210
- return s.getsockname()[1]
211
-
212
-
213
- def extract_archive(file_path, dest_dir):
214
- import zipfile
215
- import tarfile
216
- import os
217
- # Get the file extension of the input file
218
- file_extension = os.path.splitext(file_path)[1]
219
-
220
- # Extract the archive based on its extension
221
- if file_extension == '.zip':
222
- with zipfile.ZipFile(file_path, 'r') as zipobj:
223
- zipobj.extractall(path=dest_dir)
224
- print("Successfully extracted zip archive to {}".format(dest_dir))
225
-
226
- elif file_extension in ['.tar', '.gz', '.bz2']:
227
- with tarfile.open(file_path, 'r:*') as tarobj:
228
- tarobj.extractall(path=dest_dir)
229
- print("Successfully extracted tar archive to {}".format(dest_dir))
230
-
231
- # Third-party library; requires pip install rarfile beforehand
232
- # On Windows, WinRAR must also be installed and its directory added to the Path environment variable, e.g. "C:\Program Files\WinRAR"
233
- elif file_extension == '.rar':
234
- try:
235
- import rarfile
236
- with rarfile.RarFile(file_path) as rf:
237
- rf.extractall(path=dest_dir)
238
- print("Successfully extracted rar archive to {}".format(dest_dir))
239
- except:
240
- print("Rar format requires additional dependencies to install")
241
- return '\n\n需要安装pip install rarfile来解压rar文件'
242
-
243
- # Third-party library; requires pip install py7zr beforehand
244
- elif file_extension == '.7z':
245
- try:
246
- import py7zr
247
- with py7zr.SevenZipFile(file_path, mode='r') as f:
248
- f.extractall(path=dest_dir)
249
- print("Successfully extracted 7z archive to {}".format(dest_dir))
250
- except:
251
- print("7z format requires additional dependencies to install")
252
- return '\n\n需要安装pip install py7zr来解压7z文件'
253
- else:
254
- return ''
255
- return ''
256
-
257
- def find_recent_files(directory):
258
- """
259
- me: find files that is created with in one minutes under a directory with python, write a function
260
- gpt: here it is!
261
- """
262
- import os
263
- import time
264
- current_time = time.time()
265
- one_minute_ago = current_time - 60
266
- recent_files = []
267
-
268
- for filename in os.listdir(directory):
269
- file_path = os.path.join(directory, filename)
270
- if file_path.endswith('.log'): continue
271
- created_time = os.path.getctime(file_path)
272
- if created_time >= one_minute_ago:
273
- if os.path.isdir(file_path): continue
274
- recent_files.append(file_path)
275
-
276
- return recent_files
277
-
278
-
279
- def on_file_uploaded(files, chatbot, txt):
280
- if len(files) == 0: return chatbot, txt
281
- import shutil, os, time, glob
282
- from toolbox import extract_archive
283
- try: shutil.rmtree('./private_upload/')
284
- except: pass
285
- time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
286
- os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
287
- err_msg = ''
288
- for file in files:
289
- file_origin_name = os.path.basename(file.orig_name)
290
- shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
291
- err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
292
- dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
293
- moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
294
- txt = f'private_upload/{time_tag}'
295
- moved_files_str = '\t\n\n'.join(moved_files)
296
- chatbot.append(['我上传了文件,请查收',
297
- f'[Local Message] 收到以下文件: \n\n{moved_files_str}'+
298
- f'\n\n调用路径参数已自动修正到: \n\n{txt}'+
299
- f'\n\n现在您点击任意实验功能时,以上文件将被作为输入参数'+err_msg])
300
- return chatbot, txt
301
-
302
-
303
- def on_report_generated(files, chatbot):
304
- from toolbox import find_recent_files
305
- report_files = find_recent_files('gpt_log')
306
- if len(report_files) == 0: return files, chatbot
307
- # files.extend(report_files)
308
- chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
309
- return report_files, chatbot
310
-
311
- @lru_cache(maxsize=128)
312
- def read_single_conf_with_lru_cache(arg):
313
- try: r = getattr(importlib.import_module('config_private'), arg)
314
- except: r = getattr(importlib.import_module('config'), arg)
315
- # When reading API_KEY, check whether the user forgot to edit the config
316
- if arg=='API_KEY':
317
- # A valid API_KEY is "sk-" followed by a 48-character mix of letters and digits
318
- API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", r)
319
- if API_MATCH:
320
- print(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
321
- else:
322
- assert False, "正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
323
- "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)"
324
- if arg=='proxies':
325
- if r is None:
326
- print('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。')
327
- else:
328
- print('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
329
- assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
330
- return r
331
-
332
- def get_conf(*args):
333
- # It is recommended to keep your secrets, such as the API key and proxy URL, in a copy named config_private.py, so they are not accidentally pushed to GitHub and seen by others
334
- res = []
335
- for arg in args:
336
- r = read_single_conf_with_lru_cache(arg)
337
- res.append(r)
338
- return res
339
-
340
- def clear_line_break(txt):
341
- txt = txt.replace('\n', ' ')
342
- txt = txt.replace(' ', ' ')
343
- txt = txt.replace(' ', ' ')
344
- return txt
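The helpers above carry no project state, so they can be exercised directly. A minimal usage sketch, assuming the module is importable as `toolbox`; the inputs are illustrative only.

```python
# Minimal usage sketch for the helpers above (illustrative inputs only).
from toolbox import find_free_port, regular_txt_to_markdown, clear_line_break

port = find_free_port()                              # an unused local port
md = regular_txt_to_markdown("line one\nline two")   # -> "line one\n\nline two"
flat = clear_line_break("a\nb   c")                  # -> "a b c"
print(port, md, flat)
```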
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/app.py DELETED
@@ -1,75 +0,0 @@
1
- import streamlit as st
2
- from PIL import Image
3
- import imageio.v3 as iio
4
- from inference import inference
5
- from src.utils.opt import Opts
6
- import os
7
-
8
- st.set_page_config(layout='wide')
9
-
10
- st.markdown(
11
- """
12
- <style>
13
- div[data-testid="column"]:nth-of-type(1)
14
- {
15
-
16
- }
17
-
18
- div[data-testid="column"]:nth-of-type(2)
19
- {
20
-
21
- }
22
- </style>
23
- """,unsafe_allow_html=True
24
- )
25
-
26
- col1, col2, col3 = st.columns(3)
27
-
28
- if 'counter' not in st.session_state:
29
- st.session_state.video_path = None
30
- st.session_state.image = None
31
- st.session_state.counter = 0
32
-
33
- def showVideo(image):
34
- if st.session_state.image is not None:
35
- cfg = Opts(cfg="configs/style_inference.yml").parse_args()
36
- result = inference(cfg, "render_test", image=image)
37
- st.session_state.video_path = result["video_path"]
38
- st.session_state.counter += 1
39
- else:
40
- col2.write("No uploaded image")
41
-
42
- with col1:
43
- col1.subheader("Source multiview images")
44
- filteredImages = []
45
- for image_file in os.listdir('data/nerf_llff_data/trex/streamlit_images'):
46
- filteredImages.append(Image.open(os.path.join('data/nerf_llff_data/trex/streamlit_images', image_file)))
47
- id = 0
48
- for img in range(0, len(filteredImages), 4):
49
- cols = col1.columns(4)
50
- cols[0].image(filteredImages[id], use_column_width=True)
51
- id +=1
52
- cols[1].image(filteredImages[id], use_column_width=True)
53
- id +=1
54
- cols[2].image(filteredImages[id], use_column_width=True)
55
- id +=1
56
- cols[3].image(filteredImages[id], use_column_width=True)
57
- id +=1
58
-
59
-
60
- with col2:
61
- col2.subheader("Style image")
62
- uploaded_file = col2.file_uploader("Choose a image file")
63
- if uploaded_file:
64
- st.session_state.image = Image.open(uploaded_file)
65
- img = col2.image(st.session_state.image, caption='Style Image', use_column_width=True)
66
- col2.button('Run Style Transfer', on_click=showVideo, args=([st.session_state.image]))
67
-
68
-
69
- col3.subheader("Style videos")
70
- if st.session_state.counter > 0:
71
- video_file = open(st.session_state.video_path, 'rb')
72
- video_bytes = video_file.read()
73
- col3.video(video_bytes)
74
-
75
-
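The app above hinges on Streamlit's session state: keys are initialised once per browser session and then mutated from a button callback, which is what lets the rendered video survive script reruns. A stripped-down sketch of that pattern (illustrative only, not the app itself):

```python
# Sketch of the session-state + callback pattern the app relies on; the
# counter stands in for the app's video_path / counter bookkeeping.
import streamlit as st

if "counter" not in st.session_state:    # runs only on the first script execution
    st.session_state.counter = 0

def on_run():
    st.session_state.counter += 1        # survives the rerun triggered by the click

st.button("Run", on_click=on_run)
st.write(f"Button clicked {st.session_state.counter} times")
```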
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/audio_diffusion/mel.py DELETED
@@ -1,179 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import numpy as np # noqa: E402
17
-
18
- from ...configuration_utils import ConfigMixin, register_to_config
19
- from ...schedulers.scheduling_utils import SchedulerMixin
20
-
21
-
22
- try:
23
- import librosa # noqa: E402
24
-
25
- _librosa_can_be_imported = True
26
- _import_error = ""
27
- except Exception as e:
28
- _librosa_can_be_imported = False
29
- _import_error = (
30
- f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it."
31
- )
32
-
33
-
34
- from PIL import Image # noqa: E402
35
-
36
-
37
- class Mel(ConfigMixin, SchedulerMixin):
38
- """
39
- Parameters:
40
- x_res (`int`):
41
- x resolution of spectrogram (time).
42
- y_res (`int`):
43
- y resolution of spectrogram (frequency bins).
44
- sample_rate (`int`):
45
- Sample rate of audio.
46
- n_fft (`int`):
47
- Number of Fast Fourier Transforms.
48
- hop_length (`int`):
49
- Hop length (a higher number is recommended if `y_res` < 256).
50
- top_db (`int`):
51
- Loudest decibel value.
52
- n_iter (`int`):
53
- Number of iterations for Griffin-Lim Mel inversion.
54
- """
55
-
56
- config_name = "mel_config.json"
57
-
58
- @register_to_config
59
- def __init__(
60
- self,
61
- x_res: int = 256,
62
- y_res: int = 256,
63
- sample_rate: int = 22050,
64
- n_fft: int = 2048,
65
- hop_length: int = 512,
66
- top_db: int = 80,
67
- n_iter: int = 32,
68
- ):
69
- self.hop_length = hop_length
70
- self.sr = sample_rate
71
- self.n_fft = n_fft
72
- self.top_db = top_db
73
- self.n_iter = n_iter
74
- self.set_resolution(x_res, y_res)
75
- self.audio = None
76
-
77
- if not _librosa_can_be_imported:
78
- raise ValueError(_import_error)
79
-
80
- def set_resolution(self, x_res: int, y_res: int):
81
- """Set resolution.
82
-
83
- Args:
84
- x_res (`int`):
85
- x resolution of spectrogram (time).
86
- y_res (`int`):
87
- y resolution of spectrogram (frequency bins).
88
- """
89
- self.x_res = x_res
90
- self.y_res = y_res
91
- self.n_mels = self.y_res
92
- self.slice_size = self.x_res * self.hop_length - 1
93
-
94
- def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None):
95
- """Load audio.
96
-
97
- Args:
98
- audio_file (`str`):
99
- An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation.
100
- raw_audio (`np.ndarray`):
101
- The raw audio file as a NumPy array.
102
- """
103
- if audio_file is not None:
104
- self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr)
105
- else:
106
- self.audio = raw_audio
107
-
108
- # Pad with silence if necessary.
109
- if len(self.audio) < self.x_res * self.hop_length:
110
- self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))])
111
-
112
- def get_number_of_slices(self) -> int:
113
- """Get number of slices in audio.
114
-
115
- Returns:
116
- `int`:
117
- Number of spectrograms the audio can be sliced into.
118
- """
119
- return len(self.audio) // self.slice_size
120
-
121
- def get_audio_slice(self, slice: int = 0) -> np.ndarray:
122
- """Get slice of audio.
123
-
124
- Args:
125
- slice (`int`):
126
- Slice number of audio (out of `get_number_of_slices()`).
127
-
128
- Returns:
129
- `np.ndarray`:
130
- The audio slice as a NumPy array.
131
- """
132
- return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)]
133
-
134
- def get_sample_rate(self) -> int:
135
- """Get sample rate.
136
-
137
- Returns:
138
- `int`:
139
- Sample rate of audio.
140
- """
141
- return self.sr
142
-
143
- def audio_slice_to_image(self, slice: int) -> Image.Image:
144
- """Convert slice of audio to spectrogram.
145
-
146
- Args:
147
- slice (`int`):
148
- Slice number of audio to convert (out of `get_number_of_slices()`).
149
-
150
- Returns:
151
- `PIL Image`:
152
- A grayscale image of `x_res x y_res`.
153
- """
154
- S = librosa.feature.melspectrogram(
155
- y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels
156
- )
157
- log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db)
158
- bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8)
159
- image = Image.fromarray(bytedata)
160
- return image
161
-
162
- def image_to_audio(self, image: Image.Image) -> np.ndarray:
163
- """Converts spectrogram to audio.
164
-
165
- Args:
166
- image (`PIL Image`):
167
- An grayscale image of `x_res x y_res`.
168
-
169
- Returns:
170
- audio (`np.ndarray`):
171
- The audio as a NumPy array.
172
- """
173
- bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width))
174
- log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db
175
- S = librosa.db_to_power(log_S)
176
- audio = librosa.feature.inverse.mel_to_audio(
177
- S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter
178
- )
179
- return audio
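The class above depends only on librosa, NumPy and Pillow, so a spectrogram round trip can be sketched directly. A minimal sketch, assuming those packages are installed and that the class is still importable from `diffusers.pipelines.audio_diffusion.mel` (the module removed here); the sine wave is illustrative only.

```python
# Round-trip sketch for the Mel helper above (illustrative input only).
import numpy as np
from diffusers.pipelines.audio_diffusion.mel import Mel  # module removed in this commit

mel = Mel(x_res=256, y_res=256, sample_rate=22050)

t = np.linspace(0, 5, 5 * 22050, dtype=np.float32)
mel.load_audio(raw_audio=np.sin(2 * np.pi * 440 * t))    # padded to one full slice

image = mel.audio_slice_to_image(0)   # 256 x 256 grayscale PIL spectrogram
audio = mel.image_to_audio(image)     # Griffin-Lim reconstruction, lossy by design
```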
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py DELETED
@@ -1,153 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from typing import List, Optional, Tuple, Union
17
-
18
- import torch
19
-
20
- from ...utils import logging, randn_tensor
21
- from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
22
-
23
-
24
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
25
-
26
-
27
- class DanceDiffusionPipeline(DiffusionPipeline):
28
- r"""
29
- Pipeline for audio generation.
30
-
31
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
32
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
33
-
34
- Parameters:
35
- unet ([`UNet1DModel`]):
36
- A `UNet1DModel` to denoise the encoded audio.
37
- scheduler ([`SchedulerMixin`]):
38
- A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
39
- [`IPNDMScheduler`].
40
- """
41
-
42
- def __init__(self, unet, scheduler):
43
- super().__init__()
44
- self.register_modules(unet=unet, scheduler=scheduler)
45
-
46
- @torch.no_grad()
47
- def __call__(
48
- self,
49
- batch_size: int = 1,
50
- num_inference_steps: int = 100,
51
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
52
- audio_length_in_s: Optional[float] = None,
53
- return_dict: bool = True,
54
- ) -> Union[AudioPipelineOutput, Tuple]:
55
- r"""
56
- The call function to the pipeline for generation.
57
-
58
- Args:
59
- batch_size (`int`, *optional*, defaults to 1):
60
- The number of audio samples to generate.
61
- num_inference_steps (`int`, *optional*, defaults to 100):
62
- The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at
63
- the expense of slower inference.
64
- generator (`torch.Generator`, *optional*):
65
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
66
- generation deterministic.
67
- audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`):
68
- The length of the generated audio sample in seconds.
69
- return_dict (`bool`, *optional*, defaults to `True`):
70
- Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
71
-
72
- Example:
73
-
74
- ```py
75
- from diffusers import DiffusionPipeline
76
- from scipy.io.wavfile import write
77
-
78
- model_id = "harmonai/maestro-150k"
79
- pipe = DiffusionPipeline.from_pretrained(model_id)
80
- pipe = pipe.to("cuda")
81
-
82
- audios = pipe(audio_length_in_s=4.0).audios
83
-
84
- # To save locally
85
- for i, audio in enumerate(audios):
86
- write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose())
87
-
88
- # To display in Google Colab
89
- import IPython.display as ipd
90
-
91
- for audio in audios:
92
- display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
93
- ```
94
-
95
- Returns:
96
- [`~pipelines.AudioPipelineOutput`] or `tuple`:
97
- If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
98
- returned where the first element is a list with the generated audio.
99
- """
100
-
101
- if audio_length_in_s is None:
102
- audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
103
-
104
- sample_size = audio_length_in_s * self.unet.config.sample_rate
105
-
106
- down_scale_factor = 2 ** len(self.unet.up_blocks)
107
- if sample_size < 3 * down_scale_factor:
108
- raise ValueError(
109
- f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
110
- f" {3 * down_scale_factor / self.unet.config.sample_rate}."
111
- )
112
-
113
- original_sample_size = int(sample_size)
114
- if sample_size % down_scale_factor != 0:
115
- sample_size = (
116
- (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
117
- ) * down_scale_factor
118
- logger.info(
119
- f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
120
- f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
121
- " process."
122
- )
123
- sample_size = int(sample_size)
124
-
125
- dtype = next(self.unet.parameters()).dtype
126
- shape = (batch_size, self.unet.config.in_channels, sample_size)
127
- if isinstance(generator, list) and len(generator) != batch_size:
128
- raise ValueError(
129
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
130
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
131
- )
132
-
133
- audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)
134
-
135
- # set step values
136
- self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
137
- self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
138
-
139
- for t in self.progress_bar(self.scheduler.timesteps):
140
- # 1. predict noise model_output
141
- model_output = self.unet(audio, t).sample
142
-
143
- # 2. compute previous audio sample: x_t -> t_t-1
144
- audio = self.scheduler.step(model_output, t, audio).prev_sample
145
-
146
- audio = audio.clamp(-1, 1).float().cpu().numpy()
147
-
148
- audio = audio[:, :, :original_sample_size]
149
-
150
- if not return_dict:
151
- return (audio,)
152
-
153
- return AudioPipelineOutput(audios=audio)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_sde_ve_flax.py DELETED
@@ -1,279 +0,0 @@
1
- # Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
16
-
17
- from dataclasses import dataclass
18
- from typing import Optional, Tuple, Union
19
-
20
- import flax
21
- import jax.numpy as jnp
22
- from jax import random
23
-
24
- from ..configuration_utils import ConfigMixin, register_to_config
25
- from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left
26
-
27
-
28
- @flax.struct.dataclass
29
- class ScoreSdeVeSchedulerState:
30
- # setable values
31
- timesteps: Optional[jnp.ndarray] = None
32
- discrete_sigmas: Optional[jnp.ndarray] = None
33
- sigmas: Optional[jnp.ndarray] = None
34
-
35
- @classmethod
36
- def create(cls):
37
- return cls()
38
-
39
-
40
- @dataclass
41
- class FlaxSdeVeOutput(FlaxSchedulerOutput):
42
- """
43
- Output class for the ScoreSdeVeScheduler's step function output.
44
-
45
- Args:
46
- state (`ScoreSdeVeSchedulerState`):
47
- prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
48
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
49
- denoising loop.
50
- prev_sample_mean (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
51
- Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps.
52
- """
53
-
54
- state: ScoreSdeVeSchedulerState
55
- prev_sample: jnp.ndarray
56
- prev_sample_mean: Optional[jnp.ndarray] = None
57
-
58
-
59
- class FlaxScoreSdeVeScheduler(FlaxSchedulerMixin, ConfigMixin):
60
- """
61
- The variance exploding stochastic differential equation (SDE) scheduler.
62
-
63
- For more information, see the original paper: https://arxiv.org/abs/2011.13456
64
-
65
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
66
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
67
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
68
- [`~SchedulerMixin.from_pretrained`] functions.
69
-
70
- Args:
71
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
72
- snr (`float`):
73
- coefficient weighting the step from the model_output sample (from the network) to the random noise.
74
- sigma_min (`float`):
75
- initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the
76
- distribution of the data.
77
- sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model.
78
- sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to
79
- epsilon.
80
- correct_steps (`int`): number of correction steps performed on a produced sample.
81
- """
82
-
83
- @property
84
- def has_state(self):
85
- return True
86
-
87
- @register_to_config
88
- def __init__(
89
- self,
90
- num_train_timesteps: int = 2000,
91
- snr: float = 0.15,
92
- sigma_min: float = 0.01,
93
- sigma_max: float = 1348.0,
94
- sampling_eps: float = 1e-5,
95
- correct_steps: int = 1,
96
- ):
97
- pass
98
-
99
- def create_state(self):
100
- state = ScoreSdeVeSchedulerState.create()
101
- return self.set_sigmas(
102
- state,
103
- self.config.num_train_timesteps,
104
- self.config.sigma_min,
105
- self.config.sigma_max,
106
- self.config.sampling_eps,
107
- )
108
-
109
- def set_timesteps(
110
- self, state: ScoreSdeVeSchedulerState, num_inference_steps: int, shape: Tuple = (), sampling_eps: float = None
111
- ) -> ScoreSdeVeSchedulerState:
112
- """
113
- Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
114
-
115
- Args:
116
- state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
117
- num_inference_steps (`int`):
118
- the number of diffusion steps used when generating samples with a pre-trained model.
119
- sampling_eps (`float`, optional):
120
- final timestep value (overrides value given at Scheduler instantiation).
121
-
122
- """
123
- sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
124
-
125
- timesteps = jnp.linspace(1, sampling_eps, num_inference_steps)
126
- return state.replace(timesteps=timesteps)
127
-
128
- def set_sigmas(
129
- self,
130
- state: ScoreSdeVeSchedulerState,
131
- num_inference_steps: int,
132
- sigma_min: float = None,
133
- sigma_max: float = None,
134
- sampling_eps: float = None,
135
- ) -> ScoreSdeVeSchedulerState:
136
- """
137
- Sets the noise scales used for the diffusion chain. Supporting function to be run before inference.
138
-
139
- The sigmas control the weight of the `drift` and `diffusion` components of sample update.
140
-
141
- Args:
142
- state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
143
- num_inference_steps (`int`):
144
- the number of diffusion steps used when generating samples with a pre-trained model.
145
- sigma_min (`float`, optional):
146
- initial noise scale value (overrides value given at Scheduler instantiation).
147
- sigma_max (`float`, optional):
148
- final noise scale value (overrides value given at Scheduler instantiation).
149
- sampling_eps (`float`, optional):
150
- final timestep value (overrides value given at Scheduler instantiation).
151
- """
152
- sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
153
- sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
154
- sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
155
- if state.timesteps is None:
156
- state = self.set_timesteps(state, num_inference_steps, sampling_eps)
157
-
158
- discrete_sigmas = jnp.exp(jnp.linspace(jnp.log(sigma_min), jnp.log(sigma_max), num_inference_steps))
159
- sigmas = jnp.array([sigma_min * (sigma_max / sigma_min) ** t for t in state.timesteps])
160
-
161
- return state.replace(discrete_sigmas=discrete_sigmas, sigmas=sigmas)
162
-
163
- def get_adjacent_sigma(self, state, timesteps, t):
164
- return jnp.where(timesteps == 0, jnp.zeros_like(t), state.discrete_sigmas[timesteps - 1])
165
-
166
- def step_pred(
167
- self,
168
- state: ScoreSdeVeSchedulerState,
169
- model_output: jnp.ndarray,
170
- timestep: int,
171
- sample: jnp.ndarray,
172
- key: random.KeyArray,
173
- return_dict: bool = True,
174
- ) -> Union[FlaxSdeVeOutput, Tuple]:
175
- """
176
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
177
- process from the learned model outputs (most often the predicted noise).
178
-
179
- Args:
180
- state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
181
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
182
- timestep (`int`): current discrete timestep in the diffusion chain.
183
- sample (`jnp.ndarray`):
184
- current instance of sample being created by diffusion process.
185
- generator: random number generator.
186
- return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class
187
-
188
- Returns:
189
- [`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When
190
- returning a tuple, the first element is the sample tensor.
191
-
192
- """
193
- if state.timesteps is None:
194
- raise ValueError(
195
- "`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
196
- )
197
-
198
- timestep = timestep * jnp.ones(
199
- sample.shape[0],
200
- )
201
- timesteps = (timestep * (len(state.timesteps) - 1)).long()
202
-
203
- sigma = state.discrete_sigmas[timesteps]
204
- adjacent_sigma = self.get_adjacent_sigma(state, timesteps, timestep)
205
- drift = jnp.zeros_like(sample)
206
- diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
207
-
208
- # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
209
- # also equation 47 shows the analog from SDE models to ancestral sampling methods
210
- diffusion = diffusion.flatten()
211
- diffusion = broadcast_to_shape_from_left(diffusion, sample.shape)
212
- drift = drift - diffusion**2 * model_output
213
-
214
- # equation 6: sample noise for the diffusion term
215
- key = random.split(key, num=1)
216
- noise = random.normal(key=key, shape=sample.shape)
217
- prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep
218
- # TODO is the variable diffusion the correct scaling term for the noise?
219
- prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g
220
-
221
- if not return_dict:
222
- return (prev_sample, prev_sample_mean, state)
223
-
224
- return FlaxSdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean, state=state)
225
-
226
- def step_correct(
227
- self,
228
- state: ScoreSdeVeSchedulerState,
229
- model_output: jnp.ndarray,
230
- sample: jnp.ndarray,
231
- key: random.KeyArray,
232
- return_dict: bool = True,
233
- ) -> Union[FlaxSdeVeOutput, Tuple]:
234
- """
235
- Correct the predicted sample based on the output model_output of the network. This is often run repeatedly
236
- after making the prediction for the previous timestep.
237
-
238
- Args:
239
- state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance.
240
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
241
- sample (`jnp.ndarray`):
242
- current instance of sample being created by diffusion process.
243
- generator: random number generator.
244
- return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class
245
-
246
- Returns:
247
- [`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When
248
- returning a tuple, the first element is the sample tensor.
249
-
250
- """
251
- if state.timesteps is None:
252
- raise ValueError(
253
- "`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
254
- )
255
-
256
- # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
257
- # sample noise for correction
258
- key = random.split(key, num=1)
259
- noise = random.normal(key=key, shape=sample.shape)
260
-
261
- # compute step size from the model_output, the noise, and the snr
262
- grad_norm = jnp.linalg.norm(model_output)
263
- noise_norm = jnp.linalg.norm(noise)
264
- step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
265
- step_size = step_size * jnp.ones(sample.shape[0])
266
-
267
- # compute corrected sample: model_output term and noise term
268
- step_size = step_size.flatten()
269
- step_size = broadcast_to_shape_from_left(step_size, sample.shape)
270
- prev_sample_mean = sample + step_size * model_output
271
- prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
272
-
273
- if not return_dict:
274
- return (prev_sample, state)
275
-
276
- return FlaxSdeVeOutput(prev_sample=prev_sample, state=state)
277
-
278
- def __len__(self):
279
- return self.config.num_train_timesteps
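Because `__init__` stores nothing, all schedule data lives in the immutable state object and setup is a chain of replace-style calls. A minimal sketch of preparing that state, assuming JAX and Flax are installed and the class is importable from `diffusers`; the step count is illustrative.

```python
# Sketch of preparing the Flax SDE-VE scheduler state above; no model involved.
from diffusers import FlaxScoreSdeVeScheduler

scheduler = FlaxScoreSdeVeScheduler()
state = scheduler.create_state()                                # 2000 training timesteps
state = scheduler.set_timesteps(state, num_inference_steps=50)  # overwrite with 50 steps
state = scheduler.set_sigmas(state, num_inference_steps=50)     # rebuild matching sigmas

print(state.timesteps.shape)                             # (50,), from 1.0 down to sampling_eps
print(float(state.sigmas[0]), float(state.sigmas[-1]))   # ~sigma_max down to ~sigma_min
```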
 
spaces/Andy1621/uniformer_image_detection/configs/htc/htc_r50_fpn_20e_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './htc_r50_fpn_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 19])
4
- runner = dict(type='EpochBasedRunner', max_epochs=20)
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/upfirdn2d.cpp DELETED
@@ -1,23 +0,0 @@
1
- #include <torch/extension.h>
2
-
3
-
4
- torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
5
- int up_x, int up_y, int down_x, int down_y,
6
- int pad_x0, int pad_x1, int pad_y0, int pad_y1);
7
-
8
- #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
9
- #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
10
- #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
11
-
12
- torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
13
- int up_x, int up_y, int down_x, int down_y,
14
- int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
15
- CHECK_CUDA(input);
16
- CHECK_CUDA(kernel);
17
-
18
- return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
19
- }
20
-
21
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
22
- m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
23
- }
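Bindings like this are usually JIT-compiled from Python rather than built ahead of time. A sketch of how that would look, assuming a working CUDA toolchain and that the companion CUDA source sits next to this file under the name used below (an assumption, not confirmed by this diff):

```python
# Hypothetical JIT build of the extension above; file names are assumptions.
from torch.utils.cpp_extension import load

upfirdn2d_ext = load(
    name="upfirdn2d",
    sources=["upfirdn2d.cpp", "upfirdn2d_kernel.cu"],  # .cu name is assumed
    verbose=True,
)

# The bound entry point then mirrors the C++ signature:
# upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y,
#                         pad_x0, pad_x1, pad_y0, pad_y1)
```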
 
spaces/AnthonyErosion/HoctotAI/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: HoctotAI
3
- emoji: ⚡
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.35.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/check.py DELETED
@@ -1,149 +0,0 @@
1
- """Validation of dependencies of packages
2
- """
3
-
4
- import logging
5
- from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple
6
-
7
- from pip._vendor.packaging.requirements import Requirement
8
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
9
-
10
- from pip._internal.distributions import make_distribution_for_install_requirement
11
- from pip._internal.metadata import get_default_environment
12
- from pip._internal.metadata.base import DistributionVersion
13
- from pip._internal.req.req_install import InstallRequirement
14
-
15
- logger = logging.getLogger(__name__)
16
-
17
-
18
- class PackageDetails(NamedTuple):
19
- version: DistributionVersion
20
- dependencies: List[Requirement]
21
-
22
-
23
- # Shorthands
24
- PackageSet = Dict[NormalizedName, PackageDetails]
25
- Missing = Tuple[NormalizedName, Requirement]
26
- Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement]
27
-
28
- MissingDict = Dict[NormalizedName, List[Missing]]
29
- ConflictingDict = Dict[NormalizedName, List[Conflicting]]
30
- CheckResult = Tuple[MissingDict, ConflictingDict]
31
- ConflictDetails = Tuple[PackageSet, CheckResult]
32
-
33
-
34
- def create_package_set_from_installed() -> Tuple[PackageSet, bool]:
35
- """Converts a list of distributions into a PackageSet."""
36
- package_set = {}
37
- problems = False
38
- env = get_default_environment()
39
- for dist in env.iter_installed_distributions(local_only=False, skip=()):
40
- name = dist.canonical_name
41
- try:
42
- dependencies = list(dist.iter_dependencies())
43
- package_set[name] = PackageDetails(dist.version, dependencies)
44
- except (OSError, ValueError) as e:
45
- # Don't crash on unreadable or broken metadata.
46
- logger.warning("Error parsing requirements for %s: %s", name, e)
47
- problems = True
48
- return package_set, problems
49
-
50
-
51
- def check_package_set(
52
- package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None
53
- ) -> CheckResult:
54
- """Check if a package set is consistent
55
-
56
- If should_ignore is passed, it should be a callable that takes a
57
- package name and returns a boolean.
58
- """
59
-
60
- missing = {}
61
- conflicting = {}
62
-
63
- for package_name, package_detail in package_set.items():
64
- # Info about dependencies of package_name
65
- missing_deps: Set[Missing] = set()
66
- conflicting_deps: Set[Conflicting] = set()
67
-
68
- if should_ignore and should_ignore(package_name):
69
- continue
70
-
71
- for req in package_detail.dependencies:
72
- name = canonicalize_name(req.name)
73
-
74
- # Check if it's missing
75
- if name not in package_set:
76
- missed = True
77
- if req.marker is not None:
78
- missed = req.marker.evaluate({"extra": ""})
79
- if missed:
80
- missing_deps.add((name, req))
81
- continue
82
-
83
- # Check if there's a conflict
84
- version = package_set[name].version
85
- if not req.specifier.contains(version, prereleases=True):
86
- conflicting_deps.add((name, version, req))
87
-
88
- if missing_deps:
89
- missing[package_name] = sorted(missing_deps, key=str)
90
- if conflicting_deps:
91
- conflicting[package_name] = sorted(conflicting_deps, key=str)
92
-
93
- return missing, conflicting
94
-
95
-
96
- def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails:
97
- """For checking if the dependency graph would be consistent after \
98
- installing given requirements
99
- """
100
- # Start from the current state
101
- package_set, _ = create_package_set_from_installed()
102
- # Install packages
103
- would_be_installed = _simulate_installation_of(to_install, package_set)
104
-
105
- # Only warn about directly-dependent packages; create a whitelist of them
106
- whitelist = _create_whitelist(would_be_installed, package_set)
107
-
108
- return (
109
- package_set,
110
- check_package_set(
111
- package_set, should_ignore=lambda name: name not in whitelist
112
- ),
113
- )
114
-
115
-
116
- def _simulate_installation_of(
117
- to_install: List[InstallRequirement], package_set: PackageSet
118
- ) -> Set[NormalizedName]:
119
- """Computes the version of packages after installing to_install."""
120
- # Keep track of packages that were installed
121
- installed = set()
122
-
123
- # Modify it as installing requirement_set would (assuming no errors)
124
- for inst_req in to_install:
125
- abstract_dist = make_distribution_for_install_requirement(inst_req)
126
- dist = abstract_dist.get_metadata_distribution()
127
- name = dist.canonical_name
128
- package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))
129
-
130
- installed.add(name)
131
-
132
- return installed
133
-
134
-
135
- def _create_whitelist(
136
- would_be_installed: Set[NormalizedName], package_set: PackageSet
137
- ) -> Set[NormalizedName]:
138
- packages_affected = set(would_be_installed)
139
-
140
- for package_name in package_set:
141
- if package_name in packages_affected:
142
- continue
143
-
144
- for req in package_set[package_name].dependencies:
145
- if canonicalize_name(req.name) in packages_affected:
146
- packages_affected.add(package_name)
147
- break
148
-
149
- return packages_affected
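The two entry points above compose directly: build a package set from the current environment, then check it. A minimal sketch; this is pip's internal API, so it is illustrative only and may change between pip versions.

```python
# Illustrative use of pip's internal consistency check (not a public API).
from pip._internal.operations.check import (
    check_package_set,
    create_package_set_from_installed,
)

package_set, parse_problems = create_package_set_from_installed()
missing, conflicting = check_package_set(package_set)

for name, unmet in missing.items():
    print(f"{name} is missing: {[str(req) for _, req in unmet]}")
for name, clashes in conflicting.items():
    print(f"{name} has conflicts: {[str(req) for _, _, req in clashes]}")
```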
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/__main__.py DELETED
@@ -1,274 +0,0 @@
1
- import colorsys
2
- import io
3
- from time import process_time
4
-
5
- from pip._vendor.rich import box
6
- from pip._vendor.rich.color import Color
7
- from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult
8
- from pip._vendor.rich.markdown import Markdown
9
- from pip._vendor.rich.measure import Measurement
10
- from pip._vendor.rich.pretty import Pretty
11
- from pip._vendor.rich.segment import Segment
12
- from pip._vendor.rich.style import Style
13
- from pip._vendor.rich.syntax import Syntax
14
- from pip._vendor.rich.table import Table
15
- from pip._vendor.rich.text import Text
16
-
17
-
18
- class ColorBox:
19
- def __rich_console__(
20
- self, console: Console, options: ConsoleOptions
21
- ) -> RenderResult:
22
- for y in range(0, 5):
23
- for x in range(options.max_width):
24
- h = x / options.max_width
25
- l = 0.1 + ((y / 5) * 0.7)
26
- r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
27
- r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
28
- bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
29
- color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
30
- yield Segment("▄", Style(color=color, bgcolor=bgcolor))
31
- yield Segment.line()
32
-
33
- def __rich_measure__(
34
- self, console: "Console", options: ConsoleOptions
35
- ) -> Measurement:
36
- return Measurement(1, options.max_width)
37
-
38
-
39
- def make_test_card() -> Table:
40
- """Get a renderable that demonstrates a number of features."""
41
- table = Table.grid(padding=1, pad_edge=True)
42
- table.title = "Rich features"
43
- table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
44
- table.add_column("Demonstration")
45
-
46
- color_table = Table(
47
- box=None,
48
- expand=False,
49
- show_header=False,
50
- show_edge=False,
51
- pad_edge=False,
52
- )
53
- color_table.add_row(
54
- (
55
- "✓ [bold green]4-bit color[/]\n"
56
- "✓ [bold blue]8-bit color[/]\n"
57
- "✓ [bold magenta]Truecolor (16.7 million)[/]\n"
58
- "✓ [bold yellow]Dumb terminals[/]\n"
59
- "✓ [bold cyan]Automatic color conversion"
60
- ),
61
- ColorBox(),
62
- )
63
-
64
- table.add_row("Colors", color_table)
65
-
66
- table.add_row(
67
- "Styles",
68
- "All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
69
- )
70
-
71
- lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
72
- lorem_table = Table.grid(padding=1, collapse_padding=True)
73
- lorem_table.pad_edge = False
74
- lorem_table.add_row(
75
- Text(lorem, justify="left", style="green"),
76
- Text(lorem, justify="center", style="yellow"),
77
- Text(lorem, justify="right", style="blue"),
78
- Text(lorem, justify="full", style="red"),
79
- )
80
- table.add_row(
81
- "Text",
82
- Group(
83
- Text.from_markup(
84
- """Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
85
- ),
86
- lorem_table,
87
- ),
88
- )
89
-
90
- def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
91
- table = Table(show_header=False, pad_edge=False, box=None, expand=True)
92
- table.add_column("1", ratio=1)
93
- table.add_column("2", ratio=1)
94
- table.add_row(renderable1, renderable2)
95
- return table
96
-
97
- table.add_row(
98
- "Asian\nlanguage\nsupport",
99
- ":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
100
- )
101
-
102
- markup_example = (
103
- "[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
104
- ":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
105
- )
106
- table.add_row("Markup", markup_example)
107
-
108
- example_table = Table(
109
- show_edge=False,
110
- show_header=True,
111
- expand=False,
112
- row_styles=["none", "dim"],
113
- box=box.SIMPLE,
114
- )
115
- example_table.add_column("[green]Date", style="green", no_wrap=True)
116
- example_table.add_column("[blue]Title", style="blue")
117
- example_table.add_column(
118
- "[cyan]Production Budget",
119
- style="cyan",
120
- justify="right",
121
- no_wrap=True,
122
- )
123
- example_table.add_column(
124
- "[magenta]Box Office",
125
- style="magenta",
126
- justify="right",
127
- no_wrap=True,
128
- )
129
- example_table.add_row(
130
- "Dec 20, 2019",
131
- "Star Wars: The Rise of Skywalker",
132
- "$275,000,000",
133
- "$375,126,118",
134
- )
135
- example_table.add_row(
136
- "May 25, 2018",
137
- "[b]Solo[/]: A Star Wars Story",
138
- "$275,000,000",
139
- "$393,151,347",
140
- )
141
- example_table.add_row(
142
- "Dec 15, 2017",
143
- "Star Wars Ep. VIII: The Last Jedi",
144
- "$262,000,000",
145
- "[bold]$1,332,539,889[/bold]",
146
- )
147
- example_table.add_row(
148
- "May 19, 1999",
149
- "Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
150
- "$115,000,000",
151
- "$1,027,044,677",
152
- )
153
-
154
- table.add_row("Tables", example_table)
155
-
156
- code = '''\
157
- def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
158
- """Iterate and generate a tuple with a flag for last value."""
159
- iter_values = iter(values)
160
- try:
161
- previous_value = next(iter_values)
162
- except StopIteration:
163
- return
164
- for value in iter_values:
165
- yield False, previous_value
166
- previous_value = value
167
- yield True, previous_value'''
168
-
169
- pretty_data = {
170
- "foo": [
171
- 3.1427,
172
- (
173
- "Paul Atreides",
174
- "Vladimir Harkonnen",
175
- "Thufir Hawat",
176
- ),
177
- ],
178
- "atomic": (False, True, None),
179
- }
180
- table.add_row(
181
- "Syntax\nhighlighting\n&\npretty\nprinting",
182
- comparison(
183
- Syntax(code, "python3", line_numbers=True, indent_guides=True),
184
- Pretty(pretty_data, indent_guides=True),
185
- ),
186
- )
187
-
188
- markdown_example = """\
189
- # Markdown
190
-
191
- Supports much of the *markdown* __syntax__!
192
-
193
- - Headers
194
- - Basic formatting: **bold**, *italic*, `code`
195
- - Block quotes
196
- - Lists, and more...
197
- """
198
- table.add_row(
199
- "Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
200
- )
201
-
202
- table.add_row(
203
- "+more!",
204
- """Progress bars, columns, styled logging handler, tracebacks, etc...""",
205
- )
206
- return table
207
-
208
-
209
- if __name__ == "__main__": # pragma: no cover
210
-
211
- console = Console(
212
- file=io.StringIO(),
213
- force_terminal=True,
214
- )
215
- test_card = make_test_card()
216
-
217
- # Print once to warm cache
218
- start = process_time()
219
- console.print(test_card)
220
- pre_cache_taken = round((process_time() - start) * 1000.0, 1)
221
-
222
- console.file = io.StringIO()
223
-
224
- start = process_time()
225
- console.print(test_card)
226
- taken = round((process_time() - start) * 1000.0, 1)
227
-
228
- c = Console(record=True)
229
- c.print(test_card)
230
-
231
- print(f"rendered in {pre_cache_taken}ms (cold cache)")
232
- print(f"rendered in {taken}ms (warm cache)")
233
-
234
- from pip._vendor.rich.panel import Panel
235
-
236
- console = Console()
237
-
238
- sponsor_message = Table.grid(padding=1)
239
- sponsor_message.add_column(style="green", justify="right")
240
- sponsor_message.add_column(no_wrap=True)
241
-
242
- sponsor_message.add_row(
243
- "Textualize",
244
- "[u blue link=https://github.com/textualize]https://github.com/textualize",
245
- )
246
- sponsor_message.add_row(
247
- "Twitter",
248
- "[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
249
- )
250
-
251
- intro_message = Text.from_markup(
252
- """\
253
- We hope you enjoy using Rich!
254
-
255
- Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
256
-
257
- - Will McGugan"""
258
- )
259
-
260
- message = Table.grid(padding=2)
261
- message.add_column()
262
- message.add_column(no_wrap=True)
263
- message.add_row(intro_message, sponsor_message)
264
-
265
- console.print(
266
- Panel.fit(
267
- message,
268
- box=box.ROUNDED,
269
- padding=(1, 2),
270
- title="[b red]Thanks for trying out Rich!",
271
- border_style="bright_blue",
272
- ),
273
- justify="center",
274
- )
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/config.py DELETED
@@ -1,377 +0,0 @@
1
- """distutils.command.config
2
-
3
- Implements the Distutils 'config' command, a (mostly) empty command class
4
- that exists mainly to be sub-classed by specific module distributions and
5
- applications. The idea is that while every "config" command is different,
6
- at least they're all named the same, and users always see "config" in the
7
- list of standard commands. Also, this is a good place to put common
8
- configure-like tasks: "try to compile this C code", or "figure out where
9
- this header file lives".
10
- """
11
-
12
- import os
13
- import re
14
-
15
- from distutils.core import Command
16
- from distutils.errors import DistutilsExecError
17
- from distutils.sysconfig import customize_compiler
18
- from distutils import log
19
-
20
- LANG_EXT = {"c": ".c", "c++": ".cxx"}
21
-
22
-
23
- class config(Command):
24
-
25
- description = "prepare to build"
26
-
27
- user_options = [
28
- ('compiler=', None, "specify the compiler type"),
29
- ('cc=', None, "specify the compiler executable"),
30
- ('include-dirs=', 'I', "list of directories to search for header files"),
31
- ('define=', 'D', "C preprocessor macros to define"),
32
- ('undef=', 'U', "C preprocessor macros to undefine"),
33
- ('libraries=', 'l', "external C libraries to link with"),
34
- ('library-dirs=', 'L', "directories to search for external C libraries"),
35
- ('noisy', None, "show every action (compile, link, run, ...) taken"),
36
- (
37
- 'dump-source',
38
- None,
39
- "dump generated source files before attempting to compile them",
40
- ),
41
- ]
42
-
43
- # The three standard command methods: since the "config" command
44
- # does nothing by default, these are empty.
45
-
46
- def initialize_options(self):
47
- self.compiler = None
48
- self.cc = None
49
- self.include_dirs = None
50
- self.libraries = None
51
- self.library_dirs = None
52
-
53
- # maximal output for now
54
- self.noisy = 1
55
- self.dump_source = 1
56
-
57
- # list of temporary files generated along-the-way that we have
58
- # to clean at some point
59
- self.temp_files = []
60
-
61
- def finalize_options(self):
62
- if self.include_dirs is None:
63
- self.include_dirs = self.distribution.include_dirs or []
64
- elif isinstance(self.include_dirs, str):
65
- self.include_dirs = self.include_dirs.split(os.pathsep)
66
-
67
- if self.libraries is None:
68
- self.libraries = []
69
- elif isinstance(self.libraries, str):
70
- self.libraries = [self.libraries]
71
-
72
- if self.library_dirs is None:
73
- self.library_dirs = []
74
- elif isinstance(self.library_dirs, str):
75
- self.library_dirs = self.library_dirs.split(os.pathsep)
76
-
77
- def run(self):
78
- pass
79
-
80
- # Utility methods for actual "config" commands. The interfaces are
81
- # loosely based on Autoconf macros of similar names. Sub-classes
82
- # may use these freely.
83
-
84
- def _check_compiler(self):
85
- """Check that 'self.compiler' really is a CCompiler object;
86
- if not, make it one.
87
- """
88
- # We do this late, and only on-demand, because this is an expensive
89
- # import.
90
- from distutils.ccompiler import CCompiler, new_compiler
91
-
92
- if not isinstance(self.compiler, CCompiler):
93
- self.compiler = new_compiler(
94
- compiler=self.compiler, dry_run=self.dry_run, force=1
95
- )
96
- customize_compiler(self.compiler)
97
- if self.include_dirs:
98
- self.compiler.set_include_dirs(self.include_dirs)
99
- if self.libraries:
100
- self.compiler.set_libraries(self.libraries)
101
- if self.library_dirs:
102
- self.compiler.set_library_dirs(self.library_dirs)
103
-
104
- def _gen_temp_sourcefile(self, body, headers, lang):
105
- filename = "_configtest" + LANG_EXT[lang]
106
- with open(filename, "w") as file:
107
- if headers:
108
- for header in headers:
109
- file.write("#include <%s>\n" % header)
110
- file.write("\n")
111
- file.write(body)
112
- if body[-1] != "\n":
113
- file.write("\n")
114
- return filename
115
-
116
- def _preprocess(self, body, headers, include_dirs, lang):
117
- src = self._gen_temp_sourcefile(body, headers, lang)
118
- out = "_configtest.i"
119
- self.temp_files.extend([src, out])
120
- self.compiler.preprocess(src, out, include_dirs=include_dirs)
121
- return (src, out)
122
-
123
- def _compile(self, body, headers, include_dirs, lang):
124
- src = self._gen_temp_sourcefile(body, headers, lang)
125
- if self.dump_source:
126
- dump_file(src, "compiling '%s':" % src)
127
- (obj,) = self.compiler.object_filenames([src])
128
- self.temp_files.extend([src, obj])
129
- self.compiler.compile([src], include_dirs=include_dirs)
130
- return (src, obj)
131
-
132
- def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
133
- (src, obj) = self._compile(body, headers, include_dirs, lang)
134
- prog = os.path.splitext(os.path.basename(src))[0]
135
- self.compiler.link_executable(
136
- [obj],
137
- prog,
138
- libraries=libraries,
139
- library_dirs=library_dirs,
140
- target_lang=lang,
141
- )
142
-
143
- if self.compiler.exe_extension is not None:
144
- prog = prog + self.compiler.exe_extension
145
- self.temp_files.append(prog)
146
-
147
- return (src, obj, prog)
148
-
149
- def _clean(self, *filenames):
150
- if not filenames:
151
- filenames = self.temp_files
152
- self.temp_files = []
153
- log.info("removing: %s", ' '.join(filenames))
154
- for filename in filenames:
155
- try:
156
- os.remove(filename)
157
- except OSError:
158
- pass
159
-
160
- # XXX these ignore the dry-run flag: what to do, what to do? even if
161
- # you want a dry-run build, you still need some sort of configuration
162
- # info. My inclination is to make it up to the real config command to
163
- # consult 'dry_run', and assume a default (minimal) configuration if
164
- # true. The problem with trying to do it here is that you'd have to
165
- # return either true or false from all the 'try' methods, neither of
166
- # which is correct.
167
-
168
- # XXX need access to the header search path and maybe default macros.
169
-
170
- def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
171
- """Construct a source file from 'body' (a string containing lines
172
- of C/C++ code) and 'headers' (a list of header files to include)
173
- and run it through the preprocessor. Return true if the
174
- preprocessor succeeded, false if there were any errors.
175
- ('body' probably isn't of much use, but what the heck.)
176
- """
177
- from distutils.ccompiler import CompileError
178
-
179
- self._check_compiler()
180
- ok = True
181
- try:
182
- self._preprocess(body, headers, include_dirs, lang)
183
- except CompileError:
184
- ok = False
185
-
186
- self._clean()
187
- return ok
188
-
189
- def search_cpp(self, pattern, body=None, headers=None, include_dirs=None, lang="c"):
190
- """Construct a source file (just like 'try_cpp()'), run it through
191
- the preprocessor, and return true if any line of the output matches
192
- 'pattern'. 'pattern' should either be a compiled regex object or a
193
- string containing a regex. If both 'body' and 'headers' are None,
194
- preprocesses an empty file -- which can be useful to determine the
195
- symbols the preprocessor and compiler set by default.
196
- """
197
- self._check_compiler()
198
- src, out = self._preprocess(body, headers, include_dirs, lang)
199
-
200
- if isinstance(pattern, str):
201
- pattern = re.compile(pattern)
202
-
203
- with open(out) as file:
204
- match = False
205
- while True:
206
- line = file.readline()
207
- if line == '':
208
- break
209
- if pattern.search(line):
210
- match = True
211
- break
212
-
213
- self._clean()
214
- return match
215
-
216
- def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
217
- """Try to compile a source file built from 'body' and 'headers'.
218
- Return true on success, false otherwise.
219
- """
220
- from distutils.ccompiler import CompileError
221
-
222
- self._check_compiler()
223
- try:
224
- self._compile(body, headers, include_dirs, lang)
225
- ok = True
226
- except CompileError:
227
- ok = False
228
-
229
- log.info(ok and "success!" or "failure.")
230
- self._clean()
231
- return ok
232
-
233
- def try_link(
234
- self,
235
- body,
236
- headers=None,
237
- include_dirs=None,
238
- libraries=None,
239
- library_dirs=None,
240
- lang="c",
241
- ):
242
- """Try to compile and link a source file, built from 'body' and
243
- 'headers', to executable form. Return true on success, false
244
- otherwise.
245
- """
246
- from distutils.ccompiler import CompileError, LinkError
247
-
248
- self._check_compiler()
249
- try:
250
- self._link(body, headers, include_dirs, libraries, library_dirs, lang)
251
- ok = True
252
- except (CompileError, LinkError):
253
- ok = False
254
-
255
- log.info(ok and "success!" or "failure.")
256
- self._clean()
257
- return ok
258
-
259
- def try_run(
260
- self,
261
- body,
262
- headers=None,
263
- include_dirs=None,
264
- libraries=None,
265
- library_dirs=None,
266
- lang="c",
267
- ):
268
- """Try to compile, link to an executable, and run a program
269
- built from 'body' and 'headers'. Return true on success, false
270
- otherwise.
271
- """
272
- from distutils.ccompiler import CompileError, LinkError
273
-
274
- self._check_compiler()
275
- try:
276
- src, obj, exe = self._link(
277
- body, headers, include_dirs, libraries, library_dirs, lang
278
- )
279
- self.spawn([exe])
280
- ok = True
281
- except (CompileError, LinkError, DistutilsExecError):
282
- ok = False
283
-
284
- log.info(ok and "success!" or "failure.")
285
- self._clean()
286
- return ok
287
-
288
- # -- High-level methods --------------------------------------------
289
- # (these are the ones that are actually likely to be useful
290
- # when implementing a real-world config command!)
291
-
292
- def check_func(
293
- self,
294
- func,
295
- headers=None,
296
- include_dirs=None,
297
- libraries=None,
298
- library_dirs=None,
299
- decl=0,
300
- call=0,
301
- ):
302
- """Determine if function 'func' is available by constructing a
303
- source file that refers to 'func', and compiles and links it.
304
- If everything succeeds, returns true; otherwise returns false.
305
-
306
- The constructed source file starts out by including the header
307
- files listed in 'headers'. If 'decl' is true, it then declares
308
- 'func' (as "int func()"); you probably shouldn't supply 'headers'
309
- and set 'decl' true in the same call, or you might get errors about
310
- a conflicting declarations for 'func'. Finally, the constructed
311
- 'main()' function either references 'func' or (if 'call' is true)
312
- calls it. 'libraries' and 'library_dirs' are used when
313
- linking.
314
- """
315
- self._check_compiler()
316
- body = []
317
- if decl:
318
- body.append("int %s ();" % func)
319
- body.append("int main () {")
320
- if call:
321
- body.append(" %s();" % func)
322
- else:
323
- body.append(" %s;" % func)
324
- body.append("}")
325
- body = "\n".join(body) + "\n"
326
-
327
- return self.try_link(body, headers, include_dirs, libraries, library_dirs)
328
-
329
- def check_lib(
330
- self,
331
- library,
332
- library_dirs=None,
333
- headers=None,
334
- include_dirs=None,
335
- other_libraries=[],
336
- ):
337
- """Determine if 'library' is available to be linked against,
338
- without actually checking that any particular symbols are provided
339
- by it. 'headers' will be used in constructing the source file to
340
- be compiled, but the only effect of this is to check if all the
341
- header files listed are available. Any libraries listed in
342
- 'other_libraries' will be included in the link, in case 'library'
343
- has symbols that depend on other libraries.
344
- """
345
- self._check_compiler()
346
- return self.try_link(
347
- "int main (void) { }",
348
- headers,
349
- include_dirs,
350
- [library] + other_libraries,
351
- library_dirs,
352
- )
353
-
354
- def check_header(self, header, include_dirs=None, library_dirs=None, lang="c"):
355
- """Determine if the system header file named by 'header_file'
356
- exists and can be found by the preprocessor; return true if so,
357
- false otherwise.
358
- """
359
- return self.try_cpp(
360
- body="/* No body */", headers=[header], include_dirs=include_dirs
361
- )
362
-
363
-
364
- def dump_file(filename, head=None):
365
- """Dumps a file content into log.info.
366
-
367
- If head is not None, it will be dumped before the file content.
368
- """
369
- if head is None:
370
- log.info('%s', filename)
371
- else:
372
- log.info(head)
373
- file = open(filename)
374
- try:
375
- log.info(file.read())
376
- finally:
377
- file.close()
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py DELETED
@@ -1,120 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- import os
4
- import pickle
5
- import torch
6
- from fvcore.common.checkpoint import Checkpointer
7
- from torch.nn.parallel import DistributedDataParallel
8
-
9
- import detectron2.utils.comm as comm
10
- from detectron2.utils.file_io import PathManager
11
-
12
- from .c2_model_loading import align_and_update_state_dicts
13
-
14
-
15
- class DetectionCheckpointer(Checkpointer):
16
- """
17
- Same as :class:`Checkpointer`, but is able to:
18
- 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
19
- 2. correctly load checkpoints that are only available on the master worker
20
- """
21
-
22
- def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
23
- is_main_process = comm.is_main_process()
24
- super().__init__(
25
- model,
26
- save_dir,
27
- save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
28
- **checkpointables,
29
- )
30
- self.path_manager = PathManager
31
-
32
- def load(self, path, *args, **kwargs):
33
- need_sync = False
34
-
35
- if path and isinstance(self.model, DistributedDataParallel):
36
- logger = logging.getLogger(__name__)
37
- path = self.path_manager.get_local_path(path)
38
- has_file = os.path.isfile(path)
39
- all_has_file = comm.all_gather(has_file)
40
- if not all_has_file[0]:
41
- raise OSError(f"File {path} not found on main worker.")
42
- if not all(all_has_file):
43
- logger.warning(
44
- f"Not all workers can read checkpoint {path}. "
45
- "Training may fail to fully resume."
46
- )
47
- # TODO: broadcast the checkpoint file contents from main
48
- # worker, and load from it instead.
49
- need_sync = True
50
- if not has_file:
51
- path = None # don't load if not readable
52
- ret = super().load(path, *args, **kwargs)
53
-
54
- if need_sync:
55
- logger.info("Broadcasting model states from main worker ...")
56
- self.model._sync_params_and_buffers()
57
- return ret
58
-
59
- def _load_file(self, filename):
60
- if filename.endswith(".pkl"):
61
- with PathManager.open(filename, "rb") as f:
62
- data = pickle.load(f, encoding="latin1")
63
- if "model" in data and "__author__" in data:
64
- # file is in Detectron2 model zoo format
65
- self.logger.info("Reading a file from '{}'".format(data["__author__"]))
66
- return data
67
- else:
68
- # assume file is from Caffe2 / Detectron1 model zoo
69
- if "blobs" in data:
70
- # Detection models have "blobs", but ImageNet models don't
71
- data = data["blobs"]
72
- data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
73
- return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
74
- elif filename.endswith(".pyth"):
75
- # assume file is from pycls; no one else seems to use the ".pyth" extension
76
- with PathManager.open(filename, "rb") as f:
77
- data = torch.load(f)
78
- assert (
79
- "model_state" in data
80
- ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
81
- model_state = {
82
- k: v
83
- for k, v in data["model_state"].items()
84
- if not k.endswith("num_batches_tracked")
85
- }
86
- return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
87
-
88
- loaded = super()._load_file(filename) # load native pth checkpoint
89
- if "model" not in loaded:
90
- loaded = {"model": loaded}
91
- return loaded
92
-
93
- def _load_model(self, checkpoint):
94
- if checkpoint.get("matching_heuristics", False):
95
- self._convert_ndarray_to_tensor(checkpoint["model"])
96
- # convert weights by name-matching heuristics
97
- checkpoint["model"] = align_and_update_state_dicts(
98
- self.model.state_dict(),
99
- checkpoint["model"],
100
- c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
101
- )
102
- # for non-caffe2 models, use standard ways to load it
103
- incompatible = super()._load_model(checkpoint)
104
-
105
- model_buffers = dict(self.model.named_buffers(recurse=False))
106
- for k in ["pixel_mean", "pixel_std"]:
107
- # Ignore missing key message about pixel_mean/std.
108
- # Though they may be missing in old checkpoints, they will be correctly
109
- # initialized from config anyway.
110
- if k in model_buffers:
111
- try:
112
- incompatible.missing_keys.remove(k)
113
- except ValueError:
114
- pass
115
- for k in incompatible.unexpected_keys[:]:
116
- # Ignore unexpected keys about cell anchors. They exist in old checkpoints
117
- # but now they are non-persistent buffers and will not be in new checkpoints.
118
- if "anchor_generator.cell_anchors" in k:
119
- incompatible.unexpected_keys.remove(k)
120
- return incompatible
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/postprocessing.py DELETED
@@ -1,101 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import torch
3
- from torch.nn import functional as F
4
-
5
- from detectron2.structures import Instances, ROIMasks
6
-
7
-
8
- # perhaps should rename to "resize_instance"
9
- def detector_postprocess(
10
- results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
11
- ):
12
- """
13
- Resize the output instances.
14
- The input images are often resized when entering an object detector.
15
- As a result, we often need the outputs of the detector in a different
16
- resolution from its inputs.
17
-
18
- This function will resize the raw outputs of an R-CNN detector
19
- to produce outputs according to the desired output resolution.
20
-
21
- Args:
22
- results (Instances): the raw outputs from the detector.
23
- `results.image_size` contains the input image resolution the detector sees.
24
- This object might be modified in-place.
25
- output_height, output_width: the desired output resolution.
26
-
27
- Returns:
28
- Instances: the resized output from the model, based on the output resolution
29
- """
30
- if isinstance(output_width, torch.Tensor):
31
- # This shape might (but not necessarily) be tensors during tracing.
32
- # Converts integer tensors to float temporaries to ensure true
33
- # division is performed when computing scale_x and scale_y.
34
- output_width_tmp = output_width.float()
35
- output_height_tmp = output_height.float()
36
- new_size = torch.stack([output_height, output_width])
37
- else:
38
- new_size = (output_height, output_width)
39
- output_width_tmp = output_width
40
- output_height_tmp = output_height
41
-
42
- scale_x, scale_y = (
43
- output_width_tmp / results.image_size[1],
44
- output_height_tmp / results.image_size[0],
45
- )
46
- results = Instances(new_size, **results.get_fields())
47
-
48
- if results.has("pred_boxes"):
49
- output_boxes = results.pred_boxes
50
- elif results.has("proposal_boxes"):
51
- output_boxes = results.proposal_boxes
52
- else:
53
- output_boxes = None
54
- assert output_boxes is not None, "Predictions must contain boxes!"
55
-
56
- output_boxes.scale(scale_x, scale_y)
57
- output_boxes.clip(results.image_size)
58
-
59
- results = results[output_boxes.nonempty()]
60
-
61
- if results.has("pred_masks"):
62
- if isinstance(results.pred_masks, ROIMasks):
63
- roi_masks = results.pred_masks
64
- else:
65
- # pred_masks is a tensor of shape (N, 1, M, M)
66
- roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
67
- results.pred_masks = roi_masks.to_bitmasks(
68
- results.pred_boxes, output_height, output_width, mask_threshold
69
- ).tensor # TODO return ROIMasks/BitMask object in the future
70
-
71
- if results.has("pred_keypoints"):
72
- results.pred_keypoints[:, :, 0] *= scale_x
73
- results.pred_keypoints[:, :, 1] *= scale_y
74
-
75
- return results
76
-
77
-
78
- def sem_seg_postprocess(result, img_size, output_height, output_width):
79
- """
80
- Return semantic segmentation predictions in the original resolution.
81
-
82
- The input images are often resized when entering the semantic segmentor. Moreover, in some
83
- cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
84
- As a result, we often need the predictions of the segmentor in a different
85
- resolution from its inputs.
86
-
87
- Args:
88
- result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
89
- where C is the number of classes, and H, W are the height and width of the prediction.
90
- img_size (tuple): image size that segmentor is taking as input.
91
- output_height, output_width: the desired output resolution.
92
-
93
- Returns:
94
- semantic segmentation prediction (Tensor): A tensor of the shape
95
- (C, output_height, output_width) that contains per-pixel soft predictions.
96
- """
97
- result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
98
- result = F.interpolate(
99
- result, size=(output_height, output_width), mode="bilinear", align_corners=False
100
- )[0]
101
- return result
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/webui.py DELETED
@@ -1,136 +0,0 @@
1
- import sys, os
2
-
3
- if sys.platform == "darwin":
4
- os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
5
-
6
- import logging
7
-
8
- logging.getLogger("numba").setLevel(logging.WARNING)
9
- logging.getLogger("markdown_it").setLevel(logging.WARNING)
10
- logging.getLogger("urllib3").setLevel(logging.WARNING)
11
- logging.getLogger("matplotlib").setLevel(logging.WARNING)
12
-
13
- logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s")
14
-
15
- logger = logging.getLogger(__name__)
16
-
17
- import torch
18
- import argparse
19
- import commons
20
- import utils
21
- from models import SynthesizerTrn
22
- from text.symbols import symbols
23
- from text import cleaned_text_to_sequence, get_bert
24
- from text.cleaner import clean_text
25
- import gradio as gr
26
- import webbrowser
27
-
28
-
29
- net_g = None
30
-
31
-
32
- def get_text(text, language_str, hps):
33
- norm_text, phone, tone, word2ph = clean_text(text, language_str)
34
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
35
-
36
- if hps.data.add_blank:
37
- phone = commons.intersperse(phone, 0)
38
- tone = commons.intersperse(tone, 0)
39
- language = commons.intersperse(language, 0)
40
- for i in range(len(word2ph)):
41
- word2ph[i] = word2ph[i] * 2
42
- word2ph[0] += 1
43
- bert = get_bert(norm_text, word2ph, language_str)
44
- del word2ph
45
-
46
- assert bert.shape[-1] == len(phone)
47
-
48
- phone = torch.LongTensor(phone)
49
- tone = torch.LongTensor(tone)
50
- language = torch.LongTensor(language)
51
-
52
- return bert, phone, tone, language
53
-
54
- def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
55
- global net_g
56
- bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
57
- with torch.no_grad():
58
- x_tst=phones.to(device).unsqueeze(0)
59
- tones=tones.to(device).unsqueeze(0)
60
- lang_ids=lang_ids.to(device).unsqueeze(0)
61
- bert = bert.to(device).unsqueeze(0)
62
- x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
63
- del phones
64
- speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
65
- audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio
66
- , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy()
67
- del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
68
- return audio
69
-
70
- def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale):
71
- with torch.no_grad():
72
- audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker)
73
- return "Success", (hps.data.sampling_rate, audio)
74
-
75
-
76
- if __name__ == "__main__":
77
- parser = argparse.ArgumentParser()
78
- # parser.add_argument("-m", "--model", default="./logs/dxl/G21200.pth", help="path of your model")
79
- parser.add_argument("-mn", "--model_name", default="dxl", help="path of your model")
80
- parser.add_argument("-m", "--model", default="null", help="path of your model")
81
- parser.add_argument("-c", "--config", default="./configs/config.json", help="path of your config file")
82
- parser.add_argument("--share", default=True, help="make link public")
83
- parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log")
84
-
85
- args = parser.parse_args()
86
- if args.debug:
87
- logger.info("Enable DEBUG-LEVEL log")
88
- logging.basicConfig(level=logging.DEBUG)
89
- hps = utils.get_hparams_from_file(args.config)
90
-
91
- device = (
92
- "cuda:0"
93
- if torch.cuda.is_available()
94
- else (
95
- "mps"
96
- if sys.platform == "darwin" and torch.backends.mps.is_available()
97
- else "cpu"
98
- )
99
- )
100
- net_g = SynthesizerTrn(
101
- len(symbols),
102
- hps.data.filter_length // 2 + 1,
103
- hps.train.segment_size // hps.data.hop_length,
104
- n_speakers=hps.data.n_speakers,
105
- **hps.model).to(device)
106
- _ = net_g.eval()
107
-
108
- model_path = args.model
109
- if not os.path.exists(model_path) or model_path == "null":
110
- model_path = utils.latest_checkpoint_path(os.path.join("./logs/",args.model_name), "G_*.pth")
111
-
112
- _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
113
-
114
- speaker_ids = hps.data.spk2id
115
- speakers = list(speaker_ids.keys())
116
- with gr.Blocks() as app:
117
- with gr.Row():
118
- with gr.Column():
119
- text = gr.TextArea(label="Text", placeholder="Input Text Here",
120
- value="吃葡萄不吐葡萄皮,不吃葡萄倒吐葡萄皮。")
121
- speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker')
122
- sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.1, label='SDP Ratio')
123
- noise_scale = gr.Slider(minimum=0.1, maximum=2, value=0.6, step=0.1, label='Noise Scale')
124
- noise_scale_w = gr.Slider(minimum=0.1, maximum=2, value=0.8, step=0.1, label='Noise Scale W')
125
- length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.1, label='Length Scale')
126
- btn = gr.Button("Generate!", variant="primary")
127
- with gr.Column():
128
- text_output = gr.Textbox(label="Message")
129
- audio_output = gr.Audio(label="Output Audio")
130
-
131
- btn.click(tts_fn,
132
- inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale],
133
- outputs=[text_output, audio_output])
134
-
135
- webbrowser.open("http://127.0.0.1:7860")
136
- app.launch(share=args.share)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/queue.py DELETED
@@ -1,22 +0,0 @@
1
- import collections
2
-
3
- from ..packages import six
4
- from ..packages.six.moves import queue
5
-
6
- if six.PY2:
7
- # Queue is imported for side effects on MS Windows. See issue #229.
8
- import Queue as _unused_module_Queue # noqa: F401
9
-
10
-
11
- class LifoQueue(queue.Queue):
12
- def _init(self, _):
13
- self.queue = collections.deque()
14
-
15
- def _qsize(self, len=len):
16
- return len(self.queue)
17
-
18
- def _put(self, item):
19
- self.queue.append(item)
20
-
21
- def _get(self):
22
- return self.queue.pop()
 
spaces/BigSalmon/MaskSeveralAtOnce/app.py DELETED
@@ -1,52 +0,0 @@
1
- import torch
2
- import streamlit as st
3
- import numpy as np
4
- import pandas as pd
5
- import os
6
- import torch
7
- import torch.nn as nn
8
- from transformers.activations import get_activation
9
- from transformers import AutoTokenizer
10
-
11
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
-
13
- from transformers import AutoTokenizer, AutoModelForMaskedLM
14
- tokenizer = AutoTokenizer.from_pretrained("roberta-large")
15
- model = AutoModelForMaskedLM.from_pretrained("BigSalmon/FormalRobertaLincoln")
16
- #model = AutoModelForMaskedLM.from_pretrained("BigSalmon/MrLincolnBerta")
17
- model2 = AutoModelForMaskedLM.from_pretrained("roberta-base")
18
-
19
-
20
- with st.expander('BigSalmon/FormalRobertaa'):
21
- with st.form(key='my_form'):
22
- prompt = st.text_area(label='Enter Text. Put <mask> where you want the model to fill in the blank. You can use more than one at a time.')
23
- submit_button = st.form_submit_button(label='Submit')
24
-
25
- if submit_button:
26
- a_list = []
27
- token_ids = tokenizer.encode(prompt, return_tensors='pt')
28
- token_ids_tk = tokenizer.tokenize(prompt, return_tensors='pt')
29
- masked_position = (token_ids.squeeze() == tokenizer.mask_token_id).nonzero()
30
- masked_pos = [mask.item() for mask in masked_position ]
31
- with torch.no_grad():
32
- output = model(token_ids)
33
- last_hidden_state = output[0].squeeze()
34
- for mask_index in masked_pos:
35
- mask_hidden_state = last_hidden_state[mask_index]
36
- idx = torch.topk(mask_hidden_state, k=100, dim=0)[1]
37
- words = [tokenizer.decode(i.item()).strip() for i in idx]
38
- st.text_area(label = 'Infill:', value=words)
39
-
40
- with st.expander('roberta-base result'):
41
- token_ids = tokenizer.encode(prompt, return_tensors='pt')
42
- token_ids_tk = tokenizer.tokenize(prompt, return_tensors='pt')
43
- masked_position = (token_ids.squeeze() == tokenizer.mask_token_id).nonzero()
44
- masked_pos = [mask.item() for mask in masked_position ]
45
- with torch.no_grad():
46
- output = model2(token_ids)
47
- last_hidden_state = output[0].squeeze()
48
- for mask_index in masked_pos:
49
- mask_hidden_state = last_hidden_state[mask_index]
50
- idx = torch.topk(mask_hidden_state, k=100, dim=0)[1]
51
- words = [tokenizer.decode(i.item()).strip() for i in idx]
52
- st.text_area(label = 'Infill:', value=words)
 
spaces/Billyosoro/ESRGAN/README.md DELETED
@@ -1,34 +0,0 @@
1
- ---
2
- title: Real ESRGAN
3
- emoji: 🏃
4
- colorFrom: blue
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.1.7
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio` or `streamlit`
28
-
29
- `app_file`: _string_
30
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
31
- Path is relative to the root of the repository.
32
-
33
- `pinned`: _boolean_
34
- Whether the Space stays on top of your list.
 
spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/app.py DELETED
@@ -1,219 +0,0 @@
1
- from io import BytesIO
2
- from typing import Tuple
3
- import wave
4
- import gradio as gr
5
- import numpy as np
6
- from pydub.audio_segment import AudioSegment
7
- import requests
8
- from os.path import exists
9
- from stt import Model
10
-
11
- import torch
12
- from transformers import pipeline
13
- import librosa
14
- import torchaudio
15
- from speechbrain.pretrained import EncoderClassifier
16
-
17
- UI_STRINGS = {
18
- "title": {
19
- "es": "Reconocimiento de Dictado en Chatino, Mixteco, Totonaco y Español",
20
- "en": "Speech recognition in Chatino, Mixtec, Totonac and Spanish",
21
- },
22
- "description": {
23
- "es": "Una demo de identificar frases del español y de tres lenguas indígenas de México, y proveer el texto de cada una",
24
- "en": "A demo of identifying phrases in Spanish and three Mexican indigenous languages, and providing transcripts of each",
25
- },
26
- "article": {
27
- "es": "La identificación de lenguas usa el modelo"
28
- " [lang-id-commonlanguage-ecapa de Speechbrain](https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa)"
29
- " y aquí se supone que si la lengua no es español, debe ser la lengua indígena del contexto."
30
- "\n\n"
31
- "Chatino: Prueba de dictado a texto para el chatino de la sierra (Quiahije) "
32
- " usando [el modelo entrenado por Bülent Özden](https://coqui.ai/chatino/bozden/v1.0.0)"
33
- " con [los datos recopilados por Hilaria Cruz y sus colaboradores](https://gorilla.linguistlist.org/code/ctp/)."
34
- "\n\n"
35
- "Mixteco: Prueba de dictado a texto para el mixteco de Yoloxochitl,"
36
- " usando [el modelo entrenado por Josh Meyer](https://coqui.ai/mixtec/jemeyer/v1.0.0/)"
37
- " con [los datos recopilados por Rey Castillo y sus colaboradores](https://www.openslr.org/89)."
38
- " \n\n"
39
- "Totonaco: Prueba de dictado a texto para el totonaco de la sierra,"
40
- " usando [el modelo entrenado por Bülent Özden](https://coqui.ai/totonac/bozden/v1.0.0)"
41
- " con [los datos recopilados por Osbel López Francisco y sus colaboradores](https://www.openslr.org/107)."
42
- " \n\n"
43
- "Los ejemplos vienen del proyecto [DEMCA](https://demca.mesolex.org/) de Jonathan Amith. "
44
- " Esta demo es basada en la de [Ukraniano](https://huggingface.co/spaces/robinhad/ukrainian-stt).",
45
- "en": "The language identification uses the model"
46
- " [lang-id-commonlanguage-ecapa from Speechbrain](https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa)"
47
- " and here it is assumed that if the language is not Spanish, it must be the indigenous language of the context."
48
- "\n\n"
49
- "Chatino: Test of speech-to-text for Highland Chatino (Quiahije) "
50
- " using [the model trained by Bülent Özden](https://coqui.ai/chatino/bozden/v1.0.0)"
51
- " with [the data compiled by Hilaria Cruz and collaborators](https://gorilla.linguistlist.org/code/ctp/)."
52
- "\n\n"
53
- "Mixtec: Test of speech-to-text for Yoloxochitl Mixtec,"
54
- " using [the model trained by Josh Meyer](https://coqui.ai/mixtec/jemeyer/v1.0.0/)"
55
- " with [the data compiled by Rey Castillo and collaborators](https://www.openslr.org/89)."
56
- "\n\n"
57
- "Totonac: Test of speech-to-text for Highland Totonac,"
58
- " using [the model trained by Bülent Özden](https://coqui.ai/chatino/bozden/v1.0.0)"
59
- " with [the data compiled by Osbel López Francisco and collaborators](https://www.openslr.org/107)."
60
- "\n\n"
61
- "The examples come from the Jonathan Amith's [DEMCA](https://demca.mesolex.org/) project. "
62
- " This demo is based on the one for [Ukrainian](https://huggingface.co/spaces/robinhad/ukrainian-stt).",
63
- },
64
- "languages": {
65
- "mixteco": {
66
- "es": "mixteco",
67
- "en": "Mixtec",
68
- },
69
- "chatino": {
70
- "es": "chatino",
71
- "en": "Chatino",
72
- },
73
- "totonaco": {
74
- "es": "totonaco",
75
- "en": "Totonac",
76
- },
77
- "español": {
78
- "es": "español",
79
- "en": "Spanish",
80
- },
81
- "inglés": {
82
- "es": "inglés",
83
- "en": "English",
84
- }
85
- },
86
- "labels": {
87
- "target": {
88
- "es": "Lengua principal",
89
- "en": "Primary language",
90
- },
91
- "input": {
92
- "es": "Audio",
93
- "en": "Audio",
94
- },
95
- "output": {
96
- "es": "Resultado",
97
- "en": "Result",
98
- }
99
- }
100
- }
101
-
102
-
103
- # initialize language ID model
104
- lang_classifier = EncoderClassifier.from_hparams(
105
- source="speechbrain/lang-id-commonlanguage_ecapa",
106
- savedir="pretrained_models/lang-id-commonlanguage_ecapa"
107
- )
108
-
109
-
110
- # download STT models
111
- model_info = {
112
- "mixteco": ("https://coqui.gateway.scarf.sh/mixtec/jemeyer/v1.0.0/model.tflite", "mixtec.tflite"),
113
- "chatino": ("https://coqui.gateway.scarf.sh/chatino/bozden/v1.0.0/model.tflite", "chatino.tflite"),
114
- "totonaco": ("https://coqui.gateway.scarf.sh/totonac/bozden/v1.0.0/model.tflite", "totonac.tflite"),
115
- "español": ("jonatasgrosman/wav2vec2-large-xlsr-53-spanish", "spanish_xlsr"),
116
- "inglés": ("facebook/wav2vec2-large-robust-ft-swbd-300h", "english_xlsr"),
117
- }
118
-
119
-
120
- def load_hf_model(model_path="facebook/wav2vec2-large-robust-ft-swbd-300h"):
121
- return pipeline("automatic-speech-recognition", model=model_path)
122
-
123
-
124
- def load_coqui_models(language):
125
-
126
- model_path, file_name = model_info.get(language, ("", ""))
127
-
128
- if not exists(file_name):
129
- print(f"Downloading {model_path}")
130
- r = requests.get(model_path, allow_redirects=True)
131
- with open(file_name, 'wb') as file:
132
- file.write(r.content)
133
- else:
134
- print(f"Found {file_name}. Skipping download...")
135
- return Model(file_name)
136
-
137
-
138
- STT_MODELS = {lang: load_hf_model(model_info[lang][0]) for lang in ("español",)}
139
- for lang in ('mixteco', 'chatino', 'totonaco'):
140
- STT_MODELS[lang] = load_coqui_models(lang)
141
-
142
-
143
- def client(audio_data: np.array, sample_rate: int, default_lang: str):
144
- output_audio = _convert_audio(audio_data, sample_rate)
145
- waveform, _ = torchaudio.load(output_audio)
146
- out_prob, score, index, text_lab = lang_classifier.classify_batch(waveform)
147
- text_lab = text_lab[0]
148
-
149
- output_audio.seek(0)
150
- fin = wave.open(output_audio, 'rb')
151
- coqui_audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)
152
-
153
- output_audio.seek(0)
154
- hf_audio, _ = librosa.load(output_audio)
155
-
156
- fin.close()
157
- print(default_lang, text_lab)
158
-
159
- if text_lab == 'Spanish':
160
- text_lab = UI_STRINGS["languages"]['español'][ui_language]
161
-
162
- asr_pipeline = STT_MODELS['español']
163
- result = asr_pipeline(hf_audio, chunk_length_s=5, stride_length_s=1)['text']
164
-
165
- else:
166
- text_lab = UI_STRINGS["languages"][default_lang][ui_language]
167
- ds = STT_MODELS[default_lang]
168
- result = ds.stt(coqui_audio)
169
-
170
- return f"{text_lab}: {result}"
171
-
172
-
173
- def stt(default_lang: str, audio: Tuple[int, np.array]):
174
- sample_rate, audio = audio
175
-
176
- recognized_result = client(audio, sample_rate, default_lang)
177
-
178
- return recognized_result
179
-
180
-
181
- def _convert_audio(audio_data: np.array, sample_rate: int):
182
- source_audio = BytesIO()
183
- source_audio.write(audio_data)
184
- source_audio.seek(0)
185
- output_audio = BytesIO()
186
- wav_file = AudioSegment.from_raw(
187
- source_audio,
188
- channels=1,
189
- sample_width=2,
190
- frame_rate=sample_rate
191
- )
192
- wav_file.set_frame_rate(16000).set_channels(1).export(output_audio, "wav", codec="pcm_s16le")
193
- output_audio.seek(0)
194
- return output_audio
195
-
196
- ui_language = 'es'
197
-
198
- iface = gr.Interface(
199
- fn=stt,
200
- inputs=[
201
- gr.inputs.Radio(choices=("chatino", "mixteco", "totonaco"), default="mixteco", label=UI_STRINGS["labels"]["target"][ui_language]),
202
- gr.inputs.Audio(type="numpy", label=UI_STRINGS["labels"]["input"][ui_language], source="microphone", optional=False),
203
- ],
204
- outputs=[
205
- gr.outputs.Textbox(label=UI_STRINGS["labels"]["output"][ui_language]),
206
- ],
207
- title=UI_STRINGS["title"][ui_language],
208
- theme="huggingface",
209
- description=UI_STRINGS["description"][ui_language],
210
- examples=[["mixteco", "ejemplos/espanol1-Yolox_BotFl_CTB501-FEF537-EGS503_40202-Acanthaceae-Ruellia_2017-01-05-h.wav"],
211
- ["mixteco", "ejemplos/espanol2-Yolox_BotFl_CTB501-FEF537-EGS503_40202-Acanthaceae-Ruellia_2017-01-05-h.wav"],
212
- ["mixteco", "ejemplos/mixteco1-Yolox_BotFl_CTB501-FEF537-EGS503_40202-Acanthaceae-Ruellia_2017-01-05-h.wav"],
213
- ["mixteco", "ejemplos/mixteco2-Yolox_BotFl_CTB501-FEF537-EGS503_40202-Acanthaceae-Ruellia_2017-01-05-h.wav"],
214
- ["totonaco", "ejemplos/totonaco1-Zongo_Botan_Acanthaceae-Justicia-spicigera_SLC388-IPN389_2018-07-26-i.wav"],
215
- ["totonaco", "ejemplos/totonaco2-Zongo_Botan_Acanthaceae-Justicia-spicigera_SLC388-IPN389_2018-07-26-i.wav"]],
216
- article=UI_STRINGS["article"][ui_language],
217
- )
218
-
219
- iface.launch()
 
spaces/CVPR/LIVE/thrust/dependencies/cub/cub/cmake/cub-config.cmake DELETED
@@ -1,62 +0,0 @@
1
- #
2
- # find_package(CUB) config file.
3
- #
4
- # Defines a CUB::CUB target that may be linked from user projects to include
5
- # CUB.
6
-
7
- if (TARGET CUB::CUB)
8
- return()
9
- endif()
10
-
11
- function(_cub_declare_interface_alias alias_name ugly_name)
12
- # 1) Only IMPORTED and ALIAS targets can be placed in a namespace.
13
- # 2) When an IMPORTED library is linked to another target, its include
14
- # directories are treated as SYSTEM includes.
15
- # 3) nvcc will automatically check the CUDA Toolkit include path *before* the
16
- # system includes. This means that the Toolkit CUB will *always* be used
17
- # during compilation, and the include paths of an IMPORTED CUB::CUB
18
- # target will never have any effect.
19
- # 4) This behavior can be fixed by setting the property NO_SYSTEM_FROM_IMPORTED
20
- # on EVERY target that links to CUB::CUB. This would be a burden and a
21
- # footgun for our users. Forgetting this would silently pull in the wrong CUB!
22
- # 5) A workaround is to make a non-IMPORTED library outside of the namespace,
23
- # configure it, and then ALIAS it into the namespace (or ALIAS and then
24
- # configure, that seems to work too).
25
- add_library(${ugly_name} INTERFACE)
26
- add_library(${alias_name} ALIAS ${ugly_name})
27
- endfunction()
28
-
29
- #
30
- # Setup targets
31
- #
32
-
33
- _cub_declare_interface_alias(CUB::CUB _CUB_CUB)
34
- # Strip out the 'cub/cmake/' from 'cub/cmake/cub-config.cmake':
35
- get_filename_component(_CUB_INCLUDE_DIR "../.." ABSOLUTE BASE_DIR "${CMAKE_CURRENT_LIST_DIR}")
36
- target_include_directories(_CUB_CUB INTERFACE "${_CUB_INCLUDE_DIR}")
37
-
38
- if (CUB_IGNORE_DEPRECATED_CPP_DIALECT OR
39
- THRUST_IGNORE_DEPRECATED_CPP_DIALECT)
40
- target_compile_definitions(_CUB_CUB INTERFACE "CUB_IGNORE_DEPRECATED_CPP_DIALECT")
41
- endif()
42
-
43
- if (CUB_IGNORE_DEPRECATED_CPP_11 OR
44
- THRUST_IGNORE_DEPRECATED_CPP_11)
45
- target_compile_definitions(_CUB_CUB INTERFACE "CUB_IGNORE_DEPRECATED_CPP_11")
46
- endif()
47
-
48
- if (CUB_IGNORE_DEPRECATED_COMPILER OR
49
- THRUST_IGNORE_DEPRECATED_COMPILER)
50
- target_compile_definitions(_CUB_CUB INTERFACE "CUB_IGNORE_DEPRECATED_COMPILER")
51
- endif()
52
-
53
- #
54
- # Standardize version info
55
- #
56
-
57
- set(CUB_VERSION ${${CMAKE_FIND_PACKAGE_NAME}_VERSION} CACHE INTERNAL "")
58
- set(CUB_VERSION_MAJOR ${${CMAKE_FIND_PACKAGE_NAME}_VERSION_MAJOR} CACHE INTERNAL "")
59
- set(CUB_VERSION_MINOR ${${CMAKE_FIND_PACKAGE_NAME}_VERSION_MINOR} CACHE INTERNAL "")
60
- set(CUB_VERSION_PATCH ${${CMAKE_FIND_PACKAGE_NAME}_VERSION_PATCH} CACHE INTERNAL "")
61
- set(CUB_VERSION_TWEAK ${${CMAKE_FIND_PACKAGE_NAME}_VERSION_TWEAK} CACHE INTERNAL "")
62
- set(CUB_VERSION_COUNT ${${CMAKE_FIND_PACKAGE_NAME}_VERSION_COUNT} CACHE INTERNAL "")
 
spaces/CVPR/LIVE/thrust/thrust/iterator/iterator_adaptor.h DELETED
@@ -1,240 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file thrust/iterator/iterator_adaptor.h
19
- * \brief An iterator which adapts a base iterator
20
- */
21
-
22
- /*
23
- * (C) Copyright David Abrahams 2002.
24
- * (C) Copyright Jeremy Siek 2002.
25
- * (C) Copyright Thomas Witt 2002.
26
- *
27
- * Distributed under the Boost Software License, Version 1.0.
28
- * (See accompanying NOTICE file for the complete license)
29
- *
30
- * For more information, see http://www.boost.org
31
- */
32
-
33
- #pragma once
34
-
35
- #include <thrust/detail/config.h>
36
- #include <thrust/iterator/iterator_facade.h>
37
- #include <thrust/detail/use_default.h>
38
- #include <thrust/iterator/detail/iterator_adaptor_base.h>
39
-
40
- namespace thrust
41
- {
42
-
43
- /*! \addtogroup iterators
44
- * \{
45
- */
46
-
47
- /*! \addtogroup fancyiterator Fancy Iterators
48
- * \ingroup iterators
49
- * \{
50
- */
51
-
52
- /*! \p iterator_adaptor is an iterator which adapts an existing type of iterator to create a new type of
53
- * iterator. Most of Thrust's fancy iterators are defined via inheritance from \p iterator_adaptor.
54
- * While composition of these existing Thrust iterators is often sufficient for expressing the desired
55
- * functionality, it is occasionally more straightforward to derive from \p iterator_adaptor directly.
56
- *
57
- * To see how to use \p iterator_adaptor to create a novel iterator type, let's examine how to use it to
58
- * define \p repeat_iterator, a fancy iterator which repeats elements from another range a given number of times:
59
- *
60
- * \code
61
- * #include <thrust/iterator/iterator_adaptor.h>
62
- *
63
- * // derive repeat_iterator from iterator_adaptor
64
- * template<typename Iterator>
65
- * class repeat_iterator
66
- * : public thrust::iterator_adaptor<
67
- * repeat_iterator<Iterator>, // the first template parameter is the name of the iterator we're creating
68
- * Iterator // the second template parameter is the name of the iterator we're adapting
69
- * // we can use the default for the additional template parameters
70
- * >
71
- * {
72
- * public:
73
- * // shorthand for the name of the iterator_adaptor we're deriving from
74
- * typedef thrust::iterator_adaptor<
75
- * repeat_iterator<Iterator>,
76
- * Iterator
77
- * > super_t;
78
- *
79
- * __host__ __device__
80
- * repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {}
81
- *
82
- * // befriend thrust::iterator_core_access to allow it access to the private interface below
83
- * friend class thrust::iterator_core_access;
84
- *
85
- * private:
86
- * // repeat each element of the adapted range n times
87
- * unsigned int n;
88
- *
89
- * // used to keep track of where we began
90
- * const Iterator begin;
91
- *
92
- * // it is private because only thrust::iterator_core_access needs access to it
93
- * __host__ __device__
94
- * typename super_t::reference dereference() const
95
- * {
96
- * return *(begin + (this->base() - begin) / n);
97
- * }
98
- * };
99
- * \endcode
100
- *
101
- * Except for the first two, \p iterator_adaptor's template parameters are optional. When omitted, or when the
102
- * user specifies \p thrust::use_default in its place, \p iterator_adaptor will use a default type inferred from \p Base.
103
- *
104
- * \p iterator_adaptor's functionality is derived from and generally equivalent to \p boost::iterator_adaptor.
105
- * The exception is Thrust's addition of the template parameter \p System, which is necessary to allow Thrust
106
- * to dispatch an algorithm to one of several parallel backend systems.
107
- *
108
- * \p iterator_adaptor is a powerful tool for creating custom iterators directly. However, the large set of iterator semantics which must be satisfied
109
- * for algorithm compatibility can make \p iterator_adaptor difficult to use correctly. Unless you require the full expressivity of \p iterator_adaptor,
110
- * consider building a custom iterator through composition of existing higher-level fancy iterators instead.
111
- *
112
- * Interested users may refer to <tt>boost::iterator_adaptor</tt>'s documentation for further usage examples.
113
- */
114
- template<typename Derived,
115
- typename Base,
116
- typename Value = use_default,
117
- typename System = use_default,
118
- typename Traversal = use_default,
119
- typename Reference = use_default,
120
- typename Difference = use_default>
121
- class iterator_adaptor:
122
- public detail::iterator_adaptor_base<
123
- Derived, Base, Value, System, Traversal, Reference, Difference
124
- >::type
125
- {
126
- /*! \cond
127
- */
128
-
129
- friend class thrust::iterator_core_access;
130
-
131
- protected:
132
- typedef typename detail::iterator_adaptor_base<
133
- Derived, Base, Value, System, Traversal, Reference, Difference
134
- >::type super_t;
135
-
136
- /*! \endcond
137
- */
138
-
139
- public:
140
- /*! \p iterator_adaptor's default constructor does nothing.
141
- */
142
- __host__ __device__
143
- iterator_adaptor(){}
144
-
145
- /*! This constructor copies from a given instance of the \p Base iterator.
146
- */
147
- __thrust_exec_check_disable__
148
- __host__ __device__
149
- explicit iterator_adaptor(Base const& iter)
150
- : m_iterator(iter)
151
- {}
152
-
153
- /*! The type of iterator this \p iterator_adaptor's \p adapts.
154
- */
155
- typedef Base base_type;
156
-
157
- /*! \cond
158
- */
159
- typedef typename super_t::reference reference;
160
-
161
- typedef typename super_t::difference_type difference_type;
162
- /*! \endcond
163
- */
164
-
165
- /*! \return A \p const reference to the \p Base iterator this \p iterator_adaptor adapts.
166
- */
167
- __host__ __device__
168
- Base const& base() const
169
- { return m_iterator; }
170
-
171
- protected:
172
- /*! \return A \p const reference to the \p Base iterator this \p iterator_adaptor adapts.
173
- */
174
- __host__ __device__
175
- Base const& base_reference() const
176
- { return m_iterator; }
177
-
178
- /*! \return A mutable reference to the \p Base iterator this \p iterator_adaptor adapts.
179
- */
180
- __host__ __device__
181
- Base& base_reference()
182
- { return m_iterator; }
183
-
184
- /*! \cond
185
- */
186
- private: // Core iterator interface for iterator_facade
187
-
188
- __thrust_exec_check_disable__
189
- __host__ __device__
190
- typename iterator_adaptor::reference dereference() const
191
- { return *m_iterator; }
192
-
193
- __thrust_exec_check_disable__
194
- template<typename OtherDerived, typename OtherIterator, typename V, typename S, typename T, typename R, typename D>
195
- __host__ __device__
196
- bool equal(iterator_adaptor<OtherDerived, OtherIterator, V, S, T, R, D> const& x) const
197
- { return m_iterator == x.base(); }
198
-
199
- __thrust_exec_check_disable__
200
- __host__ __device__
201
- void advance(typename iterator_adaptor::difference_type n)
202
- {
203
- // XXX statically assert on random_access_traversal_tag
204
- m_iterator += n;
205
- }
206
-
207
- __thrust_exec_check_disable__
208
- __host__ __device__
209
- void increment()
210
- { ++m_iterator; }
211
-
212
- __thrust_exec_check_disable__
213
- __host__ __device__
214
- void decrement()
215
- {
216
- // XXX statically assert on bidirectional_traversal_tag
217
- --m_iterator;
218
- }
219
-
220
- __thrust_exec_check_disable__
221
- template<typename OtherDerived, typename OtherIterator, typename V, typename S, typename T, typename R, typename D>
222
- __host__ __device__
223
- typename iterator_adaptor::difference_type distance_to(iterator_adaptor<OtherDerived, OtherIterator, V, S, T, R, D> const& y) const
224
- { return y.base() - m_iterator; }
225
-
226
- private:
227
- Base m_iterator;
228
-
229
- /*! \endcond
230
- */
231
- }; // end iterator_adaptor
232
-
233
- /*! \} // end fancyiterators
234
- */
235
-
236
- /*! \} // end iterators
237
- */
238
-
239
- } // end thrust
240
-
 
spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/README.md DELETED
@@ -1,61 +0,0 @@
1
- ![Alt Text](http://spoter.signlanguagerecognition.com/img/GitHub_banner.png)
2
-
3
- > by **[Matyáš Boháček](https://github.com/matyasbohacek)** and **[Marek Hrúz](https://github.com/mhruz)**, University of West Bohemia <br>
4
- > Should you have any questions or inquiries, feel free to contact us [here](mailto:[email protected]).
5
-
6
- [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/sign-pose-based-transformer-for-word-level/sign-language-recognition-on-lsa64)](https://paperswithcode.com/sota/sign-language-recognition-on-lsa64?p=sign-pose-based-transformer-for-word-level)
7
-
8
- Repository accompanying the [Sign Pose-based Transformer for Word-level Sign Language Recognition](https://openaccess.thecvf.com/content/WACV2022W/HADCV/html/Bohacek_Sign_Pose-Based_Transformer_for_Word-Level_Sign_Language_Recognition_WACVW_2022_paper.html) paper, where we present a novel architecture for word-level sign language recognition based on the Transformer model. We designed our solution with low computational cost in mind, since we see great potential in using such a recognition system on hand-held devices. We introduce multiple original augmentation techniques tailored for the task of sign language recognition and propose a unique normalization scheme based on sign language linguistics.
9
-
10
- ![Alt Text](http://spoter.signlanguagerecognition.com/img/architecture_github.gif)
11
-
12
- ## Get Started
13
-
14
- First, make sure to install all necessary dependencies using:
15
-
16
- ```shell
17
- pip install -r requirements.txt
18
- ```
19
-
20
- To train the model, simply specify the hyperparameters and run the following:
21
-
22
- ```
23
- python -m train
24
- --experiment_name [str; name of the experiment to name the output logs and plots]
25
-
26
- --epochs [int; number of epochs]
27
- --lr [float; learning rate]
28
-
29
- --training_set_path [str; path to the csv file with training set's skeletal data]
30
- --validation_set_path [str; path to the csv file with validation set's skeletal data]
31
- --testing_set_path [str; path to the csv file with testing set's skeletal data]
32
- ```
33
-
34
- If either the validation or testing set's path is left empty, the corresponding metrics will not be calculated. For datasets without author-specified splits, we also provide an out-of-the-box parameter that splits off a validation set as a desired fraction of the training set while preserving the label distribution. These and many other specific hyperparameters, along with their descriptions, can be found in the [train.py](https://github.com/matyasbohacek/spoter/blob/main/train.py) file. All of them have default values that we found to work well in our experiments.
35
-
36
- ## Data
37
-
38
- As SPOTER works on top of sequences of signers' skeletal data extracted from videos, we wanted to eliminate the computational cost of this annotation on every training run by collecting it ahead of time. For this reason, and for reproducibility, we are open-sourcing this data for the WLASL100 and LSA64 datasets along with the repository. You can find the data [here](https://github.com/matyasbohacek/spoter/releases/tag/supplementary-data).
39
-
40
- ![Alt Text](http://spoter.signlanguagerecognition.com/img/datasets_overview.gif)
41
-
42
- ## License
43
-
44
- The **code** is published under the [Apache License 2.0](https://github.com/matyasbohacek/spoter/blob/main/LICENSE), which allows for both academic and commercial use provided the relevant license and copyright notice is included, our work is cited, and all changes are stated.
45
-
46
- The accompanying skeletal data of the [WLASL](https://arxiv.org/pdf/1910.11006.pdf) and [LSA64](https://core.ac.uk/download/pdf/76495887.pdf) datasets used for experiments are, however, shared under the [Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/) license allowing only for non-commercial usage.
47
-
48
- ## Citation
49
-
50
- If you find our work relevant, build upon it, or compare your approaches with it, please cite our work as stated below:
51
-
52
- ```
53
- @InProceedings{Bohacek_2022_WACV,
54
- author = {Boh\'a\v{c}ek, Maty\'a\v{s} and Hr\'uz, Marek},
55
- title = {Sign Pose-Based Transformer for Word-Level Sign Language Recognition},
56
- booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) Workshops},
57
- month = {January},
58
- year = {2022},
59
- pages = {182-191}
60
- }
61
- ```
 
spaces/CVPR/WALT/mmdet/models/necks/hrfpn.py DELETED
@@ -1,102 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- from mmcv.cnn import ConvModule, caffe2_xavier_init
5
- from torch.utils.checkpoint import checkpoint
6
-
7
- from ..builder import NECKS
8
-
9
-
10
- @NECKS.register_module()
11
- class HRFPN(nn.Module):
12
- """HRFPN (High Resolution Feature Pyramids)
13
-
14
- paper: `High-Resolution Representations for Labeling Pixels and Regions
15
- <https://arxiv.org/abs/1904.04514>`_.
16
-
17
- Args:
18
- in_channels (list): number of channels for each branch.
19
- out_channels (int): output channels of feature pyramids.
20
- num_outs (int): number of output stages.
21
- pooling_type (str): pooling for generating feature pyramids
22
- from {MAX, AVG}.
23
- conv_cfg (dict): dictionary to construct and config conv layer.
24
- norm_cfg (dict): dictionary to construct and config norm layer.
25
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
26
- memory while slowing down the training speed.
27
- stride (int): stride of 3x3 convolutional layers
28
- """
29
-
30
- def __init__(self,
31
- in_channels,
32
- out_channels,
33
- num_outs=5,
34
- pooling_type='AVG',
35
- conv_cfg=None,
36
- norm_cfg=None,
37
- with_cp=False,
38
- stride=1):
39
- super(HRFPN, self).__init__()
40
- assert isinstance(in_channels, list)
41
- self.in_channels = in_channels
42
- self.out_channels = out_channels
43
- self.num_ins = len(in_channels)
44
- self.num_outs = num_outs
45
- self.with_cp = with_cp
46
- self.conv_cfg = conv_cfg
47
- self.norm_cfg = norm_cfg
48
-
49
- self.reduction_conv = ConvModule(
50
- sum(in_channels),
51
- out_channels,
52
- kernel_size=1,
53
- conv_cfg=self.conv_cfg,
54
- act_cfg=None)
55
-
56
- self.fpn_convs = nn.ModuleList()
57
- for i in range(self.num_outs):
58
- self.fpn_convs.append(
59
- ConvModule(
60
- out_channels,
61
- out_channels,
62
- kernel_size=3,
63
- padding=1,
64
- stride=stride,
65
- conv_cfg=self.conv_cfg,
66
- act_cfg=None))
67
-
68
- if pooling_type == 'MAX':
69
- self.pooling = F.max_pool2d
70
- else:
71
- self.pooling = F.avg_pool2d
72
-
73
- def init_weights(self):
74
- """Initialize the weights of module."""
75
- for m in self.modules():
76
- if isinstance(m, nn.Conv2d):
77
- caffe2_xavier_init(m)
78
-
79
- def forward(self, inputs):
80
- """Forward function."""
81
- assert len(inputs) == self.num_ins
82
- outs = [inputs[0]]
83
- for i in range(1, self.num_ins):
84
- outs.append(
85
- F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
86
- out = torch.cat(outs, dim=1)
87
- if out.requires_grad and self.with_cp:
88
- out = checkpoint(self.reduction_conv, out)
89
- else:
90
- out = self.reduction_conv(out)
91
- outs = [out]
92
- for i in range(1, self.num_outs):
93
- outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
94
- outputs = []
95
-
96
- for i in range(self.num_outs):
97
- if outs[i].requires_grad and self.with_cp:
98
- tmp_out = checkpoint(self.fpn_convs[i], outs[i])
99
- else:
100
- tmp_out = self.fpn_convs[i](outs[i])
101
- outputs.append(tmp_out)
102
- return tuple(outputs)