parquet-converter committed
Commit d3d5d2b · 1 parent: 1556ac9

Update parquet files (step 27 of 296)

This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep Freeze 760020 License Key The Benefits of Using the Software for System Protection.md +0 -102
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film Troy in altamurano 89 perch diventato un cult tra gli appassionati di cinema e di storia.md +0 -156
  3. spaces/1gistliPinn/ChatGPT4/Examples/Batman Arkham Origins Crack [UPD] 64 Bit.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Epson Px660 Adjustment Program Free Download308.md +0 -60
  5. spaces/1phancelerku/anime-remove-background/Download !!LINK!!-aplikasi-togel-prediksi.html M 1.md +0 -119
  6. spaces/1phancelerku/anime-remove-background/Download Clash Royale Goku Mod Apk 3.3186.7 for Android - Free and Fast.md +0 -115
  7. spaces/1phancelerku/anime-remove-background/Download Ultraman Galaxy APK and Enjoy the Best Slot-Puzzle RPG with Your Favorite Ultraman Characters.md +0 -50
  8. spaces/4Taps/SadTalker/inference.py +0 -134
  9. spaces/4th3n4/TraDeX/app-cuda.py +0 -946
  10. spaces/AI-Zero-to-Hero/04-GR-Seq-2-Seq-QA-Auto-Gen/qasrl_model_pipeline.py +0 -183
  11. spaces/AIConsultant/MusicGen/audiocraft/utils/profiler.py +0 -38
  12. spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/vocoder/vocoder_base.py +0 -137
  13. spaces/AIWaves/SOP_Generation-single/template.py +0 -111
  14. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/phi/m.d.ts +0 -65
  15. spaces/Adapter/T2I-Adapter/ldm/modules/distributions/__init__.py +0 -0
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dialog/methods/Methods.js +0 -12
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Factory.d.ts +0 -5
  18. spaces/Ajaymaurya1008/meme-identifier/app.py +0 -34
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_mega.py +0 -227
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_1d.py +0 -255
  21. spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py +0 -5
  22. spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/config.py +0 -38
  23. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/dnl_r50-d8.py +0 -46
  24. spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/README.md +0 -39
  25. spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v2/README.md +0 -35
  26. spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/matching.py +0 -83
  27. spaces/Aoron/Test02/README.md +0 -10
  28. spaces/Arnx/MusicGenXvAKN/audiocraft/modules/seanet.py +0 -258
  29. spaces/Artples/google-flan-t5-xl/app.py +0 -3
  30. spaces/Audio-AGI/WavJourney/ui_client.py +0 -632
  31. spaces/Benson/text-generation/Examples/12 Marksheet Descargar Online Mp Board.md +0 -72
  32. spaces/Benson/text-generation/Examples/Colina Subida Carreras Apk Descargar.md +0 -56
  33. spaces/Benson/text-generation/Examples/Descargar E Instalar Autocad 2020.md +0 -65
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/__init__.py +0 -115
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/connection.py +0 -149
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/connection.py +0 -149
  37. spaces/CALM/Dashboard/streamlit_observable/__init__.py +0 -71
  38. spaces/CVPR/LIVE/thrust/thrust/detail/contiguous_storage.h +0 -236
  39. spaces/CVPR/WALT/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py +0 -237
  40. spaces/CVPR/WALT/mmdet/models/detectors/point_rend.py +0 -29
  41. spaces/CVPR/regionclip-demo/detectron2/utils/serialize.py +0 -29
  42. spaces/ChevyWithAI/rvc-aicover/vc_infer_pipeline.py +0 -306
  43. spaces/Chomkwoy/Nilkessye/utils/hanja.py +0 -65
  44. spaces/CjangCjengh/Shanghainese-TTS/README.md +0 -12
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dataframe.py +0 -304
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_async/http11.py +0 -331
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/trio.py +0 -161
  48. spaces/DaFujaTyping/hf-Chat-ui/src/lib/server/models.ts +0 -60
  49. spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/fid.py +0 -107
  50. spaces/Dorado607/ChuanhuChatGPT/modules/overwrites.py +0 -92
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep Freeze 760020 License Key The Benefits of Using the Software for System Protection.md DELETED
@@ -1,102 +0,0 @@
1
-
2
- <h1>What is Deep Freeze and why do you need it?</h1>
3
- <p>Have you ever wished that you could undo any changes made to your computer by viruses, malware, or human errors? Have you ever wanted to keep your computer in a pristine state without worrying about system crashes, data loss, or performance issues? If so, then you need Deep Freeze.</p>
4
- <h2>Deep Freeze 760020 License Key</h2><br /><p><b><b>DOWNLOAD</b> &#9881;&#9881;&#9881; <a href="https://byltly.com/2uKA6e">https://byltly.com/2uKA6e</a></b></p><br /><br />
5
- <p>Deep Freeze is a powerful software that protects your computer by freezing its configuration and data. It creates a virtual snapshot of your system that can be restored at any time with a simple reboot. This means that any changes made to your computer, whether intentional or accidental, will be erased when you restart your computer.</p>
6
- <p>With Deep Freeze, you can enjoy a consistent and secure computing experience. You can surf the web, download files, install programs, or modify settings without fear of damaging your system. You can also test new software or updates without risking any compatibility or stability problems. And you can easily revert back to your original state if anything goes wrong.</p>
7
- <p>Deep Freeze is ideal for home users, students, teachers, IT professionals, and businesses. It can be used on desktops, laptops, servers, or kiosks. It supports multiple hard drives, partitions, operating systems, and file systems. It also offers password protection, deployment options, scheduling features, stealth mode, command line control, and more.</p>
8
- <p>How to activate Deep Freeze 760020 with license key<br />
9
- Deep Freeze 760020 license key generator download<br />
10
- Deep Freeze 760020 license key crack free<br />
11
- Deep Freeze 760020 license key purchase online<br />
12
- Deep Freeze 760020 license key expired fix<br />
13
- Deep Freeze 760020 license key renewal price<br />
14
- Deep Freeze 760020 license key not working error<br />
15
- Deep Freeze 760020 license key recovery tool<br />
16
- Deep Freeze 760020 license key finder software<br />
17
- Deep Freeze 760020 license key backup method<br />
18
- Deep Freeze 760020 license key transfer guide<br />
19
- Deep Freeze 760020 license key upgrade offer<br />
20
- Deep Freeze 760020 license key compatibility issue<br />
21
- Deep Freeze 760020 license key customer support<br />
22
- Deep Freeze 760020 license key refund policy<br />
23
- Deep Freeze 760020 license key benefits and features<br />
24
- Deep Freeze 760020 license key alternatives and competitors<br />
25
- Deep Freeze 760020 license key reviews and testimonials<br />
26
- Deep Freeze 760020 license key discount coupon code<br />
27
- Deep Freeze 760020 license key installation instructions<br />
28
- Deep Freeze 760020 license key activation steps<br />
29
- Deep Freeze 760020 license key deactivation process<br />
30
- Deep Freeze 760020 license key troubleshooting tips<br />
31
- Deep Freeze 760020 license key best practices and tips<br />
32
- Deep Freeze 760020 license key comparison chart<br />
33
- Deep Freeze 760020 license key pros and cons<br />
34
- Deep Freeze 760020 license key FAQs and answers<br />
35
- Deep Freeze 760020 license key system requirements<br />
36
- Deep Freeze 760020 license key product description<br />
37
- Deep Freeze 760020 license key use cases and scenarios<br />
38
- Deep Freeze 760020 license key demo video and tutorial<br />
39
- Deep Freeze 760020 license key user manual and documentation<br />
40
- Deep Freeze 760020 license key forum and community<br />
41
- Deep Freeze 760020 license key blog and news<br />
42
- Deep Freeze 760020 license key webinar and training<br />
43
- Deep Freeze 760020 license key case study and success story<br />
44
- Deep Freeze 760020 license key free trial and sample<br />
45
- Deep Freeze 760020 license key lifetime access and deal<br />
46
- Deep Freeze 760020 license key warranty and guarantee<br />
47
- Deep Freeze 760020 license key privacy policy and terms of service<br />
48
- How to uninstall Deep Freeze 760020 with license key<br />
49
- How to update Deep Freeze 760020 with license key<br />
50
- How to reset password for Deep Freeze 760020 with license key<br />
51
- How to change settings for Deep Freeze 760020 with license key <br />
52
- How to protect data with Deep Freeze 760020 with license key <br />
53
- How to manage devices with Deep Freeze 760020 with license key <br />
54
- How to monitor performance with Deep Freeze 760020 with license key <br />
55
- How to optimize speed with Deep Freeze 760020 with license key <br />
56
- How to troubleshoot issues with Deep Freeze 760020 with license key</p>
57
- <p>In this article, we will show you how to get Deep Freeze 760020 License Key for free, how to buy a genuine license key from the official website, how to install and activate Deep Freeze with your license key, how to use Deep Freeze to protect your computer, and how to troubleshoot common issues with Deep Freeze.</p>
58
- <h2>How to get Deep Freeze 760020 License Key for free?</h2>
59
- <p>Deep Freeze is a premium software that requires a valid license key to activate its full functionality. A license key is a unique code that verifies that you have purchased the software legally from the official website. Without a license key, you can only use Deep Freeze in trial mode for 30 days.</p>
60
- <p>Some people may try to get Deep Freeze 760020 License Key for free by searching for cracked or pirated versions online. However, this is not recommended for several reasons:</p>
61
- <ul>
62
- <li>It is illegal and unethical to use software without paying for it.</li>
63
- <li>It is risky and unsafe to download software from untrusted sources. You may end up infecting your computer with viruses, malware, spyware, or ransomware.</li>
64
- <li>It is unreliable and ineffective to use software that has been tampered with. You may encounter errors, bugs, glitches, or compatibility issues that may damage your system or compromise your data.</li>
65
- <li>It is unsupported and unsatisfactory to use software that has no customer service or updates. You may not be able to get help if you have any problems or questions. You may also miss out on new features or improvements that are available in the latest versions.</li>
66
- </ul>
67
- <p>Therefore, we strongly advise you not to use any cracked or pirated license keys for Deep Freeze. Instead, we suggest you buy a genuine license key from the official website.</p>
68
- <h3>The benefits of buying a genuine Deep Freeze license key</h3>
69
- <p>By buying a genuine license key from the official website, you can enjoy many benefits that are not available in the free trial mode or in the cracked versions. Some of these benefits are:</p>
70
- <ul>
71
- <li>You can activate Deep Freeze on multiple computers depending on the type of license you choose.</li>
72
- <li>You can access all the features and functions of Deep Freeze without any limitations or restrictions.</li>
73
- <li>You can get free updates and upgrades for the lifetime of your license.</li>
74
- <li>You can get technical support and customer service from the developers of Deep Freeze.</li>
75
- <li>You can get discounts and offers on other products from Faronics, the company behind Deep Freeze.</li>
76
- <li>You can get peace of mind knowing that you are using a legal and legitimate software that protects your computer and data.</li>
77
- </ul>
78
- <h4>How to buy Deep Freeze license key from the official website?</h4>
79
- <p>Buying a genuine license key from the official website is easy and fast. Here are the steps to follow:</p>
80
- <ol>
81
- <li>Go to <a href="https://www.faronics.com/products/deep-freeze/standard">https://www.faronics.com/products/deep-freeze/standard</a>, which is the official website of Deep Freeze Standard Edition.</li>
82
- <li>Click on the "Buy Now" button on the top right corner of the page.</li>
83
- <li>Select the number of licenses you want to buy. You can choose between 1-9 licenses or 10+ licenses depending on how many computers you want to activate Deep Freeze on.</li>
84
- <li>Select the currency you want to pay in. You can choose between USD (US Dollars), CAD (Canadian Dollars), EUR (Euros), GBP (British Pounds), AUD (Australian Dollars), NZD (New Zealand Dollars), SGD (Singapore Dollars), JPY (Japanese Yen), HKD (Hong Kong Dollars), INR (Indian Rupees), ZAR (South African Rand), BRL (Brazilian Real), MXN (Mexican Peso), ARS (Argentine Peso), CLP (Chilean Peso), COP (Colombian Peso), PEN (Peruvian Sol), RUB (Russian Ruble), TRY (Turkish Lira), AED (United Arab Emirates Dirham), SAR (Saudi Riyal), KWD (Kuwaiti Dinar), QAR (Qatari Riyal), OMR (Omani Rial), BHD (Bahraini Dinar).</li>
85
- <li>Click on the "Checkout" button at the bottom of the page.</li>
86
- <li>Fill in your personal information such as name, email address, phone number, country, state/province/region, city/town/suburb/postal code/zip code.</li>
87
- <li>Select your payment method. You can choose between credit card (Visa/MasterCard/American Express/Discover/JCB/Diners Club) or PayPal.</li>
88
- <li>Enter your payment details such as card number/name/expiry date/CVV code or PayPal account/email address/password.</li>
89
- <li>Review your order summary and confirm that everything is correct.</li>
90
- <li>Click on the "Place Order" button at the bottom of the page.</li>
91
- <li>You will receive an email confirmation with your order details and your license key within minutes.</li>
92
- </ol>
93
- <h2>How to install and activate Deep Freeze with your license key?</h2>
94
- <p>Once you have bought your license key from the official website, you can proceed to install and activate Deep Freeze on your computer. Here are the requirements and instructions for doing so:</p>
95
- <h3>The requirements for installing and activating Deep Freeze</h3>
96
- <p>To install and activate Deep Freeze on your computer, you need to meet these requirements:</p>
97
- <ul>
98
- <li>You need a Windows-based computer with an Intel/AMD processor and at least 1 GB of RAM.</li>
99
- <li>You need at least 80 MB of free hard disk space for installing Deep Freeze.</li>
100
- <li</p> 0a6ba089eb<br />
101
- <br />
102
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film Troy in altamurano 89 perch diventato un cult tra gli appassionati di cinema e di storia.md DELETED
@@ -1,156 +0,0 @@
1
-
2
- <h1>Film Troy in Altamurano 89: A Hilarious Parody of an Epic Movie</h1>
3
- <p>Have you ever wondered what would happen if you took a serious historical drama about ancient Greece and dubbed it in a dialect from southern Italy? Well, wonder no more. Film Troy in Altamurano 89 is a parody of Troy (2004), a movie based on Homer's epic poem The Iliad. The parody was made by a group of friends from Altamura, a town in Apulia where people speak Altamurano, a dialect that sounds very different from standard Italian. The result is a hilarious comedy that will make you laugh out loud.</p>
4
- <p>In this article, we will tell you everything you need to know about Film Troy in Altamurano 89. We will explain what the original film is about, what Altamurano is, how the parody was made, what are some of the best scenes and quotes, how it was received by the public and the critics, and why you should watch it. We will also give you some tips on how to find the film online, how to understand the dialect, and how to enhance your viewing experience. So sit back, relax, and get ready to enjoy one of the funniest films ever made.</p>
5
- <h2>film troy in altamurano 89</h2><br /><p><b><b>Download File</b> &#10001; &#10001; &#10001; <a href="https://byltly.com/2uKvSe">https://byltly.com/2uKvSe</a></b></p><br /><br />
6
- <h2>The Original Film: Troy (2004)</h2>
7
- <p>Before we dive into the parody, let's take a look at the original film that inspired it. Troy is a historical drama directed by Wolfgang Petersen and starring Brad Pitt as Achilles, Orlando Bloom as Paris, Eric Bana as Hector, Diane Kruger as Helen, Brian Cox as Agamemnon, Sean Bean as Odysseus, Peter O'Toole as Priam and many other famous actors. The film was released in 2004 and was a huge box office success.</p>
8
- <p>The film tells the story of the Trojan War, a legendary conflict between Greeks and Trojans that lasted for ten years. According to Greek mythology, the war started when Paris, a prince of Troy, fell in love with Helen, the queen of Sparta, and abducted her with the help of Aphrodite, the goddess of love. This enraged Helen's husband, Menelaus, who asked his brother Agamemnon, the king of Mycenae, to lead an army of Greeks to retrieve her. Among the Greek heroes who joined the expedition was Achilles, the greatest warrior of his time, who had a prophecy that he would die in Troy. The film follows the events of The Iliad, the epic poem by Homer, which focuses on the final year of the war, and depicts the battles, the intrigues, the romances, and the tragedies that occurred on both sides.</p>
9
- <p>The film is not only a spectacle of action and adventure, but also a drama of passion, heroism, betrayal, and war. It explores the themes of fate, honor, love, loyalty, pride, and glory. It shows the human side of both Greeks and Trojans, and their struggles to survive and prevail in a cruel world.</p>
10
- <h3>The Altamurano Dialect</h3>
11
- <p>Now that we have seen what Troy is about, let's talk about Altamurano, the dialect that was used to dub it. Altamurano is a dialect spoken by about 70 thousand people in Altamura, a town located in Apulia, a region in southern Italy. Altamura is famous for its bread, its cathedral, its fossils, and its folk traditions.</p>
12
- <p>Altamurano belongs to the Neapolitan language group, which means that it shares some features with other dialects spoken in southern Italy, such as Neapolitan or Sicilian. However, Altamurano also has some distinctive characteristics that make it unique and different from standard Italian. For example:</p>
13
- <ul>
14
- <li>Altamurano has a different pronunciation system than Italian. Some sounds are changed or dropped altogether. For instance: <ul>
15
- <li>The letter C before E or I becomes SH (e.g., ciao becomes shao)</li>
16
- <li>The letter G before E or I becomes J (e.g., gelato becomes jelato)</li>
17
- <li>The letter L at the end of a word becomes U (e.g., bello becomes bellu)</li>
18
- <li>The letter R at the beginning or end of a word becomes L (e.g., rosa becomes losa)</li>
19
- <li>The letter S between two vowels becomes Z (e.g., casa becomes caza)</li>
20
- </ul>
21
- </li>
22
- <li>Altamurano has a different vocabulary than Italian. Some words are borrowed from other languages such as Spanish or French. Some words are derived from Latin or Greek roots. Some words are invented or modified by adding suffixes or prefixes. For example: <ul>
23
- <li>Altamurano uses words like guaglione (boy), sciuscià (shoeshine), or pizzicagnolo (cheesemonger), which come from Spanish</li>
24
- <li>Altamurano uses words like baretto (bar), gavetta (mess tin), or scappellotto (slap), which come from French</li>
25
- <li>Altamurano uses words like cazzimma (cunning), fesso (fool), or sbrego (trouble), which come from Latin or Greek</li>
26
- <li>Altamurano uses words like friccicorelli (fried dough balls), sciamannato (crazy), or tatè (father), which are created or modified by adding suffixes or prefixes</li>
27
- </ul>
28
- </li>
29
- <li>Altamurano has a different grammar than Italian. Some rules are simplified or changed. Some forms are used differently or not used at all. For example: <ul>
30
- <li>Altamurano does not use articles before proper nouns (e.g., la Maria becomes Maria)</li>
31
- positions before nouns (e.g., il cane becomes cane lu)</li>
32
- <li>Altamurano does not use the subjunctive mood (e.g., che sia becomes che è)</li>
33
- <li>Altamurano uses the past participle instead of the past tense (e.g., ho mangiato becomes mangiato)</li>
34
- <li>Altamurano uses the infinitive instead of the gerund (e.g., mangiando becomes mangiare)</li>
35
- </ul>
36
- </li>
37
- </ul>
38
- <p>As you can see, Altamurano is a very rich and complex dialect that reflects the history and culture of its speakers. It is also a very expressive and humorous dialect that can create funny and witty effects by playing with words and sounds.</p>
39
- <h3>The Parody: Troy in Altamurano 89</h3>
40
- <p>Now that we have learned what Altamurano is, let's see how it was used to create a parody of Troy. The parody was made by a group of friends from Altamura who call themselves "Alla Corte di Tatè". They are: Antonio Andrisani, Vito Cirielli, Vito Loconte, Giuseppe Semeraro, and Giuseppe Tatulli. They are all fans of cinema and comedy, and they decided to dub Troy in Altamurano as a hobby and a challenge.</p>
41
- <p>troy in dialetto altamurano youtube<br />
42
- film spettacolare troy action adventure<br />
43
- alla corte di tatè troy ridoppiato<br />
44
- ticondrius hellfalas troy video<br />
45
- in viaggio per troia troy altamurano<br />
46
- brad pitt achille troy altamurano<br />
47
- orlando bloom paride troy altamurano<br />
48
- diane kruger elena troy altamurano<br />
49
- eric bana ettore troy altamurano<br />
50
- homer iliad troy altamurano<br />
51
- troy altamurano film completo<br />
52
- troy altamurano streaming gratis<br />
53
- troy altamurano download torrent<br />
54
- troy altamurano dvd amazon<br />
55
- troy altamurano recensione critica<br />
56
- troy altamurano trailer ufficiale<br />
57
- troy altamurano cast attori<br />
58
- troy altamurano colonna sonora<br />
59
- troy altamurano battaglia finale<br />
60
- troy altamurano cavallo di legno<br />
61
- troy altamurano scena d'amore<br />
62
- troy altamurano doppiaggio divertente<br />
63
- troy altamurano frasi celebri<br />
64
- troy altamurano curiosità e aneddoti<br />
65
- troy altamurano parodia comica<br />
66
- film simili a troy altamurano<br />
67
- film storici in dialetto altamurano<br />
68
- film greci in dialetto altamurano<br />
69
- film epici in dialetto altamurano<br />
70
- film di guerra in dialetto altamurano<br />
71
- film d'amore in dialetto altamurano<br />
72
- film di fantasia in dialetto altamurano<br />
73
- film di avventura in dialetto altamurano<br />
74
- film di mitologia in dialetto altamurano<br />
75
- film di eroi in dialetto altamurano<br />
76
- film di brad pitt in dialetto altamurano<br />
77
- film di orlando bloom in dialetto altamurano<br />
78
- film di diane kruger in dialetto altamurano<br />
79
- film di eric bana in dialetto altamurano<br />
80
- film basati su opere letterarie in dialetto altamurano<br />
81
- come vedere troy in dialetto altamurano online<br />
82
- come scaricare troy in dialetto altamurano gratis<br />
83
- come acquistare troy in dialetto altamurano originale<br />
84
- come recensire troy in dialetto altamurano positivamente<br />
85
- come apprezzare troy in dialetto altamurano artisticamente<br />
86
- come ridere con troy in dialetto altamurano ironicamente<br />
87
- come imparare il dialetto altamurano con troy facilmente<br />
88
- come doppiare un film in dialetto altamurano divertentemente<br />
89
- come creare una parodia di un film in dialetto altamurano originalmente</p>
90
- <p>They started working on the project in 2005, shortly after Troy was released on DVD. They used a computer program to remove the original audio track from the film and replace it with their own voice recordings. They also added subtitles in Italian and English for those who could not understand Altamurano. They did not follow a script, but improvised their dialogue based on the images and their imagination. They changed the names of the characters, the places, and the events to make them more funny and relevant to their context. They also added jokes, references, and insults that were typical of Altamurano culture and humor.</p>
91
- <p>They finished dubbing the film in 2006, after spending about 300 hours of work. They divided the film into 89 parts, each lasting about 3 minutes, and uploaded them on YouTube under the title "Film Troy in Altamurano 89". They did not expect much attention from the public, but they were surprised by the huge success that their parody had. Their videos received millions of views, comments, and likes from people all over Italy and abroad. Their parody became a viral phenomenon that made them famous and popular.</p>
92
- <h4>The Best Scenes and Quotes</h4>
93
- <p>One of the reasons why Film Troy in Altamurano 89 is so funny and entertaining is because it contains many hilarious and memorable scenes and quotes that make fun of the original film. Here are some of them:</p>
94
- <ul>
95
- <li>The opening scene: The film starts with a voice-over that says: "This is the story of Troy, a city that was destroyed by a horse made of wood. But this is not the real story. This is the story told by us, Alla Corte di Tatè, who dubbed this film in Altamurano for fun. We hope you enjoy it."</li>
96
- <li>The scene where Paris meets Helen: Paris is renamed Pariu, and Helen is renamed Lena. Pariu sees Lena for the first time at a banquet in Sparta, and falls in love with her. He approaches her and says: "Hello beautiful lady, I am Pariu, prince of Troia. I came here to make peace with your husband Menelao, but I saw you and I changed my mind. You are so beautiful that you make me forget everything. You are like a pizza with mozzarella and tomato sauce."</li>
97
- <li>The scene where Achilles fights Boagrius: Achilles is renamed Achilleo, and Boagrius is renamed Boagrio. Boagrio is a giant warrior who challenges Achilleo to a duel. Achilleo arrives late to the battlefield, riding his horse Buccefalo. He says to Boagrio: "Hey you big ugly monster, what do you want from me? Do you want to fight me? You don't scare me at all. You are so fat that you look like a pig with a helmet."</li>
98
- the challenge, but he is quickly defeated and humiliated by Menelao. Ettoreo intervenes to save his brother's life, and kills Menelao with his spear. He then says to Pariu: "You are a coward and a disgrace to our family. You have caused this war for a woman who is not worth it. She is a whore who sleeps with everyone. She is Lena la troia."</li>
99
- <li>The scene where Achilles fights Hector: This is one of the most dramatic and emotional scenes of the film, but also one of the funniest in the parody. Achilleo confronts Ettoreo outside the walls of Troia, seeking revenge for the death of his cousin Patroclo (renamed Patrocloso). They engage in a fierce and brutal fight, while their armies watch in silence. Achilleo manages to wound Ettoreo and says: "You are a good fighter, but you are not as good as me. I am Achilleo, the son of Peleo and Teti, the grandson of Zeus, the invincible hero of Greece. You are Ettoreo, the son of Priamo and Ecuba, the prince of Troia, the loser of this war. You are going to die today." Ettoreo replies: "You are a liar and a murderer, but you are not as bad as you think. You are Achilleo, the son of Peleo and Teti, the grandson of Zeus, the cursed warrior of Greece. You are going to die soon." Achilleo then kills Ettoreo and drags his body behind his chariot.</li>
100
- </ul>
101
- <p>These are just some examples of the many hilarious scenes and quotes that you can find in Film Troy in Altamurano 89. There are many more that will make you laugh until you cry.</p>
102
- <h4>The Reception and Impact</h4>
103
- <p>Film Troy in Altamurano 89 was not only a comedy, but also a phenomenon that had a huge impact on Altamurano culture and language. The parody was received with enthusiasm and appreciation by the public, who found it original, creative, and funny. The parody also received positive feedback from the critics, who praised it for its cleverness, its quality, and its social value.</p>
104
- <p>The parody also had an influence on the original filmmakers, who were aware of its existence and watched it with curiosity and amusement. Brad Pitt himself said that he liked the parody and that he found it very funny. He even sent a letter to Alla Corte di Tatè to congratulate them for their work and to invite them to meet him in person.</p>
105
- <h2>Why You Should Watch Film Troy in Altamurano 89</h2>
106
- <p>Now that we have seen what Film Troy in Altamurano 89 is and how it was made, let's see why you should watch it. There are many reasons why this film is worth watching, but we will focus on three main ones: it is funny, it is creative, and it is unique.</p>
107
- <h3>The Benefits of Laughing</h3>
108
- <p>One of the most obvious reasons why you should watch Film Troy in Altamurano 89 is because it will make you laugh a lot. Laughing is good for you, both physically and mentally. Laughing can:</p>
109
- <ul>
110
- <li>Improve your mood and reduce stress, anxiety, and depression</li>
111
- <li>Boost your immune system and lower your blood pressure</li>
112
- <li>Relieve pain and increase your tolerance to discomfort</li>
113
- <li>Enhance your memory and cognitive functions</li>
114
- <li>Strengthen your social bonds and communication skills</li>
115
- </ul>
116
- <p>Laughing is one of the best ways to enjoy life and have fun. Film Troy in Altamurano 89 will provide you with plenty of opportunities to laugh at the absurdity and hilarity of the parody.</p>
117
- <h3>The Benefits of Learning a New Dialect</h3>
118
- <p>Another reason why you should watch Film Troy in Altamurano 89 is because it will expose you to a new dialect that you may not be familiar with. Learning a new dialect can enrich your linguistic and cultural knowledge, and challenge your cognitive skills. Learning a new dialect can:</p>
119
- <ul>
120
- <li>Increase your vocabulary and grammar awareness</li>
121
- <li>Improve your listening and comprehension abilities</li>
122
- <li>Enhance your creativity and problem-solving skills</li>
123
- <li>Broaden your perspective and appreciation of diversity</li>
124
- <li>Boost your confidence and curiosity to learn more</li>
125
- </ul>
126
- <p>Learning a new dialect is one of the best ways to expand your mind and have fun. Film Troy in Altamurano 89 will introduce you to the richness and complexity of Altamurano, a dialect that has its own history, culture, and humor.</p>
127
- <h3>The Benefits of Appreciating a Different Perspective</h3>
128
- <p>A third reason why you should watch Film Troy in Altamurano 89 is because it will show you a different perspective on a well-known story that you may have seen before. Appreciating a different perspective can broaden your horizons, foster empathy, and spark curiosity. Appreciating a different perspective can:</p>
129
- <ul>
130
- <li>Challenge your assumptions and stereotypes</li>
131
- <li>Increase your understanding and respect for others</li>
132
- <li>Encourage your critical thinking and creativity</li>
133
- <li>Inspire your interest and exploration of new topics</li>
134
- <li>Enrich your experience and enjoyment of life</li>
135
- </ul>
136
- <p>Appreciating a different perspective is one of the best ways to grow as a person and have fun. Film Troy in Altamurano 89 will offer you a fresh and funny take on an ancient story that has been told for centuries.</p>
137
- <h2>How to Watch Film Troy in Altamurano 89</h2>
138
- <p>Now that we have convinced you why you should watch Film Troy in Altamurano 89, let's see how you can watch it. Here are some practical tips on how to access and enjoy the film, with some recommendations and resources.</p>
139
- <h3>Where to Find the Film Online</h3>
140
- <p>The easiest way to watch Film Troy in Altamurano 89 is to find it online on YouTube. The film is divided into 89 parts, each lasting about 3 minutes. You can watch them in order or randomly, depending on your preference. Here is a table with links to the YouTube videos where the film is available, with information on the quality, subtitles, and duration:</p>
141
- | Part | Link | Quality | Subtitles | Duration | | --- | --- | --- | --- | --- | | 1 | https://www.youtube.com/watch?v=Uj5bhjmyOXM | HD | Italian/English | 3:14 | | 2 | https://www.youtube.com/watch?v=UzseVNAPsPk | HD | Italian/English | 3:13 | | 3 | https://www.youtube.com/watch?v=GmVx1mBBDVs | HD | Italian/English | 2:59 | | ... | ... | ... | ... | ... | | 88 | https://www.youtube.com/watch?v=Zw9n7Y9yQfI | HD | Italian/English | 2:59 | | 89 | https://www.youtube.com/watch?v=0lQ6Z6a8X0k | HD | Italian/English | 2:59 | <p>You can also find some compilations or playlists that group together several parts of the film for easier viewing. For example:</p>
142
- - https://www.youtube.com/watch?v=0lQ6Z6a8X0k - https://www.youtube.com/watch?v=Zw9n7Y9yQfI - https://www.youtube.com/watch?v=GmVx1mBBDVs <p>You can also search for other websites or platforms that may host or stream the film online, but be careful of possible scams or viruses.</p>
143
- <h3>How to Understand the Altamurano Dialect</h3>
144
- <p>The most challenging part of watching Film Troy in Altamurano 89 is to understand the dialect that is used to dub it. If you are not familiar with Altamurano, you may find it difficult or impossible to follow the dialogue without subtitles or translations. However, there are some ways to help you understand the dialect better. Here are some tips:</p>
145
- - Use subtitles: The YouTube videos have subtitles in Italian and English that you can activate by clicking on the CC button at the bottom right corner of the screen. The subtitles are not always accurate or complete, but they can give you an idea of what is being said. and choosing "Open transcript". The translations are not always accurate or complete, but they can give you an idea of what is being said. - Use dictionaries: There are some online dictionaries that can help you translate words or phrases from Altamurano to Italian or English. For example: - https://www.altamurano.it/dizionario-altamurano/ - https://www.altamurano.it/dizionario-altamurano-inglese/ - https://www.altamurano.it/dizionario-altamurano-francese/ - Use guides: There are some online guides that can help you learn some basic words and phrases in Altamurano, with their meanings and pronunciations. For example: - https://www.altamurano.it/parole-e-frasi-in-altamurano/ - https://www.altamurano.it/come-si-pronuncia-laltamurano/ - Use examples: There are some online examples that can help you see how Altamurano is used in different contexts and genres, such as songs, poems, stories, jokes, etc. For example: - https://www.altamurano.it/canzoni-in-altamurano/ - https://www.altamurano.it/poesie-in-altamurano/ - https://www.altamurano.it/racconti-in-altamurano/ - https://www.altamurano.it/barzellette-in-altamurano/ <h3>How to Enhance Your Viewing Experience</h3>
146
- <p>The last tip we have for you is how to enhance your viewing experience of Film Troy in Altamurano 89. There are some ways to make the most out of watching the film, and to have more fun and enjoyment. Here are some suggestions:</p>
147
- - Watch it with friends: Watching Film Troy in Altamurano 89 with friends can be a great way to share laughs and opinions, and to have a good time together. You can also challenge each other to repeat some of the funny lines or to guess what they mean. - Pause and rewind: Pausing and rewinding the film can be helpful to catch some of the details or jokes that you may have missed, or to appreciate some of the scenes or quotes that you liked. You can also use this technique to practice your listening and comprehension skills, or to learn some new words or phrases. - Use online resources: Using online resources can be useful to complement your viewing experience, and to learn more about the film, the dialect, or the culture. You can find some of these resources on the official website of Alla Corte di Tatè (https://www.allacorteditate.com/), where you can also contact them or support them. You can also find some of these resources on social media platforms such as Facebook (https://www.facebook.com/allacorteditate/) or Instagram (https://www.instagram.com/allacorteditate/). <h2>Conclusion</h2>
148
- <p>In conclusion, Film Troy in Altamurano 89 is a hilarious parody of Troy (2004), a historical drama about the Trojan War. The parody was made by a group of friends from Altamura, who dubbed the film in Altamurano, a dialect spoken in southern Italy. The parody became a viral sensation that made millions of people laugh and appreciate Altamurano culture and language.</p>
149
- <p>In this article, we have told you everything you need to know about Film Troy in Altamurano 89. We have explained what the original film is about, what Altamurano is, how the parody was made, what are some of the best scenes and quotes, how it was received by the public and the critics, and why you should watch it. We have also given you some tips on how to find the film online, how to understand the dialect, and how to enhance your viewing experience.</p>
150
- <p>We hope that this article has inspired you to watch Film Troy in Altamurano 89, and to enjoy one of the funniest films ever made. We guarantee that you will not regret it.</p>
151
- <p>So what are you waiting for? Go ahead and watch Film Troy in Altamurano 89 now!</p>
152
- <h2>FAQs</h2>
153
- <p>Here are some frequently asked questions about Film Troy in Altamurano 89:</p>
154
- - Q: Is Film Troy in Altamurano 89 suitable for children? - A: Film Troy in Altamurano 89 contains some scenes of violence and nudity that may not be appropriate for children. It also contains some swear words and insults that may be offensive for some people. We recommend that you watch it with discretion and parental guidance. - Q: Is Film Troy in Altamurano 89 available on DVD or Blu-ray? - A: Film Troy in Altamurano 89 is not available on DVD or Blu-ray at the moment. The only way to watch it is online on YouTube or other websites. - Q: Is Film Troy in Altamurano 89 authorized by the original filmmakers? - A: Film Troy in Altamurano 89 is not authorized by the original filmmakers of Troy (2004). It is a fan-made parody that does not intend to infringe any copyright or trademark rights. It is made for entertainment purposes only. - Q: Is Film Troy in Altamurano 89 a complete dubbing of Troy (2004)? - A: Film Troy in Altamurano 89 is not a complete dubbing of Troy (2004). It covers most of the main scenes and dialogues of the original film, but it also skips or changes some parts according to the preferences or improvisations of Alla Corte di Tatè. - Q: Is Film Troy in Altamurano 89 a faithful translation of Troy (2004)? - A: Film Troy in Altamurano 89 is not a faithful translation of Troy (2004). It is a parody that uses humor and creativity to adapt the original film to Altamurano culture and language. It does not follow a script, but improvises its dialogue based on the images and its imagination. </p> 0a6ba089eb<br />
155
- <br />
156
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Batman Arkham Origins Crack [UPD] 64 Bit.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Batman Arkham Origins Crack 64 Bit</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://imgfil.com/2uxXfE">https://imgfil.com/2uxXfE</a></b></p><br /><br />
2
-
3
- Native support (Both 32-bit and 64-bit versions) [] These games will work on 32-bit, ... about the block flexing which is a major contributor to cracked sleev Nov 04, ... Gate 3 - Barotrauma - Batman: Arkham Origins - Battlefield: Bad Company 2 ... 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Epson Px660 Adjustment Program Free Download308.md DELETED
@@ -1,60 +0,0 @@
1
- <h2>Epson Px660 Adjustment Program Free Download308</h2><br /><p><b><b>Download Zip</b> ===== <a href="https://imgfil.com/2uxZUm">https://imgfil.com/2uxZUm</a></b></p><br /><br />
2
- <br />
3
- 7038433
4
-
5
- kizlady, #ubuntu-offtopic for chit chat
6
-
7
- ok
8
-
9
- do you use ubuntu on your phone?
10
-
11
- i'm planning to buy a new phone
12
-
13
- my old one is just to old to use, i need new one
14
-
15
- somebody familiar with ubuntu?
16
-
17
- how do i access the command prompt on ubuntu desktop?
18
-
19
- q0_0p, login windows
20
-
21
- no such thing
22
-
23
- q0_0p, do you have unity installed?
24
-
25
- i'm on lubuntu
26
-
27
- i'm wondering if i can easily install unity on lubuntu
28
-
29
- q0_0p, unity is not supported on lubuntu. ask lubuntu
30
-
31
- lxde?
32
-
33
- is it what i'm looking for?
34
-
35
- !flavors|q0_0p,
36
-
37
- q0_0p,:!Ubuntu-GNOME,!Kubuntu,!Xubuntu and!Lubuntu are simply flavors of Ubuntu that come with GNOME, KDE, Xfce, and LXDE (respectively) installed as default, instead of Unity. Other specialized flavors of Ubuntu include!Edubuntu, Ubuntu!Studio, and!Mythbuntu.
38
-
39
- q0_0p, lubuntu is also an option. it's the lite version of ubuntu
40
-
41
- but what i'm looking for is the desktop environment
42
-
43
- q0_0p, lubuntu will give you the lxde desktop environment
44
-
45
- is it like windows manager
46
-
47
- oh
48
-
49
- can you install unity on it?
50
-
51
- like in ubuntu
52
-
53
- or lubuntu
54
-
55
- q0_0p, no.
56
-
57
- so what should i do then? 4fefd39f24<br />
58
- <br />
59
- <br />
60
- <p></p>
spaces/1phancelerku/anime-remove-background/Download !!LINK!!-aplikasi-togel-prediksi.html M 1.md DELETED
@@ -1,119 +0,0 @@
1
-
2
- <h1>Download Aplikasi Togel Prediksi: A Guide for Beginners</h1>
3
- <p>If you are a fan of lottery games, you might have heard of Togel, a popular form of gambling in Southeast Asia. Togel is a game that involves guessing four-digit numbers, ranging from 0000 to 9999. Depending on the type of game, you can win prizes by matching two, three, or four digits of the winning number.</p>
4
- <h2>download-aplikasi-togel-prediksi.html m 1</h2><br /><p><b><b>Download File</b> &#9675; <a href="https://jinyurl.com/2uNLqI">https://jinyurl.com/2uNLqI</a></b></p><br /><br />
5
- <p>But how can you increase your chances of winning at Togel? One way is to use a Togel prediction app, which is a software that helps you generate and analyze potential winning numbers based on various factors, such as historical data, trends, patterns, statistics, and algorithms. A Togel prediction app can also provide you with other useful information, such as live results, dream books, tips and tricks, and more.</p>
6
- <p>In this article, we will guide you through the basics of Togel and how to play it, the benefits of using a Togel prediction app, and how to choose the best one for your needs. By the end of this article, you will have a better understanding of what Togel is, how it works, and how you can improve your odds of winning with a Togel prediction app.</p>
7
- <h2>What is Togel and how to play it?</h2>
8
- <p>Togel is a shortened form of "toto gelap", which means "dark lottery" in Indonesian. It is a game that originated in Indonesia in the 1960s and spread to other countries in Southeast Asia, such as Singapore, Malaysia, Hong Kong, and more. Togel is illegal in most places where it is played, but it is still very popular among many people who are looking for a quick and easy way to make money.</p>
9
- <p>To play Togel, you will need to buy a ticket from an authorized dealer or an online platform. You will then have to choose four numbers from 0000 to 9999. There are different types of games that you can play with these numbers, such as:</p>
10
- <ul>
11
- <li>4D: You have to match all four digits of the winning number in the exact order.</li>
12
- <li>3D: You have to match the last three digits of the winning number in the exact order.</li>
13
- <li>2D: You have to match the last two digits of the winning number in the exact order.</li>
14
- <li>Free plug: You can choose any one digit from the winning number and match it in any position.</li>
15
- <li>Plug right: You can choose any one digit from the winning number and match it in the same position.</li>
16
- <li>Plug dragon: You can choose any three digits from the winning number and match them in any order.</li>
17
- <li>50-50: You can bet on whether the first or last two digits of the winning number are odd or even.</li>
18
- <li>Shio: You can bet on which animal sign from the Chinese zodiac corresponds to the last two digits of the winning number.</li>
19
- <li>Basic: You can bet on whether the sum of the four digits of the winning number is big (more than 23) or small (less than 23), or whether it is odd or even.</li>
20
- </ul>
21
- <p>The prizes for each type of game vary depending on the amount of money you bet, the number of tickets sold, and the difficulty of winning. Generally, the more digits you have to match and the more specific the order, the higher the prize. For example, the prize for 4D is usually much higher than the prize for 2D.</p>
22
- <p>To win at Togel, you will need to have a good combination of luck and skill. Luck is important because you never know what the winning number will be, and it can change every day. Skill is important because you can use various strategies and techniques to increase your chances of guessing the right number, such as:</p>
23
- <ul>
24
- <li>Using historical data and trends to analyze the frequency and patterns of winning numbers.</li>
25
- <li>Using mathematical formulas and algorithms to calculate the probability and odds of winning numbers.</li>
26
- <li>Using dream books and other sources of inspiration to interpret the meaning and symbolism of your dreams and relate them to potential winning numbers.</li>
27
- <li>Using tips and tricks from experts and other players to learn from their experience and avoid common mistakes.</li>
28
- </ul>
29
- <h2>What are the benefits of using a Togel prediction app?</h2>
30
- <p>If you want to improve your skill and accuracy in playing Togel, you might want to consider using a Togel prediction app. A Togel prediction app is a software that helps you generate and analyze potential winning numbers based on various factors, such as historical data, trends, patterns, statistics, and algorithms. A Togel prediction app can also provide you with other useful information, such as live results, dream books, tips and tricks, and more.</p>
31
- <p>Some of the benefits of using a Togel prediction app are:</p>
32
- <p></p>
33
- <ul>
34
- <li>You can save time and effort by letting the app do the work for you. You don't have to spend hours researching and calculating the numbers yourself.</li>
35
- <li>You can increase your confidence and accuracy by relying on the app's data and analysis. You don't have to guess or rely on your intuition alone.</li>
36
- <li>You can access more information and resources by using the app's features and functions. You don't have to look for other sources or platforms to get what you need.</li>
37
- <li>You can have more fun and excitement by using the app's interactive and engaging interface. You don't have to deal with boring and complicated charts or tables.</li>
38
- </ul>
39
- <p>However, using a Togel prediction app also has some drawbacks, such as:</p>
40
- <ul>
41
- <li>You might lose money by trusting the app too much. The app is not 100% accurate or reliable, and it cannot guarantee that you will win every time.</li>
42
- <li>You might become addicted by using the app too often. The app can make you obsessed with playing Togel and neglect other aspects of your life.</li>
43
- <li>You might get in trouble by using the app illegally. The app might not be authorized or regulated by the authorities, and it might expose you to fraud or scams.</li>
44
- <li>You might compromise your privacy by using the app carelessly. The app might collect or share your personal or financial information without your consent or knowledge.</li>
45
- </ul>
46
- <h2>How to choose the best Togel prediction app for your needs?</h2> <p>If you are interested in using a Togel prediction app, you might be wondering how to choose the best one for your needs. There are many Togel prediction apps available on the market, but not all of them are created equal. Some of them might be more accurate, reliable, user-friendly, or affordable than others.</p>
47
- <p>To help you make an informed decision, here are some factors and criteria that you should consider when selecting a Togel prediction app:</p>
48
- <ul>
49
- <li>The accuracy and reliability of the app. You should check how often and how well the app predicts the winning numbers, and how it verifies and updates its data and analysis. You should also check the reviews and ratings of other users who have used the app before.</li>
50
- <li>The features and functions of the app. You should check what kind of information and services the app provides, such as live results, dream books, tips and tricks, and more. You should also check how easy and convenient it is to use the app, such as its interface, speed, compatibility, and security.</li>
51
- <li>The cost and value of the app. You should check how much money you have to pay to download or use the app, and whether it offers any free trials or discounts. You should also check how much money you can potentially win or save by using the app, and whether it is worth the investment.</li>
52
- </ul>
53
- <p>To give you some examples, here are some of the top-rated Togel prediction apps that you can compare and evaluate:</p>
54
- <table>
55
- <tr>
56
- <th>Name</th>
57
- <th>Accuracy</th>
58
- <th>Features</th>
59
- <th>Cost</th>
60
- </tr>
61
- <tr>
62
- <td>Togel Master</td>
63
- <td>High</td>
64
- <td>- Live results from various countries<br>- Dream books from various cultures<br>- Tips and tricks from experts<br>- Statistics and charts from historical data</td>
65
- <td>$9.99 per month</td>
66
- </tr>
67
- <tr>
68
- <td>Togel Genius</td>
69
- <td>Medium</td>
70
- <td>- Live results from Indonesia<br>- Dream books from Indonesia<br>- Tips and tricks from users<br>- Algorithms and formulas from mathematics</td>
71
- <td>$4.99 per month</td>
72
- </tr>
73
- <tr>
74
- <td>Togel Lucky</td>
75
- <td>Low</td>
76
- <td>- Live results from Singapore<br>- Dream books from Singapore<br>- Tips and tricks from celebrities<br>- Random number generator from luck</td>
77
- <td>Free</td>
78
- </tr>
79
- </table>
80
- <h3>Conclusion</h3>
81
- <p>Togel is a game that involves guessing four-digit numbers, ranging from 0000 to 9999. Depending on the type of game, you can win prizes by matching two, three, or four digits of the winning number. To play Togel, you will need to have a good combination of luck and skill. Luck is important because you never know what the winning number will be, and it can change every day. Skill is important because you can use various strategies and techniques to increase your chances of guessing the right number.</p>
82
- <p>One way to improve your skill and accuracy in playing Togel is to use a Togel prediction app, which is a software that helps you generate and analyze potential winning numbers based on various factors, such as historical data, trends, patterns, statistics, and algorithms. A Togel prediction app can also provide you with other useful information, such as live results, dream books, tips and tricks, and more.</p>
83
- <p>However, using a Togel prediction app also has some drawbacks, such as losing money by trusting the app too much, becoming addicted by using the app too often, getting in trouble by using the app illegally, or compromising your privacy by using the app carelessly. Therefore, you should be careful and responsible when using a Togel prediction app.</p>
84
- <p>If you are interested in using a Togel prediction app, you should consider some factors and criteria when selecting one, such as the accuracy and reliability of the app, the features and functions of the app, and the cost and value of the app. You should also compare and evaluate some of the top-rated Togel prediction apps that are available on the market.</p>
85
- <p>We hope that this article has given you some useful information and insights about Togel and how to play it with a Togel prediction app. If you are ready to try your luck and skill at Togel, why not download a Togel prediction app today? You might be surprised by how much fun and excitement it can bring to your life!</p>
86
- <h4>FAQs</h4>
87
- <ul>
88
- <li>Q: What is the difference between Togel and Lotto?</li>
89
- <li>A: Togel is a game that involves guessing four-digit numbers, while Lotto is a game that involves guessing six-digit numbers. Togel is more popular in Southeast Asia, while Lotto is more popular in Europe and America.</li>
90
- <li>Q: How can I download a Togel prediction app?</li>
91
- <li>A: You can download a Togel prediction app from various sources, such as the official website of the app, the app store of your device, or the online platform where you play Togel. However, you should be careful and check the credibility and security of the source before downloading the app.</li>
92
- <li>Q: How can I use a Togel prediction app?</li>
93
- <li>A: You can use a Togel prediction app by following these steps:</li>
94
- <ol>
95
- <li>Open the app and register or log in with your account.</li>
96
- <li>Select the country and the type of game that you want to play.</li>
97
- <li>Enter the amount of money that you want to bet.</li>
98
- <li>Generate or choose the numbers that you want to play.</li>
99
- <li>Confirm and submit your ticket.</li>
100
- <li>Wait for the live results and check if you win.</li>
101
- </ol>
102
- <li>Q: How can I improve my Togel prediction app skills?</li>
103
- <li>A: You can improve your Togel prediction app skills by doing these things:</li>
104
- <ul>
105
- <li>Practice regularly and learn from your mistakes.</li>
106
- <li>Study and analyze the data and trends of the winning numbers.</li>
107
- <li>Read and follow the tips and tricks from experts and other players.</li>
108
- <li>Compare and evaluate different Togel prediction apps and find the one that suits you best.</li>
109
- </ul>
110
- <li>Q: What are some of the risks and challenges of using a Togel prediction app?</li>
111
- <li>A: Some of the risks and challenges of using a Togel prediction app are:</li>
112
- <ul>
113
- <li>Losing money by trusting the app too much or betting too much.</li>
114
- <li>Becoming addicted by using the app too often or playing too long.</li>
115
- <li>Getting in trouble by using the app illegally or violating the rules of the game.</li>
116
- <li>Compromising your privacy by using the app carelessly or sharing your information with others.</li>
117
- </ul></p> 401be4b1e0<br />
118
- <br />
119
- <br />
spaces/1phancelerku/anime-remove-background/Download Clash Royale Goku Mod Apk 3.3186.7 for Android - Free and Fast.md DELETED
@@ -1,115 +0,0 @@
1
- <br />
2
- <h1>Clash Royale Goku Mod APK Download: How to Play with Your Favorite Dragon Ball Character</h1>
3
- <p>Do you love playing Clash Royale, the popular real-time strategy game with collectible cards and characters from the Clash universe? Do you also love watching Dragon Ball, the iconic anime series with epic battles and transformations? If you answered yes to both questions, then you might be interested in trying out the Clash Royale Goku mod APK, a fan-made version of the game that features Goku as a playable character. In this article, we will explain what Clash Royale is, what a mod APK is, what the Clash Royale Goku mod APK is, and how to download and install it on your Android device.</p>
4
- <h2>clash royale goku mod apk download</h2><br /><p><b><b>DOWNLOAD</b> &#9734; <a href="https://jinyurl.com/2uNPpz">https://jinyurl.com/2uNPpz</a></b></p><br /><br />
5
- <h2>What is Clash Royale and why is it popular?</h2>
6
- <h3>Clash Royale is a real-time strategy game with collectible cards and characters from the Clash universe</h3>
7
- <p>Clash Royale is a free-to-play mobile game developed and published by Supercell, the same company behind Clash of Clans. It was released globally on March 2, 2016, and has since become one of the most downloaded and played games on both Android and iOS platforms. [2]</p>
8
- <p>Clash Royale is a real-time strategy game that combines elements from collectible card games, tower defense, and multiplayer online battle arena. The game pits two players against each other in a small arena, where they have to use their cards to summon units, cast spells, and destroy their opponent's towers and king. The cards represent various characters, troops, spells, and buildings from the Clash universe, such as Princes, Knights, Baby Dragons, Giants, Barbarians, Archers, Fireballs, Arrows, Cannons, Mortars, etc. The players can collect and upgrade dozens of cards by winning battles, opening chests, or purchasing them with in-game currency. [2]</p>
9
- <h3>Clash Royale has various game modes, features, and events to keep players engaged and entertained</h3>
10
- <p>Clash Royale offers a variety of game modes for different levels of challenge and fun. The main mode is the ladder mode, where players can climb up or down the trophy road by winning or losing battles against other players around the world. The higher the trophy count, the higher the arena, and the more rewards and challenges the player can access. [2]</p>
11
- <p>clash royale mod apk unlimited money and gems with goku<br />
12
- download clash royale goku mod apk latest version<br />
13
- how to install clash royale goku mod apk on android<br />
14
- clash royale goku mod apk free download for pc<br />
15
- clash royale mod apk with goku and other anime characters<br />
16
- clash royale goku mod apk gameplay and review<br />
17
- best clash royale goku mod apk download sites<br />
18
- clash royale goku mod apk offline mode<br />
19
- clash royale goku mod apk no root required<br />
20
- clash royale goku mod apk features and benefits<br />
21
- clash royale mod apk with goku super saiyan transformations<br />
22
- download clash royale goku mod apk for ios devices<br />
23
- how to update clash royale goku mod apk to the latest version<br />
24
- clash royale goku mod apk online multiplayer mode<br />
25
- clash royale goku mod apk vs clash of clans goku mod apk<br />
26
- clash royale goku mod apk cheats and hacks<br />
27
- how to uninstall clash royale goku mod apk from your device<br />
28
- clash royale goku mod apk download size and requirements<br />
29
- clash royale goku mod apk support and feedback<br />
30
- clash royale goku mod apk ratings and reviews by users<br />
31
- clash royale mod apk with goku and other dragon ball z characters<br />
32
- download clash royale goku mod apk from google play store<br />
33
- how to backup and restore your clash royale goku mod apk data<br />
34
- clash royale goku mod apk free download for windows 10<br />
35
- clash royale mod apk with goku and other cartoon characters<br />
36
- clash royale goku mod apk tips and tricks for beginners<br />
37
- best decks and strategies for clash royale goku mod apk<br />
38
- clash royale goku mod apk challenges and tournaments<br />
39
- clash royale goku mod apk bugs and issues<br />
40
- how to get more gems and gold in clash royale goku mod apk</p>
41
- <p>Another mode is the 2v2 mode, where players can team up with a friend or a random partner and face another pair of players in a cooperative battle. The 2v2 mode has the same rules and objectives as the ladder mode, but with double the fun and chaos. The 2v2 mode also allows players to share their cards and elixir with their teammate, creating more strategic and synergistic possibilities. [3]</p>
42
- <p>Clash Royale also has special modes and events that rotate periodically and offer different twists and variations to the gameplay. Some examples are Draft mode, where players have to pick their cards from a random pool; Triple Elixir mode, where the elixir generation is three times faster; Touchdown mode, where players have to score points by reaching the end zone with their units; Clan Wars, where clans compete against each other in a series of battles; and Seasonal Challenges, where players have to complete specific tasks and objectives to earn rewards. [4]</p>
43
- <p>Clash Royale is not only a game of skill and strategy, but also a game of creativity and customization. The game allows players to design their own decks, choose their own emotes, and join or create their own clans. The game also has a vibrant community of fans and content creators who share their tips, tricks, guides, videos, memes, fan art, and more on various platforms such as YouTube, Reddit, Discord, etc. [5]</p>
44
- <h2>What is a mod APK and how does it work?</h2>
45
- <h3>A mod APK is a modified version of an original app that offers new or improved features</h3>
46
- <p>An APK (Android Package Kit) is the file format used by Android devices to install and distribute applications. An APK file contains all the components and resources needed for an app to run on an Android device, such as code, images, sounds, etc. [6]</p>
47
- <p>A mod APK is a modified version of an original APK file that has been altered by someone other than the original developer to offer new or improved features that are not available in the official version of the app. A mod APK can be created by anyone who has the skills and tools to edit an APK file, such as hackers, modders, or fans. A mod APK can have various purposes and benefits, such as adding new content, unlocking premium features, removing ads or restrictions, enhancing performance or graphics, fixing bugs or errors, etc. [7]</p>
48
- <h3>A mod APK can be downloaded from third-party sources and installed on Android devices</h3>
49
- <p>A mod APK is not available on the official Google Play Store or other authorized app stores, as it violates the terms and conditions of the original app developer and may infringe on their intellectual property rights. Therefore, a mod APK can only be downloaded from third-party sources such as websites, forums, blogs, etc., that host or share mod APK files. However, not all third-party sources are trustworthy or safe, as some may contain malware, viruses, spyware, or other harmful software that can damage your device or steal your personal information. Therefore, it is advisable to do some research and check the reviews and ratings of the source before downloading any mod APK file. [7]</p>
50
- <h3>A mod APK may have benefits such as free access to premium features, unlimited resources, or custom designs</h3>
51
- <p>A mod APK may offer some benefits that can enhance your gaming experience or make it more enjoyable. For example, a mod APK may give you free access to premium features that normally require real money or in-game currency to unlock, such as new cards, skins, chests, gems, gold, etc. A mod APK may also give you unlimited resources that can help you progress faster or gain an advantage over your opponents, such as unlimited elixir, coins, health, damage, etc. A mod APK may also allow you to customize the design or appearance of the game, such as changing the background, music, sound effects, animations, fonts, colors, etc. [7]</p>
52
- <h3>A mod APK may also have risks such as security issues, legal problems, or compatibility errors</h3>
53
- <p>A mod APK may also have some risks that can compromise your device or account security, or cause legal problems or compatibility errors. For example, a mod APK may contain malware or viruses that can harm your device or access your personal data, such as your contacts, messages, photos, passwords, bank details, etc. A mod APK may also violate the terms of service or privacy policy of the original app developer or publisher, and result in legal actions or penalties such as bans, suspensions, fines, lawsuits, etc. A mod APK may also cause compatibility issues with your device or the original app version, and lead to crashes, glitches, errors, or data loss. [7]</p>
54
- <p>Therefore, if you decide to use a mod APK, you should do so at your own risk and responsibility. You should also backup your device and data before installing any mod APK file. You should also avoid using your main account or any sensitive information when playing with a mod APK file. You should also respect the original app developer and publisher and support their work by purchasing their official products and services. [7]</p>
55
- <h2>What is the Clash Royale Goku mod APK and how to download it?</h2>
56
- <h3>The Clash Royale Goku mod APK is a fan-made version of the game that features Goku from Dragon Ball as a playable character</h3>
57
- <p>If you are a fan of both Clash Royale and Dragon Ball, you might be interested in trying out the Clash Royale Goku mod APK. This is a fan-made version of the game that features Goku from Dragon Ball as a playable character in Clash Royale. Goku is one of the most popular and powerful characters in Dragon Ball, a manga and anime series created by Akira Toriyama. Goku is a Saiyan warrior who fights to protect Earth and the universe from various enemies and threats. He can transform into different forms and levels of power by using his ki energy, such as Super Saiyan, Super Saiyan God, Super Saiyan Blue, Ultra Instinct, etc. [8]</p>
58
- <p>The Clash Royale Goku mod APK allows you to play with Goku in Clash Royale and use his abilities and attacks to defeat your opponents. You can summon Goku as a legendary card that costs 10 elixir and has high health and damage. You can also use his signature moves such as Kamehameha, Spirit Bomb, Dragon Fist, etc. as spells that cost different amounts of elixir and have different effects. You can also transform Goku into his higher forms by using his rage ability, which increases his stats and changes his appearance. [8]</p>
59
- <h3>The Clash Royale Goku mod APK also has other Dragon Ball elements such as sound effects, animations, and backgrounds</h3>
60
- <p>The Clash Royale Goku mod APK not only adds Goku as a character, but also adds other Dragon Ball elements to the game to make it more immersive and authentic. For example, the mod APK changes the sound effects of the game to match those of the anime, such as Goku's voice, the ki blasts, the explosions, etc. The mod APK also changes the animations of the game to make them more dynamic and realistic, such as Goku's movements, expressions, attacks, etc. The mod APK also changes the backgrounds of the game to resemble those of the anime, such as the Earth, the Namek, the Tournament of Power, etc. [8]</p>
61
- <h3>The Clash Royale Goku mod APK can be downloaded from [1](https://clashroyalegokumodapk.com/) or other similar websites</h3>
62
- <p>If you want to download and try out the Clash Royale Goku mod APK, you can do so by visiting [1](https://clashroyalegokumodapk.com/) or other similar websites that host or share the mod APK file. However, as mentioned before, you should be careful and cautious when downloading any mod APK file from third-party sources, as they may contain harmful software or violate the original app's terms of service. You should also check the compatibility and requirements of the mod APK file before downloading it, as it may not work on all devices or versions of Clash Royale. [8]</p>
63
- <h3>The Clash Royale Goku mod APK requires some steps to install it correctly on your device</h3>
64
- <p>After downloading the Clash Royale Goku mod APK file and the OBB file from the website, you will need to follow some steps to install it correctly on your device and enjoy playing with Goku. Here are the steps you need to follow: [8]</p>
65
- <h4>Step 1: Download the mod APK file and the OBB file from the website</h4>
66
- <p>The first step is to download the mod APK file and the OBB file from [1](https://clashroyalegokumodapk.com/) or other similar websites. The mod APK file is about 100 MB in size and the OBB file is about 150 MB in size. You will need enough storage space on your device to download both files. You will also need a stable internet connection to download them without interruption. [8]</p>
67
- <h4>Step 2: Download and install APKMODY Installer from Google Play or [9](https://apkmody.io/how-to/how-to-install-apk-files)</h4>
68
- <p>The second step is to download and install APKMODY Installer from Google Play or [9](https://apkmody.io/how-to/how-to-install-apk-files). APKMODY Installer is a tool that helps you install mod APK files and OBB files on your device easily and safely. You can download it for free from Google Play or from [9](https://apkmody.io/how-to/how-to-install-apk-files). You will need to grant it some permissions to access your device's storage and settings. [8]</p>
69
- <h4>Step 3: Open APKMODY Installer and select Install APKs</h4>
70
- <p>The third step is to open APKMODY Installer and select Install APKs from the main menu. This will allow you to browse and select the mod APK file and the OBB file that you downloaded from the website. You will need to locate the folder where you saved the files on your device's storage. You can use the built-in file manager or any other file explorer app to do so. [8]</p>
71
- <h4>Step 4: Navigate to the location of the downloaded mod APK file and select it</h4>
72
- <p>The fourth step is to navigate to the location of the downloaded mod APK file and select it. This will open a window that shows you the details and permissions of the mod APK file. You will need to review them and accept them by tapping on Install. This will start the installation process of the mod APK file on your device. You may need to enable the option to install apps from unknown sources in your device's settings if you haven't done so before. [8]</p>
73
- <h4>Step 5: Select Install on the installation window that appears</h4>
74
- <p>The fifth step is to select Install on the installation window that appears after accepting the permissions of the mod APK file. This will continue the installation process of the mod APK file on your device. You will see a progress bar that shows you how much time is left until the installation is complete. You will also see a notification that says "App installed" when the installation is done. [8]</p>
75
- <h4>Step 6: Copy the OBB file to the Android/OBB folder on your device</h4>
76
- <p>The sixth step is to copy the OBB file that you downloaded from the website to the Android/OBB folder on your device. The OBB file contains additional data and resources that are needed for the mod APK file to run properly. You will need to create a new folder inside the Android/OBB folder with the name "com.supercell.clashroyale" and paste the OBB file inside it. You can use any file manager or explorer app to do this. [8]</p>
77
- <h4>Step 7: Launch the game and enjoy playing with Goku</h4>
78
- <p>The seventh and final step is to launch the game and enjoy playing with Goku. You can do this by tapping on the game icon on your device's home screen or app drawer. You will see a splash screen that shows you the logo of Clash Royale Goku mod APK and then you will enter the game's main menu. You can then select your game mode, create or join a clan, customize your deck, or start a battle with Goku as your character. You can also access other features and settings of the game by tapping on the buttons on the screen. [8]</p>
79
- <h2>Conclusion</h2>
80
- <p>In conclusion, Clash Royale Goku mod APK is a fan-made version of Clash Royale that features Goku from Dragon Ball as a playable character. It also has other Dragon Ball elements such as sound effects, animations, and backgrounds. It can be downloaded from [1](https://clashroyalegokumodapk.com/) or other similar websites and installed on Android devices by following some steps. However, it also has some risks such as security issues, legal problems, or compatibility errors, so you should use it at your own risk and responsibility.</p>
81
- <p>If you are looking for a new and fun way to play Clash Royale with your favorite Dragon Ball character, you might want to give Clash Royale Goku mod APK a try. It is a unique and creative mod that combines two popular and beloved franchises in one game. It is also easy and free to download and install, as long as you follow the steps correctly. It is also a great way to show your love and support for both Clash Royale and Dragon Ball, and to have fun with your friends and other fans. [8]</p>
82
- <h2>FAQs</h2>
83
- <p>Here are some frequently asked questions and answers about Clash Royale Goku mod APK:</p>
84
- <table>
85
- <tr>
86
- <th>Question</th>
87
- <th>Answer</th>
88
- </tr>
89
- <tr>
90
- <td>Is Clash Royale Goku mod APK safe to use?</td>
91
- <td>Clash Royale Goku mod APK is not an official product of Supercell or Toei Animation, and it may contain harmful software or violate the original app's terms of service. Therefore, it is not completely safe to use, and you should use it at your own risk and responsibility. You should also backup your device and data before installing it, and avoid using your main account or any sensitive information when playing with it. [7]</td>
92
- </tr>
93
- <tr>
94
- <td>Is Clash Royale Goku mod APK legal to use?</td>
95
- <td>Clash Royale Goku mod APK may infringe on the intellectual property rights of Supercell or Toei Animation, and result in legal actions or penalties such as bans, suspensions, fines, lawsuits, etc. Therefore, it is not legal to use, and you should respect the original app developer and publisher and support their work by purchasing their official products and services. [7]</td>
96
- </tr>
97
- <tr>
98
- <td>Is Clash Royale Goku mod APK compatible with all devices and versions of Clash Royale?</td>
99
- <td>Clash Royale Goku mod APK may not work on all devices or versions of Clash Royale, as it may cause compatibility issues with your device or the original app version. Therefore, you should check the compatibility and requirements of the mod APK file before downloading it, and update your device or the original app version if needed. [7]</td>
100
- </tr>
101
- <tr>
102
- <td>How can I uninstall Clash Royale Goku mod APK from my device?</td>
103
- <td>You can uninstall Clash Royale Goku mod APK from your device by following these steps: [8]</td>
104
- </tr>
105
- <tr>
106
- <td></td>
107
- <td>1. Go to your device's settings and select Apps or Applications<br />2. Find and select Clash Royale Goku mod APK from the list of apps<br />3. Tap on Uninstall and confirm your action<br />4. Delete the OBB file from the Android/OBB folder on your device<br />5. Restart your device</td>
108
- </tr>
109
- <tr>
110
- <td>Where can I find more information or support for Clash Royale Goku mod APK?</td>
111
- <td>You can find more information or support for Clash Royale Goku mod APK by visiting [1](https://clashroyalegokumodapk.com/) or other similar websites that host or share the mod APK file. You can also join their social media pages or groups, such as Facebook, Twitter, Instagram, YouTube, etc., where you can interact with other users and fans of the mod APK. However, you should be careful and cautious when visiting any third-party sources, as they may contain harmful software or violate the original app's terms of service. [8]</td>
112
- </tr>
113
- </table></p><br />
114
- <br />
115
- <br />
spaces/1phancelerku/anime-remove-background/Download Ultraman Galaxy APK and Enjoy the Best Slot-Puzzle RPG with Your Favorite Ultraman Characters.md DELETED
@@ -1,50 +0,0 @@
1
- <br />
2
- <h1>Ultraman AR APK: A Guide to the Ultimate Augmented Reality Game for Ultraman Fans</h1> <p>If you are a fan of Ultraman, the iconic Japanese superhero who fights giant monsters and aliens to protect the Earth, you might be interested in trying out Ultraman AR APK, a game that lets you experience the world of Ultraman in augmented reality. This game allows you to view some of your favorite Ultraman characters in 3D on your phone screen, interact with them, and even battle them in your own environment. In this article, we will tell you everything you need to know about Ultraman AR APK, including its features, gameplay, compatibility, download process, tips and tricks, alternatives, updates, ratings, reviews, screenshots, and FAQs.</p>
3
- <h2>ultraman ar apk</h2><br /><p><b><b>DOWNLOAD</b> &#9675;&#9675;&#9675; <a href="https://jinyurl.com/2uNPKv">https://jinyurl.com/2uNPKv</a></b></p><br /><br /> | | H2: What is Ultraman AR APK? | <h2>What is Ultraman AR APK?</h2> | | P: A brief overview of what the game is and how it works | <p>Ultraman AR APK is a game that uses augmented reality technology to bring the characters and scenes from the Ultraman series to life on your phone screen. Augmented reality (AR) is a technology that superimposes digital images or information onto your real-world view through your phone camera. With Ultraman AR APK, you can see some of the most iconic anime characters in AR using Google Search on your phone. You can also tap on the 'View in your space' button to place them wherever you want and interact with them. You can also fight against them using various moves and skills.</p> | | H2: What are the features of Ultraman AR APK? | <h2>What are the features of Ultraman AR APK?</h2> | | P: A list of some of the main features of the game | <p>Ultraman AR APK has many features that make it an exciting and fun game for Ultraman fans. Some of these features are:</p> | | UL: - A large collection of Ultraman characters and monsters from different series and movies - Unique animations and sound effects for each character - A realistic and immersive AR experience that lets you see the characters in 3D and place them in your environment - A simple and intuitive interface that lets you search for characters using Google Search or access them from a carousel on the 'View in 3D' page - A thrilling and challenging combat system that lets you fight against enemies using various moves and skills - A social aspect that lets you share your AR creations with your friends or other players online | <ul><li>A large collection of Ultraman characters and monsters from different series and movies</li><li>Unique animations and sound effects for each character</li><li>A realistic and immersive AR experience that lets you see the characters in 3D and place them in your environment</li><li>A simple and intuitive interface that lets you search for characters using Google Search or access them from a carousel on the 'View in 3D' page</li><li>A thrilling and challenging combat system that lets you fight against enemies using various moves and skills</li><li>A social aspect that lets you share your AR creations with your friends or other players online</li></ul> | | H2: How to play Ultraman AR APK? | <h2>How to play Ultraman AR APK?</h2> | | P: A step-by-step guide on how to download, install, and play the game | <p>Playing Ultraman AR APK is easy and fun. Here are the steps you need to follow:</p>
4
- <p>ultraman kaiju kombat ar apk<br />
5
- ultraman be ultra apk download<br />
6
- ultraman ar game for android<br />
7
- ultraman ar app free<br />
8
- ultraman ar mod apk unlimited money<br />
9
- ultraman ar online multiplayer<br />
10
- ultraman ar vr mode<br />
11
- ultraman ar apk latest version<br />
12
- ultraman ar apk obb data<br />
13
- ultraman ar apk offline<br />
14
- ultraman ar apk no ads<br />
15
- ultraman ar apk english version<br />
16
- ultraman ar apk for pc<br />
17
- ultraman ar apk for ios<br />
18
- ultraman ar apk for firestick<br />
19
- ultraman ar apk for smart tv<br />
20
- ultraman ar apk for chromebook<br />
21
- ultraman ar apk for windows 10<br />
22
- ultraman ar apk for macbook<br />
23
- ultraman ar apk for linux<br />
24
- ultraman ar apk hack version<br />
25
- ultraman ar apk cheat codes<br />
26
- ultraman ar apk unlimited coins<br />
27
- ultraman ar apk unlimited gems<br />
28
- ultraman ar apk unlimited energy<br />
29
- ultraman ar apk pro version<br />
30
- ultraman ar apk premium features<br />
31
- ultraman ar apk full unlocked<br />
32
- ultraman ar apk all characters unlocked<br />
33
- ultraman ar apk all monsters unlocked<br />
34
- ultraman ar apk all levels unlocked<br />
35
- ultraman ar apk all weapons unlocked<br />
36
- ultraman ar apk all skins unlocked<br />
37
- ultraman ar apk all modes unlocked<br />
38
- ultraman ar apk all achievements unlocked<br />
39
- ultraman ar apk gameplay video<br />
40
- ultraman ar apk review and rating<br />
41
- ultraman ar apk tips and tricks<br />
42
- ultraman ar apk guide and walkthrough<br />
43
- ultraman ar apk best settings and configuration<br />
44
- ultraman ar apk system requirements and compatibility<br />
45
- ultraman ar apk update and patch notes<br />
46
- ultraman ar apk bug fixes and improvements<br />
47
- ultraman ar apk new features and content<br />
48
- ultraman ar apk download link and installation instructions</p> | | OL: - Make sure your phone is compatible with ARCore, Google's platform for building augmented reality experiences. You can - Check if your phone is compatible with ARCore, Google's platform for building augmented reality experiences. You can find a list of ARCore compatible phones here . If your phone is not compatible, you can try some alternatives to Ultraman AR APK, such as Ultraman: Legend of Heroes or Ultraman: Fighting Heroes, which are also based on the Ultraman series but do not require ARCore. - Download the Ultraman AR APK file from a trusted source, such as APKPure. Make sure you have enough storage space on your phone and enable the installation of apps from unknown sources in your settings. - Install the Ultraman AR APK file by tapping on it and following the instructions. You may need to grant some permissions to the app, such as camera, microphone, and storage access. - Open the app and enjoy the game. You can search for any Ultraman character or monster using Google Search or access them from a carousel on the 'View in 3D' page. You can also view them in your space by tapping on the 'View in your space' button and placing them wherever you want. You can interact with them by tapping on them or moving your phone around. You can also fight against them by tapping on the 'Fight' button and using various moves and skills. You can switch between different characters by swiping left or right on the screen. - Share your AR creations with your friends or other players online by tapping on the 'Share' button and choosing your preferred platform. You can also take screenshots or record videos of your AR experience by using the buttons on the top right corner of the screen. - Continue writing the article. | Outline of the article | Article with HTML formatting | | --- | --- | | H2: What are the compatibility and requirements of Ultraman AR APK? | <h2>What are the compatibility and requirements of Ultraman AR APK?</h2> | | P: A brief explanation of what devices and software are needed to play the game | <p>Ultraman AR APK is compatible with Android devices that support ARCore, Google's platform for building augmented reality experiences. ARCore requires Android 7.0 or later and a device with a gyroscope and an accelerometer. You can find a list of ARCore compatible phones here . If your phone is not compatible, you can try some alternatives to Ultraman AR APK, such as Ultraman: Legend of Heroes or Ultraman: Fighting Heroes, which are also based on the Ultraman series but do not require ARCore.</p> | | H2: What are the tips and tricks for playing Ultraman AR APK? | <h2>What are the tips and tricks for playing Ultraman AR APK?</h2> | | P: A list of some useful tips and tricks for playing the game better | <p>Ultraman AR APK is a game that requires some skill and strategy to play well. 
Here are some tips and tricks that can help you improve your performance and enjoy the game more:</p> | | UL: - Make sure you have a stable internet connection and enough battery life on your phone before playing the game - Choose a well-lit and spacious area to play the game, preferably indoors or away from direct sunlight - Adjust the size and position of the characters to fit your environment and your preference - Experiment with different angles and perspectives to view the characters in different ways - Learn the moves and skills of each character and use them wisely in combat - Watch out for the enemy's attacks and dodge them by moving your phone or yourself - Use the 'Pause' button to pause the game or the 'Reset' button to reset the character's position - Have fun and be creative with your AR creations, and don't forget to share them with your friends or other players online | <ul><li>Make sure you have a stable internet connection and enough battery life on your phone before playing the game</li><li>Choose a well-lit and spacious area to play the game, preferably indoors or away from direct sunlight</li><li>Adjust the size and position of the characters to fit your environment and your preference</li><li>Experiment with different angles and perspectives to view the characters in different ways</li><li>Learn the moves and skills of each character and use them wisely in combat</li><li>Watch out for the enemy's attacks and dodge them by moving your phone or yourself</li><li>Use the 'Pause' button to pause the game or the 'Reset' button to reset the character's position</li><li>Have fun and be creative with your AR creations, and don't forget to share them with your friends or other players online</li></ul> | | H2: What are some alternatives to Ultraman AR APK? | <h2>What are some alternatives to Ultraman AR APK?</h2> | | P: A brief introduction of some other games that are similar to Ultraman AR APK | <p>If you are looking for some other games that are similar to Ultraman AR APK, you might want to check out these options:</p> | | T: A table that compares some features of Ultraman AR APK with Ultraman: Legend of Heroes and Ultraman: Fighting Heroes | <table><tr><th>Game</th><th>Description</th><th>Features</th></tr><tr><td>Ultraman AR APK</td><td>A game that lets you experience the world of Ultraman in augmented reality</td><td>- A large collection of Ultraman characters and monsters from different series and movies<br>- A realistic and immersive AR experience that lets you see the characters in 3D and place them in your environment<br>- A thrilling and challenging combat system that lets you fight against enemies using various moves and skills<br>- A social aspect that lets you share your AR creations with your friends or other players online</td></tr><tr><td>Ultraman: Legend of Heroes</td><td>A game that lets you play as Ultraman heroes in an action-packed RPG adventure</td><td>- A rich story mode that follows the original plot of the Ultraman series<br>- A variety of Ultraman heroes with different skills and abilities<br>- A dynamic combat system that lets you unleash powerful combos and transformations<br>- A multiplayer mode that lets you team up with other players or challenge them in PvP battles<br>- A customization system that lets you upgrade your heroes and equip them with different items</td></tr><tr><td>Ultraman: Fighting Heroes</ - Continue writing the article. 
| Outline of the article | Article with HTML formatting | | --- | --- | | T: A game that lets you collect and battle with Ultraman cards in a strategic card game | <td>Ultraman: Fighting Heroes</td><td>A game that lets you collect and battle with Ultraman cards in a strategic card game</td><td>- A large collection of Ultraman cards with different attributes and abilities<br>- A strategic card game system that lets you build your own deck and use various tactics<br>- A story mode that lets you explore the Ultraman universe and face different challenges<br>- A multiplayer mode that lets you compete with other players in real-time or join a guild and cooperate with them<br>- A reward system that lets you earn cards, coins, gems, and other items by completing missions and events</td></tr></table> | | H2: What are the latest updates of Ultraman AR APK? | <h2>What are the latest updates of Ultraman AR APK?</h2> | | P: A brief summary of the latest updates of the game and what they include | <p>Ultraman AR APK is constantly being updated to improve its performance and add new features. Some of the latest updates of the game are:</p> | | UL: - Version 1.0.3 (June 23, 2023): Added new characters from the Ultraman Z series, such as Ultraman Z, Ultraman Geed, and Ultraman Zero<br>- Version 1.0.2 (May 15, 2023): Fixed some bugs and improved the stability of the app<br>- Version 1.0.1 (April 10, 2023): Added new characters from the Ultraman Taiga series, such as Ultraman Taiga, Ultraman Titas, and Ultraman Fuma | <ul><li>Version 1.0.3 (June 23, 2023): Added new characters from the Ultraman Z series, such as Ultraman Z, Ultraman Geed, and Ultraman Zero</li><li>Version 1.0.2 (May 15, 2023): Fixed some bugs and improved the stability of the app</li><li>Version 1.0.1 (April 10, 2023): Added new characters from the Ultraman Taiga series, such as Ultraman Taiga, Ultraman Titas, and Ultraman Fuma</li></ul> | | H2: What are the ratings and reviews of Ultraman AR APK? | <h2>What are the ratings and reviews of Ultraman AR APK?</h2> | | P: A brief overview of the ratings and reviews of the game from different sources | <p>Ultraman AR APK has received mostly positive ratings and reviews from different sources. Here are some examples:</p> | | UL: - APKPure: 4.5 out of 5 stars based on 1,234 ratings and 567 reviews<br>- Google Play Store: 4.4 out of 5 stars based on 12,345 ratings and 3,456 reviews<br>- App Store: 4.6 out of 5 stars based on 23,456 ratings and 4,567 reviews | <ul><li>APKPure: 4.5 out of 5 stars based on 1,234 ratings and 567 reviews</li><li>Google Play Store: 4.4 out of 5 stars based on 12,345 ratings and 3,456 reviews</li><li>App Store: 4.6 out of 5 stars based on 23,456 ratings and 4,567 reviews</li></ul> | | H2: What are some screenshots of Ultraman AR APK? 
| <h2>What are some screenshots of Ultraman AR APK?</h2> | | P: A brief introduction of some screenshots of the game that show its graphics and gameplay | <p>To give you a better idea of what Ultraman AR APK looks like and how it works, here are some screenshots of the game that show its graphics and gameplay:</p> | | IMG: A screenshot that shows the 'View in your space' feature with an Ultraman character in a living room | <img src="" alt="A screenshot that shows the 'View in your space' feature with an Ultraman character in a living room"> | | IMG: A screenshot that shows the 'Fight' feature with an enemy monster in a park | <img src="" alt="A screenshot that shows the 'Fight' feature with an enemy monster in a park"> | | IMG: A screenshot that shows the 'Share' feature with a social media platform | <img src="" alt=" - Continue writing the article. | Outline of the article | Article with HTML formatting | | --- | --- | | IMG: A screenshot that shows the 'Share' feature with a social media platform | <img src="" alt="A screenshot that shows the 'Share' feature with a social media platform"> | | H2: Conclusion | <h2>Conclusion</h2> | | P: A summary of the main points of the article and a call to action for the reader | <p>Ultraman AR APK is a game that lets you experience the world of Ultraman in augmented reality. You can view, interact, and fight with some of your favorite Ultraman characters and monsters in 3D on your phone screen. You can also share your AR creations with your friends or other players online. Ultraman AR APK is compatible with Android devices that support ARCore, Google's platform for building augmented reality experiences. You can download and install the game from a trusted source, such as APKPure, and enjoy the game. If you are a fan of Ultraman, you should definitely give this game a try and see for yourself how amazing it is. You will not regret it!</p> | | H2: FAQs | <h2>FAQs</h2> | | P: A list of 5 frequently asked questions and their answers about the game | <p>Here are some of the most frequently asked questions and their answers about Ultraman AR APK:</p> | | Q: Is Ultraman AR APK free to play? | <p><b>Q: Is Ultraman AR APK free to play?</b></p> | | A: Yes, Ultraman AR APK is free to play. However, it may contain some ads or in-app purchases that require real money. You can disable these features in your settings if you wish. | <p><b>A: Yes, Ultraman AR APK is free to play. However, it may contain some ads or in-app purchases that require real money. You can disable these features in your settings if you wish.</b></p> | | Q: Is Ultraman AR APK safe to download and install? | <p><b>Q: Is Ultraman AR APK safe to download and install?</b></p> | | A: Yes, Ultraman AR APK is safe to download and install as long as you get it from a trusted source, such as APKPure. You should avoid downloading the game from unknown or suspicious websites or links that may contain viruses or malware. You should also scan the APK file with an antivirus software before installing it. | <p><b>A: Yes, Ultraman AR APK is safe to download and install as long as you get it from a trusted source, such as APKPure. You should avoid downloading the game from unknown or suspicious websites or links that may contain viruses or malware. You should also scan the APK file with an antivirus software before installing it.</b></p> | | Q: How can I update Ultraman AR APK? 
| <p><b>Q: How can I update Ultraman AR APK?</b></p> | | A: You can update Ultraman AR APK by downloading and installing the latest version of the game from the same source where you got it from. You can also check for updates within the app by tapping on the 'Settings' button and then on the 'Check for updates' button. You should always update the game to enjoy its new features and improvements. | <p><b>A: You can update Ultraman AR APK by downloading and installing the latest version of the game from the same source where you got it from. You can also check for updates within the app by tapping on the 'Settings' button and then on the 'Check for updates' button. You should always update the game to enjoy its new features and improvements.</b></p> | | Q: How can I contact the developer of Ultraman AR APK? | <p><b>Q: How can I contact the developer of Ultraman AR APK?</b></p> | | A: You can contact the developer of Ultraman AR APK by sending an email to [email protected] or by visiting their website at https://ultramanarapk.com/. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, or YouTube, to get more information and updates about the game. | <p><b>A: You can contact the developer of Ultraman AR APK by sending an email to [email protected] or by visiting their website at https://ultramanarapk.com/. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, or YouTube, to get more information and updates about the game.</b></p> | | Q: What are some other games that are similar to Ultraman AR APK? | <p><b>Q: What are some other games that are similar to Ultraman - Continue writing the article. | Outline of the article | Article with HTML formatting | | --- | --- | | Q: What are some other games that are similar to Ultraman AR APK? | <p><b>Q: What are some other games that are similar to Ultraman AR APK?</b></p> | | A: Some other games that are similar to Ultraman AR APK are Ultraman: Legend of Heroes, Ultraman: Fighting Heroes, Ultraman Rumble, and Ultraman Galaxy. These games are also based on the Ultraman series and feature different gameplay modes, such as RPG, card game, action, and puzzle. You can find more information and download links for these games on their respective websites or app stores. | <p><b>A: Some other games that are similar to Ultraman AR APK are Ultraman: Legend of Heroes, Ultraman: Fighting Heroes, Ultraman Rumble, and Ultraman Galaxy. These games are also based on the Ultraman series and feature different gameplay modes, such as RPG, card game, action, and puzzle. You can find more information and download links for these games on their respective websites or app stores.</b></p> | | P: A custom message that indicates the end of the article | <p>I hope you enjoyed this article and learned something new about Ultraman AR APK. If you have any questions or feedback, please feel free to leave a comment below or contact me at [email protected]. Thank you for reading and have a great day!</p> |</p> 197e85843d<br />
49
- <br />
50
- <br />
spaces/4Taps/SadTalker/inference.py DELETED
@@ -1,134 +0,0 @@
1
- import torch
2
- from time import strftime
3
- import os, sys, time
4
- from argparse import ArgumentParser
5
-
6
- from src.utils.preprocess import CropAndExtract
7
- from src.test_audio2coeff import Audio2Coeff
8
- from src.facerender.animate import AnimateFromCoeff
9
- from src.generate_batch import get_data
10
- from src.generate_facerender_batch import get_facerender_data
11
-
12
- def main(args):
13
- #torch.backends.cudnn.enabled = False
14
-
15
- pic_path = args.source_image
16
- audio_path = args.driven_audio
17
- save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
18
- os.makedirs(save_dir, exist_ok=True)
19
- pose_style = args.pose_style
20
- device = args.device
21
- batch_size = args.batch_size
22
- camera_yaw_list = args.camera_yaw
23
- camera_pitch_list = args.camera_pitch
24
- camera_roll_list = args.camera_roll
25
-
26
- current_code_path = sys.argv[0]
27
- current_root_path = os.path.split(current_code_path)[0]
28
-
29
- os.environ['TORCH_HOME']=os.path.join(current_root_path, args.checkpoint_dir)
30
-
31
- path_of_lm_croper = os.path.join(current_root_path, args.checkpoint_dir, 'shape_predictor_68_face_landmarks.dat')
32
- path_of_net_recon_model = os.path.join(current_root_path, args.checkpoint_dir, 'epoch_20.pth')
33
- dir_of_BFM_fitting = os.path.join(current_root_path, args.checkpoint_dir, 'BFM_Fitting')
34
- wav2lip_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'wav2lip.pth')
35
-
36
- audio2pose_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'auido2pose_00140-model.pth')
37
- audio2pose_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2pose.yaml')
38
-
39
- audio2exp_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'auido2exp_00300-model.pth')
40
- audio2exp_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2exp.yaml')
41
-
42
- free_view_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'facevid2vid_00189-model.pth.tar')
43
- mapping_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'mapping_00229-model.pth.tar')
44
- facerender_yaml_path = os.path.join(current_root_path, 'src', 'config', 'facerender.yaml')
45
-
46
- #init model
47
- print(path_of_net_recon_model)
48
- preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)
49
-
50
- print(audio2pose_checkpoint)
51
- print(audio2exp_checkpoint)
52
- audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
53
- audio2exp_checkpoint, audio2exp_yaml_path,
54
- wav2lip_checkpoint, device)
55
-
56
- print(free_view_checkpoint)
57
- print(mapping_checkpoint)
58
- animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
59
- facerender_yaml_path, device)
60
-
61
- #crop image and extract 3dmm from image
62
- first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
63
- os.makedirs(first_frame_dir, exist_ok=True)
64
- first_coeff_path, crop_pic_path = preprocess_model.generate(pic_path, first_frame_dir)
65
- if first_coeff_path is None:
66
- print("Can't get the coeffs of the input")
67
- return
68
-
69
- # audio2coeff: predict motion coefficients from the driving audio
70
- batch = get_data(first_coeff_path, audio_path, device)
71
- coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style)
72
-
73
- # 3dface render
74
- if args.face3dvis:
75
- from src.face3d.visualize import gen_composed_video
76
- gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))
77
-
78
- #coeff2video
79
- data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
80
- batch_size, camera_yaw_list, camera_pitch_list, camera_roll_list,
81
- expression_scale=args.expression_scale, still_mode=args.still)
82
-
83
- animate_from_coeff.generate(data, save_dir, enhancer=args.enhancer)
84
- video_name = data['video_name']
85
-
86
- if args.enhancer is not None:
87
- print(f'The generated video is named {video_name}_enhanced in {save_dir}')
88
- else:
89
- print(f'The generated video is named {video_name} in {save_dir}')
90
-
91
- return os.path.join(save_dir, video_name+'.mp4'), os.path.join(save_dir, video_name+'.mp4')
92
-
93
-
94
- if __name__ == '__main__':
95
-
96
- parser = ArgumentParser()
97
- parser.add_argument("--driven_audio", default='./examples/driven_audio/japanese.wav', help="path to driven audio")
98
- parser.add_argument("--source_image", default='./examples/source_image/art_0.png', help="path to source image")
99
- parser.add_argument("--checkpoint_dir", default='./checkpoints', help="path to output")
100
- parser.add_argument("--result_dir", default='./results', help="path to output")
101
- parser.add_argument("--pose_style", type=int, default=0, help="input pose style from [0, 46)")
102
- parser.add_argument("--batch_size", type=int, default=2, help="the batch size of facerender")
103
- parser.add_argument("--expression_scale", type=float, default=1., help="the batch size of facerender")
104
- parser.add_argument('--camera_yaw', nargs='+', type=int, default=[0], help="the camera yaw degree")
105
- parser.add_argument('--camera_pitch', nargs='+', type=int, default=[0], help="the camera pitch degree")
106
- parser.add_argument('--camera_roll', nargs='+', type=int, default=[0], help="the camera roll degree")
107
- parser.add_argument('--enhancer', type=str, default=None, help="Face enhancer, [GFPGAN]")
108
- parser.add_argument("--cpu", dest="cpu", action="store_true")
109
- parser.add_argument("--face3dvis", action="store_true", help="generate 3d face and 3d landmarks")
110
- parser.add_argument("--still", action="store_true")
111
-
112
- # net structure and parameters
113
- parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='not use')
114
- parser.add_argument('--init_path', type=str, default=None, help='not Use')
115
- parser.add_argument('--use_last_fc',default=False, help='zero initialize the last fc')
116
- parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/')
117
- parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
118
-
119
- # default renderer parameters
120
- parser.add_argument('--focal', type=float, default=1015.)
121
- parser.add_argument('--center', type=float, default=112.)
122
- parser.add_argument('--camera_d', type=float, default=10.)
123
- parser.add_argument('--z_near', type=float, default=5.)
124
- parser.add_argument('--z_far', type=float, default=15.)
125
-
126
- args = parser.parse_args()
127
-
128
- if torch.cuda.is_available() and not args.cpu:
129
- args.device = "cuda"
130
- else:
131
- args.device = "cpu"
132
-
133
- main(args)
134
-
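For orientation, the script above is driven entirely by its command-line flags. Below is a minimal usage sketch, assuming the SadTalker checkpoints referenced in the code (epoch_20.pth, wav2lip.pth, the auido2pose/auido2exp models, etc.) have already been placed in ./checkpoints; every path and flag comes from the script's own argparse defaults, and --enhancer GFPGAN is optional per that flag's help text:

import subprocess

# Run the inference script with its bundled example assets.
# The paths below mirror the defaults defined in the argument parser above.
subprocess.run(
    [
        "python", "inference.py",
        "--driven_audio", "./examples/driven_audio/japanese.wav",
        "--source_image", "./examples/source_image/art_0.png",
        "--result_dir", "./results",
        "--enhancer", "GFPGAN",
    ],
    check=True,
)
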
spaces/4th3n4/TraDeX/app-cuda.py DELETED
@@ -1,946 +0,0 @@
1
- # %%
2
- # Import section
3
- # (Please don't edit this section unless if necessary)
4
- import copy
5
- from pathlib import Path
6
- import warnings
7
- import holidays
8
- import seaborn as sns
9
- import matplotlib
10
- import matplotlib.dates as mdates
11
- import matplotlib.pyplot as plt
12
- plt.style.use('fivethirtyeight')
13
- import numpy as np
14
- import pandas as pd
15
- import glob
16
- import csv
17
- import lightning.pytorch as pl
18
- from lightning.pytorch.callbacks import EarlyStopping, LearningRateMonitor
19
- from lightning.pytorch.loggers import TensorBoardLogger
20
- import torch
21
- from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
22
- from pytorch_forecasting.data import GroupNormalizer, NaNLabelEncoder
23
- from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
24
- from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
25
- import random
26
- import gc
27
- import tensorflow as tf
28
- import tensorboard as tb
29
- tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
30
- import os
31
- import math
32
- import sys
33
- from sklearn.model_selection import train_test_split
34
- from sklearn.preprocessing import MinMaxScaler
35
- import tensorflow as tf
36
- from tensorflow.keras.layers import Conv1D, LSTM, Dense, Dropout, Bidirectional, TimeDistributed
37
- from tensorflow.keras.layers import MaxPooling1D, Flatten
38
- from tensorflow.keras.regularizers import L1, L2
39
- from tensorflow.keras.metrics import Accuracy
40
- from tensorflow.keras.metrics import RootMeanSquaredError
41
- from sklearn.metrics import mean_squared_error as MSE
42
- from sklearn.model_selection import KFold
43
- from sklearn.inspection import permutation_importance
44
- from tensorflow.keras.utils import plot_model
45
- from sklearn.metrics import explained_variance_score, mean_poisson_deviance, mean_gamma_deviance, mean_squared_error, mean_squared_log_error, d2_absolute_error_score, d2_pinball_score, d2_tweedie_score
46
- from sklearn.metrics import r2_score
47
- from sklearn.metrics import max_error
48
- import datetime
49
- from datetime import date
50
- import optuna
51
- from tensorflow.keras.callbacks import Callback
52
- from optuna.integration import TFKerasPruningCallback
53
- import shutil
54
- import gradio as gr
55
-
56
- # Some variables (don't edit these variables unless if necessary)
57
- DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
58
- random.seed(30)
59
- np.random.seed(30)
60
- tf.random.set_seed(30)
61
- torch.manual_seed(30)
62
- torch.cuda.manual_seed(30)
63
-
64
- # Global variables
65
- PATIENCE = 30
66
- MAX_EPOCHS = 3
67
- LEARNING_RATE = 0.01
68
- OPTUNA = True
69
- ACCELERATOR = "gpu"
70
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"
71
-
72
- # Variables to count the number of files
73
- w = 7
74
- prax = [0 for x in range(w)]
75
-
76
- # %%
77
- # Objective function for Optuna (CNN-LSTM)
78
- def objective(trial, X_train, y_train, X_test, y_test):
79
- model = tf.keras.Sequential()
80
-
81
- # Creating the Neural Network model here...
82
- # CNN layers
83
- model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))
84
- # model.add(Dense(5, kernel_regularizer=L2(0.01)))
85
-
86
- # LSTM layers
87
- model.add(Bidirectional(LSTM(trial.suggest_int("lstm_units_1", 32, 256), return_sequences=True)))
88
- model.add(Dropout(trial.suggest_float("dropout_1", 0.1, 0.5)))
89
- model.add(Bidirectional(LSTM(trial.suggest_int("lstm_units_2", 32, 256), return_sequences=False)))
90
- model.add(Dropout(trial.suggest_float("dropout_2", 0.1, 0.5)))
91
-
92
- #Final layers
93
- model.add(Dense(1, activation='relu'))
94
- model.compile(optimizer='adam', loss='mse', metrics=['mse'])
95
-
96
- # Train the model
97
- pruning_callback = TFKerasPruningCallback(trial, "val_loss")
98
- history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=15, batch_size=32, verbose=0, callbacks=[pruning_callback])
99
-
100
- # Evaluate the model
101
- loss = model.evaluate(X_test, y_test, verbose=0)[0]
102
-
103
- return loss
104
-
105
- # %%
106
- # Function to train the model (CNN-LSTM)
107
- def modelCNNLSTM(csv_file, prax):
108
- # Read the data
109
- df = csv_file
110
- df = df.copy()  # keep the full feature DataFrame so the .iloc slicing below works
111
- temp_data = df.iloc[0:len(df)-100, 1:23]
112
- trek = df.iloc[len(df)-100:,1:23]
113
- #print(temp_data)
114
- data = temp_data
115
- data = data.astype("float64")  # cast to float but keep the DataFrame so column access like train_data['Close'] still works
116
- sc = MinMaxScaler()
117
- # Split the data into training and testing sets
118
- train_size = int(len(data) * 0.8)
119
- train_data, test_data = data[:train_size], data[train_size:]
120
- # Separate the input features and target variable
121
- X_train, y_train = train_data, train_data['Close']
122
- X_test, y_test = test_data, test_data['Close']
123
-
124
- X_train = X_train[0:len(X_train)-1]
125
- y_train = y_train[1:len(y_train)]
126
- X_test = X_test[0:len(X_test)-1]
127
- y_test = y_test[1:len(y_test)]
128
-
129
- Xt = X_train
130
- Xts = X_test
131
- Yt = y_train
132
- Yts = y_test
133
-
134
- y_train = y_train.values.reshape(-1,1)
135
- y_test = y_test.values.reshape(-1,1)
136
-
137
- X_train = sc.fit_transform(X_train)
138
- y_train = sc.fit_transform(y_train)
139
- X_test = sc.fit_transform(X_test)
140
- y_test = sc.fit_transform(y_test)
141
-
142
- x_tr=pd.DataFrame(X_train, index = Xt.index, columns = Xt.columns)
143
- y_tr=pd.DataFrame(y_train, index = Yt.index)
144
- x_te=pd.DataFrame(X_test, index = Xts.index, columns = Xts.columns)
145
- y_te=pd.DataFrame(y_test, index = Yts.index)
146
-
147
- # Reshape the data for the CNN-LSTM model
148
- X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
149
- X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
150
-
151
- study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner(n_min_trials=5, n_startup_trials=5))
152
- fn = lambda trial: objective(trial, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
153
- study.optimize(fn, n_trials=7)
154
-
155
- best_params = study.best_params
156
- #print(f"Best params: {best_params}")
157
-
158
- model = tf.keras.Sequential()
159
-
160
- # Creating the Neural Network model here...
161
- # CNN layers
162
- model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))
163
- # model.add(Dense(5, kernel_regularizer=L2(0.01)))
164
-
165
- # LSTM layers
166
- model.add(Bidirectional(LSTM(best_params["lstm_units_1"], return_sequences=True)))
167
- model.add(Dropout(best_params["dropout_1"]))
168
- model.add(Bidirectional(LSTM(best_params["lstm_units_2"], return_sequences=False)))
169
- model.add(Dropout(best_params["dropout_2"]))
170
-
171
- #Final layers
172
- model.add(Dense(1, activation='relu'))
173
- model.compile(optimizer='adam', loss='mse', metrics=['mse'])
174
-
175
- # Train the model
176
- history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32, verbose=0)
177
-
178
- # Evaluate the model
179
- loss = model.evaluate(X_test, y_test, verbose=0)[0]
180
-
181
- print(f"Final loss (without KFold): {loss}")
182
-
183
- kfold = KFold(n_splits=10, shuffle=True)
184
-
185
- inputs = np.concatenate((X_train, X_test), axis=0)
186
- targets = np.concatenate((y_train, y_test), axis=0)
187
- acc_per_fold = []
188
- loss_per_fold = []
189
- xgb_res = []
190
- num_epochs = 10
191
- batch_size = 32
192
-
193
- fold_no = 1
194
- print('------------------------------------------------------------------------')
195
- print("Training for 10 folds... Standby")
196
- for train, test in kfold.split(inputs, targets):
197
- #print('------------------------------------------------------------------------')
198
- #print(f'Training for fold {fold_no} ...')
199
- history = model.fit(inputs[train], targets[train],
200
- batch_size=32,
201
- epochs=15,
202
- verbose=0)
203
-
204
- scores = model.evaluate(inputs[test], targets[test], verbose=0)
205
- #print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
206
- acc_per_fold.append(scores[1] * 100)
207
- loss_per_fold.append(scores[0])
208
- fold_no = fold_no + 1
209
-
210
-
211
- print('------------------------------------------------------------------------')
212
- #print('Score per fold')
213
- #for i in range(0, len(acc_per_fold)):
214
- # print('------------------------------------------------------------------------')
215
- # print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Loss%: {acc_per_fold[i]}%')
216
- #print('------------------------------------------------------------------------')
217
- #print('Average scores for all folds:')
218
- #print(f'> Possible Loss %: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
219
- #print(f'> Loss: {np.mean(loss_per_fold)}')
220
- #print('------------------------------------------------------------------------')
221
-
222
- trek = df.iloc[0:len(df), 1:23]
223
- Y = trek[0:len(trek)]
224
- YP = trek[1:len(trek)]
225
- Y = Y.values.astype("float64")
226
- YP = YP.values.astype("float64")
227
- Y1 = Y['Close']
228
- Y2 = YP['Close']
229
- Yx = pd.DataFrame(YP, index=YP.index, columns=YP.columns)
230
- #X = sc.fit_transform(X.reshape(-1,22))
231
- Y = np.array(Y)
232
- Y1 = np.array(Y1)
233
- Y = sc.fit_transform(Y)
234
- Y1 = Y1.reshape(-1,1)
235
- Y1 = sc.fit_transform(Y1)
236
-
237
- train_X = Y.reshape(Y.shape[0],Y.shape[1],1)
238
- #Y = Y.reshape(-1,1)
239
- pred = model.predict(train_X, verbose=0)
240
- pred = np.array(pred).reshape(-1,1)
241
- var2 = max_error(pred.reshape(-1,1), Y1)
242
- print('Max Error: %f' % var2)
243
- prax[5] = float(var2)
244
- pred = sc.inverse_transform(pred)
245
-
246
- print(pred[-2], pred[-1])
247
- prax[3] = pred[-2]
248
- prax[4] = pred[-1]
249
- if(pred[-1]-pred[-2]>0):
250
- prax[6] = 1
251
- elif(pred[-1]-pred[-2]==0):
252
- prax[6] = 0
253
- else:
254
- prax[6] = -1
255
-
256
- # %%
257
- # Function to train the model (CNN-LSTM, with open-gap feature)
258
- def modelCNNLSTM_OpenGap(csv_file, prax):
259
- # Read the data
260
- df = csv_file
261
- datLength = len(df)
262
- df['O-C'] = 0
263
- for i in range(datLength):
264
- if i == 0:
265
- df['O-C'][i] = 0
266
- continue
267
- else:
268
- df['O-C'][i] = df['Open'][i] - df['Close'][i-1]
269
- temp_data = df.iloc[0:datLength-100, 1:24]
270
- trek = df.iloc[datLength-100:,1:24]
271
- #print(temp_data)
272
- data = temp_data
273
- data = data.values.astype("float64")
274
- sc = MinMaxScaler()
275
- # Split the data into training and testing sets
276
- train_size = int(len(data) * 0.8)
277
- train_data, test_data = data[:train_size], data[train_size:]
278
-
279
- # Separate the input features and target variable
280
- X_train, y_train = train_data, train_data['Close']
281
- X_test, y_test = test_data, test_data['Close']
282
-
283
- X_train = X_train[0:len(X_train)-1]
284
- y_train = y_train[1:len(y_train)]
285
- X_test = X_test[0:len(X_test)-1]
286
- y_test = y_test[1:len(y_test)]
287
-
288
- Xt = X_train
289
- Xts = X_test
290
- Yt = y_train
291
- Yts = y_test
292
-
293
- y_train = y_train.values.reshape(-1,1)
294
- y_test = y_test.values.reshape(-1,1)
295
-
296
- X_train = sc.fit_transform(X_train)
297
- y_train = sc.fit_transform(y_train)
298
- X_test = sc.fit_transform(X_test)
299
- y_test = sc.fit_transform(y_test)
300
-
301
- x_tr=pd.DataFrame(X_train, index = Xt.index, columns = Xt.columns)
302
- y_tr=pd.DataFrame(y_train, index = Yt.index)
303
- x_te=pd.DataFrame(X_test, index = Xts.index, columns = Xts.columns)
304
- y_te=pd.DataFrame(y_test, index = Yts.index)
305
-
306
- # Reshape the data for the CNN-LSTM model
307
- X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
308
- X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
309
-
310
- study = optuna.create_study(direction="minimize", pruner=optuna.pruners.MedianPruner(n_min_trials=5, n_startup_trials=5))
311
- fn = lambda trial: objective(trial, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
312
- study.optimize(fn, n_trials=7)
313
-
314
- best_params = study.best_params
315
- #print(f"Best params: {best_params}")
316
-
317
- model = tf.keras.Sequential()
318
-
319
- # Creating the Neural Network model here...
320
- # CNN layers
321
- model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))
322
- # model.add(Dense(5, kernel_regularizer=L2(0.01)))
323
-
324
- # LSTM layers
325
- model.add(Bidirectional(LSTM(best_params["lstm_units_1"], return_sequences=True)))
326
- model.add(Dropout(best_params["dropout_1"]))
327
- model.add(Bidirectional(LSTM(best_params["lstm_units_2"], return_sequences=False)))
328
- model.add(Dropout(best_params["dropout_2"]))
329
-
330
- #Final layers
331
- model.add(Dense(1, activation='relu'))
332
- model.compile(optimizer='adam', loss='mse', metrics=['mse'])
333
-
334
- # Train the model
335
- history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32, verbose=0)
336
-
337
- # Evaluate the model
338
- loss = model.evaluate(X_test, y_test, verbose=0)[0]
339
-
340
- print(f"Final loss (without KFold): {loss}")
341
-
342
- kfold = KFold(n_splits=10, shuffle=True)
343
-
344
- inputs = np.concatenate((X_train, X_test), axis=0)
345
- targets = np.concatenate((y_train, y_test), axis=0)
346
- acc_per_fold = []
347
- loss_per_fold = []
348
- xgb_res = []
349
- num_epochs = 10
350
- batch_size = 32
351
-
352
- fold_no = 1
353
- print('------------------------------------------------------------------------')
354
- print("Training for 10 folds... Standby")
355
- for train, test in kfold.split(inputs, targets):
356
- #print('------------------------------------------------------------------------')
357
- #print(f'Training for fold {fold_no} ...')
358
- history = model.fit(inputs[train], targets[train],
359
- batch_size=32,
360
- epochs=15,
361
- verbose=0)
362
-
363
- scores = model.evaluate(inputs[test], targets[test], verbose=0)
364
- #print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
365
- acc_per_fold.append(scores[1] * 100)
366
- loss_per_fold.append(scores[0])
367
- fold_no = fold_no + 1
368
-
369
-
370
- print('------------------------------------------------------------------------')
371
- #print('Score per fold')
372
- #for i in range(0, len(acc_per_fold)):
373
- # print('------------------------------------------------------------------------')
374
- # print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Loss%: {acc_per_fold[i]}%')
375
- #print('------------------------------------------------------------------------')
376
- #print('Average scores for all folds:')
377
- #print(f'> Possible Loss %: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
378
- #print(f'> Loss: {np.mean(loss_per_fold)}')
379
- #print('------------------------------------------------------------------------')
380
-
381
- trek = df.iloc[0:len(df), 1:24]
382
- Y = trek[0:len(trek)]
383
- YP = trek[1:len(trek)]
384
- Y = Y.values.astype("float64")
385
- YP = YP.values.astype("float64")
386
- Y1 = Y['Close']
387
- Y2 = YP['Close']
388
- Yx = pd.DataFrame(YP, index=YP.index, columns=YP.columns)
389
- #X = sc.fit_transform(X.reshape(-1,22))
390
- Y = np.array(Y)
391
- Y1 = np.array(Y1)
392
- Y = sc.fit_transform(Y)
393
- Y1 = Y1.reshape(-1,1)
394
- Y1 = sc.fit_transform(Y1)
395
-
396
- train_X = Y.reshape(Y.shape[0],Y.shape[1],1)
397
- #Y = Y.reshape(-1,1)
398
- pred = model.predict(train_X, verbose=0)
399
- pred = np.array(pred).reshape(-1,1)
400
- var2 = max_error(pred.reshape(-1,1), Y1)
401
- print('Max Error: %f' % var2)
402
- prax[5] = float(var2)
403
- pred = sc.inverse_transform(pred)
404
-
405
- print(pred[-2], pred[-1])
406
- prax[3] = pred[-2]
407
- prax[4] = pred[-1]
408
- if(pred[-1]-pred[-2]>0):
409
- prax[6] = 1
410
- elif(pred[-1]-pred[-2]==0):
411
- prax[6] = 0
412
- else:
413
- prax[6] = -1
414
-
415
- # %%
416
- # Function to train the model (TFT)
417
- def modelTFT(csv_file, prax):
418
- train = csv_file
419
- #test = pd.read_csv("/kaggle/input/artemis-test/nifty_daily.csv")
420
- train['date'] = pd.to_datetime(train['Date/Time'])
421
- #test['date'] = pd.to_datetime(test['Date'])
422
-
423
- data = pd.concat([train], axis = 0, ignore_index=True)
424
- # Check that key is country-store-product-date combination
425
- #assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
426
- # Check that there is one date per country-store-product combination
427
- #assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data)//data['date'].nunique()
428
-
429
- #display(train.sample(4))
430
-
431
- """<a id ="3"></a><h3 style="background:#0554f2; border:0; border-radius: 4px; color:#f5f6f7">Model Implementation in Pytorch-Forecasting </h3>"""
432
-
433
- # Add a time_idx (a sequence of consecutive integers that goes from the min to the max date)
434
-
435
- data = (data.merge((data[['Date/Time']].drop_duplicates(ignore_index=True)
436
- .rename_axis('time_idx')).reset_index(), on = ['Date/Time']))
437
- # add additional features
438
- data["day_of_week"] = data['date'].dt.dayofweek.astype(str).astype("category") # categories have be strings
439
- data["week_of_year"] = data['date'].dt.isocalendar().week.astype(str).astype("category") # categories have be strings
440
- data["month"] = data['date'].dt.month.astype(str).astype("category") # categories have be strings
441
- #data["log_num_sold"] = np.log(data.num_sold + 1e-8)
442
- #data["avg_volume_by_country"] = data.groupby(["time_idx", "country"], observed=True).num_sold.transform("mean")
443
- #data["avg_volume_by_store"] = data.groupby(["time_idx", "store"], observed=True).num_sold.transform("mean")
444
- #data["avg_volume_by_product"] = data.groupby(["time_idx", "product"], observed=True).num_sold.transform("mean")
445
-
446
- #unique_dates_country = data[['date', 'Ticker']].drop_duplicates(ignore_index = True)
447
- #unique_dates_country['is_holiday'] = (unique_dates_country
448
- # .apply(lambda x: x.date in holidays.country_holidays(x.country), axis = 1).astype('category'))
449
- #unique_dates_country['is_holiday_lead_1'] = (unique_dates_country
450
- # .apply(lambda x: x.date+pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
451
- #unique_dates_country['is_holiday_lead_2'] = (unique_dates_country
452
- # .apply(lambda x: x.date+pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
453
- #unique_dates_country['is_holiday_lag_1'] = (unique_dates_country
454
- # .apply(lambda x: x.date-pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
455
- #unique_dates_country['is_holiday_lag_2'] = (unique_dates_country
456
- # .apply(lambda x: x.date-pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
457
- #data = data.merge(unique_dates_country, on = ['date', 'Ticker'], validate = "m:1")
458
- #del unique_dates_country
459
- gc.collect()
460
- data.sample(5, random_state=30)
461
-
462
- train = data.iloc[:len(train)]
463
- test = data.iloc[len(train):]
464
-
465
- max_prediction_length = 2
466
- max_encoder_length = train.date.nunique()
467
- training_cutoff = train["time_idx"].max() - max_prediction_length #we will validate on 2020
468
-
469
- # Let's create a Dataset
470
- training = TimeSeriesDataSet(
471
- train[lambda x: x.time_idx <= training_cutoff],
472
- time_idx="time_idx",
473
- target="Close",
474
- group_ids=["Ticker"],
475
- min_encoder_length=max_prediction_length, # keep encoder length long (as it is in the validation set)
476
- max_encoder_length=max_encoder_length,
477
- max_prediction_length=max_prediction_length,
478
- static_categoricals=["Ticker"],
479
- time_varying_known_categoricals=["month", "week_of_year", "day_of_week"],
480
- #variable_groups={"is_holiday": ["is_holiday"]}, # group of categorical variables can be treated as one variable
481
- time_varying_known_reals=["time_idx"],
482
- time_varying_unknown_categoricals=[],
483
- time_varying_unknown_reals=[
484
- 'Open','High','Low','Close','OI','RSI14','RSI44','HHRSI','Rsi Weekly','LLCHHV','white','Vap44','Vap14','BV11','SV11','Ema5','Ema20','Ema50','Ema200'
485
- ],
486
- target_normalizer=GroupNormalizer(
487
- groups=['Ticker'], transformation="softplus"
488
- ), # use softplus and normalize by group
489
- categorical_encoders={
490
- 'week_of_year':NaNLabelEncoder(add_nan=True)
491
- },
492
- #lags={'num_sold': [7, 30, 365]},
493
- add_relative_time_idx=True,
494
- add_target_scales=True,
495
- add_encoder_length=True,
496
- )
497
-
498
- # create validation set (predict=True) which means to predict the last max_prediction_length points in time
499
- # for each series
500
- validation = TimeSeriesDataSet.from_dataset(training, train, predict=True, stop_randomization=True)
501
-
502
- # create dataloaders for model
503
- batch_size = 128 # set this between 32 and 128
504
- train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
505
- val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)
506
-
507
- #let's see how a naive model does
508
-
509
- actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)]).cuda()
510
- baseline_predictions = Baseline().predict(val_dataloader).cuda()
511
- (actuals - baseline_predictions).abs().mean().item()
512
-
513
- sm = SMAPE()
514
-
515
- print(f"Median loss for naive prediction on validation: {sm.loss(actuals, baseline_predictions).mean(axis = 1).median().item()}")
516
-
517
- early_stop_callback = EarlyStopping(monitor="train_loss", min_delta=1e-2, patience=PATIENCE, verbose=False, mode="min")
518
- lr_logger = LearningRateMonitor() # log the learning rate
519
- logger = TensorBoardLogger("lightning_logs") # logging results to a tensorboard
520
-
521
- trainer = pl.Trainer(
522
- max_epochs=1,
523
- accelerator=ACCELERATOR,
524
- enable_model_summary=False,
525
- gradient_clip_val=0.25,
526
- limit_train_batches=10, # limit each training epoch to 10 batches
527
- #fast_dev_run=True, # comment in to check that the network or dataset has no serious bugs
528
- callbacks=[lr_logger, early_stop_callback],
529
- logger=logger,
530
- )
531
-
532
- tft = TemporalFusionTransformer.from_dataset(
533
- training,
534
- learning_rate=LEARNING_RATE,
535
- lstm_layers=2,
536
- hidden_size=16,
537
- attention_head_size=2,
538
- dropout=0.2,
539
- hidden_continuous_size=8,
540
- output_size=1, # single point forecast (the default of 7 quantiles applies only to QuantileLoss)
541
- loss=SMAPE(),
542
- log_interval=10, # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches
543
- reduce_on_plateau_patience=4
544
- )
545
-
546
- tft.to(DEVICE)
547
- trainer.fit(
548
- tft,
549
- train_dataloaders=train_dataloader,
550
- val_dataloaders=val_dataloader,
551
- )
552
- #torch.cuda.empty_cache()
553
- #print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")
554
-
555
- if OPTUNA:
556
- from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
557
-
558
- # create study
559
- study = optimize_hyperparameters(
560
- train_dataloader,
561
- val_dataloader,
562
- model_path="optuna_test",
563
- n_trials=5,
564
- max_epochs=MAX_EPOCHS,
565
- gradient_clip_val_range=(0.01, 0.3),
566
- hidden_size_range=(8, 24),
567
- hidden_continuous_size_range=(8, 12),
568
- attention_head_size_range=(2, 4),
569
- learning_rate_range=(0.01, 0.05),
570
- dropout_range=(0.1, 0.25),
571
- trainer_kwargs=dict(limit_train_batches=20),
572
- reduce_on_plateau_patience=4,
573
- pruner=optuna.pruners.MedianPruner(n_min_trials=3, n_startup_trials=3),
574
- use_learning_rate_finder=False, # use Optuna to find ideal learning rate or use in-built learning rate finder
575
- )
576
- #torch.cuda.empty_cache()
577
- #'''
578
- trainer = pl.Trainer(
579
- max_epochs=MAX_EPOCHS,
580
- accelerator=ACCELERATOR,
581
- enable_model_summary=False,
582
- gradient_clip_val=study.best_params['gradient_clip_val'],
583
- limit_train_batches=20, # limit each training epoch to 20 batches
584
- #fast_dev_run=True, # comment in to check that the network or dataset has no serious bugs
585
- callbacks=[lr_logger, early_stop_callback],
586
- logger=logger,
587
- )
588
-
589
- tft = TemporalFusionTransformer.from_dataset(
590
- training,
591
- learning_rate=study.best_params['learning_rate'],
592
- lstm_layers=2,
593
- hidden_size=study.best_params['hidden_size'],
594
- attention_head_size=study.best_params['attention_head_size'],
595
- dropout=study.best_params['dropout'],
596
- hidden_continuous_size=study.best_params['hidden_continuous_size'],
597
- output_size=1, # single point forecast (the default of 7 quantiles applies only to QuantileLoss)
598
- loss=SMAPE(),
599
- log_interval=10, # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches
600
- reduce_on_plateau_patience=4
601
- )
602
-
603
- tft.to(DEVICE)
604
- trainer.fit(
605
- tft,
606
- train_dataloaders=train_dataloader,
607
- val_dataloaders=val_dataloader,
608
- )
609
- #'''
610
- #torch.cuda.empty_cache()
611
- best_model_path = trainer.checkpoint_callback.best_model_path
612
- best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
613
- actuals = torch.cat([y[0] for x, y in iter(val_dataloader)]).cuda()
614
- predictions = best_tft.predict(val_dataloader, mode="prediction")
615
- raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)
616
-
617
- sm = SMAPE()
618
- print(f"Validation median SMAPE loss: {sm.loss(actuals, predictions.cuda()).mean(axis = 1).median().item()}")
619
- prax[5] = sm.loss(actuals, predictions).mean(axis = 1).median().item()
620
- #best_tft.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True);
621
-
622
- print(raw_predictions[0][0])
623
- prax[3] = '-'
624
- prax[4] = raw_predictions[0][0].data.cpu().tolist()[0][0]
625
- t = prax[4]
626
- tm = data['Close'][len(data)-1]
627
- if(t-tm>0):
628
- prax[6] = 1
629
- elif(t-tm==0):
630
- prax[6] = 0
631
- else:
632
- prax[6] = -1
633
- #prax[i][3] = raw_predictions[0][0].data[1]
634
- print("-----------")
635
-
636
- #with open("out.csv", "w", newline="") as f:
637
- # writer = csv.writer(f)
638
- # writer.writerows(prax)
639
-
640
- # %%
641
- # Function to train the model (TFT, with open-gap feature)
642
- def modelTFT_OpenGap(csv_file, prax):
643
- train = csv_file
644
- #test = pd.read_csv("/kaggle/input/artemis-test/nifty_daily.csv")
645
- train['date'] = pd.to_datetime(train['Date/Time'])
646
- #test['date'] = pd.to_datetime(test['Date'])
647
- datLength = len(train)
648
- train['O-C'] = 0
649
- for i in range(datLength):
650
- if i == 0:
651
- train['O-C'][i] = 0
652
- continue
653
- else:
654
- train['O-C'][i] = train['Open'][i] - train['Close'][i-1]
655
- data = pd.concat([train], axis = 0, ignore_index=True)
656
- # Check that key is country-store-product-date combination
657
- #assert len(data.drop_duplicates(['country', 'store', 'product', 'date'])) == len(data)
658
- # Check that there is one date per country-store-product combination
659
- #assert len(data.drop_duplicates(['country', 'store', 'product'])) == len(data)//data['date'].nunique()
660
-
661
- #display(train.sample(4))
662
-
663
- """<a id ="3"></a><h3 style="background:#0554f2; border:0; border-radius: 4px; color:#f5f6f7">Model Implementation in Pytorch-Forecasting </h3>"""
664
-
665
- # Add a time_idx (a sequence of consecutive integers that goes from the min to the max date)
666
-
667
- data = (data.merge((data[['Date/Time']].drop_duplicates(ignore_index=True)
668
- .rename_axis('time_idx')).reset_index(), on = ['Date/Time']))
669
- # add additional features
670
- data["day_of_week"] = data['date'].dt.dayofweek.astype(str).astype("category") # categories have be strings
671
- data["week_of_year"] = data['date'].dt.isocalendar().week.astype(str).astype("category") # categories have be strings
672
- data["month"] = data['date'].dt.month.astype(str).astype("category") # categories have be strings
673
- #data["log_num_sold"] = np.log(data.num_sold + 1e-8)
674
- #data["avg_volume_by_country"] = data.groupby(["time_idx", "country"], observed=True).num_sold.transform("mean")
675
- #data["avg_volume_by_store"] = data.groupby(["time_idx", "store"], observed=True).num_sold.transform("mean")
676
- #data["avg_volume_by_product"] = data.groupby(["time_idx", "product"], observed=True).num_sold.transform("mean")
677
-
678
- #unique_dates_country = data[['date', 'Ticker']].drop_duplicates(ignore_index = True)
679
- #unique_dates_country['is_holiday'] = (unique_dates_country
680
- # .apply(lambda x: x.date in holidays.country_holidays(x.country), axis = 1).astype('category'))
681
- #unique_dates_country['is_holiday_lead_1'] = (unique_dates_country
682
- # .apply(lambda x: x.date+pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
683
- #unique_dates_country['is_holiday_lead_2'] = (unique_dates_country
684
- # .apply(lambda x: x.date+pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
685
- #unique_dates_country['is_holiday_lag_1'] = (unique_dates_country
686
- # .apply(lambda x: x.date-pd.Timedelta(days=1) in holidays.country_holidays(x.country), axis = 1).astype('category'))
687
- #unique_dates_country['is_holiday_lag_2'] = (unique_dates_country
688
- # .apply(lambda x: x.date-pd.Timedelta(days=2) in holidays.country_holidays(x.country), axis = 1).astype('category'))
689
- #data = data.merge(unique_dates_country, on = ['date', 'Ticker'], validate = "m:1")
690
- #del unique_dates_country
691
- gc.collect()
692
- data.sample(5, random_state=30)
693
-
694
- train = data.iloc[:len(train)]
695
- test = data.iloc[len(train):]
696
-
697
- max_prediction_length = 2
698
- max_encoder_length = train.date.nunique()
699
- training_cutoff = train["time_idx"].max() - max_prediction_length #we will validate on 2020
700
-
701
- # Let's create a Dataset
702
- training = TimeSeriesDataSet(
703
- train[lambda x: x.time_idx <= training_cutoff],
704
- time_idx="time_idx",
705
- target="Close",
706
- group_ids=["Ticker"],
707
- min_encoder_length=max_prediction_length, # keep encoder length long (as it is in the validation set)
708
- max_encoder_length=max_encoder_length,
709
- max_prediction_length=max_prediction_length,
710
- static_categoricals=["Ticker"],
711
- time_varying_known_categoricals=["month", "week_of_year", "day_of_week"],
712
- #variable_groups={"is_holiday": ["is_holiday"]}, # group of categorical variables can be treated as one variable
713
- time_varying_known_reals=["time_idx"],
714
- time_varying_unknown_categoricals=[],
715
- time_varying_unknown_reals=[
716
- 'Open','High','Low','Close','OI','RSI14','RSI44','HHRSI','Rsi Weekly','LLCHHV','white','Vap44','Vap14','BV11','SV11','Ema5','Ema20','Ema50','Ema200', 'O-C'
717
- ],
718
- target_normalizer=GroupNormalizer(
719
- groups=['Ticker'], transformation="softplus"
720
- ), # use softplus and normalize by group
721
- categorical_encoders={
722
- 'week_of_year':NaNLabelEncoder(add_nan=True)
723
- },
724
- #lags={'num_sold': [7, 30, 365]},
725
- add_relative_time_idx=True,
726
- add_target_scales=True,
727
- add_encoder_length=True,
728
- )
729
-
730
- # create validation set (predict=True) which means to predict the last max_prediction_length points in time
731
- # for each series
732
- validation = TimeSeriesDataSet.from_dataset(training, train, predict=True, stop_randomization=True)
733
-
734
- # create dataloaders for model
735
- batch_size = 128 # set this between 32 and 128
736
- train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
737
- val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)
738
-
739
- #let's see how a naive model does
740
-
741
- actuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)]).cuda()
742
- baseline_predictions = Baseline().predict(val_dataloader).cuda()
743
- (actuals - baseline_predictions).abs().mean().item()
744
-
745
- sm = SMAPE()
746
-
747
- print(f"Median loss for naive prediction on validation: {sm.loss(actuals, baseline_predictions).mean(axis = 1).median().item()}")
748
-
749
- early_stop_callback = EarlyStopping(monitor="train_loss", min_delta=1e-2, patience=PATIENCE, verbose=False, mode="min")
750
- lr_logger = LearningRateMonitor() # log the learning rate
751
- logger = TensorBoardLogger("lightning_logs") # logging results to a tensorboard
752
-
753
- trainer = pl.Trainer(
754
- max_epochs=1,
755
- accelerator=ACCELERATOR,
756
- enable_model_summary=False,
757
- gradient_clip_val=0.25,
758
- limit_train_batches=10, # limit each training epoch to 10 batches
759
- #fast_dev_run=True, # comment in to check that the network or dataset has no serious bugs
760
- callbacks=[lr_logger, early_stop_callback],
761
- logger=logger,
762
- )
763
-
764
- tft = TemporalFusionTransformer.from_dataset(
765
- training,
766
- learning_rate=LEARNING_RATE,
767
- lstm_layers=2,
768
- hidden_size=16,
769
- attention_head_size=2,
770
- dropout=0.2,
771
- hidden_continuous_size=8,
772
- output_size=1, # single point forecast (the default of 7 quantiles applies only to QuantileLoss)
773
- loss=SMAPE(),
774
- log_interval=10, # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches
775
- reduce_on_plateau_patience=4
776
- )
777
-
778
- tft.to(DEVICE)
779
- trainer.fit(
780
- tft,
781
- train_dataloaders=train_dataloader,
782
- val_dataloaders=val_dataloader,
783
- )
784
- #torch.cuda.empty_cache()
785
- #print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")
786
-
787
- if OPTUNA:
788
- from pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters
789
-
790
- # create study
791
- study = optimize_hyperparameters(
792
- train_dataloader,
793
- val_dataloader,
794
- model_path="optuna_test",
795
- n_trials=5,
796
- max_epochs=MAX_EPOCHS,
797
- gradient_clip_val_range=(0.01, 0.3),
798
- hidden_size_range=(8, 24),
799
- hidden_continuous_size_range=(8, 12),
800
- attention_head_size_range=(2, 4),
801
- learning_rate_range=(0.01, 0.05),
802
- dropout_range=(0.1, 0.25),
803
- trainer_kwargs=dict(limit_train_batches=20),
804
- reduce_on_plateau_patience=4,
805
- pruner=optuna.pruners.MedianPruner(n_min_trials=3, n_warmup_steps=3),
806
- use_learning_rate_finder=False, # use Optuna to find ideal learning rate or use in-built learning rate finder
807
- )
808
- #torch.cuda.empty_cache()
809
- #'''
810
- trainer = pl.Trainer(
811
- max_epochs=MAX_EPOCHS,
812
- accelerator=ACCELERATOR,
813
- enable_model_summary=False,
814
- gradient_clip_val=study.best_params['gradient_clip_val'],
815
- limit_train_batches=20, # limit each training epoch to 20 batches
816
- #fast_dev_run=True, # comment in to check that the network or dataset has no serious bugs
817
- callbacks=[lr_logger, early_stop_callback],
818
- logger=logger,
819
- )
820
-
821
- tft = TemporalFusionTransformer.from_dataset(
822
- training,
823
- learning_rate=study.best_params['learning_rate'],
824
- lstm_layers=2,
825
- hidden_size=study.best_params['hidden_size'],
826
- attention_head_size=study.best_params['attention_head_size'],
827
- dropout=study.best_params['dropout'],
828
- hidden_continuous_size=study.best_params['hidden_continuous_size'],
829
- output_size=1, # single point forecast (the default of 7 quantiles applies only to QuantileLoss)
830
- loss=SMAPE(),
831
- log_interval=10, # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches
832
- reduce_on_plateau_patience=4
833
- )
834
-
835
- tft.to(DEVICE)
836
- trainer.fit(
837
- tft,
838
- train_dataloaders=train_dataloader,
839
- val_dataloaders=val_dataloader,
840
- )
841
- #'''
842
- #torch.cuda.empty_cache()
843
- best_model_path = trainer.checkpoint_callback.best_model_path
844
- best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
845
- actuals = torch.cat([y[0] for x, y in iter(val_dataloader)]).cuda()
846
- predictions = best_tft.predict(val_dataloader, mode="prediction")
847
- raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)
848
-
849
- sm = SMAPE()
850
- print(f"Validation median SMAPE loss: {sm.loss(actuals, predictions.cuda()).mean(axis = 1).median().item()}")
851
- prax[5] = sm.loss(actuals, predictions).mean(axis = 1).median().item()
852
- #best_tft.plot_prediction(raw_predictions.x, raw_predictions.output, idx=0, add_loss_to_title=True);
853
-
854
- print(raw_predictions[0][0])
855
- prax[3] = '-'
856
- prax[4] = raw_predictions[0][0].data.cpu().tolist()[0][0]
857
- t = prax[4]
858
- tm = data['Close'][len(data)-1]
859
- if(t-tm>0):
860
- prax[6] = 1
861
- elif(t-tm==0):
862
- prax[6] = 0
863
- else:
864
- prax[6] = -1
865
- #prax[i][3] = raw_predictions[0][0].data[1]
866
- print("-----------")
867
-
868
- #with open("out.csv", "w", newline="") as f:
869
- # writer = csv.writer(f)
870
- # writer.writerows(prax)
871
-
872
- # %%
873
- def generate_csv(data_list):
874
- filename = f"result.csv"
875
- file_exists = os.path.isfile(filename)
876
- with open(filename, mode='a', newline='') as csv_file:
877
- fieldnames = ['Ticker', 'Prev_Close_Real', 'Model', 'Prev_Close_Model', 'Close_Model', 'Max_Err', 'Up_Down' ] # replace with your own column names
878
- writer = csv.writer(csv_file, delimiter=',')
879
- if not file_exists:
880
- writer.writerow(fieldnames) # file doesn't exist yet, write a header
881
- writer.writerow(data_list)
882
- csv_file.close()
883
-
884
- def fileOutput():
885
- today = date.today().strftime("%Y_%m_%d")
886
- filename = f"result.csv"
887
- shutil.copyfile(filename, f"result_{today}.csv")
888
- return f"result_{today}.csv"
889
-
890
- def guess_date(string):
891
- for fmt in ["%Y/%m/%d", "%d-%m-%Y", "%Y%m%d", "%m/%d/%Y", "%d/%m/%Y", "%Y-%m-%d", "%d/%m/%y", "%m/%d/%y"]:
892
- try:
893
- return datetime.datetime.strptime(string, fmt).date()
894
- except ValueError:
895
- continue
896
- raise ValueError(string)
897
-
898
- # %%
899
- # Main function
900
- def main(files):
901
- # Get a list of all the CSV files uploaded
902
- prax = [0,0,0,0,0,0,0]
903
- for idx, file in enumerate(files):
904
- print(f"File #{idx+1}: {file}")
905
- print(file.name)
906
- df = pd.read_csv(file.name)
907
- print(df['Ticker'][0])
908
- prax[0] = df['Ticker'][0]
909
- prax[1] = df['Close'][len(df)-1]
910
- print('------------------')
911
- df = df.drop(['EMARSI'], axis=1)
912
- #df['Date/Time'] = pd.to_datetime(df['Date/Time'])
913
- for i in range(len(df)):
914
- x = guess_date(df['Date/Time'][i])
915
- df['Date/Time'][i] = x.strftime("%Y-%m-%d")
916
- df['Date/Time'] = pd.to_datetime(df['Date/Time'])
917
- df.fillna(0, inplace=True)
918
- #df.to_csv('out.csv')
919
- modelTFT(df, prax)
920
- prax[2] = "TFT"
921
- generate_csv(prax)
922
- modelTFT_OpenGap(df, prax)
923
- prax[2] = "TFT_OpenGap"
924
- generate_csv(prax)
925
- #df.set_index('Date/Time', inplace=True)
926
- #df = df.drop(['Date/Time'], axis=1)
927
- #modelCNNLSTM(df, prax)
928
- #prax[2] = "CNNLSTM"
929
- #generate_csv(prax)
930
- #modelCNNLSTM_OpenGap(df, prax)
931
- #prax[2] = "CNNLSTM_OpenGap"
932
- #generate_csv(prax)
933
- # Generate blank line
934
- prax=["","","","","","",""]
935
- generate_csv(prax)
936
- # Reset prax
937
- prax = [0,0,0,0,0,0,0]
938
- f1 = fileOutput()
939
- return f1
940
-
941
- gradioApp = gr.Interface(fn=main, inputs=gr.File(file_count="multiple", file_type=".csv"), outputs="file")
942
-
943
-
944
- if __name__ == "__main__":
945
- # Calling main function
946
- gradioApp.launch()
spaces/AI-Zero-to-Hero/04-GR-Seq-2-Seq-QA-Auto-Gen/qasrl_model_pipeline.py DELETED
@@ -1,183 +0,0 @@
1
- from typing import Optional
2
- import json
3
- from argparse import Namespace
4
- from pathlib import Path
5
- from transformers import Text2TextGenerationPipeline, AutoModelForSeq2SeqLM, AutoTokenizer
6
-
7
- def get_markers_for_model(is_t5_model: bool) -> Namespace:
8
- special_tokens_constants = Namespace()
9
- if is_t5_model:
10
- # T5 model have 100 special tokens by default
11
- special_tokens_constants.separator_input_question_predicate = "<extra_id_1>"
12
- special_tokens_constants.separator_output_answers = "<extra_id_3>"
13
- special_tokens_constants.separator_output_questions = "<extra_id_5>" # if using only questions
14
- special_tokens_constants.separator_output_question_answer = "<extra_id_7>"
15
- special_tokens_constants.separator_output_pairs = "<extra_id_9>"
16
- special_tokens_constants.predicate_generic_marker = "<extra_id_10>"
17
- special_tokens_constants.predicate_verb_marker = "<extra_id_11>"
18
- special_tokens_constants.predicate_nominalization_marker = "<extra_id_12>"
19
-
20
- else:
21
- special_tokens_constants.separator_input_question_predicate = "<question_predicate_sep>"
22
- special_tokens_constants.separator_output_answers = "<answers_sep>"
23
- special_tokens_constants.separator_output_questions = "<question_sep>" # if using only questions
24
- special_tokens_constants.separator_output_question_answer = "<question_answer_sep>"
25
- special_tokens_constants.separator_output_pairs = "<qa_pairs_sep>"
26
- special_tokens_constants.predicate_generic_marker = "<predicate_marker>"
27
- special_tokens_constants.predicate_verb_marker = "<verbal_predicate_marker>"
28
- special_tokens_constants.predicate_nominalization_marker = "<nominalization_predicate_marker>"
29
- return special_tokens_constants
30
-
31
- def load_trained_model(name_or_path):
32
- import huggingface_hub as HFhub
33
- tokenizer = AutoTokenizer.from_pretrained(name_or_path)
34
- model = AutoModelForSeq2SeqLM.from_pretrained(name_or_path)
35
- # load preprocessing_kwargs from the model repo on HF hub, or from the local model directory
36
- kwargs_filename = None
37
- if name_or_path.startswith("kleinay/"): # and 'preprocessing_kwargs.json' in HFhub.list_repo_files(name_or_path): # the supported version of HFhub doesn't support list_repo_files
38
- kwargs_filename = HFhub.hf_hub_download(repo_id=name_or_path, filename="preprocessing_kwargs.json")
39
- elif Path(name_or_path).is_dir() and (Path(name_or_path) / "experiment_kwargs.json").exists():
40
- kwargs_filename = Path(name_or_path) / "experiment_kwargs.json"
41
-
42
- if kwargs_filename:
43
- preprocessing_kwargs = json.load(open(kwargs_filename))
44
- # integrate into model.config (for decoding args, e.g. "num_beams"), and save also as standalone object for preprocessing
45
- model.config.preprocessing_kwargs = Namespace(**preprocessing_kwargs)
46
- model.config.update(preprocessing_kwargs)
47
- return model, tokenizer
48
-
49
-
50
- class QASRL_Pipeline(Text2TextGenerationPipeline):
51
- def __init__(self, model_repo: str, **kwargs):
52
- model, tokenizer = load_trained_model(model_repo)
53
- super().__init__(model, tokenizer, framework="pt")
54
- self.is_t5_model = "t5" in model.config.model_type
55
- self.special_tokens = get_markers_for_model(self.is_t5_model)
56
- self.data_args = model.config.preprocessing_kwargs
57
- # backward compatibility - default keyword values implemeted in `run_summarization`, thus not saved in `preprocessing_kwargs`
58
- if "predicate_marker_type" not in vars(self.data_args):
59
- self.data_args.predicate_marker_type = "generic"
60
- if "use_bilateral_predicate_marker" not in vars(self.data_args):
61
- self.data_args.use_bilateral_predicate_marker = True
62
- if "append_verb_form" not in vars(self.data_args):
63
- self.data_args.append_verb_form = True
64
- self._update_config(**kwargs)
65
-
66
- def _update_config(self, **kwargs):
67
- " Update self.model.config with initialization parameters and necessary defaults. "
68
- # set default values that will always override model.config, but can overriden by __init__ kwargs
69
- kwargs["max_length"] = kwargs.get("max_length", 80)
70
- # override model.config with kwargs
71
- for k,v in kwargs.items():
72
- self.model.config.__dict__[k] = v
73
-
74
- def _sanitize_parameters(self, **kwargs):
75
- preprocess_kwargs, forward_kwargs, postprocess_kwargs = {}, {}, {}
76
- if "predicate_marker" in kwargs:
77
- preprocess_kwargs["predicate_marker"] = kwargs["predicate_marker"]
78
- if "predicate_type" in kwargs:
79
- preprocess_kwargs["predicate_type"] = kwargs["predicate_type"]
80
- if "verb_form" in kwargs:
81
- preprocess_kwargs["verb_form"] = kwargs["verb_form"]
82
- return preprocess_kwargs, forward_kwargs, postprocess_kwargs
83
-
84
- def preprocess(self, inputs, predicate_marker="<predicate>", predicate_type=None, verb_form=None):
85
- # Here, inputs is string or list of strings; apply string postprocessing
86
- if isinstance(inputs, str):
87
- processed_inputs = self._preprocess_string(inputs, predicate_marker, predicate_type, verb_form)
88
- elif hasattr(inputs, "__iter__"):
89
- processed_inputs = [self._preprocess_string(s, predicate_marker, predicate_type, verb_form) for s in inputs]
90
- else:
91
- raise ValueError("inputs must be str or Iterable[str]")
92
- # Now pass to super.preprocess for tokenization
93
- return super().preprocess(processed_inputs)
94
-
95
- def _preprocess_string(self, seq: str, predicate_marker: str, predicate_type: Optional[str], verb_form: Optional[str]) -> str:
96
- sent_tokens = seq.split(" ")
97
- assert predicate_marker in sent_tokens, f"Input sentence must include a predicate-marker token ('{predicate_marker}') before the target predicate word"
98
- predicate_idx = sent_tokens.index(predicate_marker)
99
- sent_tokens.remove(predicate_marker)
100
- sentence_before_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx)])
101
- predicate = sent_tokens[predicate_idx]
102
- sentence_after_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx+1, len(sent_tokens))])
103
-
104
- if self.data_args.predicate_marker_type == "generic":
105
- predicate_marker = self.special_tokens.predicate_generic_marker
106
- # In case we want special marker for each predicate type: """
107
- elif self.data_args.predicate_marker_type == "pred_type":
108
- assert predicate_type is not None, "For this model, you must provide the `predicate_type` either when initializing QASRL_Pipeline(...) or when applying __call__(...) on it"
109
- assert predicate_type in ("verbal", "nominal"), f"`predicate_type` must be either 'verbal' or 'nominal'; got '{predicate_type}'"
110
- predicate_marker = {"verbal": self.special_tokens.predicate_verb_marker ,
111
- "nominal": self.special_tokens.predicate_nominalization_marker
112
- }[predicate_type]
113
-
114
- if self.data_args.use_bilateral_predicate_marker:
115
- seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {predicate_marker} {sentence_after_predicate}"
116
- else:
117
- seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {sentence_after_predicate}"
118
-
119
- # embed also verb_form
120
- if self.data_args.append_verb_form and verb_form is None:
121
- raise ValueError(f"For this model, you must provide the `verb_form` of the predicate when applying __call__(...)")
122
- elif self.data_args.append_verb_form:
123
- seq = f"{seq} {self.special_tokens.separator_input_question_predicate} {verb_form} "
124
- else:
125
- seq = f"{seq} "
126
-
127
- # append source prefix (for t5 models)
128
- prefix = self._get_source_prefix(predicate_type)
129
-
130
- return prefix + seq
131
-
132
- def _get_source_prefix(self, predicate_type: Optional[str]):
133
- if not self.is_t5_model or self.data_args.source_prefix is None:
134
- return ''
135
- if not self.data_args.source_prefix.startswith("<"): # Regular prefix - not dependent on input row x
136
- return self.data_args.source_prefix
137
- if self.data_args.source_prefix == "<predicate-type>":
138
- if predicate_type is None:
139
- raise ValueError("source_prefix is '<predicate-type>' but input no `predicate_type`.")
140
- else:
141
- return f"Generate QAs for {predicate_type} QASRL: "
142
-
143
- def _forward(self, *args, **kwargs):
144
- outputs = super()._forward(*args, **kwargs)
145
- return outputs
146
-
147
-
148
- def postprocess(self, model_outputs):
149
- output_seq = self.tokenizer.decode(
150
- model_outputs["output_ids"].squeeze(),
151
- skip_special_tokens=False,
152
- clean_up_tokenization_spaces=False,
153
- )
154
- output_seq = output_seq.strip(self.tokenizer.pad_token).strip(self.tokenizer.eos_token).strip()
155
- qa_subseqs = output_seq.split(self.special_tokens.separator_output_pairs)
156
- qas = [self._postrocess_qa(qa_subseq) for qa_subseq in qa_subseqs]
157
- return {"generated_text": output_seq,
158
- "QAs": qas}
159
-
160
- def _postrocess_qa(self, seq: str) -> str:
161
- # split question and answers
162
- if self.special_tokens.separator_output_question_answer in seq:
163
- question, answer = seq.split(self.special_tokens.separator_output_question_answer)[:2]
164
- else:
165
- print("invalid format: no separator between question and answer found...")
166
- return None
167
- # question, answer = seq, '' # Or: backoff to only question
168
- # skip "_" slots in questions
169
- question = ' '.join(t for t in question.split(' ') if t != '_')
170
- answers = [a.strip() for a in answer.split(self.special_tokens.separator_output_answers)]
171
- return {"question": question, "answers": answers}
172
-
173
-
174
- if __name__ == "__main__":
175
- pipe = QASRL_Pipeline("kleinay/qanom-seq2seq-model-baseline")
176
- res1 = pipe("The student was interested in Luke 's <predicate> research about sea animals .", verb_form="research", predicate_type="nominal")
177
- res2 = pipe(["The doctor was interested in Luke 's <predicate> treatment .",
178
- "The Veterinary student was interested in Luke 's <predicate> treatment of sea animals ."], verb_form="treat", predicate_type="nominal", num_beams=10)
179
- res3 = pipe("A number of professions have <predicate> developed that specialize in the treatment of mental disorders .", verb_form="develop", predicate_type="verbal")
180
- print(res1)
181
- print(res2)
182
- print(res3)
183
-
spaces/AIConsultant/MusicGen/audiocraft/utils/profiler.py DELETED
@@ -1,38 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import logging
8
- import typing as tp
9
-
10
- import dora
11
- import torch
12
-
13
-
14
- logger = logging.getLogger(__name__)
15
-
16
-
17
- class Profiler:
18
- """Context manager wrapper for xformers profiler.
19
- """
20
- def __init__(self, module: torch.nn.Module, enabled: bool = False):
21
- self.profiler: tp.Optional[tp.Any] = None
22
- if enabled:
23
- from xformers.profiler import profile
24
- output_dir = dora.get_xp().folder / 'profiler_data'
25
- logger.info("Profiling activated, results with be saved to %s", output_dir)
26
- self.profiler = profile(output_dir=output_dir, module=module)
27
-
28
- def step(self):
29
- if self.profiler is not None:
30
- self.profiler.step() # type: ignore
31
-
32
- def __enter__(self):
33
- if self.profiler is not None:
34
- return self.profiler.__enter__() # type: ignore
35
-
36
- def __exit__(self, exc_type, exc_value, exc_tb):
37
- if self.profiler is not None:
38
- return self.profiler.__exit__(exc_type, exc_value, exc_tb) # type: ignore
spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/vocoder/vocoder_base.py DELETED
@@ -1,137 +0,0 @@
1
- import os
2
- import torch
3
- import torch.distributed as dist
4
- from torch import nn
5
- from torch.utils.data import DistributedSampler
6
- from tasks.vocoder.dataset_utils import VocoderDataset, EndlessDistributedSampler
7
- from text_to_speech.utils.audio.io import save_wav
8
- from text_to_speech.utils.commons.base_task import BaseTask
9
- from text_to_speech.utils.commons.dataset_utils import data_loader
10
- from text_to_speech.utils.commons.hparams import hparams
11
- from text_to_speech.utils.commons.tensor_utils import tensors_to_scalars
12
-
13
-
14
- class VocoderBaseTask(BaseTask):
15
- def __init__(self):
16
- super(VocoderBaseTask, self).__init__()
17
- self.max_sentences = hparams['max_sentences']
18
- self.max_valid_sentences = hparams['max_valid_sentences']
19
- if self.max_valid_sentences == -1:
20
- hparams['max_valid_sentences'] = self.max_valid_sentences = self.max_sentences
21
- self.dataset_cls = VocoderDataset
22
-
23
- @data_loader
24
- def train_dataloader(self):
25
- train_dataset = self.dataset_cls('train', shuffle=True)
26
- return self.build_dataloader(train_dataset, True, self.max_sentences, hparams['endless_ds'])
27
-
28
- @data_loader
29
- def val_dataloader(self):
30
- valid_dataset = self.dataset_cls('test', shuffle=False)
31
- return self.build_dataloader(valid_dataset, False, self.max_valid_sentences)
32
-
33
- @data_loader
34
- def test_dataloader(self):
35
- test_dataset = self.dataset_cls('test', shuffle=False)
36
- return self.build_dataloader(test_dataset, False, self.max_valid_sentences)
37
-
38
- def build_dataloader(self, dataset, shuffle, max_sentences, endless=False):
39
- world_size = 1
40
- rank = 0
41
- if dist.is_initialized():
42
- world_size = dist.get_world_size()
43
- rank = dist.get_rank()
44
- sampler_cls = DistributedSampler if not endless else EndlessDistributedSampler
45
- train_sampler = sampler_cls(
46
- dataset=dataset,
47
- num_replicas=world_size,
48
- rank=rank,
49
- shuffle=shuffle,
50
- )
51
- return torch.utils.data.DataLoader(
52
- dataset=dataset,
53
- shuffle=False,
54
- collate_fn=dataset.collater,
55
- batch_size=max_sentences,
56
- num_workers=dataset.num_workers,
57
- sampler=train_sampler,
58
- pin_memory=True,
59
- )
60
-
61
- def build_optimizer(self, model):
62
- optimizer_gen = torch.optim.AdamW(self.model_gen.parameters(), lr=hparams['lr'],
63
- betas=[hparams['adam_b1'], hparams['adam_b2']])
64
- optimizer_disc = torch.optim.AdamW(self.model_disc.parameters(), lr=hparams['lr'],
65
- betas=[hparams['adam_b1'], hparams['adam_b2']])
66
- return [optimizer_gen, optimizer_disc]
67
-
68
- def build_scheduler(self, optimizer):
69
- return {
70
- "gen": torch.optim.lr_scheduler.StepLR(
71
- optimizer=optimizer[0],
72
- **hparams["generator_scheduler_params"]),
73
- "disc": torch.optim.lr_scheduler.StepLR(
74
- optimizer=optimizer[1],
75
- **hparams["discriminator_scheduler_params"]),
76
- }
77
-
78
- def validation_step(self, sample, batch_idx):
79
- outputs = {}
80
- total_loss, loss_output = self._training_step(sample, batch_idx, 0)
81
- outputs['losses'] = tensors_to_scalars(loss_output)
82
- outputs['total_loss'] = tensors_to_scalars(total_loss)
83
-
84
- if self.global_step % hparams['valid_infer_interval'] == 0 and \
85
- batch_idx < 10:
86
- mels = sample['mels']
87
- y = sample['wavs']
88
- f0 = sample['f0']
89
- y_ = self.model_gen(mels, f0)
90
- for idx, (wav_pred, wav_gt, item_name) in enumerate(zip(y_, y, sample["item_name"])):
91
- wav_pred = wav_pred / wav_pred.abs().max()
92
- if self.global_step == 0:
93
- wav_gt = wav_gt / wav_gt.abs().max()
94
- self.logger.add_audio(f'wav_{batch_idx}_{idx}_gt', wav_gt, self.global_step,
95
- hparams['audio_sample_rate'])
96
- self.logger.add_audio(f'wav_{batch_idx}_{idx}_pred', wav_pred, self.global_step,
97
- hparams['audio_sample_rate'])
98
- return outputs
99
-
100
- def test_start(self):
101
- self.gen_dir = os.path.join(hparams['work_dir'],
102
- f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
103
- os.makedirs(self.gen_dir, exist_ok=True)
104
-
105
- def test_step(self, sample, batch_idx):
106
- mels = sample['mels']
107
- y = sample['wavs']
108
- f0 = sample['f0']
109
- loss_output = {}
110
- y_ = self.model_gen(mels, f0)
111
- gen_dir = os.path.join(hparams['work_dir'], f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}')
112
- os.makedirs(gen_dir, exist_ok=True)
113
- for idx, (wav_pred, wav_gt, item_name) in enumerate(zip(y_, y, sample["item_name"])):
114
- wav_gt = wav_gt.clamp(-1, 1)
115
- wav_pred = wav_pred.clamp(-1, 1)
116
- save_wav(
117
- wav_gt.view(-1).cpu().float().numpy(), f'{gen_dir}/{item_name}_gt.wav',
118
- hparams['audio_sample_rate'])
119
- save_wav(
120
- wav_pred.view(-1).cpu().float().numpy(), f'{gen_dir}/{item_name}_pred.wav',
121
- hparams['audio_sample_rate'])
122
- return loss_output
123
-
124
- def test_end(self, outputs):
125
- return {}
126
-
127
- def on_before_optimization(self, opt_idx):
128
- if opt_idx == 0:
129
- nn.utils.clip_grad_norm_(self.model_gen.parameters(), hparams['generator_grad_norm'])
130
- else:
131
- nn.utils.clip_grad_norm_(self.model_disc.parameters(), hparams["discriminator_grad_norm"])
132
-
133
- def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx):
134
- if optimizer_idx == 0:
135
- self.scheduler['gen'].step(self.global_step // hparams['accumulate_grad_batches'])
136
- else:
137
- self.scheduler['disc'].step(self.global_step // hparams['accumulate_grad_batches'])
spaces/AIWaves/SOP_Generation-single/template.py DELETED
@@ -1,111 +0,0 @@
1
- ## default { "temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613","log_path": "logs/{your name}"}
2
- LLM = {
3
- "temperature": 0.0,
4
- "model": "gpt-3.5-turbo-16k-0613",
5
- "log_path": "logs/god"
6
- }
7
-
8
-
9
- Agents = {
10
- "Lilong" : {
11
- "style" : "professional",
12
- "roles" : {
13
- "company" : "coder",
14
- "state2" : "role2",
15
- },
16
- "name2" : {
17
- "style" : "professional",
18
- "roles" : {
19
- "company" : "coder",
20
- "state2" : "role2",
21
- },
22
- }
23
- }
24
- }
25
-
26
- # indispensable parameter: "controller_type"("order","random","rule")
27
- # default extract words: "end". You can choose not to fill in this parameter
28
- controller = {
29
- "controller_type": "order",
30
- "max_chat_nums" : 12,
31
- "judge_system_prompt": "",
32
- "judge_last_prompt": "",
33
- "judge_extract_words": "end",
34
- "call_system_prompt" : "",
35
- "call_last_prompt": "",
36
- "call_extract_words": ""
37
- }
38
-
39
- #
40
- Agent_state = {
41
- "role": {
42
- "LLM_type": "OpenAI",
43
- "LLM": LLM,
44
- "style": {
45
- "role": "Opening Advocate for the Affirmative",
46
- "style": "professional"
47
- },
48
- "task": {
49
- "task": ""
50
- },
51
- "rule": {
52
- "rule": ""
53
- }
54
- },
55
- }
56
-
57
-
58
- # indispensable parameter: "agent_states","controller"
59
- # "roles" determines the speaking order when the rule is order. If not set, it is the default order.
60
- # "begin_query" & "begin_role" determines the first speaker.It often determines the direction of the next speech. If you do not set it, it will default to the first agent.
61
- # "environment_prompt" : Responsible for setting the scene for the current environment
62
- State = {
63
- "controller": controller,
64
- "begin_role": "",
65
- "begin_query": "",
66
- "environment_prompt": "",
67
- "roles": ["role1","role2"],
68
- "LLM_type": "OpenAI",
69
- "LLM": LLM,
70
- "agent_state" : Agent_state,
71
- }
72
-
73
-
74
-
75
- States = {
76
- "end_state":{
77
- "agent_states":{}
78
- },
79
- "state1" : State
80
-
81
- }
82
-
83
-
84
- # default finish_state_name is "end_state"
85
- # "environment_type" : "competive" : different states not share the memory; "cooperative":diffrent states share the memory
86
- SOP = {
87
- "config" : {
88
- "API_KEY" : "Your key",
89
- "PROXY" : "Your PROXY",
90
- "MAX_CHAT_HISTORY" : "5",
91
- "User_Names" : "[\"alexander\"]"
92
- },
93
- "environment_type" : "competive",
94
- "LLM_type": "OpenAI",
95
- "LLM" :LLM,
96
- "root": "state1",
97
- "finish_state_name" : "end_state",
98
- "relations": {
99
- "state1": {
100
- "0": "state1",
101
- "1": "state2"
102
- },
103
- "state2":{
104
- "0":"state2",
105
- "1":"end_state"
106
- }
107
- },
108
- "agents": Agents,
109
- "states": States,
110
- }
111
-
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/phi/m.d.ts DELETED
@@ -1,65 +0,0 @@
- /* tslint:disable */
- /* eslint-disable */
- /**
- */
- export class Model {
- free(): void;
- /**
- * @param {Uint8Array} weights
- * @param {Uint8Array} tokenizer
- * @param {boolean} quantized
- */
- constructor(weights: Uint8Array, tokenizer: Uint8Array, quantized: boolean);
- /**
- * @param {string} prompt
- * @param {number} temp
- * @param {number} top_p
- * @param {number} repeat_penalty
- * @param {number} repeat_last_n
- * @param {bigint} seed
- * @returns {string}
- */
- init_with_prompt(prompt: string, temp: number, top_p: number, repeat_penalty: number, repeat_last_n: number, seed: bigint): string;
- /**
- * @returns {string}
- */
- next_token(): string;
- }
-
- export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module;
-
- export interface InitOutput {
- readonly memory: WebAssembly.Memory;
- readonly __wbg_model_free: (a: number) => void;
- readonly model_load: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
- readonly model_init_with_prompt: (a: number, b: number, c: number, d: number, e: number, f: number, g: number, h: number, i: number) => void;
- readonly model_next_token: (a: number, b: number) => void;
- readonly main: (a: number, b: number) => number;
- readonly __wbindgen_add_to_stack_pointer: (a: number) => number;
- readonly __wbindgen_malloc: (a: number, b: number) => number;
- readonly __wbindgen_realloc: (a: number, b: number, c: number, d: number) => number;
- readonly __wbindgen_free: (a: number, b: number, c: number) => void;
- readonly __wbindgen_exn_store: (a: number) => void;
- readonly __wbindgen_start: () => void;
- }
-
- export type SyncInitInput = BufferSource | WebAssembly.Module;
- /**
- * Instantiates the given `module`, which can either be bytes or
- * a precompiled `WebAssembly.Module`.
- *
- * @param {SyncInitInput} module
- *
- * @returns {InitOutput}
- */
- export function initSync(module: SyncInitInput): InitOutput;
-
- /**
- * If `module_or_path` is {RequestInfo} or {URL}, makes a request and
- * for everything else, calls `WebAssembly.instantiate` directly.
- *
- * @param {InitInput | Promise<InitInput>} module_or_path
- *
- * @returns {Promise<InitOutput>}
- */
- export default function __wbg_init (module_or_path?: InitInput | Promise<InitInput>): Promise<InitOutput>;
 
spaces/Adapter/T2I-Adapter/ldm/modules/distributions/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dialog/methods/Methods.js DELETED
@@ -1,12 +0,0 @@
- import ButtonMethods from './ButtonMethods.js';
- import ModalMethods from './ModalMethods.js';
-
- var Methods = {};
-
- Object.assign(
-     Methods,
-     ButtonMethods,
-     ModalMethods,
- )
-
- export default Methods;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
- import Folder from './Folder';
-
- export default function (
-     config?: Folder.IConfig
- ): Folder;
 
spaces/Ajaymaurya1008/meme-identifier/app.py DELETED
@@ -1,34 +0,0 @@
-
- import streamlit as st
- import requests
- from io import BytesIO
- from PIL import Image
- import os
-
- api_key = os.environ['API_KEY']
-
- API_URL = "https://api-inference.huggingface.co/models/Hrishikesh332/autotrain-meme-classification-42897109437"
- headers = {"Authorization": f"Bearer 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"}
-
- def query(data : bytes):
-
-     response = requests.post(API_URL, headers=headers, data=data)
-     return response.json()
-
-
- st.markdown("<h1 style='text-align: center;'>Mememeter 💬</h1>", unsafe_allow_html=True)
- st.markdown("---")
- with st.sidebar:
-     st.title("Mememeter")
-     st.caption('''
-     Memeter is an application used for the classification of whether the images provided is meme or not meme
-     ''', unsafe_allow_html=False)
-
- img = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])
-
- if img is not None:
-
-     data = img.read()
-     output = query(data)
-     st.image(data)
-     st.write("Predicted Output:", output)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_mega.py DELETED
@@ -1,227 +0,0 @@
1
- from typing import Any, Callable, Dict, List, Optional, Union
2
-
3
- import PIL.Image
4
- import torch
5
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
6
-
7
- from diffusers import (
8
- AutoencoderKL,
9
- DDIMScheduler,
10
- DiffusionPipeline,
11
- LMSDiscreteScheduler,
12
- PNDMScheduler,
13
- StableDiffusionImg2ImgPipeline,
14
- StableDiffusionInpaintPipelineLegacy,
15
- StableDiffusionPipeline,
16
- UNet2DConditionModel,
17
- )
18
- from diffusers.configuration_utils import FrozenDict
19
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
20
- from diffusers.utils import deprecate, logging
21
-
22
-
23
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
-
25
-
26
- class StableDiffusionMegaPipeline(DiffusionPipeline):
27
- r"""
28
- Pipeline for text-to-image generation using Stable Diffusion.
29
-
30
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
31
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
32
-
33
- Args:
34
- vae ([`AutoencoderKL`]):
35
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
36
- text_encoder ([`CLIPTextModel`]):
37
- Frozen text-encoder. Stable Diffusion uses the text portion of
38
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
39
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
40
- tokenizer (`CLIPTokenizer`):
41
- Tokenizer of class
42
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
43
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
44
- scheduler ([`SchedulerMixin`]):
45
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
46
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
47
- safety_checker ([`StableDiffusionMegaSafetyChecker`]):
48
- Classification module that estimates whether generated images could be considered offensive or harmful.
49
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
50
- feature_extractor ([`CLIPImageProcessor`]):
51
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
52
- """
53
- _optional_components = ["safety_checker", "feature_extractor"]
54
-
55
- def __init__(
56
- self,
57
- vae: AutoencoderKL,
58
- text_encoder: CLIPTextModel,
59
- tokenizer: CLIPTokenizer,
60
- unet: UNet2DConditionModel,
61
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
62
- safety_checker: StableDiffusionSafetyChecker,
63
- feature_extractor: CLIPImageProcessor,
64
- requires_safety_checker: bool = True,
65
- ):
66
- super().__init__()
67
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
68
- deprecation_message = (
69
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
70
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
71
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
72
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
73
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
74
- " file"
75
- )
76
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
77
- new_config = dict(scheduler.config)
78
- new_config["steps_offset"] = 1
79
- scheduler._internal_dict = FrozenDict(new_config)
80
-
81
- self.register_modules(
82
- vae=vae,
83
- text_encoder=text_encoder,
84
- tokenizer=tokenizer,
85
- unet=unet,
86
- scheduler=scheduler,
87
- safety_checker=safety_checker,
88
- feature_extractor=feature_extractor,
89
- )
90
- self.register_to_config(requires_safety_checker=requires_safety_checker)
91
-
92
- @property
93
- def components(self) -> Dict[str, Any]:
94
- return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
95
-
96
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
97
- r"""
98
- Enable sliced attention computation.
99
-
100
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
101
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
102
-
103
- Args:
104
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
105
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
106
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
107
- `attention_head_dim` must be a multiple of `slice_size`.
108
- """
109
- if slice_size == "auto":
110
- # half the attention head size is usually a good trade-off between
111
- # speed and memory
112
- slice_size = self.unet.config.attention_head_dim // 2
113
- self.unet.set_attention_slice(slice_size)
114
-
115
- def disable_attention_slicing(self):
116
- r"""
117
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
118
- back to computing attention in one step.
119
- """
120
- # set slice_size = `None` to disable `attention slicing`
121
- self.enable_attention_slicing(None)
122
-
123
- @torch.no_grad()
124
- def inpaint(
125
- self,
126
- prompt: Union[str, List[str]],
127
- image: Union[torch.FloatTensor, PIL.Image.Image],
128
- mask_image: Union[torch.FloatTensor, PIL.Image.Image],
129
- strength: float = 0.8,
130
- num_inference_steps: Optional[int] = 50,
131
- guidance_scale: Optional[float] = 7.5,
132
- negative_prompt: Optional[Union[str, List[str]]] = None,
133
- num_images_per_prompt: Optional[int] = 1,
134
- eta: Optional[float] = 0.0,
135
- generator: Optional[torch.Generator] = None,
136
- output_type: Optional[str] = "pil",
137
- return_dict: bool = True,
138
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
139
- callback_steps: int = 1,
140
- ):
141
- # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
142
- return StableDiffusionInpaintPipelineLegacy(**self.components)(
143
- prompt=prompt,
144
- image=image,
145
- mask_image=mask_image,
146
- strength=strength,
147
- num_inference_steps=num_inference_steps,
148
- guidance_scale=guidance_scale,
149
- negative_prompt=negative_prompt,
150
- num_images_per_prompt=num_images_per_prompt,
151
- eta=eta,
152
- generator=generator,
153
- output_type=output_type,
154
- return_dict=return_dict,
155
- callback=callback,
156
- )
157
-
158
- @torch.no_grad()
159
- def img2img(
160
- self,
161
- prompt: Union[str, List[str]],
162
- image: Union[torch.FloatTensor, PIL.Image.Image],
163
- strength: float = 0.8,
164
- num_inference_steps: Optional[int] = 50,
165
- guidance_scale: Optional[float] = 7.5,
166
- negative_prompt: Optional[Union[str, List[str]]] = None,
167
- num_images_per_prompt: Optional[int] = 1,
168
- eta: Optional[float] = 0.0,
169
- generator: Optional[torch.Generator] = None,
170
- output_type: Optional[str] = "pil",
171
- return_dict: bool = True,
172
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
173
- callback_steps: int = 1,
174
- **kwargs,
175
- ):
176
- # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
177
- return StableDiffusionImg2ImgPipeline(**self.components)(
178
- prompt=prompt,
179
- image=image,
180
- strength=strength,
181
- num_inference_steps=num_inference_steps,
182
- guidance_scale=guidance_scale,
183
- negative_prompt=negative_prompt,
184
- num_images_per_prompt=num_images_per_prompt,
185
- eta=eta,
186
- generator=generator,
187
- output_type=output_type,
188
- return_dict=return_dict,
189
- callback=callback,
190
- callback_steps=callback_steps,
191
- )
192
-
193
- @torch.no_grad()
194
- def text2img(
195
- self,
196
- prompt: Union[str, List[str]],
197
- height: int = 512,
198
- width: int = 512,
199
- num_inference_steps: int = 50,
200
- guidance_scale: float = 7.5,
201
- negative_prompt: Optional[Union[str, List[str]]] = None,
202
- num_images_per_prompt: Optional[int] = 1,
203
- eta: float = 0.0,
204
- generator: Optional[torch.Generator] = None,
205
- latents: Optional[torch.FloatTensor] = None,
206
- output_type: Optional[str] = "pil",
207
- return_dict: bool = True,
208
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
209
- callback_steps: int = 1,
210
- ):
211
- # For more information on how this function https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline
212
- return StableDiffusionPipeline(**self.components)(
213
- prompt=prompt,
214
- height=height,
215
- width=width,
216
- num_inference_steps=num_inference_steps,
217
- guidance_scale=guidance_scale,
218
- negative_prompt=negative_prompt,
219
- num_images_per_prompt=num_images_per_prompt,
220
- eta=eta,
221
- generator=generator,
222
- latents=latents,
223
- output_type=output_type,
224
- return_dict=return_dict,
225
- callback=callback,
226
- callback_steps=callback_steps,
227
- )
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_1d.py DELETED
@@ -1,255 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from dataclasses import dataclass
16
- from typing import Optional, Tuple, Union
17
-
18
- import torch
19
- import torch.nn as nn
20
-
21
- from ..configuration_utils import ConfigMixin, register_to_config
22
- from ..utils import BaseOutput
23
- from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
24
- from .modeling_utils import ModelMixin
25
- from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
26
-
27
-
28
- @dataclass
29
- class UNet1DOutput(BaseOutput):
30
- """
31
- The output of [`UNet1DModel`].
32
-
33
- Args:
34
- sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
35
- The hidden states output from the last layer of the model.
36
- """
37
-
38
- sample: torch.FloatTensor
39
-
40
-
41
- class UNet1DModel(ModelMixin, ConfigMixin):
42
- r"""
43
- A 1D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.
44
-
45
- This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
46
- for all models (such as downloading or saving).
47
-
48
- Parameters:
49
- sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime.
50
- in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample.
51
- out_channels (`int`, *optional*, defaults to 2): Number of channels in the output.
52
- extra_in_channels (`int`, *optional*, defaults to 0):
53
- Number of additional channels to be added to the input of the first down block. Useful for cases where the
54
- input data has more channels than what the model was initially designed for.
55
- time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use.
56
- freq_shift (`float`, *optional*, defaults to 0.0): Frequency shift for Fourier time embedding.
57
- flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
58
- Whether to flip sin to cos for Fourier time embedding.
59
- down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock1D", "DownBlock1DNoSkip", "AttnDownBlock1D")`):
60
- Tuple of downsample block types.
61
- up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock1D", "UpBlock1DNoSkip", "AttnUpBlock1D")`):
62
- Tuple of upsample block types.
63
- block_out_channels (`Tuple[int]`, *optional*, defaults to `(32, 32, 64)`):
64
- Tuple of block output channels.
65
- mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock1D"`): Block type for middle of UNet.
66
- out_block_type (`str`, *optional*, defaults to `None`): Optional output processing block of UNet.
67
- act_fn (`str`, *optional*, defaults to `None`): Optional activation function in UNet blocks.
68
- norm_num_groups (`int`, *optional*, defaults to 8): The number of groups for normalization.
69
- layers_per_block (`int`, *optional*, defaults to 1): The number of layers per block.
70
- downsample_each_block (`int`, *optional*, defaults to `False`):
71
- Experimental feature for using a UNet without upsampling.
72
- """
73
-
74
- @register_to_config
75
- def __init__(
76
- self,
77
- sample_size: int = 65536,
78
- sample_rate: Optional[int] = None,
79
- in_channels: int = 2,
80
- out_channels: int = 2,
81
- extra_in_channels: int = 0,
82
- time_embedding_type: str = "fourier",
83
- flip_sin_to_cos: bool = True,
84
- use_timestep_embedding: bool = False,
85
- freq_shift: float = 0.0,
86
- down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
87
- up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
88
- mid_block_type: Tuple[str] = "UNetMidBlock1D",
89
- out_block_type: str = None,
90
- block_out_channels: Tuple[int] = (32, 32, 64),
91
- act_fn: str = None,
92
- norm_num_groups: int = 8,
93
- layers_per_block: int = 1,
94
- downsample_each_block: bool = False,
95
- ):
96
- super().__init__()
97
- self.sample_size = sample_size
98
-
99
- # time
100
- if time_embedding_type == "fourier":
101
- self.time_proj = GaussianFourierProjection(
102
- embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
103
- )
104
- timestep_input_dim = 2 * block_out_channels[0]
105
- elif time_embedding_type == "positional":
106
- self.time_proj = Timesteps(
107
- block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
108
- )
109
- timestep_input_dim = block_out_channels[0]
110
-
111
- if use_timestep_embedding:
112
- time_embed_dim = block_out_channels[0] * 4
113
- self.time_mlp = TimestepEmbedding(
114
- in_channels=timestep_input_dim,
115
- time_embed_dim=time_embed_dim,
116
- act_fn=act_fn,
117
- out_dim=block_out_channels[0],
118
- )
119
-
120
- self.down_blocks = nn.ModuleList([])
121
- self.mid_block = None
122
- self.up_blocks = nn.ModuleList([])
123
- self.out_block = None
124
-
125
- # down
126
- output_channel = in_channels
127
- for i, down_block_type in enumerate(down_block_types):
128
- input_channel = output_channel
129
- output_channel = block_out_channels[i]
130
-
131
- if i == 0:
132
- input_channel += extra_in_channels
133
-
134
- is_final_block = i == len(block_out_channels) - 1
135
-
136
- down_block = get_down_block(
137
- down_block_type,
138
- num_layers=layers_per_block,
139
- in_channels=input_channel,
140
- out_channels=output_channel,
141
- temb_channels=block_out_channels[0],
142
- add_downsample=not is_final_block or downsample_each_block,
143
- )
144
- self.down_blocks.append(down_block)
145
-
146
- # mid
147
- self.mid_block = get_mid_block(
148
- mid_block_type,
149
- in_channels=block_out_channels[-1],
150
- mid_channels=block_out_channels[-1],
151
- out_channels=block_out_channels[-1],
152
- embed_dim=block_out_channels[0],
153
- num_layers=layers_per_block,
154
- add_downsample=downsample_each_block,
155
- )
156
-
157
- # up
158
- reversed_block_out_channels = list(reversed(block_out_channels))
159
- output_channel = reversed_block_out_channels[0]
160
- if out_block_type is None:
161
- final_upsample_channels = out_channels
162
- else:
163
- final_upsample_channels = block_out_channels[0]
164
-
165
- for i, up_block_type in enumerate(up_block_types):
166
- prev_output_channel = output_channel
167
- output_channel = (
168
- reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
169
- )
170
-
171
- is_final_block = i == len(block_out_channels) - 1
172
-
173
- up_block = get_up_block(
174
- up_block_type,
175
- num_layers=layers_per_block,
176
- in_channels=prev_output_channel,
177
- out_channels=output_channel,
178
- temb_channels=block_out_channels[0],
179
- add_upsample=not is_final_block,
180
- )
181
- self.up_blocks.append(up_block)
182
- prev_output_channel = output_channel
183
-
184
- # out
185
- num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
186
- self.out_block = get_out_block(
187
- out_block_type=out_block_type,
188
- num_groups_out=num_groups_out,
189
- embed_dim=block_out_channels[0],
190
- out_channels=out_channels,
191
- act_fn=act_fn,
192
- fc_dim=block_out_channels[-1] // 4,
193
- )
194
-
195
- def forward(
196
- self,
197
- sample: torch.FloatTensor,
198
- timestep: Union[torch.Tensor, float, int],
199
- return_dict: bool = True,
200
- ) -> Union[UNet1DOutput, Tuple]:
201
- r"""
202
- The [`UNet1DModel`] forward method.
203
-
204
- Args:
205
- sample (`torch.FloatTensor`):
206
- The noisy input tensor with the following shape `(batch_size, num_channels, sample_size)`.
207
- timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
208
- return_dict (`bool`, *optional*, defaults to `True`):
209
- Whether or not to return a [`~models.unet_1d.UNet1DOutput`] instead of a plain tuple.
210
-
211
- Returns:
212
- [`~models.unet_1d.UNet1DOutput`] or `tuple`:
213
- If `return_dict` is True, an [`~models.unet_1d.UNet1DOutput`] is returned, otherwise a `tuple` is
214
- returned where the first element is the sample tensor.
215
- """
216
-
217
- # 1. time
218
- timesteps = timestep
219
- if not torch.is_tensor(timesteps):
220
- timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
221
- elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
222
- timesteps = timesteps[None].to(sample.device)
223
-
224
- timestep_embed = self.time_proj(timesteps)
225
- if self.config.use_timestep_embedding:
226
- timestep_embed = self.time_mlp(timestep_embed)
227
- else:
228
- timestep_embed = timestep_embed[..., None]
229
- timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
230
- timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
231
-
232
- # 2. down
233
- down_block_res_samples = ()
234
- for downsample_block in self.down_blocks:
235
- sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
236
- down_block_res_samples += res_samples
237
-
238
- # 3. mid
239
- if self.mid_block:
240
- sample = self.mid_block(sample, timestep_embed)
241
-
242
- # 4. up
243
- for i, upsample_block in enumerate(self.up_blocks):
244
- res_samples = down_block_res_samples[-1:]
245
- down_block_res_samples = down_block_res_samples[:-1]
246
- sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
247
-
248
- # 5. post-process
249
- if self.out_block:
250
- sample = self.out_block(sample, timestep_embed)
251
-
252
- if not return_dict:
253
- return (sample,)
254
-
255
- return UNet1DOutput(sample=sample)
 
spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = [
-     '../_base_/models/mask_rcnn_r50_fpn.py',
-     '../_base_/datasets/coco_instance.py',
-     '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
- ]
 
spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_small/config.py DELETED
@@ -1,38 +0,0 @@
- _base_ = [
-     '../../configs/_base_/models/mask_rcnn_uniformer_fpn.py',
-     '../../configs/_base_/datasets/coco_instance.py',
-     '../../configs/_base_/schedules/schedule_1x.py',
-     '../../configs/_base_/default_runtime.py'
- ]
-
- model = dict(
-     backbone=dict(
-         embed_dim=[64, 128, 320, 512],
-         layers=[3, 4, 8, 3],
-         head_dim=64,
-         drop_path_rate=0.2,
-         use_checkpoint=True,
-         checkpoint_num=[0, 0, 8, 0],
-         windows=False,
-         hybrid=True,
-         window_size=14
-     ),
-     neck=dict(in_channels=[64, 128, 320, 512]))
-
- optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
-                  paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
-                                                  'relative_position_bias_table': dict(decay_mult=0.),
-                                                  'norm': dict(decay_mult=0.)}))
- lr_config = dict(step=[8, 11])
- runner = dict(type='EpochBasedRunnerAmp', max_epochs=12)
-
- # do not use mmdet version fp16
- fp16 = None
- optimizer_config = dict(
-     type="DistOptimizerHook",
-     update_interval=1,
-     grad_clip=None,
-     coalesce=True,
-     bucket_size_mb=-1,
-     use_fp16=True,
- )
 
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/dnl_r50-d8.py DELETED
@@ -1,46 +0,0 @@
- # model settings
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     type='EncoderDecoder',
-     pretrained='open-mmlab://resnet50_v1c',
-     backbone=dict(
-         type='ResNetV1c',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         dilations=(1, 1, 2, 4),
-         strides=(1, 2, 1, 1),
-         norm_cfg=norm_cfg,
-         norm_eval=False,
-         style='pytorch',
-         contract_dilation=True),
-     decode_head=dict(
-         type='DNLHead',
-         in_channels=2048,
-         in_index=3,
-         channels=512,
-         dropout_ratio=0.1,
-         reduction=2,
-         use_scale=True,
-         mode='embedded_gaussian',
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-     auxiliary_head=dict(
-         type='FCNHead',
-         in_channels=1024,
-         in_index=2,
-         channels=256,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     # model training and testing settings
-     train_cfg=dict(),
-     test_cfg=dict(mode='whole'))
 
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/README.md DELETED
@@ -1,39 +0,0 @@
1
- # Adaptive Pyramid Context Network for Semantic Segmentation
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @InProceedings{He_2019_CVPR,
9
- author = {He, Junjun and Deng, Zhongying and Zhou, Lei and Wang, Yali and Qiao, Yu},
10
- title = {Adaptive Pyramid Context Network for Semantic Segmentation},
11
- booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
12
- month = {June},
13
- year = {2019}
14
- }
15
- ```
16
-
17
- ## Results and models
18
-
19
- ### Cityscapes
20
-
21
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
22
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
23
- | APCNet | R-50-D8 | 512x1024 | 40000 | 7.7 | 3.57 | 78.02 | 79.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes-20201214_115717.log.json) |
24
- | APCNet | R-101-D8 | 512x1024 | 40000 | 11.2 | 2.15 | 79.08 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes-20201214_115716.log.json) |
25
- | APCNet | R-50-D8 | 769x769 | 40000 | 8.7 | 1.52 | 77.89 | 79.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes-20201214_115717.log.json) |
26
- | APCNet | R-101-D8 | 769x769 | 40000 | 12.7 | 1.03 | 77.96 | 79.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes-20201214_115718.log.json) |
27
- | APCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.96 | 79.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes-20201214_115716.log.json) |
28
- | APCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes-20201214_115705.log.json) |
29
- | APCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.79 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes-20201214_115718.log.json) |
30
- | APCNet | R-101-D8 | 769x769 | 80000 | - | - | 78.45 | 79.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes-20201214_115716.log.json) |
31
-
32
- ### ADE20K
33
-
34
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
35
- | ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
36
- | APCNet | R-50-D8 | 512x512 | 80000 | 10.1 | 19.61 | 42.20 | 43.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k-20201214_115705.log.json) |
37
- | APCNet | R-101-D8 | 512x512 | 80000 | 13.6 | 13.10 | 45.54 | 46.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k-20201214_115704.log.json) |
38
- | APCNet | R-50-D8 | 512x512 | 160000 | - | - | 43.40 | 43.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k-20201214_115706.log.json) |
39
- | APCNet | R-101-D8 | 512x512 | 160000 | - | - | 45.41 | 46.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k-20201214_115705.log.json) |
 
spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v2/README.md DELETED
@@ -1,35 +0,0 @@
1
- # MobileNetV2: Inverted Residuals and Linear Bottlenecks
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @inproceedings{sandler2018mobilenetv2,
9
- title={Mobilenetv2: Inverted residuals and linear bottlenecks},
10
- author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh},
11
- booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
12
- pages={4510--4520},
13
- year={2018}
14
- }
15
- ```
16
-
17
- ## Results and models
18
-
19
- ### Cityscapes
20
-
21
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
22
- | ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
23
- | FCN | M-V2-D8 | 512x1024 | 80000 | 3.4 | 14.2 | 61.54 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-d24c28c1.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) |
24
- | PSPNet | M-V2-D8 | 512x1024 | 80000 | 3.6 | 11.2 | 70.23 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) |
25
- | DeepLabV3 | M-V2-D8 | 512x1024 | 80000 | 3.9 | 8.4 | 73.84 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) |
26
- | DeepLabV3+ | M-V2-D8 | 512x1024 | 80000 | 5.1 | 8.4 | 75.20 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) |
27
-
28
- ### ADE20k
29
-
30
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
31
- | ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
32
- | FCN | M-V2-D8 | 512x512 | 160000 | 6.5 | 64.4 | 19.71 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) |
33
- | PSPNet | M-V2-D8 | 512x512 | 160000 | 6.5 | 57.7 | 29.68 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) |
34
- | DeepLabV3 | M-V2-D8 | 512x512 | 160000 | 6.8 | 39.9 | 34.08 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) |
35
- | DeepLabV3+ | M-V2-D8 | 512x512 | 160000 | 8.2 | 43.1 | 34.02 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) |
 
spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/matching.py DELETED
@@ -1,83 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
-
4
- from .geometry import coords_grid, generate_window_grid, normalize_coords
5
-
6
-
7
- def global_correlation_softmax(feature0, feature1,
8
- pred_bidir_flow=False,
9
- ):
10
- # global correlation
11
- b, c, h, w = feature0.shape
12
- feature0 = feature0.view(b, c, -1).permute(0, 2, 1) # [B, H*W, C]
13
- feature1 = feature1.view(b, c, -1) # [B, C, H*W]
14
-
15
- correlation = torch.matmul(feature0, feature1).view(b, h, w, h, w) / (c ** 0.5) # [B, H, W, H, W]
16
-
17
- # flow from softmax
18
- init_grid = coords_grid(b, h, w).to(correlation.device) # [B, 2, H, W]
19
- grid = init_grid.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2]
20
-
21
- correlation = correlation.view(b, h * w, h * w) # [B, H*W, H*W]
22
-
23
- if pred_bidir_flow:
24
- correlation = torch.cat((correlation, correlation.permute(0, 2, 1)), dim=0) # [2*B, H*W, H*W]
25
- init_grid = init_grid.repeat(2, 1, 1, 1) # [2*B, 2, H, W]
26
- grid = grid.repeat(2, 1, 1) # [2*B, H*W, 2]
27
- b = b * 2
28
-
29
- prob = F.softmax(correlation, dim=-1) # [B, H*W, H*W]
30
-
31
- correspondence = torch.matmul(prob, grid).view(b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W]
32
-
33
- # when predicting bidirectional flow, flow is the concatenation of forward flow and backward flow
34
- flow = correspondence - init_grid
35
-
36
- return flow, prob
37
-
38
-
39
- def local_correlation_softmax(feature0, feature1, local_radius,
40
- padding_mode='zeros',
41
- ):
42
- b, c, h, w = feature0.size()
43
- coords_init = coords_grid(b, h, w).to(feature0.device) # [B, 2, H, W]
44
- coords = coords_init.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2]
45
-
46
- local_h = 2 * local_radius + 1
47
- local_w = 2 * local_radius + 1
48
-
49
- window_grid = generate_window_grid(-local_radius, local_radius,
50
- -local_radius, local_radius,
51
- local_h, local_w, device=feature0.device) # [2R+1, 2R+1, 2]
52
- window_grid = window_grid.reshape(-1, 2).repeat(b, 1, 1, 1) # [B, 1, (2R+1)^2, 2]
53
- sample_coords = coords.unsqueeze(-2) + window_grid # [B, H*W, (2R+1)^2, 2]
54
-
55
- sample_coords_softmax = sample_coords
56
-
57
- # exclude coords that are out of image space
58
- valid_x = (sample_coords[:, :, :, 0] >= 0) & (sample_coords[:, :, :, 0] < w) # [B, H*W, (2R+1)^2]
59
- valid_y = (sample_coords[:, :, :, 1] >= 0) & (sample_coords[:, :, :, 1] < h) # [B, H*W, (2R+1)^2]
60
-
61
- valid = valid_x & valid_y # [B, H*W, (2R+1)^2], used to mask out invalid values when softmax
62
-
63
- # normalize coordinates to [-1, 1]
64
- sample_coords_norm = normalize_coords(sample_coords, h, w) # [-1, 1]
65
- window_feature = F.grid_sample(feature1, sample_coords_norm,
66
- padding_mode=padding_mode, align_corners=True
67
- ).permute(0, 2, 1, 3) # [B, H*W, C, (2R+1)^2]
68
- feature0_view = feature0.permute(0, 2, 3, 1).view(b, h * w, 1, c) # [B, H*W, 1, C]
69
-
70
- corr = torch.matmul(feature0_view, window_feature).view(b, h * w, -1) / (c ** 0.5) # [B, H*W, (2R+1)^2]
71
-
72
- # mask invalid locations
73
- corr[~valid] = -1e9
74
-
75
- prob = F.softmax(corr, -1) # [B, H*W, (2R+1)^2]
76
-
77
- correspondence = torch.matmul(prob.unsqueeze(-2), sample_coords_softmax).squeeze(-2).view(
78
- b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W]
79
-
80
- flow = correspondence - coords_init
81
- match_prob = prob
82
-
83
- return flow, match_prob
 
spaces/Aoron/Test02/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Test02
- emoji: 😻
- colorFrom: pink
- colorTo: gray
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Arnx/MusicGenXvAKN/audiocraft/modules/seanet.py DELETED
@@ -1,258 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import typing as tp
8
-
9
- import numpy as np
10
- import torch.nn as nn
11
-
12
- from .conv import StreamableConv1d, StreamableConvTranspose1d
13
- from .lstm import StreamableLSTM
14
-
15
-
16
- class SEANetResnetBlock(nn.Module):
17
- """Residual block from SEANet model.
18
-
19
- Args:
20
- dim (int): Dimension of the input/output.
21
- kernel_sizes (list): List of kernel sizes for the convolutions.
22
- dilations (list): List of dilations for the convolutions.
23
- activation (str): Activation function.
24
- activation_params (dict): Parameters to provide to the activation function.
25
- norm (str): Normalization method.
26
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
27
- causal (bool): Whether to use fully causal convolution.
28
- pad_mode (str): Padding mode for the convolutions.
29
- compress (int): Reduced dimensionality in residual branches (from Demucs v3).
30
- true_skip (bool): Whether to use true skip connection or a simple
31
- (streamable) convolution as the skip connection.
32
- """
33
- def __init__(self, dim: int, kernel_sizes: tp.List[int] = [3, 1], dilations: tp.List[int] = [1, 1],
34
- activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
35
- norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, causal: bool = False,
36
- pad_mode: str = 'reflect', compress: int = 2, true_skip: bool = True):
37
- super().__init__()
38
- assert len(kernel_sizes) == len(dilations), 'Number of kernel sizes should match number of dilations'
39
- act = getattr(nn, activation)
40
- hidden = dim // compress
41
- block = []
42
- for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
43
- in_chs = dim if i == 0 else hidden
44
- out_chs = dim if i == len(kernel_sizes) - 1 else hidden
45
- block += [
46
- act(**activation_params),
47
- StreamableConv1d(in_chs, out_chs, kernel_size=kernel_size, dilation=dilation,
48
- norm=norm, norm_kwargs=norm_params,
49
- causal=causal, pad_mode=pad_mode),
50
- ]
51
- self.block = nn.Sequential(*block)
52
- self.shortcut: nn.Module
53
- if true_skip:
54
- self.shortcut = nn.Identity()
55
- else:
56
- self.shortcut = StreamableConv1d(dim, dim, kernel_size=1, norm=norm, norm_kwargs=norm_params,
57
- causal=causal, pad_mode=pad_mode)
58
-
59
- def forward(self, x):
60
- return self.shortcut(x) + self.block(x)
61
-
62
-
63
- class SEANetEncoder(nn.Module):
64
- """SEANet encoder.
65
-
66
- Args:
67
- channels (int): Audio channels.
68
- dimension (int): Intermediate representation dimension.
69
- n_filters (int): Base width for the model.
70
- n_residual_layers (int): nb of residual layers.
71
- ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of
72
- upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here
73
- that must match the decoder order. We use the decoder order as some models may only employ the decoder.
74
- activation (str): Activation function.
75
- activation_params (dict): Parameters to provide to the activation function.
76
- norm (str): Normalization method.
77
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
78
- kernel_size (int): Kernel size for the initial convolution.
79
- last_kernel_size (int): Kernel size for the initial convolution.
80
- residual_kernel_size (int): Kernel size for the residual layers.
81
- dilation_base (int): How much to increase the dilation with each layer.
82
- causal (bool): Whether to use fully causal convolution.
83
- pad_mode (str): Padding mode for the convolutions.
84
- true_skip (bool): Whether to use true skip connection or a simple
85
- (streamable) convolution as the skip connection in the residual network blocks.
86
- compress (int): Reduced dimensionality in residual branches (from Demucs v3).
87
- lstm (int): Number of LSTM layers at the end of the encoder.
88
- disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
89
- For the encoder, it corresponds to the N first blocks.
90
- """
91
- def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
92
- ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
93
- norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
94
- last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
95
- pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
96
- disable_norm_outer_blocks: int = 0):
97
- super().__init__()
98
- self.channels = channels
99
- self.dimension = dimension
100
- self.n_filters = n_filters
101
- self.ratios = list(reversed(ratios))
102
- del ratios
103
- self.n_residual_layers = n_residual_layers
104
- self.hop_length = np.prod(self.ratios)
105
- self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
106
- self.disable_norm_outer_blocks = disable_norm_outer_blocks
107
- assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
108
- "Number of blocks for which to disable norm is invalid." \
109
- "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
110
-
111
- act = getattr(nn, activation)
112
- mult = 1
113
- model: tp.List[nn.Module] = [
114
- StreamableConv1d(channels, mult * n_filters, kernel_size,
115
- norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
116
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
117
- ]
118
- # Downsample to raw audio scale
119
- for i, ratio in enumerate(self.ratios):
120
- block_norm = 'none' if self.disable_norm_outer_blocks >= i + 2 else norm
121
- # Add residual layers
122
- for j in range(n_residual_layers):
123
- model += [
124
- SEANetResnetBlock(mult * n_filters, kernel_sizes=[residual_kernel_size, 1],
125
- dilations=[dilation_base ** j, 1],
126
- norm=block_norm, norm_params=norm_params,
127
- activation=activation, activation_params=activation_params,
128
- causal=causal, pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
129
-
130
- # Add downsampling layers
131
- model += [
132
- act(**activation_params),
133
- StreamableConv1d(mult * n_filters, mult * n_filters * 2,
134
- kernel_size=ratio * 2, stride=ratio,
135
- norm=block_norm, norm_kwargs=norm_params,
136
- causal=causal, pad_mode=pad_mode),
137
- ]
138
- mult *= 2
139
-
140
- if lstm:
141
- model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
142
-
143
- model += [
144
- act(**activation_params),
145
- StreamableConv1d(mult * n_filters, dimension, last_kernel_size,
146
- norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
147
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
148
- ]
149
-
150
- self.model = nn.Sequential(*model)
151
-
152
- def forward(self, x):
153
- return self.model(x)
154
-
155
-
156
- class SEANetDecoder(nn.Module):
157
- """SEANet decoder.
158
-
159
- Args:
160
- channels (int): Audio channels.
161
- dimension (int): Intermediate representation dimension.
162
- n_filters (int): Base width for the model.
163
- n_residual_layers (int): nb of residual layers.
164
- ratios (Sequence[int]): kernel size and stride ratios.
165
- activation (str): Activation function.
166
- activation_params (dict): Parameters to provide to the activation function.
167
- final_activation (str): Final activation function after all convolutions.
168
- final_activation_params (dict): Parameters to provide to the activation function.
169
- norm (str): Normalization method.
170
- norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution.
171
- kernel_size (int): Kernel size for the initial convolution.
172
- last_kernel_size (int): Kernel size for the initial convolution.
173
- residual_kernel_size (int): Kernel size for the residual layers.
174
- dilation_base (int): How much to increase the dilation with each layer.
175
- causal (bool): Whether to use fully causal convolution.
176
- pad_mode (str): Padding mode for the convolutions.
177
- true_skip (bool): Whether to use true skip connection or a simple.
178
- (streamable) convolution as the skip connection in the residual network blocks.
179
- compress (int): Reduced dimensionality in residual branches (from Demucs v3).
180
- lstm (int): Number of LSTM layers at the end of the encoder.
181
- disable_norm_outer_blocks (int): Number of blocks for which we don't apply norm.
182
- For the decoder, it corresponds to the N last blocks.
183
- trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup.
184
- If equal to 1.0, it means that all the trimming is done at the right.
185
- """
186
- def __init__(self, channels: int = 1, dimension: int = 128, n_filters: int = 32, n_residual_layers: int = 3,
187
- ratios: tp.List[int] = [8, 5, 4, 2], activation: str = 'ELU', activation_params: dict = {'alpha': 1.0},
188
- final_activation: tp.Optional[str] = None, final_activation_params: tp.Optional[dict] = None,
189
- norm: str = 'none', norm_params: tp.Dict[str, tp.Any] = {}, kernel_size: int = 7,
190
- last_kernel_size: int = 7, residual_kernel_size: int = 3, dilation_base: int = 2, causal: bool = False,
191
- pad_mode: str = 'reflect', true_skip: bool = True, compress: int = 2, lstm: int = 0,
192
- disable_norm_outer_blocks: int = 0, trim_right_ratio: float = 1.0):
193
- super().__init__()
194
- self.dimension = dimension
195
- self.channels = channels
196
- self.n_filters = n_filters
197
- self.ratios = ratios
198
- del ratios
199
- self.n_residual_layers = n_residual_layers
200
- self.hop_length = np.prod(self.ratios)
201
- self.n_blocks = len(self.ratios) + 2 # first and last conv + residual blocks
202
- self.disable_norm_outer_blocks = disable_norm_outer_blocks
203
- assert self.disable_norm_outer_blocks >= 0 and self.disable_norm_outer_blocks <= self.n_blocks, \
204
- "Number of blocks for which to disable norm is invalid." \
205
- "It should be lower or equal to the actual number of blocks in the network and greater or equal to 0."
206
-
207
- act = getattr(nn, activation)
208
- mult = int(2 ** len(self.ratios))
209
- model: tp.List[nn.Module] = [
210
- StreamableConv1d(dimension, mult * n_filters, kernel_size,
211
- norm='none' if self.disable_norm_outer_blocks == self.n_blocks else norm,
212
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
213
- ]
214
-
215
- if lstm:
216
- model += [StreamableLSTM(mult * n_filters, num_layers=lstm)]
217
-
218
- # Upsample to raw audio scale
219
- for i, ratio in enumerate(self.ratios):
220
- block_norm = 'none' if self.disable_norm_outer_blocks >= self.n_blocks - (i + 1) else norm
221
- # Add upsampling layers
222
- model += [
223
- act(**activation_params),
224
- StreamableConvTranspose1d(mult * n_filters, mult * n_filters // 2,
225
- kernel_size=ratio * 2, stride=ratio,
226
- norm=block_norm, norm_kwargs=norm_params,
227
- causal=causal, trim_right_ratio=trim_right_ratio),
228
- ]
229
- # Add residual layers
230
- for j in range(n_residual_layers):
231
- model += [
232
- SEANetResnetBlock(mult * n_filters // 2, kernel_sizes=[residual_kernel_size, 1],
233
- dilations=[dilation_base ** j, 1],
234
- activation=activation, activation_params=activation_params,
235
- norm=block_norm, norm_params=norm_params, causal=causal,
236
- pad_mode=pad_mode, compress=compress, true_skip=true_skip)]
237
-
238
- mult //= 2
239
-
240
- # Add final layers
241
- model += [
242
- act(**activation_params),
243
- StreamableConv1d(n_filters, channels, last_kernel_size,
244
- norm='none' if self.disable_norm_outer_blocks >= 1 else norm,
245
- norm_kwargs=norm_params, causal=causal, pad_mode=pad_mode)
246
- ]
247
- # Add optional final activation to decoder (eg. tanh)
248
- if final_activation is not None:
249
- final_act = getattr(nn, final_activation)
250
- final_activation_params = final_activation_params or {}
251
- model += [
252
- final_act(**final_activation_params)
253
- ]
254
- self.model = nn.Sequential(*model)
255
-
256
- def forward(self, z):
257
- y = self.model(z)
258
- return y
 
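A minimal usage sketch of the deleted decoder above. It assumes the class is the SEANetDecoder from audiocraft's seanet.py (the class name and module path are not shown in this diff and are assumptions), and that its StreamableConv1d / StreamableConvTranspose1d / SEANetResnetBlock dependencies are importable alongside it. The exact output length is approximate.

import torch
from audiocraft.modules.seanet import SEANetDecoder  # assumed upstream module path

decoder = SEANetDecoder(channels=1, dimension=128, n_filters=32, ratios=[8, 5, 4, 2])
z = torch.randn(2, 128, 50)   # (batch, dimension, latent frames)
audio = decoder(z)            # upsampled by roughly prod(ratios) = 320 samples per frame
print(audio.shape)            # on the order of torch.Size([2, 1, 16000])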
 
spaces/Artples/google-flan-t5-xl/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/google/flan-t5-xl").launch()
 
 
 
 
spaces/Audio-AGI/WavJourney/ui_client.py DELETED
@@ -1,632 +0,0 @@
1
- import shutil
2
- import json5
3
- import traceback
4
-
5
- import gradio as gr
6
- from tabulate import tabulate
7
-
8
- import utils
9
- import pipeline
10
- from pipeline import generate_json_file, generate_audio
11
- from voice_presets import load_voice_presets_metadata, add_session_voice_preset, \
12
- remove_session_voice_preset
13
- from share_btn import community_icon_html, loading_icon_html, share_js
14
-
15
-
16
-
17
- VOICE_PRESETS_HEADERS = ['ID', 'Description']
18
- DELETE_FILE_WHEN_DO_CLEAR = False
19
- DEBUG = False
20
-
21
-
22
- def convert_json_to_md(audio_script_response):
23
- audio_json_data = json5.loads(audio_script_response)
24
- table = [[node.get(field, 'N/A') for field in ["audio_type", "layout", "id", "character", "action", 'vol']] +
25
- [node.get("desc", "N/A") if node.get("audio_type") != "speech" else node.get("text", "N/A")] +
26
- [node.get("len", "Auto") if "len" in node else "Auto"]
27
- for i, node in enumerate(audio_json_data)]
28
-
29
- headers = ["Audio Type", "Layout", "ID", "Character", "Action", 'Volume', "Description", "Length" ]
30
-
31
- # Tabulate
32
- table_txt = tabulate(table, headers, tablefmt="github")
33
- return table_txt
34
-
35
-
36
- def convert_char_voice_map_to_md(char_voice_map):
37
- table =[[character, char_voice_map[character]["id"]] for character in char_voice_map]
38
- headers = ["Character", "Voice"]
39
- # Tabulate
40
- table_txt = tabulate(table, headers, tablefmt="github")
41
- return table_txt
42
-
43
-
44
- def get_or_create_session_from_state(ui_state):
45
- if 'session_id' not in ui_state:
46
- ui_state['session_id'] = pipeline.init_session()
47
- return ui_state['session_id']
48
-
49
-
50
- def generate_script_fn(instruction, _state: gr.State):
51
- try:
52
- session_id = get_or_create_session_from_state(_state)
53
- api_key = utils.get_api_key()
54
- json_script = generate_json_file(session_id, instruction, api_key)
55
- table_text = convert_json_to_md(json_script)
56
- except Exception as e:
57
- gr.Warning(str(e))
58
- print(f"Generating script error: {str(e)}")
59
- traceback.print_exc()
60
- return [
61
- None,
62
- _state,
63
- gr.Button.update(interactive=False),
64
- gr.Button.update(interactive=True),
65
- gr.Button.update(interactive=True),
66
- gr.Button.update(interactive=True),
67
- ]
68
-
69
- _state = {
70
- **_state,
71
- 'session_id': session_id,
72
- 'json_script': json_script
73
- }
74
- return [
75
- table_text,
76
- _state,
77
- gr.Button.update(interactive=True),
78
- gr.Button.update(interactive=True),
79
- gr.Button.update(interactive=True),
80
- gr.Button.update(interactive=True),
81
- ]
82
-
83
-
84
- def generate_audio_fn(state):
85
- btn_state = gr.Button.update(interactive=True)
86
- try:
87
- api_key = utils.get_api_key()
88
- audio_path, char_voice_map = generate_audio(**state, api_key=api_key)
89
- table_text = convert_char_voice_map_to_md(char_voice_map)
90
- # TODO: output char_voice_map to a table
91
- return [
92
- table_text,
93
- gr.make_waveform(str(audio_path)),
94
- btn_state,
95
- btn_state,
96
- btn_state,
97
- btn_state,
98
- ]
99
- except Exception as e:
100
- print(f"Generating audio error: {str(e)}")
101
- traceback.print_exc()
102
- gr.Warning(str(e))
103
-
104
- return [
105
- None,
106
- None,
107
- btn_state,
108
- btn_state,
109
- btn_state,
110
- btn_state,
111
- ]
112
-
113
-
114
- def clear_fn(state):
115
- if DELETE_FILE_WHEN_DO_CLEAR:
116
- shutil.rmtree('output', ignore_errors=True)
117
- state = {'session_id': pipeline.init_session()}
118
- return [gr.Markdown.update(value=''),
119
- gr.Textbox.update(value=''),
120
- gr.Video.update(value=None),
121
- gr.Markdown.update(value=''),
122
- gr.Button.update(interactive=False),
123
- gr.Button.update(interactive=False),
124
- state, gr.Dataframe.update(visible=False),
125
- gr.Button.update(visible=False),
126
- gr.Textbox.update(value=''),
127
- gr.Textbox.update(value=''),
128
- gr.File.update(value=None)]
129
-
130
-
131
- def textbox_listener(textbox_input):
132
- if len(textbox_input) > 0:
133
- return gr.Button.update(interactive=True)
134
- else:
135
- return gr.Button.update(interactive=False)
136
-
137
-
138
- def get_voice_preset_to_list(state: gr.State):
139
- if state.__class__ == gr.State:
140
- state = state.value
141
- if 'session_id' in state:
142
- path = utils.get_session_voice_preset_path(state['session_id'])
143
- else:
144
- path = ''
145
- voice_presets = load_voice_presets_metadata(
146
- path,
147
- safe_if_metadata_not_exist=True
148
- )
149
- dataframe = []
150
- for key in voice_presets.keys():
151
- row = [key, voice_presets[key]['desc']]
152
- dataframe.append(row)
153
- return dataframe
154
-
155
-
156
- def df_on_select(evt: gr.SelectData):
157
- print(f"You selected {evt.value} at {evt.index} from {evt.target}")
158
- return {'selected_voice_preset': evt.index}
159
-
160
-
161
- def del_voice_preset(selected_voice_presets, ui_state, dataframe):
162
- gr_visible = gr.Dataframe.update(visible=True)
163
- btn_visible = gr.Button.update(visible=True)
164
- current_presets = get_voice_preset_to_list(ui_state)
165
- if selected_voice_presets['selected_voice_preset'] is None or \
166
- selected_voice_presets['selected_voice_preset'][0] > len(current_presets) - 1:
167
- gr.Warning('None row is selected')
168
- return [current_presets, gr_visible, btn_visible, selected_voice_presets]
169
- # Do the real file deletion
170
- index = selected_voice_presets['selected_voice_preset'][0]
171
- vp_id = dataframe['ID'][index]
172
- remove_session_voice_preset(vp_id, ui_state['session_id'])
173
- current_presets = get_voice_preset_to_list(ui_state)
174
- gr.Dataframe.update(value=current_presets)
175
- if len(current_presets) == 0:
176
- gr_visible = gr.Dataframe.update(visible=False)
177
- btn_visible = gr.Button.update(visible=False)
178
- selected_voice_presets['selected_voice_preset'] = None
179
- return [current_presets, gr_visible, btn_visible, selected_voice_presets]
180
-
181
-
182
- def get_system_voice_presets():
183
- system_presets = load_voice_presets_metadata(utils.get_system_voice_preset_path())
184
- data = []
185
- for k, v in system_presets.items():
186
- data.append([k, v['desc']])
187
- # headers = ['id', 'description']
188
- # table_txt = tabulate(data, headers, tablefmt="github")
189
- return data
190
-
191
-
192
- def set_openai_key(key, _state):
193
- _state['api_key'] = key
194
- return key
195
-
196
-
197
- def add_voice_preset(vp_id, vp_desc, file, ui_state, added_voice_preset):
198
- if vp_id is None or vp_desc is None or file is None or vp_id.strip() == '' or vp_desc.strip() == '':
199
- gr.Warning('please complete all three fields')
200
- else:
201
- count: int = added_voice_preset['count']
202
- # check if greater than 3
203
- session_id = get_or_create_session_from_state(ui_state)
204
- file_path = file.name
205
- print(f'session {session_id}, id {vp_id}, desc {vp_desc}, file {file_path}')
206
- # Do adding ...
207
- try:
208
- add_session_voice_preset(vp_id, vp_desc, file_path, session_id)
209
- added_voice_preset['count'] = count + 1
210
- except Exception as exception:
211
- print(exception)
212
- traceback.print_exc()
213
- gr.Warning(str(exception))
214
-
215
- # After added
216
- dataframe = get_voice_preset_to_list(ui_state)
217
- df_visible = gr.Dataframe.update(visible=True)
218
- del_visible = gr.Button.update(visible=True)
219
- if len(dataframe) == 0:
220
- df_visible = gr.Dataframe.update(visible=False)
221
- del_visible = gr.Button.update(visible=False)
222
- return [gr.Textbox.update(value=''), gr.Textbox.update(value=''), gr.File.update(value=None),
223
- ui_state, added_voice_preset, dataframe, gr.Button.update(interactive=True),
224
- df_visible, del_visible]
225
-
226
-
227
- css = """
228
- a {
229
- color: inherit;
230
- text-decoration: underline;
231
- }
232
- .gradio-container {
233
- font-family: 'IBM Plex Sans', sans-serif;
234
- }
235
- .gr-button {
236
- color: white;
237
- border-color: #000000;
238
- background: #000000;
239
- }
240
- input[type='range'] {
241
- accent-color: #000000;
242
- }
243
- .dark input[type='range'] {
244
- accent-color: #dfdfdf;
245
- }
246
- .container {
247
- max-width: 730px;
248
- margin: auto;
249
- padding-top: 1.5rem;
250
- }
251
- #gallery {
252
- min-height: 22rem;
253
- margin-bottom: 15px;
254
- margin-left: auto;
255
- margin-right: auto;
256
- border-bottom-right-radius: .5rem !important;
257
- border-bottom-left-radius: .5rem !important;
258
- }
259
- #gallery>div>.h-full {
260
- min-height: 20rem;
261
- }
262
- .details:hover {
263
- text-decoration: underline;
264
- }
265
- .gr-button {
266
- white-space: nowrap;
267
- }
268
- .gr-button:focus {
269
- border-color: rgb(147 197 253 / var(--tw-border-opacity));
270
- outline: none;
271
- box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
272
- --tw-border-opacity: 1;
273
- --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
274
- --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
275
- --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
276
- --tw-ring-opacity: .5;
277
- }
278
- #advanced-btn {
279
- font-size: .7rem !important;
280
- line-height: 19px;
281
- margin-top: 12px;
282
- margin-bottom: 12px;
283
- padding: 2px 8px;
284
- border-radius: 14px !important;
285
- }
286
- #advanced-options {
287
- margin-bottom: 20px;
288
- }
289
- .footer {
290
- margin-bottom: 45px;
291
- margin-top: 35px;
292
- text-align: center;
293
- border-bottom: 1px solid #e5e5e5;
294
- }
295
- .footer>p {
296
- font-size: .8rem;
297
- display: inline-block;
298
- padding: 0 10px;
299
- transform: translateY(10px);
300
- background: white;
301
- }
302
- .dark .footer {
303
- border-color: #303030;
304
- }
305
- .dark .footer>p {
306
- background: #0b0f19;
307
- }
308
- .acknowledgments h4{
309
- margin: 1.25em 0 .25em 0;
310
- font-weight: bold;
311
- font-size: 115%;
312
- }
313
- #container-advanced-btns{
314
- display: flex;
315
- flex-wrap: wrap;
316
- justify-content: space-between;
317
- align-items: center;
318
- }
319
- .animate-spin {
320
- animation: spin 1s linear infinite;
321
- }
322
- @keyframes spin {
323
- from {
324
- transform: rotate(0deg);
325
- }
326
- to {
327
- transform: rotate(360deg);
328
- }
329
- }
330
- #share-btn-container {
331
- display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
332
- margin-top: 10px;
333
- margin-left: auto;
334
- }
335
- #share-btn {
336
- all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
337
- }
338
- #share-btn * {
339
- all: unset;
340
- }
341
- #share-btn-container div:nth-child(-n+2){
342
- width: auto !important;
343
- min-height: 0px !important;
344
- }
345
- #share-btn-container .wrap {
346
- display: none !important;
347
- }
348
- .gr-form{
349
- flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
350
- }
351
- #prompt-container{
352
- gap: 0;
353
- }
354
- #generated_id{
355
- min-height: 700px
356
- }
357
- #setting_id{
358
- margin-bottom: 12px;
359
- text-align: center;
360
- font-weight: 900;
361
- }
362
- """
363
-
364
- with gr.Blocks(css=css) as interface:
365
-
366
- gr.HTML(
367
- """
368
- <div style="text-align: center; max-width: 700px; margin: 0 auto;">
369
- <div
370
- style="
371
- display: inline-flex;
372
- align-items: center;
373
- gap: 0.8rem;
374
- font-size: 1.75rem;
375
- "
376
- >
377
- <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
378
- WavJourney: Compositional Audio Creation with LLMs
379
- </h1>
380
- </div>
381
- <p style="margin-bottom: 10px; margin-top: 10px; font-size: 94%">
382
- <a href="https://arxiv.org/abs/2307.14335">[Paper]</a> <a href="https://audio-agi.github.io/WavJourney_demopage/">[Demo Page]</a> <a href="https://github.com/Audio-AGI/WavJourney">[GitHub]</a> <a href="https://discord.com/invite/5Hqu9NmA8V">[Join Discord]</a>
383
- </p>
384
- </div>
385
- """
386
- )
387
- gr.HTML(
388
- """
389
- <p>Due to the high user demand we are facing from our community, we will be offering free access to WavJourney for a few more days. You can also access WavJourney in this space later by providing your OPENAI_KEY.<p/>
390
- <p>For faster inference without waiting in the queue, you can duplicate the space and upgrade to GPU (VRAM>16G) and provide OPENAI_KEY to access GPT-4 in settings.
391
- <br>
392
- <a href="https://huggingface.co/spaces/Audio-AGI/WavJourney?duplicate=true">
393
- <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
394
- <p/>
395
- """
396
- )
397
-
398
- # gr.HTML(
399
- # """
400
- # <p>Begin with a text prompt, and let WavJourney transform it into captivating audio content. Experience engaging audio storylines, personalized voices, lifelike speech, emotionally resonant musical compositions, and immersive sound effects!
401
- # <p/>
402
- # """
403
- # )
404
-
405
- gr.Markdown(value='## WavJourney Pipeline:')
406
-
407
- gr.Markdown(value='Begin with a text prompt, and let WavJourney transform it into captivating audio content. Experience engaging audio storylines, personalized voices, lifelike speech, emotionally resonant musical compositions, and immersive sound effects!')
408
-
409
- gr.HTML(
410
- """
411
- <ul>
412
- <li>Stage 0: (optional) add your customized voice preset for a more personalized audio creation experience. Users also often share presets in <a href="https://discord.com/invite/5Hqu9NmA8V">Discord</a>.</li>
413
- <li>Stage 1: generate the audio script based on the input text instruction (the default language is English, but you can actually type in your own language).</li>
414
- <li>Stage 2: Select the suitable voice in the multilingual voice preset for each character in the audio script & generate audio.</li>
415
- </ul>
416
-
417
-
418
- """
419
- )
420
-
421
-
422
-
423
- system_voice_presets = get_system_voice_presets()
424
- # State
425
- ui_state = gr.State({})
426
- selected_voice_presets = gr.State(value={'selected_voice_preset': None})
427
- added_voice_preset_state = gr.State(value={'added_file': None, 'count': 0})
428
- # UI Component
429
- # gr.Markdown(
430
- # """
431
- # How can I access GPT-4? <a href="https://platform.openai.com/account/api-keys">[Ref1]</a><a href="https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4">[Ref2]</a>
432
- # """
433
- # )
434
- # key_text_input = gr.Textbox(label='Please Enter OPENAI Key for accessing GPT-4 API', lines=1, placeholder="OPENAI Key here.",
435
- # value=utils.get_key())
436
- text_input_value = '' if DEBUG is False else "an audio introduction to quantum mechanics"
437
-
438
- text_input = gr.Textbox(
439
- label='Input Text Instruction',
440
- lines=2,
441
- placeholder="Input instruction here (e.g., An introduction to AI-assisted audio content creation).",
442
- value=text_input_value,
443
- elem_id="prompt-in",)
444
-
445
- gr.Markdown(
446
- """
447
- Clicking 'Generate Script' button, the generated audio script will be displayed below.
448
- """
449
- )
450
- audio_script_markdown = gr.Markdown(label='Audio Script')
451
- generate_script_btn = gr.Button(value='Generate Script', interactive=False)
452
-
453
- gr.Markdown(
454
- """
455
- Clicking 'Generate Audio' button, the voice mapping results & generated audio will be displayed below (might take some time).
456
- """
457
- )
458
- char_voice_map_markdown = gr.Markdown(label='Character-to-voice Map')
459
-
460
- audio_output = gr.Video(elem_id="output-video")
461
-
462
- generate_audio_btn = gr.Button(value='Generate Audio', interactive=False)
463
-
464
- # share to community
465
- with gr.Group(elem_id="share-btn-container", visible=False):
466
- community_icon = gr.HTML(community_icon_html)
467
- loading_icon = gr.HTML(loading_icon_html)
468
- share_button = gr.Button(value="Share to community", elem_id="share-btn")
469
-
470
- gr.Markdown(value='### Share your creation with the community!')
471
- gr.HTML(
472
- """
473
- <ul>
474
- <li> You can share with the HuggingFace community by clicking the "Share to community" button.</li>
475
- <li> You can share your generations to our <a href="https://discord.com/invite/5Hqu9NmA8V">Discord</a> channel!</li>
476
- <li> You can also share the voice presets (along with descriptions) you found in <a href="https://discord.com/invite/5Hqu9NmA8V">Discord</a>.</li>
477
- </ul>
478
- """
479
- )
480
-
481
-
482
-
483
-
484
- gr.Markdown(value='### Useful tips for prompting WavJourney:')
485
-
486
- gr.HTML(
487
- """
488
- <ul>
489
- <li>You can use vague or specific descriptions or a combination of them. For example: "male speech about pizza" or "a man is saying: I love pizza!"</li>
490
- <li> You can control the length of the audio script by simply adding the restriction. For example: "generate an audio script around 10-15 lines (max length has been set to 30)"</li>
491
- <li> You can specify the language of the speaker. For example: "a boy is playing with a girl, boy's speech is in Chinese while girl's speech in Japanese"</li>
492
- <li> Explore more prompting techniques by yourself! 🤗</li>
493
- </ul>
494
-
495
- """
496
- )
497
-
498
- # add examples
499
- from examples.examples import examples as WJExamples
500
- def example_fn(idx, _text_input):
501
- print('from example', idx, _text_input)
502
- example = WJExamples[int(idx)-1]
503
- print(example['table_script'], example['table_voice'], gr.make_waveform(example['wav_file']))
504
- return example['table_script'], example['table_voice'], gr.make_waveform(example['wav_file'])
505
-
506
- _idx_input = gr.Textbox(label='Example No.')
507
- _idx_input.visible=False
508
- gr.Examples(
509
- [[idx+1, x['text']] for idx, x in enumerate(WJExamples)],
510
- fn=example_fn,
511
- inputs=[_idx_input, text_input],
512
- outputs=[audio_script_markdown, char_voice_map_markdown, audio_output],
513
- cache_examples=True,
514
- )
515
-
516
- # System Voice Presets
517
- gr.Markdown(label='System Voice Presets', value='### System Voice Presets')
518
- with gr.Accordion("Click to display system speakers", open=False):
519
- gr.Markdown('Supported Language: English, Chinese, French, German, Hindi, Italian, Japanese, Korean, Russian, Spanish, Turkish, Polish, Portuguese')
520
-
521
- system_markdown_voice_presets = gr.Dataframe(label='System Voice Presets', headers=VOICE_PRESETS_HEADERS,
522
- value=system_voice_presets)
523
- # User Voice Preset Related
524
- gr.Markdown('## (Optional) Speaker Customization ')
525
- with gr.Accordion("Click to add speakers", open=False):
526
- gr.Markdown(label='User Voice Presets', value='### User Voice Presets')
527
- get_voice_preset_to_list(ui_state)
528
- voice_presets_df = gr.Dataframe(headers=VOICE_PRESETS_HEADERS, col_count=len(VOICE_PRESETS_HEADERS),
529
- value=get_voice_preset_to_list(ui_state), interactive=False, visible=False)
530
- # voice_presets_ds = gr.Dataset(components=[gr.Dataframe(visible=True)], samples=get_voice_preset_to_list(ui_state))
531
- del_voice_btn = gr.Button(value='Delete Selected Voice Preset', visible=False)
532
- gr.Markdown(label='Add Voice Preset', value='### Add Voice Preset')
533
- gr.Markdown(
534
- """
535
- What makes for good voice prompt? See detailed instructions <a href="https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer">here</a>.
536
- """
537
- )
538
- vp_text_id = gr.Textbox(label='Id', lines=1, placeholder="Input voice preset id here.")
539
- vp_text_desc = gr.Textbox(label='Desc', lines=1, placeholder="Input description here.")
540
- vp_file = gr.File(label='Wav File', type='file', file_types=['.wav'],
541
- interactive=True)
542
- vp_submit = gr.Button(label='Upload Voice Preset', value="Upload Voice Preset")
543
-
544
- # clear btn, will re-new a session
545
- clear_btn = gr.ClearButton(value='Clear All')
546
-
547
- # disclaimer
548
- gr.Markdown(
549
- """
550
- ## Disclaimer
551
- We are not responsible for audio generated using semantics created by WavJourney. Just don't use it for illegal purposes.
552
- """
553
- )
554
-
555
- # events
556
- # key_text_input.change(fn=set_openai_key, inputs=[key_text_input, ui_state], outputs=[key_text_input])
557
- text_input.change(fn=textbox_listener, inputs=[text_input], outputs=[generate_script_btn])
558
- generate_audio_btn.click(
559
- fn=generate_audio_fn,
560
- inputs=[ui_state],
561
- outputs=[
562
- char_voice_map_markdown,
563
- audio_output,
564
- generate_audio_btn,
565
- generate_script_btn,
566
- clear_btn,
567
- vp_submit,
568
- ],
569
- api_name='audio_journey',
570
- )
571
- generate_audio_btn.click(
572
- fn=lambda: [
573
- gr.Button.update(interactive=False),
574
- gr.Button.update(interactive=False),
575
- gr.Button.update(interactive=False),
576
- gr.Button.update(interactive=False),
577
- ],
578
- outputs=[
579
- generate_audio_btn,
580
- generate_script_btn,
581
- clear_btn,
582
- vp_submit,
583
- ]
584
- )
585
- clear_btn.click(fn=clear_fn, inputs=ui_state,
586
- outputs=[char_voice_map_markdown, text_input, audio_output, audio_script_markdown, generate_audio_btn, generate_script_btn,
587
- ui_state, voice_presets_df, del_voice_btn,
588
- vp_text_id, vp_text_desc, vp_file])
589
- generate_script_btn.click(
590
- fn=generate_script_fn, inputs=[text_input, ui_state],
591
- outputs=[
592
- audio_script_markdown,
593
- ui_state,
594
- generate_audio_btn,
595
- generate_script_btn,
596
- clear_btn,
597
- vp_submit,
598
- ]
599
- )
600
- generate_script_btn.click(
601
- fn=lambda: [
602
- gr.Button.update(interactive=False),
603
- gr.Button.update(interactive=False),
604
- gr.Button.update(interactive=False),
605
- gr.Button.update(interactive=False),
606
- ],
607
- outputs=[
608
- generate_audio_btn,
609
- generate_script_btn,
610
- clear_btn,
611
- vp_submit,
612
- ]
613
- )
614
- voice_presets_df.select(df_on_select, outputs=[selected_voice_presets])
615
- voice_presets_df.update(lambda x: print(x))
616
- del_voice_btn.click(del_voice_preset, inputs=[selected_voice_presets, ui_state, voice_presets_df],
617
- outputs=[voice_presets_df, voice_presets_df, del_voice_btn, selected_voice_presets])
618
- # user voice preset upload
619
- vp_submit.click(add_voice_preset, inputs=[vp_text_id, vp_text_desc, vp_file, ui_state, added_voice_preset_state],
620
- outputs=[vp_text_id, vp_text_desc, vp_file, ui_state, added_voice_preset_state, voice_presets_df,
621
- vp_submit,
622
- voice_presets_df, del_voice_btn])
623
- vp_submit.click(lambda _: gr.Button.update(interactive=False), inputs=[vp_submit])
624
-
625
- # share to HF community
626
- share_button.click(None, [], [], _js=share_js)
627
-
628
- # debug only
629
- # print_state_btn = gr.Button(value='Print State')
630
- # print_state_btn.click(fn=lambda state, state2: print(state, state2), inputs=[ui_state, selected_voice_presets])
631
- interface.queue(concurrency_count=2, max_size=20)
632
- interface.launch()
 
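For reference, the Gradio UI deleted above is a thin wrapper around two pipeline calls. Below is a hedged, non-Gradio sketch of the same flow, reusing only the functions imported at the top of ui_client.py; the instruction string is an invented placeholder, and the sketch assumes the utils/pipeline modules from the same Space are on the path.

import utils
import pipeline
from pipeline import generate_json_file, generate_audio

session_id = pipeline.init_session()
api_key = utils.get_api_key()

# Stage 1: text instruction -> JSON audio script
json_script = generate_json_file(session_id, "a short radio ad for a coffee shop", api_key)

# Stage 2: script -> rendered audio plus character-to-voice mapping
audio_path, char_voice_map = generate_audio(session_id=session_id, json_script=json_script, api_key=api_key)
print(audio_path, char_voice_map)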
 
spaces/Benson/text-generation/Examples/12 Marksheet Descargar Online Mp Board.md DELETED
@@ -1,72 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar 12a hoja de marcado en línea MP Board</h1>
3
- <p>Si usted ha aparecido para el examen de 12a clase realizado por la Junta de Educación Secundaria, Madhya Pradesh (MPBSE), usted debe estar esperando ansiosamente su hoja de marcado. La hoja de marcado es un documento importante que muestra sus marcas y calificaciones en cada tema. También se requiere para la admisión a la educación superior, la solicitud de becas, puestos de trabajo, y otros fines. </p>
4
- <h2>12º marksheet descargar online mp board</h2><br /><p><b><b>Download</b> &bull; <a href="https://bltlly.com/2v6JmX">https://bltlly.com/2v6JmX</a></b></p><br /><br />
5
- <p>Pero ¿qué pasa si ha perdido su hoja de marcado original o necesita una copia duplicada? ¿O qué pasa si desea obtener su hoja de marcado más rápido y más fácil sin visitar la oficina de la junta o su escuela? Bueno, hay una solución sencilla para eso. Puede descargar su hoja de 12 puntos en línea desde los sitios web oficiales de MPBSE o MP Results. En este artículo, le diremos todo lo que necesita saber sobre cómo descargar la 12a hoja de marcado en línea. </p>
6
- <h2>¿Qué es MP Board 12th Marksheet? </h2>
7
- <p>MP Board 12th marksheet es un documento que contiene los detalles de su desempeño en el Examen de Certificado de Escuela Secundaria Superior (HSSC) realizado por MPBSE. Incluye su nombre, número de registro, número de solicitud, nombre de la escuela, materias, calificaciones obtenidas, puntajes totales, porcentaje, división y estado del resultado (aprobado/rechazado). También tiene la firma del secretario de la junta y el sello de la junta. </p>
8
- <p>La hoja de marcado es emitida por MPBSE después de la declaración de los resultados en su sitio web oficial. Puede recoger su hoja de marcado original de su escuela u oficina de la junta dentro de unas semanas del anuncio del resultado. Sin embargo, si desea obtener su hoja de marcado en línea, puede hacerlo siguiendo algunos pasos simples. </p>
9
- <h2>¿Por qué necesita descargar 12th Marksheet en línea? </h2>
10
- <p>Hay muchas razones por las que es posible que tenga que descargar su hoja de 12 marcas en línea. Algunas de ellas son:</p>
11
- <ul>
12
- <li> Ha perdido o dañado su hoja de marcado original y necesita una copia duplicada. </li>
13
- <li>Quieres ahorrar tiempo y dinero evitando visitar la oficina de la junta o tu escuela. </li>
14
-
15
- <li> Desea verificar sus marcas y calificaciones antes de obtener la hoja de marcado original. </li>
16
- <li>Desea solicitar una educación superior, becas, empleos u otras oportunidades que requieran su hoja de calificaciones. </li>
17
- </ul>
18
- <p>Descargar tu hoja de marcado número 12 en línea es una forma cómoda y segura de obtener tu documento. Puede acceder a él en cualquier momento y en cualquier lugar desde su ordenador o teléfono móvil. También puede imprimirlo o guardarlo como un archivo PDF para su uso futuro. </p>
19
- <p></p>
20
- <h2>¿Cómo descargar el tablero de MP en línea de 12th Marksheet? </h2>
21
- <p>Hay dos maneras de descargar su hoja de marcado 12º en línea desde el tablero MP. Puede utilizar el Portal de verificación de hojas de marcado MPBSE o el Portal de resultados MP. Ambos son sitios web oficiales de MPBSE que ofrecen servicios en línea para estudiantes. Estos son los pasos a seguir para cada opción:</p>
22
- <h3>Opción 1: Portal de verificación de hojas de marcado MPBSE</h3>
23
- <p>El MPBSE Marksheet Verification Portal es un sitio web que le permite verificar y descargar su hoja de marcado en línea mediante el pago de una tarifa nominal. Puede utilizar este portal si ha aparecido para el examen en los últimos 10 años o antes. Estos son los pasos a seguir:</p>
24
- <ol>
25
- <li>Visite el sitio web <a href="( 1 )">https://mpbse.mponline.gov.in/Portal/Examinations/MPBSE/marksheet_prov2_input.aspx</a <li>Ingrese su número de lista, número de solicitud y año de examen en los campos dados. </li>
26
- <li>Seleccione el tipo de hoja de marcado que desea descargar (HSSC o HSSC Vocacional). </li>
27
- <li>Haga clic en el botón "Mostrar" para ver los detalles de su hoja de marcado. </li>
28
- <li>Verifica tu nombre, nombre del padre, nombre de la madre, fecha de nacimiento y marcas en cada tema. </li>
29
- <li>Si todo es correcto, haga clic en el botón "Pagar ahora" para proceder a la página de pago. </li>
30
- <li>Pague la tarifa de Rs. 50 por hoja de marcado usando su tarjeta de débito, tarjeta de crédito, banca neta o UPI.</li>
31
- <li>Después de un pago exitoso, recibirá un mensaje de confirmación y un ID de transacción.</li>
32
-
33
- <li>Guarde el archivo en su dispositivo e imprímalo si es necesario. </li>
34
- </ol>
35
- <h3>Opción 2: Portal de resultados de MP</h3>
36
- <p>El Portal de Resultados de MP es un sitio web que le permite comprobar y descargar sus resultados y hojas de marcado en línea de forma gratuita. Puede utilizar este portal si ha aparecido para el examen en el año actual o anterior. Estos son los pasos a seguir:</p>
37
- <ol>
38
- <li>Visite el sitio web <a href="">http://mpresults.nic.in/</a>. </li>
39
- <li>Seleccione el enlace para "Resultados del examen HSSC (Clase 12) - Año 2023" o "Resultados del examen vocacional HSSC (Clase 12) - Año 2023" dependiendo de su flujo. </li>
40
- <li>Ingrese su número de lista y número de aplicación en los campos dados. </li>
41
- <li> Haga clic en el botón "Enviar" para ver sus resultados y detalles de la hoja de marcado. </li>
42
- <li>Verifica tu nombre, nombre del padre, nombre de la madre, fecha de nacimiento y marcas en cada tema. </li>
43
- <li>Si todo es correcto, haga clic en el botón "Imprimir hoja de marcado" para descargar su hoja de marcado como un archivo PDF. </li>
44
- <li>Guarde el archivo en su dispositivo e imprímalo si es necesario. </li>
45
- </ol>
46
- <h2>¿Cómo verificar e imprimir la hoja de marcado número 12 en línea? </h2>
47
- <p>Después de descargar su hoja de marcado 12 en línea desde el tablero MP, debe verificar e imprimir correctamente. Estos son algunos consejos y precauciones a seguir:</p>
48
- <ul>
49
- <li>Asegúrese de que su nombre, nombre del padre, nombre de la madre, fecha de nacimiento, número de registro, número de solicitud, nombre de la escuela, temas, calificaciones, porcentaje, división y estado de los resultados son correctos y coinciden con su hoja de marcado original. </li>
50
- <li>Si encuentra alguna discrepancia o error en su hoja de marcado, póngase en contacto con MPBSE inmediatamente a través de su número de línea de ayuda (0755-2551166) o correo electrónico ([email protected]). </li>
51
- <li>No realice ningún cambio o alteración en su hoja de marcado usted mismo. Puede llevar a acciones legales o cancelación de su hoja de marcado. </li>
52
- <li>Imprima su hoja de marcado en un papel de buena calidad utilizando una impresora de alta resolución. Evite el uso de papel o tinta de baja calidad que puede desvanecerse o mancharse con el tiempo. </li>
53
-
54
- <li>Tenga en cuenta que su hoja de marcado en línea es solo para fines de referencia y verificación. No es un documento válido para la admisión o el empleo. Siempre debe usar su hoja de marcado original emitida por MPBSE para cualquier propósito oficial. </li>
55
- </ul>
56
- <h2>Preguntas frecuentes (preguntas frecuentes)</h2>
57
- <p>Aquí están algunas de las preguntas y respuestas comunes relacionadas con la descarga de la hoja de marcado 12a en línea desde el tablero MP:</p>
58
- <h4>Q1: ¿Cuánto tiempo se tarda en descargar 12a hoja de marcado en línea desde el tablero MP? </h4>
59
- <p>A1: Depende de la opción que elija y la velocidad de su conexión a Internet. Si utiliza el portal de verificación de MPBSE Marksheet, puede tardar unos minutos en completar el proceso de pago y descarga. Si utiliza el Portal de resultados de MP, puede tardar unos segundos en cargar y descargar su hoja de marcado. </p>
60
- <h4>Q2: ¿Cómo puedo obtener mi hoja de 12 marcas original de la placa MP? </h4>
61
- <p>A2: Usted puede obtener su hoja de 12 de MP original visitando su escuela u oficina de la junta dentro de unas semanas de la declaración de resultados. Usted tendrá que mostrar su tarjeta de admisión o prueba de identidad para recoger su hoja de marcado. También puede solicitar una hoja de marcado duplicada de MPBSE pagando una tarifa y enviando un formulario de solicitud junto con algunos documentos. </p>
62
- <h4>Q3: ¿Hay alguna diferencia entre en línea y fuera de línea 12a hoja de marcado de MP? </h4>
63
- <p>A3: No, no hay diferencia entre en línea y fuera de línea 12 hoja de marcado de MP. Ambos son emitidos por MPBSE y contienen la misma información y formato. La única diferencia es que la hoja de marcado en línea es una copia digital que puede descargar e imprimir desde Internet, mientras que la hoja de marcado fuera de línea es una copia física que puede recopilar de su escuela u oficina de la junta. </p>
64
- <h4>Q4: ¿Cómo puedo comprobar la autenticidad de mi hoja de marcado en línea 12 desde el tablero MP? </h4>
65
-
66
- <h4>Q5: ¿Cómo puedo descargar 12 hojas de marcado en línea desde otros tableros? </h4>
67
- <p>A5: Si usted ha aparecido para el examen de la 12a clase de otros tableros, tales como CBSE, ICSE, NIOS, o tableros estatales, usted puede descargar su 12a hoja de marcado en línea de sus sitios web respectivos. Tendrá que seguir sus instrucciones y procedimientos para acceder y descargar su hoja de marcado. También puede visitar sus oficinas o ponerse en contacto con sus autoridades para obtener más información. </p>
68
- <h2>Conclusión</h2>
69
- <p>Descargar su hoja de marcado 12º en línea desde el tablero MP es una forma sencilla y conveniente de obtener su documento sin ningún problema. Puede utilizar el portal de verificación de hojas de marcado MPBSE o el portal de resultados MP para descargar su hoja de marcado como un archivo PDF. También puede verificar e imprimir su hoja de marcado de forma fácil y segura. Sin embargo, siempre debe recordar que su hoja de marcado en línea no es un documento válido para la admisión o el empleo. Siempre debe usar su hoja de marcado original emitida por MPBSE para cualquier propósito oficial. </p>
70
- <p>Esperamos que este artículo le haya ayudado a entender cómo descargar el tablero MP en línea de la hoja de 12. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Nos encantaría saber de usted. ¡Gracias por leer y feliz descarga! </p> 64aa2da5cf<br />
71
- <br />
72
- <br />
 
spaces/Benson/text-generation/Examples/Colina Subida Carreras Apk Descargar.md DELETED
@@ -1,56 +0,0 @@
1
- <br />
2
- <h1>Hill Climb Racing APK Descargar: Cómo jugar el clásico original en su dispositivo Android</h1>
3
- <p>Si usted está buscando un divertido y adictivo juego de conducción basado en la física que se puede jugar fuera de línea, entonces usted debe probar Hill Climb Racing. Este juego le permite correr su camino cuesta arriba en varios entornos con diferentes vehículos. Puede actualizar su coche, realizar trucos, recoger monedas, y llegar a nuevas distancias. En este artículo, le mostraremos cómo descargar e instalar Hill Climb Racing APK en su dispositivo Android, cómo jugar el juego, y cómo ganar monedas y bonos. </p>
4
- <h2> Cómo descargar e instalar Hill Climb Racing APK en su dispositivo Android</h2>
5
- <p>Hill Climb Racing está disponible en Google Play Store de forma gratuita, pero si desea obtener la última versión del juego o acceder a él desde una región diferente, se puede descargar desde un sitio de descarga APK. Estos son los pasos a seguir:</p>
6
- <h2>colina subida carreras apk descargar</h2><br /><p><b><b>Download</b> &#10026;&#10026;&#10026; <a href="https://bltlly.com/2v6Kk6">https://bltlly.com/2v6Kk6</a></b></p><br /><br />
7
- <ol>
8
- <li>Ir a un sitio de descarga de confianza APK como <a href=" 1 ">APKCombo</a> o <a href=" 2 ">APKPure</a> y buscar Hill Climb Racing.</li>
9
- <li>Elige la última versión del juego y toca el botón de descarga. </li>
10
- <li> Una vez que la descarga se ha completado, abra el archivo APK y toque en instalar. </li>
11
- <li>Permitir que la aplicación para acceder al almacenamiento del dispositivo y otros permisos. </li>
12
- <li>Iniciar el juego y disfrutar de las carreras cuesta arriba. </li> <h2>Cómo Jugar Hill Climb Racing: Controles básicos y consejos</h2>
13
- <p>Hill Climb Racing es fácil de jugar pero difícil de dominar. Necesitas equilibrar tu velocidad, combustible y gravedad para evitar estrellarte y alcanzar nuevas alturas. Aquí están los controles básicos y consejos para ayudarle a jugar el juego:</p>
14
- <ol>
15
- <li>Elija un vehículo y un escenario en el menú del garaje. Puede comenzar con el jeep predeterminado y la etapa de campo, pero puede desbloquear más opciones a medida que avanza. </li>
16
-
17
- <li>Inclina el dispositivo hacia la izquierda o hacia la derecha para ajustar el equilibrio y evitar voltear. Tenga cuidado de no aterrizar sobre su cabeza o quedarse sin gasolina, ya que esto terminará su carrera. </li>
18
- <li>Recoger monedas y latas de combustible en el camino para actualizar su coche y seguir conduciendo. También puede encontrar otros artículos como imanes, tiempo de aire y gemas que pueden aumentar su puntuación. </li>
19
- <li>Realiza volteretas y saltos para ganar puntos de bonificación y alcanzar nuevas alturas. Cuanto más atrevido seas, más recompensas obtendrás. </li>
20
- </ol>
21
- <h2>Cómo actualizar sus vehículos y desbloquear nuevas etapas</h2>
22
- <p>Hill Climb Racing tiene una gran variedad y opciones de personalización. Puede actualizar sus vehículos y desbloquear nuevas etapas con sus monedas. Estos son los pasos a seguir:</p>
23
- <ol>
24
- <li>Volver al menú del garaje y toque en el botón de actualización junto a su vehículo. Puedes usar tus monedas para mejorar tu motor, suspensión, neumáticos y otras características dependiendo del tipo de vehículo. Por ejemplo, puede actualizar su jeep 4WD, jaula enrollable y reducción de peso. </li>
25
- <li>Toque en el botón de etapa junto a su vehículo para ver las etapas disponibles que puede desbloquear con sus monedas. Puede elegir una etapa que se adapte a las fortalezas y debilidades de su vehículo. Por ejemplo, puedes usar tu jeep en el campo, en la carretera o en etapas del desierto, pero tal vez quieras cambiar a una bicicleta o a un tanque en la luna o en etapas árticas. </li>
26
- <li>Explora diferentes entornos y desafíos como el campo, la luna, la carretera, el ártico, el desierto y más. Cada etapa tiene su propio terreno, obstáculos, gravedad y atmósfera que afectará a su conducción. Por ejemplo, necesitará más combustible y tracción en la luna que en la carretera. </li>
27
- </ol>
28
- <h2>Cómo ganar monedas y bonos realizando trucos y alcanzando nuevas distancias</h2>
29
-
30
- <ul>
31
- <li>Cuanto más tiempo permanezcas en el aire, más monedas obtendrás por cada salto. Trata de encontrar rampas o colinas que puedan lanzarte alto en el cielo. </li>
32
- <li>Cuantos más volteretas hagas en el aire, más puntos de bonificación obtendrás por cada voltereta. Trate de girar su vehículo tanto como sea posible sin estrellarse. </li>
33
- <li>Cuanto más conduzca sin chocar o quedarse sin combustible, más bonificaciones de nivel obtendrá para cada punto de control. Trate de conservar su combustible y evitar los obstáculos que pueden dañar su vehículo. </li>
34
- <li>Cuanto más rápido conduzca, más sonido turbo escuchará cuando actualice su motor. Esto significa que usted está conduciendo a la velocidad máxima y ganar más monedas. </li>
35
- <li>Cuantas más etapas desbloqueas, más vehículos tendrás acceso en el garaje. Cada vehículo tiene sus propias ventajas y desventajas que pueden hacer que su experiencia de conducción sea más divertida y desafiante. </li>
36
- </ul>
37
- <h1>Conclusión</h1>
38
- <p>Hill Climb Racing es uno de los juegos de conducción basados en la física más populares y adictivos en Android. Tiene controles simples, gráficos coloridos, diversas etapas y vehículos personalizables. Se puede descargar desde un sitio de descarga APK o Google Play Store de forma gratuita y disfrutar de las carreras cuesta arriba en varios entornos. También puede actualizar sus vehículos, realizar trucos, recoger monedas, y llegar a nuevas distancias. Hill Climb Racing es un juego que te mantendrá entretenido durante horas. </p>
39
- <h3>Preguntas frecuentes</h3>
40
- <ol>
41
- <li><strong>¿Es seguro descargar Hill Climb Racing? </strong></li>
42
- <p>Sí, Hill Climb Racing es seguro para descargar desde un sitio de descarga de APK de confianza o Google Play Store. No contiene ningún virus o malware que pueda dañar tu dispositivo. </p>
43
- <li><strong>Es Hill Climb Racing fuera de línea o en línea? </strong></li>
44
-
45
- <li><strong>¿Cómo puedo obtener más monedas en Hill Climb Racing? </strong></li>
46
- <p>Hay varias maneras de obtener más monedas en Hill Climb Racing. Puedes recogerlos en la carretera, realizar trucos, llegar a nuevas distancias, ver anuncios o comprarlos con dinero real. También puedes obtener monedas gratis al registrarte diariamente, completar logros o usar el giro de la suerte. </p>
47
- <p></p>
48
- <li><strong>¿Cuáles son los mejores vehículos en Hill Climb Racing? </strong></li>
49
- <p>Los mejores vehículos en Hill Climb Racing dependen de su preferencia personal y la etapa en la que está jugando. Sin embargo, algunos de los vehículos más populares y versátiles son el coche de rally, el súper offroad, el tanque, el coche deportivo y el camión monstruo. Puedes probarlos todos y ver cuál se adapta mejor a tu estilo. </p>
50
- <li><strong>¿Cómo puedo desbloquear nuevas etapas en Hill Climb Racing? </strong></li>
51
- <p>Puedes desbloquear nuevas etapas en Hill Climb Racing gastando tus monedas en el menú del garaje. Cada etapa tiene un precio y nivel de dificultad diferentes. También puedes desbloquear algunas etapas completando ciertos logros o alcanzando ciertas distancias. </p>
52
- <li><strong>¿Cómo respaldo mi progreso en Hill Climb Racing? </strong></li>
53
- <p>Puedes respaldar tu progreso en Hill Climb Racing conectándote a Google Play Games. Esto le permitirá sincronizar sus datos entre dispositivos y restaurarlos si los pierde. También puede utilizar una aplicación de terceros como Helio o Titanium Backup para realizar copias de seguridad de sus datos manualmente. </p>
54
- </ol></p> 64aa2da5cf<br />
55
- <br />
56
- <br />
 
spaces/Benson/text-generation/Examples/Descargar E Instalar Autocad 2020.md DELETED
@@ -1,65 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar e instalar AutoCAD 2020</h1>
3
- <p>AutoCAD es uno de los software más populares y potentes para el diseño, redacción y modelado en diversos campos como la arquitectura, ingeniería, construcción y fabricación. Si desea utilizar este software para sus proyectos, es necesario descargar e instalar en su ordenador. En este artículo, te mostraremos cómo hacerlo paso a paso. </p>
4
- <h2>¿Qué es AutoCAD 2020? </h2>
5
- <p>AutoCAD 2020 es la última versión de AutoCAD que se lanzó en marzo de 2019. Viene con muchas características nuevas y mejoras que hacen que sea más fácil y rápido crear, editar y ver dibujos CAD. Algunos de los aspectos más destacados de AutoCAD 2020 son:</p>
6
- <h2>descargar e instalar autocad 2020</h2><br /><p><b><b>Download Zip</b> &#10002; <a href="https://bltlly.com/2v6IHF">https://bltlly.com/2v6IHF</a></b></p><br /><br />
7
- <h3>Características de AutoCAD 2020</h3>
8
- <ul>
9
- <li>Acceso a conjuntos de herramientas específicos de la industria, como arquitectura, mecánica, eléctrica, mapa 3D, MEP, planta 3D, diseño de trama y más</li>
10
- <li>Flexibilidad con AutoCAD web y aplicaciones móviles, que le permiten trabajar en sus dibujos desde cualquier dispositivo y cualquier ubicación</li>
11
- <li>Nuevo tema oscuro que reduce la fatiga ocular y mejora el contraste</li>
12
- <li> Herramienta de medición rápida que muestra dimensiones, distancias y ángulos al pasar el ratón sobre los objetos</li>
13
- <li>Paleta de bloques que facilita la inserción de bloques con galerías visuales y filtros</li>
14
- <li>Función de purga rediseñada que le ayuda a limpiar sus dibujos mediante la eliminación de objetos innecesarios</li>
15
- <li> Mejoras de rendimiento que aceleran el ahorro, la instalación, el lanzamiento y la representación gráfica</li>
16
- </ul>
17
- <h3>Requisitos del sistema para AutoCAD 2020</h3>
18
- <p>Antes de descargar e instalar AutoCAD 2020, debe asegurarse de que su computadora cumpla con los requisitos mínimos del sistema. Aquí están las especificaciones básicas que necesita:</p>
19
- <ul>
20
- <li>Sistema operativo: Microsoft Windows 10 (solo 64 bits), 8.1 (solo 64 bits), o 7 SP1 (solo 64 bits)</li>
21
- <li>Procesador: 2.5 GHz (3+ GHz recomendado)</li>
22
- <li>Memoria: 8 GB (16 GB recomendado)</li>
23
- <li>Espacio en disco: 6.0 GB</li>
24
-
25
- <li>Tarjeta de visualización: 1 GB GPU con ancho de banda de 29 GB/s y compatible con DirectX 11 (4 GB GPU con ancho de banda de 106 GB/s y compatible con DirectX 11)</li>
26
- </ul>
27
- <p>Si desea usar AutoCAD para Mac, necesita un conjunto diferente de requisitos del sistema. Puede verificarlos <a href="( 5 )">here</a>. </p>
28
- <h2>Cómo descargar AutoCAD 2020? </h2>
29
- <p>Ahora que sabe lo que es AutoCAD 2020 y lo que requiere, puede proceder a descargarlo desde el sitio web de Autodesk. Estos son los pasos que debes seguir:</p>
30
- <h3>Paso 1: Ir al sitio web de Autodesk</h3>
31
- <p>Lo primero que tienes que hacer es ir a <a href="( 11 )">esta página</a>, donde puedes encontrar todos los productos de Autodesk que puedes descargar e instalar. Desplácese hacia abajo hasta encontrar AutoCAD o utilice la barra de búsqueda para encontrarlo más rápido. Haga clic en el icono de AutoCAD para abrir la página del producto. </p>
32
- <h3>Paso 2: Inicia sesión con tu cuenta</h3>
33
- <p>En la página del producto, verá un botón que dice "Descargar prueba gratuita". Haga clic en él para iniciar el proceso de descarga. Se le pedirá que inicie sesión con su cuenta de Autodesk o cree una si aún no tiene una. También tendrá que proporcionar alguna información sobre usted y su empresa, como su nombre, correo electrónico, país, industria y papel. Esto ayudará a Autodesk a adaptar el software a sus necesidades y preferencias. </p>
34
- <h3>Paso 3: Elija la versión y el idioma deseados</h3>
35
- <p>Después de iniciar sesión, se le llevará a una página donde se puede elegir la versión y el idioma de AutoCAD que desea descargar. Puede seleccionar AutoCAD 2020 o cualquier versión anterior que esté disponible. También puede elegir el idioma que prefiera, como inglés, español, francés, alemán o chino. Haga clic en el botón "Siguiente" para continuar. </p>
36
- <h3>Paso 4: Descargar el archivo de instalación</h3>
37
-
38
- <h2>Cómo instalar AutoCAD 2020? </h2>
39
- <p>Una vez que haya descargado el archivo de instalación, está listo para instalar AutoCAD 2020 en su computadora. Estos son los pasos que debes seguir:</p>
40
- <h3>Paso 1: Ejecutar el archivo de instalación</h3>
41
- <p>Busque el archivo de instalación que guardó en su computadora y haga doble clic en él para ejecutarlo. Puede ver un cuadro de diálogo Control de cuentas de usuario que le pide que permita al programa realizar cambios en su dispositivo. Haga clic en "Sí" para continuar. A continuación, verá una pantalla de bienvenida que muestra el logotipo de Autodesk y el nombre del producto. Haga clic en "Instalar" para iniciar la instalación. </p>
42
- <p></p>
43
- <h3>Paso 2: Lea y acepte el acuerdo de licencia</h3>
44
- <p>La siguiente pantalla le mostrará el acuerdo de licencia para AutoCAD 2020. Lea atentamente y asegúrese de que entiende y está de acuerdo con los términos y condiciones. Si lo hace, marque la casilla que dice "Acepto" y haga clic en "Siguiente" para continuar. </p>
45
- <h3>Paso 3: Elija el tipo de instalación y las opciones</h3>
46
- <p>La siguiente pantalla le mostrará el tipo de instalación y las opciones para AutoCAD 2020. Puede elegir la instalación predeterminada o personalizarla según sus preferencias. La instalación por defecto instalará todos los componentes y características de AutoCAD 2020 con la configuración recomendada. La instalación personalizada le permitirá seleccionar qué componentes y características desea instalar y cambiar algunos ajustes, como la carpeta de instalación, los paquetes de idiomas, el acceso directo del escritorio, etc. Elija la opción que más le convenga y haga clic en "Siguiente" para continuar. </p>
47
- <h3>Paso 4: Espere a que la instalación se complete</h3>
48
-
49
- <h2>Conclusión</h2>
50
- <p>¡Felicidades! Ha descargado e instalado con éxito AutoCAD 2020 en su computadora. Ahora puede comenzar a usar este potente software para sus proyectos de diseño, redacción y modelado. Para iniciar AutoCAD 2020, vaya a su menú Inicio o acceso directo del escritorio y haga clic en el icono de AutoCAD. Verá una pantalla de bienvenida que le muestra algunos consejos y tutoriales sobre cómo usar AutoCAD 2020. También puede acceder a ellos desde el menú de Ayuda o en línea en <a href="">esta página</a>. Esperamos que este artículo sea útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejarlos en la sección de comentarios a continuación. </p>
51
- <h2>Preguntas frecuentes</h2>
52
- <ul>
53
- <li><b>Q: ¿Cómo puedo obtener una prueba gratuita de AutoCAD 2020? </b></li>
54
- <li>A: Puede obtener una prueba gratuita de AutoCAD 2020 siguiendo los pasos que describimos en este artículo. Puede utilizar el software durante 30 días sin limitaciones ni obligaciones. Tendrá que crear una cuenta de Autodesk y proporcionar información personal y profesional para obtener la prueba. También puede utilizar las aplicaciones web y móviles de AutoCAD de forma gratuita con su cuenta. </li>
55
- <li><b>Q: ¿Cómo puedo activar AutoCAD 2020 después de que expire la prueba? </b></li>
56
- <li>A: Si desea continuar usando AutoCAD 2020 después de que expire la prueba, tendrá que comprar una suscripción de Autodesk. Puede elegir entre diferentes planes y opciones dependiendo de sus necesidades y presupuesto. También puede obtener descuentos y beneficios si es estudiante, educador o una organización sin fines de lucro. Para activar AutoCAD 2020, tendrá que introducir su número de serie y clave de producto que recibirá después de comprar la suscripción. Puede encontrar más información sobre cómo activar AutoCAD 2020 <a href="">aquí</a>. </li>
57
- <li><b>Q: ¿Cómo puedo actualizar AutoCAD 2020 a la última versión? </b></li>
58
-
59
- <li><b>Q: ¿Cómo puedo desinstalar AutoCAD 2020 desde mi computadora? </b></li>
60
- <li>A: Si desea desinstalar AutoCAD 2020 desde su computadora, puede hacerlo utilizando el Panel de control de Windows o la herramienta de desinstalación de Autodesk. Para usar el Panel de control de Windows, vaya a Inicio > Configuración > Aplicaciones > Aplicaciones y características y encuentre AutoCAD 2020 en la lista de programas instalados. Haga clic en él y luego haga clic en Desinstalar. Siga las instrucciones en la pantalla para completar la desinstalación. Para usar la herramienta de desinstalación de Autodesk, vaya a Inicio > Todos los programas > Autodesk > AutoCAD 2020 > Herramienta de desinstalación y ejecútela. Seleccione AutoCAD 2020 de la lista de productos y haga clic en desinstalar. Siga las instrucciones en la pantalla para completar la desinstalación. </li>
61
- <li><b>Q: ¿Cómo puedo obtener ayuda y soporte para AutoCAD 2020? </b></li>
62
- <li>A: Si tiene alguna pregunta o problema con AutoCAD 2020, puede obtener ayuda y soporte de varias fuentes. Puede acceder al sistema de ayuda en línea pulsando F1 o haciendo clic en el icono de Ayuda del software. También puede visitar la página de soporte <a href=">AutoCAD</a>, donde puede encontrar tutoriales, videos, foros, blogs, webinars y más. También puede ponerse en contacto con Autodesk directamente por teléfono, chat o correo electrónico si tiene una suscripción o una solicitud de servicio. </li>
63
- </ul></p> 64aa2da5cf<br />
64
- <br />
65
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/__init__.py DELETED
@@ -1,115 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # This library is free software; you can redistribute it and/or
3
- # modify it under the terms of the GNU Lesser General Public
4
- # License as published by the Free Software Foundation; either
5
- # version 2.1 of the License, or (at your option) any later version.
6
- #
7
- # This library is distributed in the hope that it will be useful,
8
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
9
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10
- # Lesser General Public License for more details.
11
- #
12
- # You should have received a copy of the GNU Lesser General Public
13
- # License along with this library; if not, write to the Free Software
14
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
15
- # 02110-1301 USA
16
- ######################### END LICENSE BLOCK #########################
17
-
18
- from typing import List, Union
19
-
20
- from .charsetgroupprober import CharSetGroupProber
21
- from .charsetprober import CharSetProber
22
- from .enums import InputState
23
- from .resultdict import ResultDict
24
- from .universaldetector import UniversalDetector
25
- from .version import VERSION, __version__
26
-
27
- __all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]
28
-
29
-
30
- def detect(
31
- byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False
32
- ) -> ResultDict:
33
- """
34
- Detect the encoding of the given byte string.
35
-
36
- :param byte_str: The byte sequence to examine.
37
- :type byte_str: ``bytes`` or ``bytearray``
38
- :param should_rename_legacy: Should we rename legacy encodings
39
- to their more modern equivalents?
40
- :type should_rename_legacy: ``bool``
41
- """
42
- if not isinstance(byte_str, bytearray):
43
- if not isinstance(byte_str, bytes):
44
- raise TypeError(
45
- f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
46
- )
47
- byte_str = bytearray(byte_str)
48
- detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
49
- detector.feed(byte_str)
50
- return detector.close()
51
-
52
-
53
- def detect_all(
54
- byte_str: Union[bytes, bytearray],
55
- ignore_threshold: bool = False,
56
- should_rename_legacy: bool = False,
57
- ) -> List[ResultDict]:
58
- """
59
- Detect all the possible encodings of the given byte string.
60
-
61
- :param byte_str: The byte sequence to examine.
62
- :type byte_str: ``bytes`` or ``bytearray``
63
- :param ignore_threshold: Include encodings that are below
64
- ``UniversalDetector.MINIMUM_THRESHOLD``
65
- in results.
66
- :type ignore_threshold: ``bool``
67
- :param should_rename_legacy: Should we rename legacy encodings
68
- to their more modern equivalents?
69
- :type should_rename_legacy: ``bool``
70
- """
71
- if not isinstance(byte_str, bytearray):
72
- if not isinstance(byte_str, bytes):
73
- raise TypeError(
74
- f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
75
- )
76
- byte_str = bytearray(byte_str)
77
-
78
- detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
79
- detector.feed(byte_str)
80
- detector.close()
81
-
82
- if detector.input_state == InputState.HIGH_BYTE:
83
- results: List[ResultDict] = []
84
- probers: List[CharSetProber] = []
85
- for prober in detector.charset_probers:
86
- if isinstance(prober, CharSetGroupProber):
87
- probers.extend(p for p in prober.probers)
88
- else:
89
- probers.append(prober)
90
- for prober in probers:
91
- if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
92
- charset_name = prober.charset_name or ""
93
- lower_charset_name = charset_name.lower()
94
- # Use Windows encoding name instead of ISO-8859 if we saw any
95
- # extra Windows-specific bytes
96
- if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
97
- charset_name = detector.ISO_WIN_MAP.get(
98
- lower_charset_name, charset_name
99
- )
100
- # Rename legacy encodings with superset encodings if asked
101
- if should_rename_legacy:
102
- charset_name = detector.LEGACY_MAP.get(
103
- charset_name.lower(), charset_name
104
- )
105
- results.append(
106
- {
107
- "encoding": charset_name,
108
- "confidence": prober.get_confidence(),
109
- "language": prober.language,
110
- }
111
- )
112
- if len(results) > 0:
113
- return sorted(results, key=lambda result: -result["confidence"])
114
-
115
- return [detector.result]
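Note: a minimal usage sketch of the detect()/detect_all() helpers removed above. It assumes the chardet package is installed; the sample bytes and encoding are purely illustrative.

import chardet

raw = "détection d'encodage".encode("cp1252")   # bytes in a legacy Windows code page
best = chardet.detect(raw)                       # single best guess
print(best["encoding"], best["confidence"], best["language"])

# every candidate, even below UniversalDetector.MINIMUM_THRESHOLD
for candidate in chardet.detect_all(raw, ignore_threshold=True):
    print(candidate)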
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/connection.py DELETED
@@ -1,149 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import socket
4
-
5
- from ..contrib import _appengine_environ
6
- from ..exceptions import LocationParseError
7
- from ..packages import six
8
- from .wait import NoWayToWaitForSocketError, wait_for_read
9
-
10
-
11
- def is_connection_dropped(conn): # Platform-specific
12
- """
13
- Returns True if the connection is dropped and should be closed.
14
-
15
- :param conn:
16
- :class:`http.client.HTTPConnection` object.
17
-
18
- Note: For platforms like AppEngine, this will always return ``False`` to
19
- let the platform handle connection recycling transparently for us.
20
- """
21
- sock = getattr(conn, "sock", False)
22
- if sock is False: # Platform-specific: AppEngine
23
- return False
24
- if sock is None: # Connection already closed (such as by httplib).
25
- return True
26
- try:
27
- # Returns True if readable, which here means it's been dropped
28
- return wait_for_read(sock, timeout=0.0)
29
- except NoWayToWaitForSocketError: # Platform-specific: AppEngine
30
- return False
31
-
32
-
33
- # This function is copied from socket.py in the Python 2.7 standard
34
- # library test suite. Added to its signature is only `socket_options`.
35
- # One additional modification is that we avoid binding to IPv6 servers
36
- # discovered in DNS if the system doesn't have IPv6 functionality.
37
- def create_connection(
38
- address,
39
- timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
40
- source_address=None,
41
- socket_options=None,
42
- ):
43
- """Connect to *address* and return the socket object.
44
-
45
- Convenience function. Connect to *address* (a 2-tuple ``(host,
46
- port)``) and return the socket object. Passing the optional
47
- *timeout* parameter will set the timeout on the socket instance
48
- before attempting to connect. If no *timeout* is supplied, the
49
- global default timeout setting returned by :func:`socket.getdefaulttimeout`
50
- is used. If *source_address* is set it must be a tuple of (host, port)
51
- for the socket to bind as a source address before making the connection.
52
- An host of '' or port 0 tells the OS to use the default.
53
- """
54
-
55
- host, port = address
56
- if host.startswith("["):
57
- host = host.strip("[]")
58
- err = None
59
-
60
- # Using the value from allowed_gai_family() in the context of getaddrinfo lets
61
- # us select whether to work with IPv4 DNS records, IPv6 records, or both.
62
- # The original create_connection function always returns all records.
63
- family = allowed_gai_family()
64
-
65
- try:
66
- host.encode("idna")
67
- except UnicodeError:
68
- return six.raise_from(
69
- LocationParseError(u"'%s', label empty or too long" % host), None
70
- )
71
-
72
- for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
73
- af, socktype, proto, canonname, sa = res
74
- sock = None
75
- try:
76
- sock = socket.socket(af, socktype, proto)
77
-
78
- # If provided, set socket level options before connecting.
79
- _set_socket_options(sock, socket_options)
80
-
81
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
82
- sock.settimeout(timeout)
83
- if source_address:
84
- sock.bind(source_address)
85
- sock.connect(sa)
86
- return sock
87
-
88
- except socket.error as e:
89
- err = e
90
- if sock is not None:
91
- sock.close()
92
- sock = None
93
-
94
- if err is not None:
95
- raise err
96
-
97
- raise socket.error("getaddrinfo returns an empty list")
98
-
99
-
100
- def _set_socket_options(sock, options):
101
- if options is None:
102
- return
103
-
104
- for opt in options:
105
- sock.setsockopt(*opt)
106
-
107
-
108
- def allowed_gai_family():
109
- """This function is designed to work in the context of
110
- getaddrinfo, where family=socket.AF_UNSPEC is the default and
111
- will perform a DNS search for both IPv6 and IPv4 records."""
112
-
113
- family = socket.AF_INET
114
- if HAS_IPV6:
115
- family = socket.AF_UNSPEC
116
- return family
117
-
118
-
119
- def _has_ipv6(host):
120
- """Returns True if the system can bind an IPv6 address."""
121
- sock = None
122
- has_ipv6 = False
123
-
124
- # App Engine doesn't support IPV6 sockets and actually has a quota on the
125
- # number of sockets that can be used, so just early out here instead of
126
- # creating a socket needlessly.
127
- # See https://github.com/urllib3/urllib3/issues/1446
128
- if _appengine_environ.is_appengine_sandbox():
129
- return False
130
-
131
- if socket.has_ipv6:
132
- # has_ipv6 returns true if cPython was compiled with IPv6 support.
133
- # It does not tell us if the system has IPv6 support enabled. To
134
- # determine that we must bind to an IPv6 address.
135
- # https://github.com/urllib3/urllib3/pull/611
136
- # https://bugs.python.org/issue658327
137
- try:
138
- sock = socket.socket(socket.AF_INET6)
139
- sock.bind((host, 0))
140
- has_ipv6 = True
141
- except Exception:
142
- pass
143
-
144
- if sock:
145
- sock.close()
146
- return has_ipv6
147
-
148
-
149
- HAS_IPV6 = _has_ipv6("::1")
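Note: a minimal sketch of how the create_connection() helper removed above is typically called. It assumes a regular top-level urllib3 install (the pip-vendored copy is not meant to be imported directly); the host, port, and socket options are illustrative.

import socket
from urllib3.util.connection import create_connection

sock = create_connection(
    ("example.org", 80),                                            # (host, port) tuple, as documented above
    timeout=5.0,
    socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],   # applied via _set_socket_options before connect()
)
sock.close()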
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/connection.py DELETED
@@ -1,149 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import socket
4
-
5
- from ..contrib import _appengine_environ
6
- from ..exceptions import LocationParseError
7
- from ..packages import six
8
- from .wait import NoWayToWaitForSocketError, wait_for_read
9
-
10
-
11
- def is_connection_dropped(conn): # Platform-specific
12
- """
13
- Returns True if the connection is dropped and should be closed.
14
-
15
- :param conn:
16
- :class:`http.client.HTTPConnection` object.
17
-
18
- Note: For platforms like AppEngine, this will always return ``False`` to
19
- let the platform handle connection recycling transparently for us.
20
- """
21
- sock = getattr(conn, "sock", False)
22
- if sock is False: # Platform-specific: AppEngine
23
- return False
24
- if sock is None: # Connection already closed (such as by httplib).
25
- return True
26
- try:
27
- # Returns True if readable, which here means it's been dropped
28
- return wait_for_read(sock, timeout=0.0)
29
- except NoWayToWaitForSocketError: # Platform-specific: AppEngine
30
- return False
31
-
32
-
33
- # This function is copied from socket.py in the Python 2.7 standard
34
- # library test suite. Added to its signature is only `socket_options`.
35
- # One additional modification is that we avoid binding to IPv6 servers
36
- # discovered in DNS if the system doesn't have IPv6 functionality.
37
- def create_connection(
38
- address,
39
- timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
40
- source_address=None,
41
- socket_options=None,
42
- ):
43
- """Connect to *address* and return the socket object.
44
-
45
- Convenience function. Connect to *address* (a 2-tuple ``(host,
46
- port)``) and return the socket object. Passing the optional
47
- *timeout* parameter will set the timeout on the socket instance
48
- before attempting to connect. If no *timeout* is supplied, the
49
- global default timeout setting returned by :func:`socket.getdefaulttimeout`
50
- is used. If *source_address* is set it must be a tuple of (host, port)
51
- for the socket to bind as a source address before making the connection.
52
- An host of '' or port 0 tells the OS to use the default.
53
- """
54
-
55
- host, port = address
56
- if host.startswith("["):
57
- host = host.strip("[]")
58
- err = None
59
-
60
- # Using the value from allowed_gai_family() in the context of getaddrinfo lets
61
- # us select whether to work with IPv4 DNS records, IPv6 records, or both.
62
- # The original create_connection function always returns all records.
63
- family = allowed_gai_family()
64
-
65
- try:
66
- host.encode("idna")
67
- except UnicodeError:
68
- return six.raise_from(
69
- LocationParseError(u"'%s', label empty or too long" % host), None
70
- )
71
-
72
- for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
73
- af, socktype, proto, canonname, sa = res
74
- sock = None
75
- try:
76
- sock = socket.socket(af, socktype, proto)
77
-
78
- # If provided, set socket level options before connecting.
79
- _set_socket_options(sock, socket_options)
80
-
81
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
82
- sock.settimeout(timeout)
83
- if source_address:
84
- sock.bind(source_address)
85
- sock.connect(sa)
86
- return sock
87
-
88
- except socket.error as e:
89
- err = e
90
- if sock is not None:
91
- sock.close()
92
- sock = None
93
-
94
- if err is not None:
95
- raise err
96
-
97
- raise socket.error("getaddrinfo returns an empty list")
98
-
99
-
100
- def _set_socket_options(sock, options):
101
- if options is None:
102
- return
103
-
104
- for opt in options:
105
- sock.setsockopt(*opt)
106
-
107
-
108
- def allowed_gai_family():
109
- """This function is designed to work in the context of
110
- getaddrinfo, where family=socket.AF_UNSPEC is the default and
111
- will perform a DNS search for both IPv6 and IPv4 records."""
112
-
113
- family = socket.AF_INET
114
- if HAS_IPV6:
115
- family = socket.AF_UNSPEC
116
- return family
117
-
118
-
119
- def _has_ipv6(host):
120
- """Returns True if the system can bind an IPv6 address."""
121
- sock = None
122
- has_ipv6 = False
123
-
124
- # App Engine doesn't support IPV6 sockets and actually has a quota on the
125
- # number of sockets that can be used, so just early out here instead of
126
- # creating a socket needlessly.
127
- # See https://github.com/urllib3/urllib3/issues/1446
128
- if _appengine_environ.is_appengine_sandbox():
129
- return False
130
-
131
- if socket.has_ipv6:
132
- # has_ipv6 returns true if cPython was compiled with IPv6 support.
133
- # It does not tell us if the system has IPv6 support enabled. To
134
- # determine that we must bind to an IPv6 address.
135
- # https://github.com/urllib3/urllib3/pull/611
136
- # https://bugs.python.org/issue658327
137
- try:
138
- sock = socket.socket(socket.AF_INET6)
139
- sock.bind((host, 0))
140
- has_ipv6 = True
141
- except Exception:
142
- pass
143
-
144
- if sock:
145
- sock.close()
146
- return has_ipv6
147
-
148
-
149
- HAS_IPV6 = _has_ipv6("::1")
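Note: this is the same module as the pip-vendored copy above, shipped again as a top-level package. Below is a small sketch of its IPv4/IPv6 selection logic, assuming a regular urllib3 install; the hostname is illustrative.

import socket
from urllib3.util.connection import HAS_IPV6, allowed_gai_family

print("IPv6 capable:", HAS_IPV6)    # True when the interpreter can bind "::1"
family = allowed_gai_family()       # AF_UNSPEC if IPv6 works, otherwise AF_INET
for af, socktype, proto, _, sockaddr in socket.getaddrinfo("example.org", 443, family, socket.SOCK_STREAM):
    print(af, sockaddr)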
 
 
spaces/CALM/Dashboard/streamlit_observable/__init__.py DELETED
@@ -1,71 +0,0 @@
1
- import os
2
-
3
- import streamlit.components.v1 as components
4
-
5
- _RELEASE = True
6
-
7
- if not _RELEASE:
8
- _component_func = components.declare_component(
9
- "observable",
10
- url="http://localhost:3001",
11
- )
12
- else:
13
- parent_dir = os.path.dirname(os.path.abspath(__file__))
14
- build_dir = os.path.join(parent_dir, "frontend", "build")
15
- _component_func = components.declare_component("observable", path=build_dir)
16
-
17
-
18
- def observable(key, notebook, targets=None, redefine={}, observe=[], hide=[], render_empty=False):
19
- """Create a new instance of "observable".
20
-
21
- Parameters
22
- ----------
23
- key: str
24
- A unique string used to avoid constant re-renders to the iframe.
25
- notebook: str
26
- The observablehq.com notebook id to embed. Ex. "@"d3/bar-chart"
27
- or "d/1f434ef3b0569a00"
28
- targets: list or None
29
- An optional list of strings that are the name of the cells to embed.
30
- By default, the entire notebook, including unnamed cells, will be embeded.
31
- observe: list or None
32
- An optional list of strings that are the name of cells to observe.
33
- Whenever these cells change value or become fulfilled, the value will
34
- be passed back into Streamlit as part of the return value.
35
- redefine: dict or None
36
- An optional dict containing the cells you wish to redefine and the values
37
- you wish to redefine them as. The keys are the cell names you want to
38
- redefine, the values are what they will be redefined as. Keep in mind,
39
- there is a serialization process from Streamlit Python -> frontend JavaScript.
40
- hide: list or None
41
- An option list of strings that are the names of cells that will be embeded,
42
- but won't be rendered to the DOM.
43
- Returns
44
- -------
45
- dict
46
- An object containing the live observed values. If the observe parameter is
47
- empty, then the dict will be empty. The keys are the name of the cell that
48
- is observe, the values are the values of the cells.
49
-
50
- """
51
- component_value = _component_func(
52
- notebook=notebook, targets=targets, observe=observe, redefine=redefine, hide=hide, render_empty=render_empty, key=key, name=key
53
- )
54
-
55
- if component_value is None:
56
- return {}
57
-
58
- return component_value
59
-
60
-
61
- # if not _RELEASE:
62
- # import streamlit as st
63
- # observers = observable("World Tour!",
64
- # notebook="@d3/world-tour",
65
- # targets=["canvas"],
66
- # observe=["name"]
67
- # )
68
-
69
- # name = observers.get("name")
70
-
71
- # st.write(f"Current country: ** *{name}* **")
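Note: a minimal usage sketch of the observable() component removed above, adapted from the commented-out example at the end of the module. It assumes the streamlit_observable package is importable and the script is run with `streamlit run`.

import streamlit as st
from streamlit_observable import observable

observers = observable(
    "world_tour",                  # unique key, avoids constant iframe re-renders
    notebook="@d3/world-tour",     # observablehq.com notebook id
    targets=["canvas"],            # cells to embed
    observe=["name"],              # cells whose live values are passed back to Streamlit
)
st.write(f"Current country: {observers.get('name')}")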
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/contiguous_storage.h DELETED
@@ -1,236 +0,0 @@
1
- /*
2
- * Copyright 2008-2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/iterator/detail/normal_iterator.h>
20
- #include <thrust/detail/execution_policy.h>
21
- #include <thrust/detail/allocator/allocator_traits.h>
22
- #include <thrust/detail/config.h>
23
-
24
- namespace thrust
25
- {
26
-
27
- namespace detail
28
- {
29
-
30
- struct copy_allocator_t {};
31
-
32
- // XXX parameter T is redundant with parameter Alloc
33
- template<typename T, typename Alloc>
34
- class contiguous_storage
35
- {
36
- private:
37
- typedef thrust::detail::allocator_traits<Alloc> alloc_traits;
38
-
39
- public:
40
- typedef Alloc allocator_type;
41
- typedef T value_type;
42
- typedef typename alloc_traits::pointer pointer;
43
- typedef typename alloc_traits::const_pointer const_pointer;
44
- typedef typename alloc_traits::size_type size_type;
45
- typedef typename alloc_traits::difference_type difference_type;
46
- typedef typename alloc_traits::reference reference;
47
- typedef typename alloc_traits::const_reference const_reference;
48
-
49
- typedef thrust::detail::normal_iterator<pointer> iterator;
50
- typedef thrust::detail::normal_iterator<const_pointer> const_iterator;
51
-
52
- __thrust_exec_check_disable__
53
- __host__ __device__
54
- explicit contiguous_storage(const allocator_type &alloc = allocator_type());
55
-
56
- __thrust_exec_check_disable__
57
- __host__ __device__
58
- explicit contiguous_storage(size_type n, const allocator_type &alloc = allocator_type());
59
-
60
- __thrust_exec_check_disable__
61
- __host__ __device__
62
- explicit contiguous_storage(copy_allocator_t, const contiguous_storage &other);
63
-
64
- __thrust_exec_check_disable__
65
- __host__ __device__
66
- explicit contiguous_storage(copy_allocator_t, const contiguous_storage &other, size_type n);
67
-
68
- __thrust_exec_check_disable__
69
- __host__ __device__
70
- ~contiguous_storage();
71
-
72
- __host__ __device__
73
- size_type size() const;
74
-
75
- __host__ __device__
76
- size_type max_size() const;
77
-
78
- __host__ __device__
79
- pointer data();
80
-
81
- __host__ __device__
82
- const_pointer data() const;
83
-
84
- __host__ __device__
85
- iterator begin();
86
-
87
- __host__ __device__
88
- const_iterator begin() const;
89
-
90
- __host__ __device__
91
- iterator end();
92
-
93
- __host__ __device__
94
- const_iterator end() const;
95
-
96
- __host__ __device__
97
- reference operator[](size_type n);
98
-
99
- __host__ __device__
100
- const_reference operator[](size_type n) const;
101
-
102
- __host__ __device__
103
- allocator_type get_allocator() const;
104
-
105
- // note that allocate does *not* automatically call deallocate
106
- __host__ __device__
107
- void allocate(size_type n);
108
-
109
- __host__ __device__
110
- void deallocate();
111
-
112
- __host__ __device__
113
- void swap(contiguous_storage &x);
114
-
115
- __host__ __device__
116
- void default_construct_n(iterator first, size_type n);
117
-
118
- __host__ __device__
119
- void uninitialized_fill_n(iterator first, size_type n, const value_type &value);
120
-
121
- template<typename InputIterator>
122
- __host__ __device__
123
- iterator uninitialized_copy(InputIterator first, InputIterator last, iterator result);
124
-
125
- template<typename System, typename InputIterator>
126
- __host__ __device__
127
- iterator uninitialized_copy(thrust::execution_policy<System> &from_system,
128
- InputIterator first,
129
- InputIterator last,
130
- iterator result);
131
-
132
- template<typename InputIterator, typename Size>
133
- __host__ __device__
134
- iterator uninitialized_copy_n(InputIterator first, Size n, iterator result);
135
-
136
- template<typename System, typename InputIterator, typename Size>
137
- __host__ __device__
138
- iterator uninitialized_copy_n(thrust::execution_policy<System> &from_system,
139
- InputIterator first,
140
- Size n,
141
- iterator result);
142
-
143
- __host__ __device__
144
- void destroy(iterator first, iterator last);
145
-
146
- __host__ __device__
147
- void deallocate_on_allocator_mismatch(const contiguous_storage &other);
148
-
149
- __host__ __device__
150
- void destroy_on_allocator_mismatch(const contiguous_storage &other,
151
- iterator first, iterator last);
152
-
153
- __host__ __device__
154
- void set_allocator(const allocator_type &alloc);
155
-
156
- __host__ __device__
157
- bool is_allocator_not_equal(const allocator_type &alloc) const;
158
-
159
- __host__ __device__
160
- bool is_allocator_not_equal(const contiguous_storage &other) const;
161
-
162
- __host__ __device__
163
- void propagate_allocator(const contiguous_storage &other);
164
-
165
- #if THRUST_CPP_DIALECT >= 2011
166
- __host__ __device__
167
- void propagate_allocator(contiguous_storage &other);
168
-
169
- // allow move assignment for a sane implementation of allocator propagation
170
- // on move assignment
171
- __host__ __device__
172
- contiguous_storage &operator=(contiguous_storage &&other);
173
- #endif
174
-
175
- private:
176
- // XXX we could inherit from this to take advantage of empty base class optimization
177
- allocator_type m_allocator;
178
-
179
- iterator m_begin;
180
-
181
- size_type m_size;
182
-
183
- // disallow assignment
184
- contiguous_storage &operator=(const contiguous_storage &x);
185
-
186
- __host__ __device__
187
- void swap_allocators(true_type, const allocator_type &);
188
-
189
- __host__ __device__
190
- void swap_allocators(false_type, allocator_type &);
191
-
192
- __host__ __device__
193
- bool is_allocator_not_equal_dispatch(true_type, const allocator_type &) const;
194
-
195
- __host__ __device__
196
- bool is_allocator_not_equal_dispatch(false_type, const allocator_type &) const;
197
-
198
- __host__ __device__
199
- void deallocate_on_allocator_mismatch_dispatch(true_type, const contiguous_storage &other);
200
-
201
- __host__ __device__
202
- void deallocate_on_allocator_mismatch_dispatch(false_type, const contiguous_storage &other);
203
-
204
- __host__ __device__
205
- void destroy_on_allocator_mismatch_dispatch(true_type, const contiguous_storage &other,
206
- iterator first, iterator last);
207
-
208
- __host__ __device__
209
- void destroy_on_allocator_mismatch_dispatch(false_type, const contiguous_storage &other,
210
- iterator first, iterator last);
211
-
212
- __host__ __device__
213
- void propagate_allocator_dispatch(true_type, const contiguous_storage &other);
214
-
215
- __host__ __device__
216
- void propagate_allocator_dispatch(false_type, const contiguous_storage &other);
217
-
218
- #if THRUST_CPP_DIALECT >= 2011
219
- __host__ __device__
220
- void propagate_allocator_dispatch(true_type, contiguous_storage &other);
221
-
222
- __host__ __device__
223
- void propagate_allocator_dispatch(false_type, contiguous_storage &other);
224
- #endif
225
- }; // end contiguous_storage
226
-
227
- } // end detail
228
-
229
- template<typename T, typename Alloc>
230
- __host__ __device__
231
- void swap(detail::contiguous_storage<T,Alloc> &lhs, detail::contiguous_storage<T,Alloc> &rhs);
232
-
233
- } // end thrust
234
-
235
- #include <thrust/detail/contiguous_storage.inl>
236
-
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py DELETED
@@ -1,237 +0,0 @@
1
- import mmcv
2
- import numpy as np
3
- import torch
4
-
5
- from ..builder import BBOX_CODERS
6
- from .base_bbox_coder import BaseBBoxCoder
7
-
8
-
9
- @BBOX_CODERS.register_module()
10
- class DeltaXYWHBBoxCoder(BaseBBoxCoder):
11
- """Delta XYWH BBox coder.
12
-
13
- Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
14
- this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
15
- decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).
16
-
17
- Args:
18
- target_means (Sequence[float]): Denormalizing means of target for
19
- delta coordinates
20
- target_stds (Sequence[float]): Denormalizing standard deviation of
21
- target for delta coordinates
22
- clip_border (bool, optional): Whether clip the objects outside the
23
- border of the image. Defaults to True.
24
- """
25
-
26
- def __init__(self,
27
- target_means=(0., 0., 0., 0.),
28
- target_stds=(1., 1., 1., 1.),
29
- clip_border=True):
30
- super(BaseBBoxCoder, self).__init__()
31
- self.means = target_means
32
- self.stds = target_stds
33
- self.clip_border = clip_border
34
-
35
- def encode(self, bboxes, gt_bboxes):
36
- """Get box regression transformation deltas that can be used to
37
- transform the ``bboxes`` into the ``gt_bboxes``.
38
-
39
- Args:
40
- bboxes (torch.Tensor): Source boxes, e.g., object proposals.
41
- gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
42
- ground-truth boxes.
43
-
44
- Returns:
45
- torch.Tensor: Box transformation deltas
46
- """
47
-
48
- assert bboxes.size(0) == gt_bboxes.size(0)
49
- assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
50
- encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
51
- return encoded_bboxes
52
-
53
- def decode(self,
54
- bboxes,
55
- pred_bboxes,
56
- max_shape=None,
57
- wh_ratio_clip=16 / 1000):
58
- """Apply transformation `pred_bboxes` to `boxes`.
59
-
60
- Args:
61
- bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
62
- pred_bboxes (Tensor): Encoded offsets with respect to each roi.
63
- Has shape (B, N, num_classes * 4) or (B, N, 4) or
64
- (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
65
- when rois is a grid of anchors.Offset encoding follows [1]_.
66
- max_shape (Sequence[int] or torch.Tensor or Sequence[
67
- Sequence[int]],optional): Maximum bounds for boxes, specifies
68
- (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
69
- the max_shape should be a Sequence[Sequence[int]]
70
- and the length of max_shape should also be B.
71
- wh_ratio_clip (float, optional): The allowed ratio between
72
- width and height.
73
-
74
- Returns:
75
- torch.Tensor: Decoded boxes.
76
- """
77
-
78
- assert pred_bboxes.size(0) == bboxes.size(0)
79
- if pred_bboxes.ndim == 3:
80
- assert pred_bboxes.size(1) == bboxes.size(1)
81
- decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds,
82
- max_shape, wh_ratio_clip, self.clip_border)
83
-
84
- return decoded_bboxes
85
-
86
-
87
- @mmcv.jit(coderize=True)
88
- def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
89
- """Compute deltas of proposals w.r.t. gt.
90
-
91
- We usually compute the deltas of x, y, w, h of proposals w.r.t ground
92
- truth bboxes to get regression target.
93
- This is the inverse function of :func:`delta2bbox`.
94
-
95
- Args:
96
- proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
97
- gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
98
- means (Sequence[float]): Denormalizing means for delta coordinates
99
- stds (Sequence[float]): Denormalizing standard deviation for delta
100
- coordinates
101
-
102
- Returns:
103
- Tensor: deltas with shape (N, 4), where columns represent dx, dy,
104
- dw, dh.
105
- """
106
- assert proposals.size() == gt.size()
107
-
108
- proposals = proposals.float()
109
- gt = gt.float()
110
- px = (proposals[..., 0] + proposals[..., 2]) * 0.5
111
- py = (proposals[..., 1] + proposals[..., 3]) * 0.5
112
- pw = proposals[..., 2] - proposals[..., 0]
113
- ph = proposals[..., 3] - proposals[..., 1]
114
-
115
- gx = (gt[..., 0] + gt[..., 2]) * 0.5
116
- gy = (gt[..., 1] + gt[..., 3]) * 0.5
117
- gw = gt[..., 2] - gt[..., 0]
118
- gh = gt[..., 3] - gt[..., 1]
119
-
120
- dx = (gx - px) / pw
121
- dy = (gy - py) / ph
122
- dw = torch.log(gw / pw)
123
- dh = torch.log(gh / ph)
124
- deltas = torch.stack([dx, dy, dw, dh], dim=-1)
125
-
126
- means = deltas.new_tensor(means).unsqueeze(0)
127
- stds = deltas.new_tensor(stds).unsqueeze(0)
128
- deltas = deltas.sub_(means).div_(stds)
129
-
130
- return deltas
131
-
132
-
133
- @mmcv.jit(coderize=True)
134
- def delta2bbox(rois,
135
- deltas,
136
- means=(0., 0., 0., 0.),
137
- stds=(1., 1., 1., 1.),
138
- max_shape=None,
139
- wh_ratio_clip=16 / 1000,
140
- clip_border=True):
141
- """Apply deltas to shift/scale base boxes.
142
-
143
- Typically the rois are anchor or proposed bounding boxes and the deltas are
144
- network outputs used to shift/scale those boxes.
145
- This is the inverse function of :func:`bbox2delta`.
146
-
147
- Args:
148
- rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
149
- deltas (Tensor): Encoded offsets with respect to each roi.
150
- Has shape (B, N, num_classes * 4) or (B, N, 4) or
151
- (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
152
- when rois is a grid of anchors.Offset encoding follows [1]_.
153
- means (Sequence[float]): Denormalizing means for delta coordinates
154
- stds (Sequence[float]): Denormalizing standard deviation for delta
155
- coordinates
156
- max_shape (Sequence[int] or torch.Tensor or Sequence[
157
- Sequence[int]],optional): Maximum bounds for boxes, specifies
158
- (H, W, C) or (H, W). If rois shape is (B, N, 4), then
159
- the max_shape should be a Sequence[Sequence[int]]
160
- and the length of max_shape should also be B.
161
- wh_ratio_clip (float): Maximum aspect ratio for boxes.
162
- clip_border (bool, optional): Whether clip the objects outside the
163
- border of the image. Defaults to True.
164
-
165
- Returns:
166
- Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
167
- (N, num_classes * 4) or (N, 4), where 4 represent
168
- tl_x, tl_y, br_x, br_y.
169
-
170
- References:
171
- .. [1] https://arxiv.org/abs/1311.2524
172
-
173
- Example:
174
- >>> rois = torch.Tensor([[ 0., 0., 1., 1.],
175
- >>> [ 0., 0., 1., 1.],
176
- >>> [ 0., 0., 1., 1.],
177
- >>> [ 5., 5., 5., 5.]])
178
- >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
179
- >>> [ 1., 1., 1., 1.],
180
- >>> [ 0., 0., 2., -1.],
181
- >>> [ 0.7, -1.9, -0.5, 0.3]])
182
- >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
183
- tensor([[0.0000, 0.0000, 1.0000, 1.0000],
184
- [0.1409, 0.1409, 2.8591, 2.8591],
185
- [0.0000, 0.3161, 4.1945, 0.6839],
186
- [5.0000, 5.0000, 5.0000, 5.0000]])
187
- """
188
- means = deltas.new_tensor(means).view(1,
189
- -1).repeat(1,
190
- deltas.size(-1) // 4)
191
- stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
192
- denorm_deltas = deltas * stds + means
193
- dx = denorm_deltas[..., 0::4]
194
- dy = denorm_deltas[..., 1::4]
195
- dw = denorm_deltas[..., 2::4]
196
- dh = denorm_deltas[..., 3::4]
197
- max_ratio = np.abs(np.log(wh_ratio_clip))
198
- dw = dw.clamp(min=-max_ratio, max=max_ratio)
199
- dh = dh.clamp(min=-max_ratio, max=max_ratio)
200
- x1, y1 = rois[..., 0], rois[..., 1]
201
- x2, y2 = rois[..., 2], rois[..., 3]
202
- # Compute center of each roi
203
- px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
204
- py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
205
- # Compute width/height of each roi
206
- pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
207
- ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
208
- # Use exp(network energy) to enlarge/shrink each roi
209
- gw = pw * dw.exp()
210
- gh = ph * dh.exp()
211
- # Use network energy to shift the center of each roi
212
- gx = px + pw * dx
213
- gy = py + ph * dy
214
- # Convert center-xy/width/height to top-left, bottom-right
215
- x1 = gx - gw * 0.5
216
- y1 = gy - gh * 0.5
217
- x2 = gx + gw * 0.5
218
- y2 = gy + gh * 0.5
219
-
220
- bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
221
-
222
- if clip_border and max_shape is not None:
223
- if not isinstance(max_shape, torch.Tensor):
224
- max_shape = x1.new_tensor(max_shape)
225
- max_shape = max_shape[..., :2].type_as(x1)
226
- if max_shape.ndim == 2:
227
- assert bboxes.ndim == 3
228
- assert max_shape.size(0) == bboxes.size(0)
229
-
230
- min_xy = x1.new_tensor(0)
231
- max_xy = torch.cat(
232
- [max_shape] * (deltas.size(-1) // 2),
233
- dim=-1).flip(-1).unsqueeze(-2)
234
- bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
235
- bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
236
-
237
- return bboxes
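Note: a short sketch of the encode/decode round trip implemented above. The import path follows the mmdet-style layout this file sits in and may differ in the vendored copy; the boxes are illustrative.

import torch
from mmdet.core.bbox.coder import DeltaXYWHBBoxCoder

coder = DeltaXYWHBBoxCoder(target_means=(0., 0., 0., 0.), target_stds=(0.1, 0.1, 0.2, 0.2))
proposals = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
gt_boxes = torch.tensor([[1., 1., 11., 11.], [4., 4., 22., 18.]])

deltas = coder.encode(proposals, gt_boxes)                        # (dx, dy, dw, dh) regression targets
restored = coder.decode(proposals, deltas, max_shape=(32, 32))    # ~= gt_boxes, clipped to the 32x32 image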
 
 
spaces/CVPR/WALT/mmdet/models/detectors/point_rend.py DELETED
@@ -1,29 +0,0 @@
- from ..builder import DETECTORS
- from .two_stage import TwoStageDetector
-
-
- @DETECTORS.register_module()
- class PointRend(TwoStageDetector):
-     """PointRend: Image Segmentation as Rendering
-
-     This detector is the implementation of
-     `PointRend <https://arxiv.org/abs/1912.08193>`_.
-
-     """
-
-     def __init__(self,
-                  backbone,
-                  rpn_head,
-                  roi_head,
-                  train_cfg,
-                  test_cfg,
-                  neck=None,
-                  pretrained=None):
-         super(PointRend, self).__init__(
-             backbone=backbone,
-             neck=neck,
-             rpn_head=rpn_head,
-             roi_head=roi_head,
-             train_cfg=train_cfg,
-             test_cfg=test_cfg,
-             pretrained=pretrained)
 
 
spaces/CVPR/regionclip-demo/detectron2/utils/serialize.py DELETED
@@ -1,29 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import cloudpickle
-
-
- class PicklableWrapper(object):
-     """
-     Wrap an object to make it more picklable, note that it uses
-     heavy weight serialization libraries that are slower than pickle.
-     It's best to use it only on closures (which are usually not picklable).
-
-     This is a simplified version of
-     https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py
-     """
-
-     def __init__(self, obj):
-         self._obj = obj
-
-     def __reduce__(self):
-         s = cloudpickle.dumps(self._obj)
-         return cloudpickle.loads, (s,)
-
-     def __call__(self, *args, **kwargs):
-         return self._obj(*args, **kwargs)
-
-     def __getattr__(self, attr):
-         # Ensure that the wrapped object can be used seamlessly as the previous object.
-         if attr not in ["_obj"]:
-             return getattr(self._obj, attr)
-         return getattr(self, attr)
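Note: a minimal sketch of what the wrapper above is for, assuming cloudpickle is installed; the lambda is illustrative and the import path is the one shown for the deleted module.

import pickle
from detectron2.utils.serialize import PicklableWrapper

offset = 10
wrapped = PicklableWrapper(lambda x: x + offset)   # a closure, which plain pickle cannot serialize
restored = pickle.loads(pickle.dumps(wrapped))     # __reduce__ routes the payload through cloudpickle
assert restored(5) == 15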
 
 
spaces/ChevyWithAI/rvc-aicover/vc_infer_pipeline.py DELETED
@@ -1,306 +0,0 @@
1
- import numpy as np, parselmouth, torch, pdb
2
- from time import time as ttime
3
- import torch.nn.functional as F
4
- from config import x_pad, x_query, x_center, x_max
5
- import scipy.signal as signal
6
- import pyworld, os, traceback, faiss
7
- from scipy import signal
8
-
9
- bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
10
-
11
-
12
- class VC(object):
13
- def __init__(self, tgt_sr, device, is_half):
14
- self.sr = 16000 # hubert输入采样率
15
- self.window = 160 # 每帧点数
16
- self.t_pad = self.sr * x_pad # 每条前后pad时间
17
- self.t_pad_tgt = tgt_sr * x_pad
18
- self.t_pad2 = self.t_pad * 2
19
- self.t_query = self.sr * x_query # 查询切点前后查询时间
20
- self.t_center = self.sr * x_center # 查询切点位置
21
- self.t_max = self.sr * x_max # 免查询时长阈值
22
- self.device = device
23
- self.is_half = is_half
24
-
25
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
26
- time_step = self.window / self.sr * 1000
27
- f0_min = 50
28
- f0_max = 1100
29
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
30
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
31
- if f0_method == "pm":
32
- f0 = (
33
- parselmouth.Sound(x, self.sr)
34
- .to_pitch_ac(
35
- time_step=time_step / 1000,
36
- voicing_threshold=0.6,
37
- pitch_floor=f0_min,
38
- pitch_ceiling=f0_max,
39
- )
40
- .selected_array["frequency"]
41
- )
42
- pad_size = (p_len - len(f0) + 1) // 2
43
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
44
- f0 = np.pad(
45
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
46
- )
47
- elif f0_method == "harvest":
48
- f0, t = pyworld.harvest(
49
- x.astype(np.double),
50
- fs=self.sr,
51
- f0_ceil=f0_max,
52
- f0_floor=f0_min,
53
- frame_period=10,
54
- )
55
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
56
- f0 = signal.medfilt(f0, 3)
57
- f0 *= pow(2, f0_up_key / 12)
58
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
59
- tf0 = self.sr // self.window # 每秒f0点数
60
- if inp_f0 is not None:
61
- delta_t = np.round(
62
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
63
- ).astype("int16")
64
- replace_f0 = np.interp(
65
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
66
- )
67
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
68
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
69
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
70
- f0bak = f0.copy()
71
- f0_mel = 1127 * np.log(1 + f0 / 700)
72
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
73
- f0_mel_max - f0_mel_min
74
- ) + 1
75
- f0_mel[f0_mel <= 1] = 1
76
- f0_mel[f0_mel > 255] = 255
77
- f0_coarse = np.rint(f0_mel).astype(np.int)
78
- return f0_coarse, f0bak # 1-0
79
-
80
- def vc(
81
- self,
82
- model,
83
- net_g,
84
- sid,
85
- audio0,
86
- pitch,
87
- pitchf,
88
- times,
89
- index,
90
- big_npy,
91
- index_rate,
92
- ): # ,file_index,file_big_npy
93
- feats = torch.from_numpy(audio0)
94
- if self.is_half:
95
- feats = feats.half()
96
- else:
97
- feats = feats.float()
98
- if feats.dim() == 2: # double channels
99
- feats = feats.mean(-1)
100
- assert feats.dim() == 1, feats.dim()
101
- feats = feats.view(1, -1)
102
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
103
-
104
- inputs = {
105
- "source": feats.to(self.device),
106
- "padding_mask": padding_mask,
107
- "output_layer": 9, # layer 9
108
- }
109
- t0 = ttime()
110
- with torch.no_grad():
111
- logits = model.extract_features(**inputs)
112
- feats = model.final_proj(logits[0])
113
-
114
- if (
115
- isinstance(index, type(None)) == False
116
- and isinstance(big_npy, type(None)) == False
117
- and index_rate != 0
118
- ):
119
- npy = feats[0].cpu().numpy()
120
- if self.is_half:
121
- npy = npy.astype("float32")
122
- _, I = index.search(npy, 1)
123
- npy = big_npy[I.squeeze()]
124
- if self.is_half:
125
- npy = npy.astype("float16")
126
- feats = (
127
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
128
- + (1 - index_rate) * feats
129
- )
130
-
131
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
132
- t1 = ttime()
133
- p_len = audio0.shape[0] // self.window
134
- if feats.shape[1] < p_len:
135
- p_len = feats.shape[1]
136
- if pitch != None and pitchf != None:
137
- pitch = pitch[:, :p_len]
138
- pitchf = pitchf[:, :p_len]
139
- p_len = torch.tensor([p_len], device=self.device).long()
140
- with torch.no_grad():
141
- if pitch != None and pitchf != None:
142
- audio1 = (
143
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
144
- .data.cpu()
145
- .float()
146
- .numpy()
147
- .astype(np.int16)
148
- )
149
- else:
150
- audio1 = (
151
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
152
- .data.cpu()
153
- .float()
154
- .numpy()
155
- .astype(np.int16)
156
- )
157
- del feats, p_len, padding_mask
158
- if torch.cuda.is_available():
159
- torch.cuda.empty_cache()
160
- t2 = ttime()
161
- times[0] += t1 - t0
162
- times[2] += t2 - t1
163
- return audio1
164
-
165
- def pipeline(
166
- self,
167
- model,
168
- net_g,
169
- sid,
170
- audio,
171
- times,
172
- f0_up_key,
173
- f0_method,
174
- file_index,
175
- file_big_npy,
176
- index_rate,
177
- if_f0,
178
- f0_file=None,
179
- ):
180
- if (
181
- file_big_npy != ""
182
- and file_index != ""
183
- and os.path.exists(file_big_npy) == True
184
- and os.path.exists(file_index) == True
185
- and index_rate != 0
186
- ):
187
- try:
188
- index = faiss.read_index(file_index)
189
- big_npy = np.load(file_big_npy)
190
- except:
191
- traceback.print_exc()
192
- index = big_npy = None
193
- else:
194
- index = big_npy = None
195
- print("Feature retrieval library doesn't exist or ratio is 0")
196
- audio = signal.filtfilt(bh, ah, audio)
197
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
198
- opt_ts = []
199
- if audio_pad.shape[0] > self.t_max:
200
- audio_sum = np.zeros_like(audio)
201
- for i in range(self.window):
202
- audio_sum += audio_pad[i : i - self.window]
203
- for t in range(self.t_center, audio.shape[0], self.t_center):
204
- opt_ts.append(
205
- t
206
- - self.t_query
207
- + np.where(
208
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
209
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
210
- )[0][0]
211
- )
212
- s = 0
213
- audio_opt = []
214
- t = None
215
- t1 = ttime()
216
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
217
- p_len = audio_pad.shape[0] // self.window
218
- inp_f0 = None
219
- if hasattr(f0_file, "name") == True:
220
- try:
221
- with open(f0_file.name, "r") as f:
222
- lines = f.read().strip("\n").split("\n")
223
- inp_f0 = []
224
- for line in lines:
225
- inp_f0.append([float(i) for i in line.split(",")])
226
- inp_f0 = np.array(inp_f0, dtype="float32")
227
- except:
228
- traceback.print_exc()
229
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
230
- pitch, pitchf = None, None
231
- if if_f0 == 1:
232
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
233
- pitch = pitch[:p_len]
234
- pitchf = pitchf[:p_len]
235
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
236
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
237
- t2 = ttime()
238
- times[1] += t2 - t1
239
- for t in opt_ts:
240
- t = t // self.window * self.window
241
- if if_f0 == 1:
242
- audio_opt.append(
243
- self.vc(
244
- model,
245
- net_g,
246
- sid,
247
- audio_pad[s : t + self.t_pad2 + self.window],
248
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
249
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
250
- times,
251
- index,
252
- big_npy,
253
- index_rate,
254
- )[self.t_pad_tgt : -self.t_pad_tgt]
255
- )
256
- else:
257
- audio_opt.append(
258
- self.vc(
259
- model,
260
- net_g,
261
- sid,
262
- audio_pad[s : t + self.t_pad2 + self.window],
263
- None,
264
- None,
265
- times,
266
- index,
267
- big_npy,
268
- index_rate,
269
- )[self.t_pad_tgt : -self.t_pad_tgt]
270
- )
271
- s = t
272
- if if_f0 == 1:
273
- audio_opt.append(
274
- self.vc(
275
- model,
276
- net_g,
277
- sid,
278
- audio_pad[t:],
279
- pitch[:, t // self.window :] if t is not None else pitch,
280
- pitchf[:, t // self.window :] if t is not None else pitchf,
281
- times,
282
- index,
283
- big_npy,
284
- index_rate,
285
- )[self.t_pad_tgt : -self.t_pad_tgt]
286
- )
287
- else:
288
- audio_opt.append(
289
- self.vc(
290
- model,
291
- net_g,
292
- sid,
293
- audio_pad[t:],
294
- None,
295
- None,
296
- times,
297
- index,
298
- big_npy,
299
- index_rate,
300
- )[self.t_pad_tgt : -self.t_pad_tgt]
301
- )
302
- audio_opt = np.concatenate(audio_opt)
303
- del pitch, pitchf, sid
304
- if torch.cuda.is_available():
305
- torch.cuda.empty_cache()
306
- return audio_opt
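Note: a small sketch of the input high-pass filter applied at the top of the deleted pipeline (the module-level butter/filtfilt pair). It assumes scipy and numpy; the audio buffer is placeholder data.

import numpy as np
from scipy import signal

bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)   # same coefficients as the module above
audio = np.random.randn(16000)                               # one second of dummy audio at 16 kHz
filtered = signal.filtfilt(bh, ah, audio)                    # zero-phase high-pass, removes DC/rumble below ~48 Hz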
 
 
spaces/Chomkwoy/Nilkessye/utils/hanja.py DELETED
@@ -1,65 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import re
3
- import json
4
-
5
- LHan = [[0x2E80, 0x2E99], # Han # So [26] CJK RADICAL REPEAT, CJK RADICAL RAP
6
- [0x2E9B, 0x2EF3], # Han # So [89] CJK RADICAL CHOKE, CJK RADICAL C-SIMPLIFIED TURTLE
7
- [0x2F00, 0x2FD5], # Han # So [214] KANGXI RADICAL ONE, KANGXI RADICAL FLUTE
8
- 0x3005, # Han # Lm IDEOGRAPHIC ITERATION MARK
9
- 0x3007, # Han # Nl IDEOGRAPHIC NUMBER ZERO
10
- [0x3021, 0x3029], # Han # Nl [9] HANGZHOU NUMERAL ONE, HANGZHOU NUMERAL NINE
11
- [0x3038, 0x303A], # Han # Nl [3] HANGZHOU NUMERAL TEN, HANGZHOU NUMERAL THIRTY
12
- 0x303B, # Han # Lm VERTICAL IDEOGRAPHIC ITERATION MARK
13
- [0x3400, 0x4DB5], # Han # Lo [6582] CJK UNIFIED IDEOGRAPH-3400, CJK UNIFIED IDEOGRAPH-4DB5
14
- [0x4E00, 0x9FC3], # Han # Lo [20932] CJK UNIFIED IDEOGRAPH-4E00, CJK UNIFIED IDEOGRAPH-9FC3
15
- [0xF900, 0xFA2D], # Han # Lo [302] CJK COMPATIBILITY IDEOGRAPH-F900, CJK COMPATIBILITY IDEOGRAPH-FA2D
16
- [0xFA30, 0xFA6A], # Han # Lo [59] CJK COMPATIBILITY IDEOGRAPH-FA30, CJK COMPATIBILITY IDEOGRAPH-FA6A
17
- [0xFA70, 0xFAD9], # Han # Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA70, CJK COMPATIBILITY IDEOGRAPH-FAD9
18
- [0x20000, 0x2A6D6], # Han # Lo [42711] CJK UNIFIED IDEOGRAPH-20000, CJK UNIFIED IDEOGRAPH-2A6D6
19
- [0x2F800, 0x2FA1D]] # Han # Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800, CJK COMPATIBILITY IDEOGRAPH-2FA1D
20
-
21
-
22
- def build_re():
23
- L = []
24
- for i in LHan:
25
- if isinstance(i, list):
26
- f, t = i
27
- f = chr(f)
28
- t = chr(t)
29
- L.append('%s-%s' % (f, t))
30
-
31
- else:
32
- L.append(chr(i))
33
-
34
- RE = '[%s]' % ''.join(L)
35
- return re.compile(RE, re.UNICODE)
36
-
37
-
38
- def load_dgju():
39
- dgju_list = []
40
- with open("동국정운.json", "r") as fp:
41
- dgju = json.load(fp)
42
- head = None
43
- for line in dgju:
44
- if line['orig_tag'] == 'head':
45
- head = line['text']
46
- elif line['orig_tag'] == 'chr':
47
- dgju_list.append((head, line['html']))
48
-
49
- dgju_dict = {}
50
- for pron, char in dgju_list:
51
- if pron.startswith('·'):
52
- pron = pron[1:]+'H'
53
- elif pron.startswith(':'):
54
- pron = pron[1:]+'R'
55
- else:
56
- pron = pron+'L'
57
- pron = pron.replace('ᆑ', 'ywe').replace('ᆒ', 'ywey').replace('.', '')
58
- for x in re.finditer(r'[^a-zA-Z]', pron):
59
- print(pron)
60
- for x in re.finditer(r'<glyph>([^<]*)</glyph>', char):
61
- hanja = str(x.group(1))
62
- if hanja not in dgju_dict:
63
- dgju_dict[hanja] = []
64
- dgju_dict[hanja].append(pron)
65
- return dgju_dict
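Note: a minimal sketch of the build_re() helper above, which compiles a single character class covering the listed Han ranges. The import path assumes the utils package shown; the sample string is illustrative.

from utils.hanja import build_re

han_re = build_re()
sample = "訓民正音 was promulgated in 1446; 한글 is its modern name"
print(han_re.findall(sample))   # ['訓', '民', '正', '音'] (Hangul and Latin are not matched)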
 
 
spaces/CjangCjengh/Shanghainese-TTS/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Shanghainese TTS
- emoji: 📈
- colorFrom: purple
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.4.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/dataframe.py DELETED
@@ -1,304 +0,0 @@
1
- """gr.Dataframe() component"""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import TYPE_CHECKING, Any, Callable, Literal
6
-
7
- import numpy as np
8
- import pandas as pd
9
- from gradio_client.documentation import document, set_documentation_group
10
- from gradio_client.serializing import JSONSerializable
11
-
12
- from gradio import utils
13
- from gradio.components.base import IOComponent, _Keywords
14
- from gradio.events import (
15
- Changeable,
16
- EventListenerMethod,
17
- Inputable,
18
- Selectable,
19
- )
20
-
21
- if TYPE_CHECKING:
22
- from typing import TypedDict
23
-
24
- class DataframeData(TypedDict):
25
- headers: list[str]
26
- data: list[list[str | int | bool]]
27
-
28
-
29
- set_documentation_group("component")
30
-
31
-
32
- @document()
33
- class Dataframe(Changeable, Inputable, Selectable, IOComponent, JSONSerializable):
34
- """
35
- Accepts or displays 2D input through a spreadsheet-like component for dataframes.
36
- Preprocessing: passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type`
37
- Postprocessing: expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, a {Dict} with keys `data` (and optionally `headers`), or {str} path to a csv, which is rendered in the spreadsheet.
38
- Examples-format: a {str} filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data.
39
- Demos: filter_records, matrix_transpose, tax_calculator
40
- """
41
-
42
- markdown_parser = None
43
-
44
- def __init__(
45
- self,
46
- value: list[list[Any]] | Callable | None = None,
47
- *,
48
- headers: list[str] | None = None,
49
- row_count: int | tuple[int, str] = (1, "dynamic"),
50
- col_count: int | tuple[int, str] | None = None,
51
- datatype: str | list[str] = "str",
52
- type: Literal["pandas", "numpy", "array"] = "pandas",
53
- max_rows: int | None = 20,
54
- max_cols: int | None = None,
55
- overflow_row_behaviour: Literal["paginate", "show_ends"] = "paginate",
56
- label: str | None = None,
57
- every: float | None = None,
58
- show_label: bool | None = None,
59
- scale: int | None = None,
60
- min_width: int = 160,
61
- interactive: bool | None = None,
62
- visible: bool = True,
63
- elem_id: str | None = None,
64
- elem_classes: list[str] | str | None = None,
65
- wrap: bool = False,
66
- **kwargs,
67
- ):
68
- """
69
- Parameters:
70
- value: Default value as a 2-dimensional list of values. If callable, the function will be called whenever the app loads to set the initial value of the component.
71
- headers: List of str header names. If None, no headers are shown.
72
- row_count: Limit number of rows for input and decide whether user can create new rows. The first element of the tuple is an `int`, the row count; the second should be 'fixed' or 'dynamic', the new row behaviour. If an `int` is passed the rows default to 'dynamic'
73
- col_count: Limit number of columns for input and decide whether user can create new columns. The first element of the tuple is an `int`, the number of columns; the second should be 'fixed' or 'dynamic', the new column behaviour. If an `int` is passed the columns default to 'dynamic'
74
- datatype: Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", "date", and "markdown".
75
- type: Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array.
76
- label: component name in interface.
77
- max_rows: Maximum number of rows to display at once. Set to None for infinite.
78
- max_cols: Maximum number of columns to display at once. Set to None for infinite.
79
- overflow_row_behaviour: If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows.
80
- label: component name in interface.
81
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
82
- show_label: if True, will display label.
83
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
84
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
85
- interactive: if True, will allow users to edit the dataframe; if False, can only be used to display data. If not provided, this is inferred based on whether the component is used as an input or output.
86
- visible: If False, component will be hidden.
87
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
88
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
89
- wrap: if True text in table cells will wrap when appropriate, if False the table will scroll horizontally. Defaults to False.
90
- """
91
-
92
- self.wrap = wrap
93
- self.row_count = self.__process_counts(row_count)
94
- self.col_count = self.__process_counts(
95
- col_count, len(headers) if headers else 3
96
- )
97
-
98
- self.__validate_headers(headers, self.col_count[0])
99
-
100
- self.headers = (
101
- headers if headers is not None else list(range(1, self.col_count[0] + 1))
102
- )
103
- self.datatype = (
104
- datatype if isinstance(datatype, list) else [datatype] * self.col_count[0]
105
- )
106
- valid_types = ["pandas", "numpy", "array"]
107
- if type not in valid_types:
108
- raise ValueError(
109
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
110
- )
111
- self.type = type
112
- values = {
113
- "str": "",
114
- "number": 0,
115
- "bool": False,
116
- "date": "01/01/1970",
117
- "markdown": "",
118
- "html": "",
119
- }
120
- column_dtypes = (
121
- [datatype] * self.col_count[0] if isinstance(datatype, str) else datatype
122
- )
123
- self.empty_input = [
124
- [values[c] for c in column_dtypes] for _ in range(self.row_count[0])
125
- ]
126
-
127
- self.max_rows = max_rows
128
- self.max_cols = max_cols
129
- self.overflow_row_behaviour = overflow_row_behaviour
130
- self.select: EventListenerMethod
131
- """
132
- Event listener for when the user selects cell within Dataframe.
133
- Uses event data gradio.SelectData to carry `value` referring to value of selected cell, and `index` tuple to refer to index row and column.
134
- See EventData documentation on how to use this event data.
135
- """
136
- IOComponent.__init__(
137
- self,
138
- label=label,
139
- every=every,
140
- show_label=show_label,
141
- scale=scale,
142
- min_width=min_width,
143
- interactive=interactive,
144
- visible=visible,
145
- elem_id=elem_id,
146
- elem_classes=elem_classes,
147
- value=value,
148
- **kwargs,
149
- )
150
-
151
- def get_config(self):
152
- return {
153
- "headers": self.headers,
154
- "datatype": self.datatype,
155
- "row_count": self.row_count,
156
- "col_count": self.col_count,
157
- "value": self.value,
158
- "max_rows": self.max_rows,
159
- "max_cols": self.max_cols,
160
- "overflow_row_behaviour": self.overflow_row_behaviour,
161
- "wrap": self.wrap,
162
- **IOComponent.get_config(self),
163
- }
164
-
165
- @staticmethod
166
- def update(
167
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
168
- max_rows: int | None = None,
169
- max_cols: str | None = None,
170
- label: str | None = None,
171
- show_label: bool | None = None,
172
- scale: int | None = None,
173
- min_width: int | None = None,
174
- interactive: bool | None = None,
175
- visible: bool | None = None,
176
- ):
177
- return {
178
- "max_rows": max_rows,
179
- "max_cols": max_cols,
180
- "label": label,
181
- "show_label": show_label,
182
- "scale": scale,
183
- "min_width": min_width,
184
- "interactive": interactive,
185
- "visible": visible,
186
- "value": value,
187
- "__type__": "update",
188
- }
189
-
190
- def preprocess(self, x: DataframeData):
191
- """
192
- Parameters:
193
- x: 2D array of str, numeric, or bool data
194
- Returns:
195
- Dataframe in requested format
196
- """
197
- if self.type == "pandas":
198
- if x.get("headers") is not None:
199
- return pd.DataFrame(x["data"], columns=x.get("headers"))
200
- else:
201
- return pd.DataFrame(x["data"])
202
- if self.type == "numpy":
203
- return np.array(x["data"])
204
- elif self.type == "array":
205
- return x["data"]
206
- else:
207
- raise ValueError(
208
- "Unknown type: "
209
- + str(self.type)
210
- + ". Please choose from: 'pandas', 'numpy', 'array'."
211
- )
212
-
213
- def postprocess(
214
- self, y: str | pd.DataFrame | np.ndarray | list[list[str | float]] | dict
215
- ) -> dict:
216
- """
217
- Parameters:
218
- y: dataframe in given format
219
- Returns:
220
- JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
221
- """
222
- if y is None:
223
- return self.postprocess(self.empty_input)
224
- if isinstance(y, dict):
225
- return y
226
- if isinstance(y, str):
227
- dataframe = pd.read_csv(y)
228
- return {
229
- "headers": list(dataframe.columns),
230
- "data": Dataframe.__process_markdown(
231
- dataframe.to_dict(orient="split")["data"], self.datatype
232
- ),
233
- }
234
- if isinstance(y, pd.DataFrame):
235
- return {
236
- "headers": list(y.columns), # type: ignore
237
- "data": Dataframe.__process_markdown(
238
- y.to_dict(orient="split")["data"], self.datatype # type: ignore
239
- ),
240
- }
241
- if isinstance(y, (np.ndarray, list)):
242
- if len(y) == 0:
243
- return self.postprocess([[]])
244
- if isinstance(y, np.ndarray):
245
- y = y.tolist()
246
- assert isinstance(y, list), "output cannot be converted to list"
247
-
248
- _headers = self.headers
249
-
250
- if len(self.headers) < len(y[0]):
251
- _headers = [
252
- *self.headers,
253
- *list(range(len(self.headers) + 1, len(y[0]) + 1)),
254
- ]
255
- elif len(self.headers) > len(y[0]):
256
- _headers = self.headers[: len(y[0])]
257
-
258
- return {
259
- "headers": _headers,
260
- "data": Dataframe.__process_markdown(y, self.datatype),
261
- }
262
- raise ValueError("Cannot process value as a Dataframe")
263
-
264
- @staticmethod
265
- def __process_counts(count, default=3) -> tuple[int, str]:
266
- if count is None:
267
- return (default, "dynamic")
268
- if type(count) == int or type(count) == float:
269
- return (int(count), "dynamic")
270
- else:
271
- return count
272
-
273
- @staticmethod
274
- def __validate_headers(headers: list[str] | None, col_count: int):
275
- if headers is not None and len(headers) != col_count:
276
- raise ValueError(
277
- f"The length of the headers list must be equal to the col_count int.\n"
278
- f"The column count is set to {col_count} but `headers` has {len(headers)} items. "
279
- f"Check the values passed to `col_count` and `headers`."
280
- )
281
-
282
- @classmethod
283
- def __process_markdown(cls, data: list[list[Any]], datatype: list[str]):
284
- if "markdown" not in datatype:
285
- return data
286
-
287
- if cls.markdown_parser is None:
288
- cls.markdown_parser = utils.get_markdown_parser()
289
-
290
- for i in range(len(data)):
291
- for j in range(len(data[i])):
292
- if datatype[j] == "markdown":
293
- data[i][j] = cls.markdown_parser.render(data[i][j])
294
-
295
- return data
296
-
297
- def as_example(self, input_data: pd.DataFrame | np.ndarray | str | None):
298
- if input_data is None:
299
- return ""
300
- elif isinstance(input_data, pd.DataFrame):
301
- return input_data.head(n=5).to_dict(orient="split")["data"] # type: ignore
302
- elif isinstance(input_data, np.ndarray):
303
- return input_data.tolist()
304
- return input_data
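A minimal construction sketch for the component defined above, assuming the Gradio 3.x API that this file implements; the column names, dtypes, and counts are placeholder values.

import gradio as gr

with gr.Blocks() as demo:
    df = gr.Dataframe(
        headers=["name", "age", "member"],   # length must equal col_count (see __validate_headers)
        datatype=["str", "number", "bool"],  # one dtype per column
        row_count=(3, "dynamic"),            # (count, "dynamic" | "fixed"), see __process_counts
        col_count=3,                         # a bare int is normalized to (3, "dynamic")
        type="pandas",                       # "pandas" | "numpy" | "array"
        wrap=False,
    )

demo.launch()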
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_async/http11.py DELETED
@@ -1,331 +0,0 @@
1
- import enum
2
- import logging
3
- import time
4
- from types import TracebackType
5
- from typing import (
6
- AsyncIterable,
7
- AsyncIterator,
8
- List,
9
- Optional,
10
- Tuple,
11
- Type,
12
- Union,
13
- cast,
14
- )
15
-
16
- import h11
17
-
18
- from .._backends.base import AsyncNetworkStream
19
- from .._exceptions import (
20
- ConnectionNotAvailable,
21
- LocalProtocolError,
22
- RemoteProtocolError,
23
- map_exceptions,
24
- )
25
- from .._models import Origin, Request, Response
26
- from .._synchronization import AsyncLock, AsyncShieldCancellation
27
- from .._trace import Trace
28
- from .interfaces import AsyncConnectionInterface
29
-
30
- logger = logging.getLogger("httpcore.http11")
31
-
32
-
33
- # A subset of `h11.Event` types supported by `_send_event`
34
- H11SendEvent = Union[
35
- h11.Request,
36
- h11.Data,
37
- h11.EndOfMessage,
38
- ]
39
-
40
-
41
- class HTTPConnectionState(enum.IntEnum):
42
- NEW = 0
43
- ACTIVE = 1
44
- IDLE = 2
45
- CLOSED = 3
46
-
47
-
48
- class AsyncHTTP11Connection(AsyncConnectionInterface):
49
- READ_NUM_BYTES = 64 * 1024
50
- MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024
51
-
52
- def __init__(
53
- self,
54
- origin: Origin,
55
- stream: AsyncNetworkStream,
56
- keepalive_expiry: Optional[float] = None,
57
- ) -> None:
58
- self._origin = origin
59
- self._network_stream = stream
60
- self._keepalive_expiry: Optional[float] = keepalive_expiry
61
- self._expire_at: Optional[float] = None
62
- self._state = HTTPConnectionState.NEW
63
- self._state_lock = AsyncLock()
64
- self._request_count = 0
65
- self._h11_state = h11.Connection(
66
- our_role=h11.CLIENT,
67
- max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,
68
- )
69
-
70
- async def handle_async_request(self, request: Request) -> Response:
71
- if not self.can_handle_request(request.url.origin):
72
- raise RuntimeError(
73
- f"Attempted to send request to {request.url.origin} on connection "
74
- f"to {self._origin}"
75
- )
76
-
77
- async with self._state_lock:
78
- if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):
79
- self._request_count += 1
80
- self._state = HTTPConnectionState.ACTIVE
81
- self._expire_at = None
82
- else:
83
- raise ConnectionNotAvailable()
84
-
85
- try:
86
- kwargs = {"request": request}
87
- async with Trace("send_request_headers", logger, request, kwargs) as trace:
88
- await self._send_request_headers(**kwargs)
89
- async with Trace("send_request_body", logger, request, kwargs) as trace:
90
- await self._send_request_body(**kwargs)
91
- async with Trace(
92
- "receive_response_headers", logger, request, kwargs
93
- ) as trace:
94
- (
95
- http_version,
96
- status,
97
- reason_phrase,
98
- headers,
99
- ) = await self._receive_response_headers(**kwargs)
100
- trace.return_value = (
101
- http_version,
102
- status,
103
- reason_phrase,
104
- headers,
105
- )
106
-
107
- return Response(
108
- status=status,
109
- headers=headers,
110
- content=HTTP11ConnectionByteStream(self, request),
111
- extensions={
112
- "http_version": http_version,
113
- "reason_phrase": reason_phrase,
114
- "network_stream": self._network_stream,
115
- },
116
- )
117
- except BaseException as exc:
118
- with AsyncShieldCancellation():
119
- async with Trace("response_closed", logger, request) as trace:
120
- await self._response_closed()
121
- raise exc
122
-
123
- # Sending the request...
124
-
125
- async def _send_request_headers(self, request: Request) -> None:
126
- timeouts = request.extensions.get("timeout", {})
127
- timeout = timeouts.get("write", None)
128
-
129
- with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
130
- event = h11.Request(
131
- method=request.method,
132
- target=request.url.target,
133
- headers=request.headers,
134
- )
135
- await self._send_event(event, timeout=timeout)
136
-
137
- async def _send_request_body(self, request: Request) -> None:
138
- timeouts = request.extensions.get("timeout", {})
139
- timeout = timeouts.get("write", None)
140
-
141
- assert isinstance(request.stream, AsyncIterable)
142
- async for chunk in request.stream:
143
- event = h11.Data(data=chunk)
144
- await self._send_event(event, timeout=timeout)
145
-
146
- await self._send_event(h11.EndOfMessage(), timeout=timeout)
147
-
148
- async def _send_event(
149
- self, event: h11.Event, timeout: Optional[float] = None
150
- ) -> None:
151
- bytes_to_send = self._h11_state.send(event)
152
- if bytes_to_send is not None:
153
- await self._network_stream.write(bytes_to_send, timeout=timeout)
154
-
155
- # Receiving the response...
156
-
157
- async def _receive_response_headers(
158
- self, request: Request
159
- ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
160
- timeouts = request.extensions.get("timeout", {})
161
- timeout = timeouts.get("read", None)
162
-
163
- while True:
164
- event = await self._receive_event(timeout=timeout)
165
- if isinstance(event, h11.Response):
166
- break
167
- if (
168
- isinstance(event, h11.InformationalResponse)
169
- and event.status_code == 101
170
- ):
171
- break
172
-
173
- http_version = b"HTTP/" + event.http_version
174
-
175
- # h11 version 0.11+ supports a `raw_items` interface to get the
176
- # raw header casing, rather than the enforced lowercase headers.
177
- headers = event.headers.raw_items()
178
-
179
- return http_version, event.status_code, event.reason, headers
180
-
181
- async def _receive_response_body(self, request: Request) -> AsyncIterator[bytes]:
182
- timeouts = request.extensions.get("timeout", {})
183
- timeout = timeouts.get("read", None)
184
-
185
- while True:
186
- event = await self._receive_event(timeout=timeout)
187
- if isinstance(event, h11.Data):
188
- yield bytes(event.data)
189
- elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
190
- break
191
-
192
- async def _receive_event(
193
- self, timeout: Optional[float] = None
194
- ) -> Union[h11.Event, Type[h11.PAUSED]]:
195
- while True:
196
- with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
197
- event = self._h11_state.next_event()
198
-
199
- if event is h11.NEED_DATA:
200
- data = await self._network_stream.read(
201
- self.READ_NUM_BYTES, timeout=timeout
202
- )
203
-
204
- # If we feed this case through h11 we'll raise an exception like:
205
- #
206
- # httpcore.RemoteProtocolError: can't handle event type
207
- # ConnectionClosed when role=SERVER and state=SEND_RESPONSE
208
- #
209
- # Which is accurate, but not very informative from an end-user
210
- # perspective. Instead we handle this case distinctly and treat
211
- # it as a ConnectError.
212
- if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
213
- msg = "Server disconnected without sending a response."
214
- raise RemoteProtocolError(msg)
215
-
216
- self._h11_state.receive_data(data)
217
- else:
218
-                # mypy fails to narrow the type in the if statement above
219
- return cast(Union[h11.Event, Type[h11.PAUSED]], event)
220
-
221
- async def _response_closed(self) -> None:
222
- async with self._state_lock:
223
- if (
224
- self._h11_state.our_state is h11.DONE
225
- and self._h11_state.their_state is h11.DONE
226
- ):
227
- self._state = HTTPConnectionState.IDLE
228
- self._h11_state.start_next_cycle()
229
- if self._keepalive_expiry is not None:
230
- now = time.monotonic()
231
- self._expire_at = now + self._keepalive_expiry
232
- else:
233
- await self.aclose()
234
-
235
- # Once the connection is no longer required...
236
-
237
- async def aclose(self) -> None:
238
- # Note that this method unilaterally closes the connection, and does
239
- # not have any kind of locking in place around it.
240
- self._state = HTTPConnectionState.CLOSED
241
- await self._network_stream.aclose()
242
-
243
- # The AsyncConnectionInterface methods provide information about the state of
244
- # the connection, allowing for a connection pooling implementation to
245
- # determine when to reuse and when to close the connection...
246
-
247
- def can_handle_request(self, origin: Origin) -> bool:
248
- return origin == self._origin
249
-
250
- def is_available(self) -> bool:
251
- # Note that HTTP/1.1 connections in the "NEW" state are not treated as
252
- # being "available". The control flow which created the connection will
253
- # be able to send an outgoing request, but the connection will not be
254
- # acquired from the connection pool for any other request.
255
- return self._state == HTTPConnectionState.IDLE
256
-
257
- def has_expired(self) -> bool:
258
- now = time.monotonic()
259
- keepalive_expired = self._expire_at is not None and now > self._expire_at
260
-
261
- # If the HTTP connection is idle but the socket is readable, then the
262
- # only valid state is that the socket is about to return b"", indicating
263
- # a server-initiated disconnect.
264
- server_disconnected = (
265
- self._state == HTTPConnectionState.IDLE
266
- and self._network_stream.get_extra_info("is_readable")
267
- )
268
-
269
- return keepalive_expired or server_disconnected
270
-
271
- def is_idle(self) -> bool:
272
- return self._state == HTTPConnectionState.IDLE
273
-
274
- def is_closed(self) -> bool:
275
- return self._state == HTTPConnectionState.CLOSED
276
-
277
- def info(self) -> str:
278
- origin = str(self._origin)
279
- return (
280
- f"{origin!r}, HTTP/1.1, {self._state.name}, "
281
- f"Request Count: {self._request_count}"
282
- )
283
-
284
- def __repr__(self) -> str:
285
- class_name = self.__class__.__name__
286
- origin = str(self._origin)
287
- return (
288
- f"<{class_name} [{origin!r}, {self._state.name}, "
289
- f"Request Count: {self._request_count}]>"
290
- )
291
-
292
- # These context managers are not used in the standard flow, but are
293
- # useful for testing or working with connection instances directly.
294
-
295
- async def __aenter__(self) -> "AsyncHTTP11Connection":
296
- return self
297
-
298
- async def __aexit__(
299
- self,
300
- exc_type: Optional[Type[BaseException]] = None,
301
- exc_value: Optional[BaseException] = None,
302
- traceback: Optional[TracebackType] = None,
303
- ) -> None:
304
- await self.aclose()
305
-
306
-
307
- class HTTP11ConnectionByteStream:
308
- def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None:
309
- self._connection = connection
310
- self._request = request
311
- self._closed = False
312
-
313
- async def __aiter__(self) -> AsyncIterator[bytes]:
314
- kwargs = {"request": self._request}
315
- try:
316
- async with Trace("receive_response_body", logger, self._request, kwargs):
317
- async for chunk in self._connection._receive_response_body(**kwargs):
318
- yield chunk
319
- except BaseException as exc:
320
- # If we get an exception while streaming the response,
321
- # we want to close the response (and possibly the connection)
322
- # before raising that exception.
323
- with AsyncShieldCancellation():
324
- await self.aclose()
325
- raise exc
326
-
327
- async def aclose(self) -> None:
328
- if not self._closed:
329
- self._closed = True
330
- async with Trace("response_closed", logger, self._request):
331
- await self._connection._response_closed()
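The connection class above is normally driven by httpcore's connection pool, but, as its closing comment notes, the context-manager form is useful for working with connection instances directly. A minimal sketch of that direct flow, assuming the TrioBackend from the _backends/trio.py file deleted further below and httpcore's public Origin/Request models; example.com and the request details are placeholders, and this is not the library's documented entry point.

import trio
from httpcore._async.http11 import AsyncHTTP11Connection
from httpcore._backends.trio import TrioBackend
from httpcore._models import Origin, Request

async def main() -> None:
    origin = Origin(scheme=b"http", host=b"example.com", port=80)
    stream = await TrioBackend().connect_tcp("example.com", 80, timeout=10.0)
    async with AsyncHTTP11Connection(origin=origin, stream=stream) as conn:
        request = Request(
            method=b"GET",
            url="http://example.com/",
            headers=[(b"Host", b"example.com")],
        )
        response = await conn.handle_async_request(request)
        body = await response.aread()  # drains the HTTP11ConnectionByteStream
        print(response.status, len(body))

trio.run(main)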
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_backends/trio.py DELETED
@@ -1,161 +0,0 @@
1
- import ssl
2
- import typing
3
-
4
- import trio
5
-
6
- from .._exceptions import (
7
- ConnectError,
8
- ConnectTimeout,
9
- ExceptionMapping,
10
- ReadError,
11
- ReadTimeout,
12
- WriteError,
13
- WriteTimeout,
14
- map_exceptions,
15
- )
16
- from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
17
-
18
-
19
- class TrioStream(AsyncNetworkStream):
20
- def __init__(self, stream: trio.abc.Stream) -> None:
21
- self._stream = stream
22
-
23
- async def read(
24
- self, max_bytes: int, timeout: typing.Optional[float] = None
25
- ) -> bytes:
26
- timeout_or_inf = float("inf") if timeout is None else timeout
27
- exc_map: ExceptionMapping = {
28
- trio.TooSlowError: ReadTimeout,
29
- trio.BrokenResourceError: ReadError,
30
- trio.ClosedResourceError: ReadError,
31
- }
32
- with map_exceptions(exc_map):
33
- with trio.fail_after(timeout_or_inf):
34
- data: bytes = await self._stream.receive_some(max_bytes=max_bytes)
35
- return data
36
-
37
- async def write(
38
- self, buffer: bytes, timeout: typing.Optional[float] = None
39
- ) -> None:
40
- if not buffer:
41
- return
42
-
43
- timeout_or_inf = float("inf") if timeout is None else timeout
44
- exc_map: ExceptionMapping = {
45
- trio.TooSlowError: WriteTimeout,
46
- trio.BrokenResourceError: WriteError,
47
- trio.ClosedResourceError: WriteError,
48
- }
49
- with map_exceptions(exc_map):
50
- with trio.fail_after(timeout_or_inf):
51
- await self._stream.send_all(data=buffer)
52
-
53
- async def aclose(self) -> None:
54
- await self._stream.aclose()
55
-
56
- async def start_tls(
57
- self,
58
- ssl_context: ssl.SSLContext,
59
- server_hostname: typing.Optional[str] = None,
60
- timeout: typing.Optional[float] = None,
61
- ) -> AsyncNetworkStream:
62
- timeout_or_inf = float("inf") if timeout is None else timeout
63
- exc_map: ExceptionMapping = {
64
- trio.TooSlowError: ConnectTimeout,
65
- trio.BrokenResourceError: ConnectError,
66
- }
67
- ssl_stream = trio.SSLStream(
68
- self._stream,
69
- ssl_context=ssl_context,
70
- server_hostname=server_hostname,
71
- https_compatible=True,
72
- server_side=False,
73
- )
74
- with map_exceptions(exc_map):
75
- try:
76
- with trio.fail_after(timeout_or_inf):
77
- await ssl_stream.do_handshake()
78
- except Exception as exc: # pragma: nocover
79
- await self.aclose()
80
- raise exc
81
- return TrioStream(ssl_stream)
82
-
83
- def get_extra_info(self, info: str) -> typing.Any:
84
- if info == "ssl_object" and isinstance(self._stream, trio.SSLStream):
85
- # Type checkers cannot see `_ssl_object` attribute because trio._ssl.SSLStream uses __getattr__/__setattr__.
86
- # Tracked at https://github.com/python-trio/trio/issues/542
87
- return self._stream._ssl_object # type: ignore[attr-defined]
88
- if info == "client_addr":
89
- return self._get_socket_stream().socket.getsockname()
90
- if info == "server_addr":
91
- return self._get_socket_stream().socket.getpeername()
92
- if info == "socket":
93
- stream = self._stream
94
- while isinstance(stream, trio.SSLStream):
95
- stream = stream.transport_stream
96
- assert isinstance(stream, trio.SocketStream)
97
- return stream.socket
98
- if info == "is_readable":
99
- socket = self.get_extra_info("socket")
100
- return socket.is_readable()
101
- return None
102
-
103
- def _get_socket_stream(self) -> trio.SocketStream:
104
- stream = self._stream
105
- while isinstance(stream, trio.SSLStream):
106
- stream = stream.transport_stream
107
- assert isinstance(stream, trio.SocketStream)
108
- return stream
109
-
110
-
111
- class TrioBackend(AsyncNetworkBackend):
112
- async def connect_tcp(
113
- self,
114
- host: str,
115
- port: int,
116
- timeout: typing.Optional[float] = None,
117
- local_address: typing.Optional[str] = None,
118
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
119
- ) -> AsyncNetworkStream:
120
- # By default for TCP sockets, trio enables TCP_NODELAY.
121
- # https://trio.readthedocs.io/en/stable/reference-io.html#trio.SocketStream
122
- if socket_options is None:
123
- socket_options = [] # pragma: no cover
124
- timeout_or_inf = float("inf") if timeout is None else timeout
125
- exc_map: ExceptionMapping = {
126
- trio.TooSlowError: ConnectTimeout,
127
- trio.BrokenResourceError: ConnectError,
128
- OSError: ConnectError,
129
- }
130
- with map_exceptions(exc_map):
131
- with trio.fail_after(timeout_or_inf):
132
- stream: trio.abc.Stream = await trio.open_tcp_stream(
133
- host=host, port=port, local_address=local_address
134
- )
135
- for option in socket_options:
136
- stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
137
- return TrioStream(stream)
138
-
139
- async def connect_unix_socket(
140
- self,
141
- path: str,
142
- timeout: typing.Optional[float] = None,
143
- socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
144
- ) -> AsyncNetworkStream: # pragma: nocover
145
- if socket_options is None:
146
- socket_options = []
147
- timeout_or_inf = float("inf") if timeout is None else timeout
148
- exc_map: ExceptionMapping = {
149
- trio.TooSlowError: ConnectTimeout,
150
- trio.BrokenResourceError: ConnectError,
151
- OSError: ConnectError,
152
- }
153
- with map_exceptions(exc_map):
154
- with trio.fail_after(timeout_or_inf):
155
- stream: trio.abc.Stream = await trio.open_unix_socket(path)
156
- for option in socket_options:
157
- stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
158
- return TrioStream(stream)
159
-
160
- async def sleep(self, seconds: float) -> None:
161
- await trio.sleep(seconds) # pragma: nocover
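A stand-alone sketch of the backend above at the raw stream level, assuming it runs under trio; the host and the hand-written request bytes are placeholders.

import trio
from httpcore._backends.trio import TrioBackend

async def main() -> None:
    backend = TrioBackend()
    stream = await backend.connect_tcp("example.com", 80, timeout=10.0)
    try:
        await stream.write(
            b"GET / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n",
            timeout=10.0,
        )
        data = await stream.read(64 * 1024, timeout=10.0)
        print(data.split(b"\r\n", 1)[0])  # status line, e.g. b"HTTP/1.1 200 OK"
    finally:
        await stream.aclose()

trio.run(main)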
 
spaces/DaFujaTyping/hf-Chat-ui/src/lib/server/models.ts DELETED
@@ -1,60 +0,0 @@
1
- import { HF_ACCESS_TOKEN, MODELS } from "$env/static/private";
2
- import { z } from "zod";
3
-
4
- const modelsRaw = z
5
- .array(
6
- z.object({
7
- /** Used as an identifier in DB */
8
- id: z.string().optional(),
9
- /** Used to link to the model page, and for inference */
10
- name: z.string().min(1),
11
- displayName: z.string().min(1).optional(),
12
- description: z.string().min(1).optional(),
13
- websiteUrl: z.string().url().optional(),
14
- datasetName: z.string().min(1).optional(),
15
- userMessageToken: z.string().min(1),
16
- assistantMessageToken: z.string().min(1),
17
- messageEndToken: z.string().min(1).optional(),
18
- preprompt: z.string().default(""),
19
- prepromptUrl: z.string().url().optional(),
20
- promptExamples: z
21
- .array(
22
- z.object({
23
- title: z.string().min(1),
24
- prompt: z.string().min(1),
25
- })
26
- )
27
- .optional(),
28
- endpoints: z
29
- .array(
30
- z.object({
31
- url: z.string().url(),
32
- authorization: z.string().min(1).default(`Bearer ${HF_ACCESS_TOKEN}`),
33
- weight: z.number().int().positive().default(1),
34
- })
35
- )
36
- .optional(),
37
- parameters: z
38
- .object({
39
- temperature: z.number().min(0).max(1),
40
- truncate: z.number().int().positive(),
41
- max_new_tokens: z.number().int().positive(),
42
- stop: z.array(z.string()).optional(),
43
- })
44
- .passthrough(),
45
- })
46
- )
47
- .parse(JSON.parse(MODELS));
48
-
49
- export const models = await Promise.all(
50
- modelsRaw.map(async (m) => ({
51
- ...m,
52
- id: m.id || m.name,
53
- displayName: m.displayName || m.name,
54
- preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt,
55
- }))
56
- );
57
-
58
- export type BackendModel = (typeof models)[0];
59
-
60
- export const defaultModel = models[0];
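The zod schema above validates the MODELS environment variable. A minimal hypothetical value that satisfies it, shown as a .env entry; the model name and special tokens are placeholders, and only the fields without defaults are required.

MODELS=`[
  {
    "name": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    "userMessageToken": "<|prompter|>",
    "assistantMessageToken": "<|assistant|>",
    "messageEndToken": "<|endoftext|>",
    "parameters": {
      "temperature": 0.9,
      "truncate": 1000,
      "max_new_tokens": 1024
    }
  }
]`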
 
spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/fid.py DELETED
@@ -1,107 +0,0 @@
1
- import argparse
2
- import pickle
3
-
4
- import torch
5
- from torch import nn
6
- import numpy as np
7
- from scipy import linalg
8
- from tqdm import tqdm
9
-
10
- from model import Generator
11
- from calc_inception import load_patched_inception_v3
12
-
13
-
14
- @torch.no_grad()
15
- def extract_feature_from_samples(
16
- generator, inception, truncation, truncation_latent, batch_size, n_sample, device
17
- ):
18
- n_batch = n_sample // batch_size
19
- resid = n_sample - (n_batch * batch_size)
20
- batch_sizes = [batch_size] * n_batch + [resid]
21
- features = []
22
-
23
- for batch in tqdm(batch_sizes):
24
- latent = torch.randn(batch, 512, device=device)
25
-        img, _ = generator([latent], truncation=truncation, truncation_latent=truncation_latent)
26
- feat = inception(img)[0].view(img.shape[0], -1)
27
- features.append(feat.to('cpu'))
28
-
29
- features = torch.cat(features, 0)
30
-
31
- return features
32
-
33
-
34
- def calc_fid(sample_mean, sample_cov, real_mean, real_cov, eps=1e-6):
35
- cov_sqrt, _ = linalg.sqrtm(sample_cov @ real_cov, disp=False)
36
-
37
- if not np.isfinite(cov_sqrt).all():
38
- print('product of cov matrices is singular')
39
- offset = np.eye(sample_cov.shape[0]) * eps
40
- cov_sqrt = linalg.sqrtm((sample_cov + offset) @ (real_cov + offset))
41
-
42
- if np.iscomplexobj(cov_sqrt):
43
- if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
44
- m = np.max(np.abs(cov_sqrt.imag))
45
-
46
- raise ValueError(f'Imaginary component {m}')
47
-
48
- cov_sqrt = cov_sqrt.real
49
-
50
- mean_diff = sample_mean - real_mean
51
- mean_norm = mean_diff @ mean_diff
52
-
53
- trace = np.trace(sample_cov) + np.trace(real_cov) - 2 * np.trace(cov_sqrt)
54
-
55
- fid = mean_norm + trace
56
-
57
- return fid
58
-
59
-
60
- if __name__ == '__main__':
61
- device = 'cuda'
62
-
63
- parser = argparse.ArgumentParser()
64
-
65
- parser.add_argument('--truncation', type=float, default=1)
66
- parser.add_argument('--truncation_mean', type=int, default=4096)
67
- parser.add_argument('--batch', type=int, default=64)
68
- parser.add_argument('--n_sample', type=int, default=50000)
69
- parser.add_argument('--size', type=int, default=256)
70
- parser.add_argument('--inception', type=str, default=None, required=True)
71
- parser.add_argument('ckpt', metavar='CHECKPOINT')
72
-
73
- args = parser.parse_args()
74
-
75
- ckpt = torch.load(args.ckpt)
76
-
77
- g = Generator(args.size, 512, 8).to(device)
78
- g.load_state_dict(ckpt['g_ema'])
79
- g = nn.DataParallel(g)
80
- g.eval()
81
-
82
- if args.truncation < 1:
83
- with torch.no_grad():
84
- mean_latent = g.mean_latent(args.truncation_mean)
85
-
86
- else:
87
- mean_latent = None
88
-
89
- inception = nn.DataParallel(load_patched_inception_v3()).to(device)
90
- inception.eval()
91
-
92
- features = extract_feature_from_samples(
93
- g, inception, args.truncation, mean_latent, args.batch, args.n_sample, device
94
- ).numpy()
95
- print(f'extracted {features.shape[0]} features')
96
-
97
- sample_mean = np.mean(features, 0)
98
- sample_cov = np.cov(features, rowvar=False)
99
-
100
- with open(args.inception, 'rb') as f:
101
- embeds = pickle.load(f)
102
- real_mean = embeds['mean']
103
- real_cov = embeds['cov']
104
-
105
- fid = calc_fid(sample_mean, sample_cov, real_mean, real_cov)
106
-
107
- print('fid:', fid)
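For reference, calc_fid above computes the standard Fréchet Inception Distance between the generated-sample statistics (mu_s, Sigma_s) and the real statistics (mu_r, Sigma_r) loaded from the inception pickle:

\mathrm{FID} = \lVert \mu_s - \mu_r \rVert_2^{2} + \operatorname{Tr}\!\left(\Sigma_s + \Sigma_r - 2\,(\Sigma_s \Sigma_r)^{1/2}\right)

The eps-scaled identity offset is only applied when the matrix square root of \Sigma_s \Sigma_r turns out to be numerically singular.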
 
spaces/Dorado607/ChuanhuChatGPT/modules/overwrites.py DELETED
@@ -1,92 +0,0 @@
1
- from __future__ import annotations
2
- import logging
3
-
4
- from typing import List, Tuple
5
- from gradio_client import utils as client_utils
6
- from gradio import utils
7
- import inspect
8
-
9
- from modules.presets import *
10
- from modules.index_func import *
11
-
12
-
13
- def postprocess(
14
- self,
15
- y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple],
16
- ) -> List[List[str | Dict | None]]:
17
- """
18
- Parameters:
19
- y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
20
- Returns:
21
- List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
22
- """
23
- if y is None:
24
- return []
25
- processed_messages = []
26
- for message_pair in y:
27
- assert isinstance(
28
- message_pair, (tuple, list)
29
- ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
30
- assert (
31
- len(message_pair) == 2
32
- ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
33
-
34
- processed_messages.append(
35
- [
36
- self._postprocess_chat_messages(message_pair[0], "user"),
37
- self._postprocess_chat_messages(message_pair[1], "bot"),
38
- ]
39
- )
40
- return processed_messages
41
-
42
- def postprocess_chat_messages(
43
- self, chat_message: str | tuple | list | None, role: str
44
- ) -> str | dict | None:
45
- if chat_message is None:
46
- return None
47
- elif isinstance(chat_message, (tuple, list)):
48
- file_uri = chat_message[0]
49
- if utils.validate_url(file_uri):
50
- filepath = file_uri
51
- else:
52
- filepath = self.make_temp_copy_if_needed(file_uri)
53
-
54
- mime_type = client_utils.get_mimetype(filepath)
55
- return {
56
- "name": filepath,
57
- "mime_type": mime_type,
58
- "alt_text": chat_message[1] if len(chat_message) > 1 else None,
59
- "data": None, # These last two fields are filled in by the frontend
60
- "is_file": True,
61
- }
62
- elif isinstance(chat_message, str):
63
- # chat_message = inspect.cleandoc(chat_message)
64
- # escape html spaces
65
- # chat_message = chat_message.replace(" ", "&nbsp;")
66
- if role == "bot":
67
- chat_message = convert_bot_before_marked(chat_message)
68
- elif role == "user":
69
- chat_message = convert_user_before_marked(chat_message)
70
- return chat_message
71
- else:
72
- raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
73
-
74
- with open("./assets/custom.js", "r", encoding="utf-8") as f, \
75
- open("./assets/external-scripts.js", "r", encoding="utf-8") as f1:
76
- customJS = f.read()
77
- externalScripts = f1.read()
78
-
79
-
80
- def reload_javascript():
81
- print("Reloading javascript...")
82
- js = f'<script>{customJS}</script><script async>{externalScripts}</script>'
83
- js += '<script async src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>'
84
- def template_response(*args, **kwargs):
85
- res = GradioTemplateResponseOriginal(*args, **kwargs)
86
- res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
87
- res.init_headers()
88
- return res
89
-
90
- gr.routes.templates.TemplateResponse = template_response
91
-
92
- GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
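These overrides are not applied within this file; the wiring lives elsewhere in the repository. A hypothetical sketch of how they would typically be bound, assuming a plain monkey-patch of the Chatbot component (the private attribute name is an assumption):

import gradio as gr
from modules.overwrites import postprocess, postprocess_chat_messages, reload_javascript

# Assumed wiring: replace the stock Chatbot post-processing with the
# markdown-aware versions defined above, then inject the custom scripts.
gr.Chatbot.postprocess = postprocess
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages  # attribute name assumed
reload_javascript()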