parquet-converter committed on
Commit 7bf4d45 · 1 Parent(s): 3849e21

Update parquet files (step 45 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/A Step-by-Step Guide to OBS Studio Download for Windows 7 64 Bit and Installation.md +0 -34
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Activation Code Airdroid Premium Crack.md +0 -47
  3. spaces/1gistliPinn/ChatGPT4/Examples/Arma 3 1.14 Multiplayer Crack.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/DISQLite3 Pro 5.22.0 D4-XE10.2.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/Download The Last Train - Bullet Train Download] [Torrent]l Everything You Need to Know About the Movie and the Torrent.md +0 -28
  6. spaces/1line/AutoGPT/autogpt/processing/text.py +0 -132
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent !EXCLUSIVE!.md +0 -74
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beach Buggy Racing 2 How to Unlock and Upgrade Over 40 Powerups.md +0 -116
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia How to Download and Install Jai Guru Jinn Livery.md +0 -113
  10. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/onnx_ijbc.py +0 -267
  11. spaces/8star/DeepDanbooru_string/app.py +0 -185
  12. spaces/AI4PD/hexviz/README.md +0 -36
  13. spaces/AIConsultant/MusicGen/audiocraft/modules/streaming.py +0 -131
  14. spaces/AIZeroToHero/Video-Automatic-Speech-Recognition/README.md +0 -13
  15. spaces/AIatUIUC/CodeLATS/executors/py_executor.py +0 -88
  16. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/api.py +0 -269
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/Fill.js +0 -36
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/click/Click.js +0 -2
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/DropDownList.d.ts +0 -130
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/RoundRectangle.d.ts +0 -2
  21. spaces/Ajit025/Text_to_Image_conversion/app.py +0 -15
  22. spaces/Aki004/herta-so-vits/flask_api.py +0 -62
  23. spaces/AlexWang/lama/bin/paper_runfiles/env.sh +0 -8
  24. spaces/Alfasign/nomic-ai-gpt4all-13b-snoozy/README.md +0 -12
  25. spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/__init__.py +0 -9
  26. spaces/Amrrs/DragGan-Inversion/dnnlib/util.py +0 -504
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/.github/ISSUE_TEMPLATE/feedback.md +0 -12
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/generate_logits.py +0 -127
  29. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py +0 -13
  30. spaces/Andy1621/uniformer_image_detection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py +0 -18
  31. spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py +0 -4
  32. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py +0 -11
  33. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py +0 -39
  34. spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/generators.py +0 -151
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/filewrapper.py +0 -111
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/resources.py +0 -358
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/extension.py +0 -248
  38. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docker/README.md +0 -45
  39. spaces/Axolotlily/DalleMini/app.py +0 -3
  40. spaces/Bart92/RVC_HF/lib/infer_pack/onnx_inference.py +0 -145
  41. spaces/Benjov/Demo-IR/README.md +0 -13
  42. spaces/Benson/text-generation/Examples/Anime Life Simulator.md +0 -110
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/encoding.py +0 -36
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euctwprober.py +0 -47
  45. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/CODE_OF_CONDUCT.md +0 -76
  46. spaces/CVPR/GFPGAN-example/PaperModel.md +0 -76
  47. spaces/CVPR/WALT/mmcv_custom/runner/epoch_based_runner.py +0 -104
  48. spaces/CVPR/WALT/mmdet/core/bbox/coder/base_bbox_coder.py +0 -17
  49. spaces/CVPR/WALT/mmdet/core/bbox/transforms.py +0 -240
  50. spaces/Catmeow/Face2Painting_From_Photo/paintingface.py +0 -110
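
For context on the commit message above ("Update parquet files"): a conversion step like this typically reads each dataset file and rewrites it in Parquet format. This diff does not show the converter's own code, so the snippet below is only a minimal sketch of such a step using pandas with the pyarrow engine; the file names `data.csv` and `data.parquet` are hypothetical placeholders, not paths from this commit.

```python
# Hedged sketch of a generic "convert to parquet" step. This is NOT the
# parquet-converter bot's actual implementation (the diff does not show it);
# the file names are hypothetical.
import pandas as pd

df = pd.read_csv("data.csv")                  # load a hypothetical source table
df.to_parquet("data.parquet", index=False)    # rewrite it as Parquet (pyarrow engine)
print(pd.read_parquet("data.parquet").shape)  # round-trip sanity check
```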
spaces/1acneusushi/gradio-2dmoleculeeditor/data/A Step-by-Step Guide to OBS Studio Download for Windows 7 64 Bit and Installation.md DELETED
@@ -1,34 +0,0 @@
- <br />
- <h1>How to OBS Studio Download for Windows 7 64 Bit and Use It for Streaming and Recording</h1>
- <p>OBS Studio is a free and open source software that allows you to stream and record your video and audio content. OBS Studio stands for Open Broadcaster Software Studio, and it is one of the most popular tools for live streaming and video recording. OBS Studio supports various platforms, such as Windows, Mac OS, and Linux. OBS Studio also supports various streaming services, such as Twitch, YouTube, Facebook, and more.</p>
- <h2>obs studio download for windows 7 64 bit</h2><br /><p><b><b>Download</b> &#10004; <a href="https://byltly.com/2uKxUo">https://byltly.com/2uKxUo</a></b></p><br /><br />
- <p>If you want to use OBS Studio for your streaming and recording needs, you need to download it from the official website and install it on your computer. In this article, we will show you how to do that step by step for Windows 7 64 bit.</p>
-
- <h2>How to OBS Studio Download for Windows 7 64 Bit</h2>
- <p>Follow these steps to download and install OBS Studio for Windows 7 64 bit:</p>
- <ol>
- <li>Go to the official website of OBS Studio. The URL is https://obsproject.com/.</li>
- <li>On the website, you will see a button that says "Download Installer". Click on it to start the download process.</li>
- <li>You will see a file named OBS-Studio-x.x.x-Full-Installer-x64.exe, where x.x.x is the version number. This is the installer file for OBS Studio. Save it to your preferred location on your computer.</li>
- <li>Once the download is complete, double-click on the installer file to launch it. Follow the instructions on the screen to complete the installation process. You can choose the default settings or customize them according to your preferences.</li>
- <li>After the installation is finished, you will have OBS Studio installed on your computer. You can verify this by opening the Start menu and looking for OBS Studio in the list of programs.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded and installed OBS Studio for Windows 7 64 bit.</p>
- <p></p>
-
- <h2>How to Use OBS Studio for Streaming and Recording</h2>
- <p>Now that you have OBS Studio installed on your computer, you can start using it for your streaming and recording purposes. Here are some basic steps to get you started:</p>
- <ol>
- <li>Launch OBS Studio by clicking on its icon in the Start menu or on your desktop.</li>
- <li>You will see the main window of OBS Studio with four sections: Scenes, Sources, Mixer, and Controls. Scenes are collections of sources that you want to show on your stream or recording. Sources are the elements that you want to capture, such as your webcam, microphone, game window, browser window, etc. Mixer is where you can adjust the audio levels of your sources. Controls are where you can start and stop your stream or recording, as well as access other settings and options.</li>
- <li>To add a scene, click on the "+" button in the Scenes section. Give your scene a name and click OK.</li>
- <li>To add a source, click on the "+" button in the Sources section. Choose the type of source that you want to add from the list of options. For example, if you want to capture your webcam, choose Video Capture Device. Give your source a name and click OK.</li>
- <li>You will see a window with various settings for your source. Adjust them according to your needs and click OK.</li>
- <li>You can resize and reposition your source by dragging its edges or corners in the preview window. You can also right-click on your source and choose Transform to access more options for cropping, rotating, flipping, etc.</li>
- <li>You can add more scenes and sources as needed by repeating steps 3 to 6.</li>
- <li>To start streaming, click on the Settings button in the Controls section. Go to the Stream tab and choose the service that you want to stream to from the drop-down menu. Enter your stream key or log in with your account credentials. Click Apply and OK.</li>
- <li>To start recording, click on the Settings button in the Controls section. Go to the Output tab and choose the mode that you want to use: Simple or Advanced. Adjust the settings for video quality, audio quality, file format, etc. Click Apply and OK.</li>
- <li>When you are ready to go live or record, click on the Start Streaming or Start Recording button in the Controls section.</li>
- <li>When you are done with your stream or recording, click on the Stop Streaming or Stop Recording button in</p> ddb901b051<br />
- <br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Activation Code Airdroid Premium Crack.md DELETED
@@ -1,47 +0,0 @@
- <br />
- <h1>Activation Code Airdroid Premium Crack: What Is It and How to Use It?</h1>
- <p>If you are an Android user who wants to access and manage your device from your computer, you may have heard of <a href="(^6^)">Airdroid</a>, a popular tool that lets you do just that. But what if you want to enjoy more features and benefits without paying for the premium subscription? You may have also heard of <strong>Airdroid Premium Crack</strong>, a modified version of Airdroid that claims to offer you all the premium features for free. But is it safe and legal to use? And are there any alternatives to it? In this article, we will answer these questions and more.</p>
- <h2>What Is Airdroid and What Are Its Features and Benefits?</h2>
- <p><a href="(^6^)">Airdroid</a> is a cross-platform tool that allows you to access and manage your Android devices wirelessly over the web. You can use it to transfer files, control mobile devices remotely, receive and reply to messages, mirror screen, and more. It works on Windows, Mac, Linux, Chrome, Firefox, Safari, Edge, Opera, and other browsers.</p>
- <h2>Activation Code Airdroid Premium Crack</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash; <a href="https://byltly.com/2uKzVp">https://byltly.com/2uKzVp</a></b></p><br /><br />
- <h3>Airdroid offers multiple features to enhance productivity and convenience</h3>
- <p>Some of the main features of Airdroid are:</p>
- <ul>
- <li><strong>File Transfer & Management:</strong> You can transfer files across devices, control mobile devices remotely, receive and reply to messages on computer. You can also enjoy high-speed file transfer within the local area network, and switch seamlessly between Wi-Fi, 4G, or 5G network.</li>
- <li><strong>Screen Mirroring:</strong> You can mirror Android phone screens to your computer and view your content on a bigger display. Screen mirroring doesn't require the phones and the computer to be on the same network. It is useful for various scenarios, such as remote meetings, online teaching, live streaming, gaming, etc.</li>
- <li><strong>Remote Control:</strong> You can remotely control your Android devices from your computer without rooting them. You can use your mouse and keyboard to operate your phone or tablet, access any app or file, take screenshots, record screen, etc.</li>
- <li><strong>Remote Monitoring:</strong> You can use your old Android phones as security cameras by using the remote camera feature. You can view the live feed from your phone's camera on your computer, and even switch between the front and rear cameras. You can also record videos or take photos remotely.</li>
- <li><strong>Notification Mirror:</strong> You can receive and reply to notifications from your Android devices on your computer. You can also mute or block unwanted notifications, and customize the notification settings.</li>
- <li><strong>Find Phone:</strong> You can locate your lost or stolen Android devices using Airdroid. You can also lock, erase, or ring your devices remotely.</li>
- <li><strong>Backup & Sync:</strong> You can backup and sync your contacts, photos, videos, music, and other files between your Android devices and your computer. You can also restore your data from the backup easily.</li>
- <li><strong>App Management:</strong> You can install, uninstall, or update apps on your Android devices from your computer. You can also view the app details, permissions, storage usage, etc.</li>
- <li><strong>Clipboard Sharing:</strong> You can share the clipboard content between your Android devices and your computer. You can copy text or images on one device and paste them on another device.</li>
- </ul>
- <p>Airdroid also supports multiple languages, dark mode, QR code login, SMS backup, call logs, etc.</p>
- <h3>Airdroid has some drawbacks and limitations that may affect user experience</h3>
- <p>Despite its many features and benefits, Airdroid is not perfect. Some of the drawbacks and limitations of Airdroid are:</p>
- <ul>
- <li><strong>Limited Free Version:</strong> The free version of Airdroid has some restrictions on the file transfer size, remote control duration, screen mirroring quality, backup storage space, etc. It also shows ads on the app and the web interface.</li>
- <li><strong>Potential Security Risks:</strong> Airdroid requires you to grant various permissions to access and manage your Android devices. This may pose some security risks if your account is hacked or compromised. In 2016, Airdroid was found to have a <a href="">critical vulnerability</a> that could allow attackers to intercept user data or execute malicious code.</li>
- <li><strong>Possible Compatibility Issues:</strong> Airdroid may not work well with some Android devices or versions due to different manufacturers' customizations or system updates. Some features may require rooting or enabling developer options on your devices.</li>
- <li><strong>Dependence on Internet Connection:</strong> Airdroid relies on a stable and fast internet connection to function properly. If your network is slow or unstable, you may experience lagging, buffering, disconnecting, or other issues.</li>
- </ul>
- <h2>What Is Airdroid Premium and How to Get It?</h2>
- <p>Airdroid Premium is a paid subscription that unlocks more features and benefits for Airdroid users. With Airdroid Premium, you can enjoy:</p>
- <ul>
- <li><strong>No Ads:</strong> You can remove all the ads from the app and the web interface.</li>
- <li><strong>No File Transfer Limit:</strong> You can transfer files of any size between your devices without any restriction.</li>
- <li><strong>No Remote Control Limit:</strong> You can remotely control your devices for as long as you want without any interruption.</li>
- <li><strong>No Screen Mirroring Limit:</strong> You can mirror your screen in HD quality without any degradation.</li>
- <li><strong>No Backup Storage Limit:</strong> You can backup and sync unlimited data between your devices without any limitation.</li>
- <li><strong>No Notification Limit:</strong> You can receive and reply to unlimited notifications from your devices without any limitation.</li>
- <li><strong>Premium Customer Support:</strong> You can get priority customer support from the Airdroid team via email or phone.</li>
- </ul>
- <h3>Airdroid Premium costs $1.99 per month or $19.99 per year</h3>
- <p>The price of Airdroid Premium is $1.99 per month or $19.99 per year. You can also get a 7-day free trial before you decide to purchase it. You can pay with PayPal, credit card, debit card, Google Play balance, etc.</p>
- <p></p>
- <h3>Airdroid Premium can be purchased from the official website or the app</h3>
- <p>To buy Airdroid Premium, you can either visit the <a href="">official website</a> or open the app on your device. Then you need to sign in with your Airdroid account or create one if you don't have one. Next, you need to choose the plan that suits you best and follow the instructions to complete the payment process. Once you have purchased Airdroid Premium, you can activate it on up to six devices using the same account.</p>
- <h</p> b2dd77e56b<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Arma 3 1.14 Multiplayer Crack.md DELETED
@@ -1,6 +0,0 @@
- <h2>arma 3 1.14 multiplayer crack</h2><br /><p><b><b>Download</b> &#10004;&#10004;&#10004; <a href="https://imgfil.com/2uxZDl">https://imgfil.com/2uxZDl</a></b></p><br /><br />
-
- Arma 3 1.14 Crack Education Program are autonomy about 30 utilities and ... open occurrences much. bachata music free online and ST& this PowerPoint ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/DISQLite3 Pro 5.22.0 D4-XE10.2.md DELETED
@@ -1,6 +0,0 @@
-
- <p> in addition, disqlite3 pro is a powerful application for the creation and manage of database programs and databases. it is not very difficult to use the application, and more importantly, it has a graphical interface for the creation and management of the database. in addition, it is possible to create all types of databases and database files using this application. all the databases are stored in the same directory, and the user does not have to enter the path of the database. it is also possible to create the database program by the application. this application can be used for the creation of the database files, in addition to the creation of the database files from the url. the application is also available for windows and mac os. users can make use of the application for the creation and management of the database and the database program.</p>
- <p> furthermore, disqlite3 pro is a powerful application for the creation and management of database programs and databases. it is not very difficult to use the application, and more importantly, it has a graphical interface for the creation and management of the database. in addition, it is possible to create all types of databases and database files using this application. all the databases are stored in the same directory, and the user does not have to enter the path of the database. it is also possible to create the database program by the application. this application can be used for the creation of the database files, in addition to the creation of the database files from the url. the application is also available for windows and mac os. users can make use of the application for the creation and management of the database and the database program.</p>
- <h2>DISQLite3 Pro 5.22.0 D4-XE10.2</h2><br /><p><b><b>Download</b> &#127379; <a href="https://imgfil.com/2uxZs4">https://imgfil.com/2uxZs4</a></b></p><br /><br /> 899543212b<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Download The Last Train - Bullet Train Download] [Torrent]l Everything You Need to Know About the Movie and the Torrent.md DELETED
@@ -1,28 +0,0 @@
- <br />
- <p>Its extensive torrent index makes it one of the <strong>best movie torrent sites</strong> out there. You can download movies of all genres from The Pirate Bay without worrying about downloading suspicious files.</p>
- <p>There is a list of backup trackers given on the torrents page listing. Add them to get every last bit of available speed. GloTorrents also has an active forum where you can request torrents, subtitles, and more.</p>
- <h2>The Last Train - Bullet Train Download] [Torrent]l</h2><br /><p><b><b>Download File</b> &#10001; <a href="https://imgfil.com/2uxZbq">https://imgfil.com/2uxZbq</a></b></p><br /><br />
- <p>This article was co-authored by wikiHow Staff. Our trained team of editors and researchers validate articles for accuracy and comprehensiveness. wikiHow's Content Management Team carefully monitors the work from our editorial staff to ensure that each article is backed by trusted research and meets our high quality standards.<br><br> This article has been viewed 199,709 times.<br><br>Learn more...</p>
- <p>It is especially helpful in preventing hackers from stealing your data while connected to an unsecure public Wi-Fi network. A VPN for torrenting allows you the anonymity to download as much as you want.</p>
- <p>Technically, it is safe to torrent. It is based on a P2P (peer-to-peer) network where all participants share bits of a file. As more people download a file or some portion of it, they can become an active participant.</p>
- <p>It depends on where you are downloading the file more than anything else. Public torrents are swarming with trojans that infect your system with malware such as a cryptominer. To prevent this from happening, always be mindful of what you download. Copyrighted material such as games are usually a honeypot for hackers.</p>
- <p>Privacy experts recommend the use of a Torrent VPN to make your torrent activities anonymous. With a VPN for torrenting, you can download torrent files securely in countries dominated by DMCAs and copyright laws.</p>
- <p>Kickasstorrents.to is probably the oldest and still functioning Kickass clones that users can access right now. You can access it using a VPN for all your torrenting needs. it offers complete Kickass torrents database with a whole connection of movies, series, documentaries, and much more for users to download. The site also has its Kickass community where it provides regular updates of the latest torrents available for download.</p>
- <p>Tor-cr.org is yet another great Kickass clone. It has turned up to be a very useful clone website as it offers the complete list of Kickass Torrents. The website is easily accessible from all regions unless your ISP has imposed regional-restrictions on these versions of Kickass. However, using a VPN will give you full access to Tor-cr.org and download torrents from a wide range of content categories.</p>
- <p></p>
- <p>Kat.li is another top Kickass clone website with a fast and powerful Torrents search engine similar to the one we had with the original Kickass website. The site indexes torrent files from multiple domains and provide a huge collection of Kickass torrents for users to download their favorite content including TV Shows, Movies, Games, Music, Apps and many more.</p>
- <p>Although there is a very slight chance that the above mentioned torrenting clone websites could get shut down in the near future, if they do, you can make do with non-English torrenting sites to find your favorite content. These Non-English torrenting websites may be difficult to use for English-only downloaders, you can still use the help of Google translator to translate and change to the language of the website to make it easy for you to download stuff easily.</p>
- <p>The popular animetorrents indexing website got shut down recently, causing concerns for all torrent fans who relied on the website to download anime content. But it is now back with a new interface and the same directory of torrents. You can download your favorite anime movie and series without any problems.</p>
- <p>ArenaBG is a Bulgarian-torrents indexing website. It has been a target of a lot of investigations for violating copyright laws, but it is still up and running. Initially it was only available to access in Bulgaria, however, since 2011, users from around the world can access it easily. ArenaBG offers a huge selection of torrents for download and you can access it easily from anywhere. But remember, to avoid any trouble, you can use a Kickass VPN to stay anonymous and private.</p>
- <p>ExtraTorrent is a great torrent website and thousands of users use it to download their favorite torrents every day. It offers a huge database of torrents for download and is surely one of the best Kickass alternatives you must consider.</p>
- <p>Torrents.me work like a multi-search engine that allows you to search and download your favorite torrents from popular torrenting websites like the Pirate Bay, ExtraTorrent, and LimeTorrents. You can easily add your preferred torrenting websites in the search and find your favorite torrents through their database.</p>
- <p>Since 1985, SERTC has provided hands-on, realistic training in surface transportation hazmat response. With new facilities and expanding curriculum, the SERTC trainee community is growing to keep local, state, tribal and territorial communities even safer.</p>
- <p>As he was older and stronger than any of the other members who took upracing, and as he always rode the lightest and best wheel that money couldprocure, he had, without much hard work, easily maintained a lead in theracing field, and had come to consider himself as invincible. He regardedhimself as such a sure winner of this last[Pg 6] race for the Railroad Cup,that he had not taken the trouble to go into training for it. He would noteven give up his cigarette smoking, a habit that he had acquired becausehe considered it fashionable and manly. Now he was beaten, disgracefully,and that by a boy nearly two years younger than himself. It was too much,and he determined to find some excuse for his defeat, that should at thesame time remove the disgrace from him, and place it upon other shoulders.</p>
- <p>With this Rod plunged down the steep bank to the railroad track, anddisappeared in the darkness. He went in the direction of the next stationto Euston, about five miles away, as he did not wish to be recognized whenhe made the attempt to secure a ride on some train to New York. It was tobe an attempt only; for he had not a cent of money in his pockets, and hadno idea of how he should obtain the coveted ride. In addition to beingpenniless, he was hungry, and his hunger was increased tenfold by theknowledge that he had no means of satisfying it. Still he was a boy withunlimited confidence in himself. He always had fallen on his feet; and,though this was the worse fix in which he had ever found himself, he hadfaith that he would come out[Pg 32]of it all right somehow. His heart wasalready so much lighter since he had learned from Dan that some of hisfriends, and especially Eltje Vanderveer, still believed in him, that hissituation did not seem half so desperate as it had an hour before.</p>
- <p>Rod was already enough of a railroad man to know that, as he was goingeast, he must walk on the west bound track. By so doing he would be ableto see trains bound west, while they were still at some distance from him,and would be in no danger from those bound east and overtaking him.</p>
- <p>When he was about half a mile from the little station, toward which he waswalking, he heard the long-drawn, far-away whistle of a locomotive. Was itahead of him or behind? On account of the bewildering echoes he could nottell. To settle the question he kneeled down, and placed his ear againstone of rails of the west bound track. It was cold and silent. Then hetried the east bound track in the same way. This rail seemed to tinglewith life, and a faint, humming sound came from it. It was a perfectrailroad telephone, and it informed the listener as plainly as words couldhave told him, that a train was approaching from the west.</p>
- <p>[Pg 33]He stopped to note its approach. In a few minutes the rails of the eastbound track began to quiver with light from the powerful reflector infront of its locomotive. Then they stretched away toward the oncomingtrain in gleaming bands of indefinite length, while the dazzling lightseemed to cut a bright pathway between walls of solid blackness for theuse of the advancing monster. As the bewildering glare passed him, Rod sawthat the train was a long, heavy-laden freight, and that some of its carscontained cattle. He stood motionless as it rushed past him, shaking thesolid earth with its ponderous weight, and he drew a decided breath ofrelief at the sight of the blinking red eyes on the rear platform of itscaboose. How he wished he was in that caboose, riding comfortably towardNew York, instead of plodding wearily along on foot, with nothing butuncertainties ahead of him.</p>
- <p>As Rod stood gazing at the receding train he noticed a human figure stepfrom the lighted interior of the caboose, through the open doorway, to theplatform, apparently kick at something, and almost instantly return intothe car. At the same time the boy fancied he heard a sharp cry of pain;but was not sure. As he resumed his tiresome walk, gazing longingly afterthe vanishing train lights, he saw another light, a white one that movedtoward him with a swinging motion, close to the ground. While he waswondering what it was, he almost stumbled over a small animal that stoodmotionless on the track, directly in front of him. It was a dog. Now Roddearly loved dogs, and seemed instinctively to know that this one was insome sort of trouble. As he stopped to pat it, the creature uttered alittle whine, as though [Pg 35]askinghis sympathy and help. At the same time it licked his hand.</p>
- <p>The latter told the boy that the young tramp, as they called him, wasbilled through to New York, to look after some cattle that were on thetrain; but that he was a worthless, ugly fellow, who had not paid theslightest attention to them, and whose only object in accepting the jobwas evidently to obtain a free ride in the caboose. Smiler, whom he hadbeen delighted to find on the train when it was turned over to him, hadtaken a great dislike to the[Pg 45] fellowfrom the first. He had growled andshown his teeth whenever the tramp moved about the car, and several timesthe latter had threatened to teach him better manners. When he andBrakeman Joe went to the forward end of the train, to make ready forside-tracking it, they left the dog sitting on the rear platform of thecaboose, and the tramp apparently asleep, as Rod had found him, on one ofthe lockers. He must have taken advantage of their absence to deal the dogthe cruel kick that cut his ear, and landed him, stunned and bruised, onthe track where he had been discovered.</p> aaccfb2cb3<br />
- <br />
- <br />
 
spaces/1line/AutoGPT/autogpt/processing/text.py DELETED
@@ -1,132 +0,0 @@
- """Text processing functions"""
- from typing import Dict, Generator, Optional
-
- from selenium.webdriver.remote.webdriver import WebDriver
-
- from autogpt.config import Config
- from autogpt.llm_utils import create_chat_completion
- from autogpt.memory import get_memory
-
- CFG = Config()
- MEMORY = get_memory(CFG)
-
-
- def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
-     """Split text into chunks of a maximum length
-
-     Args:
-         text (str): The text to split
-         max_length (int, optional): The maximum length of each chunk. Defaults to 8192.
-
-     Yields:
-         str: The next chunk of text
-
-     Raises:
-         ValueError: If the text is longer than the maximum length
-     """
-     paragraphs = text.split("\n")
-     current_length = 0
-     current_chunk = []
-
-     for paragraph in paragraphs:
-         if current_length + len(paragraph) + 1 <= max_length:
-             current_chunk.append(paragraph)
-             current_length += len(paragraph) + 1
-         else:
-             yield "\n".join(current_chunk)
-             current_chunk = [paragraph]
-             current_length = len(paragraph) + 1
-
-     if current_chunk:
-         yield "\n".join(current_chunk)
-
-
- def summarize_text(
-     url: str, text: str, question: str, driver: Optional[WebDriver] = None
- ) -> str:
-     """Summarize text using the OpenAI API
-
-     Args:
-         url (str): The url of the text
-         text (str): The text to summarize
-         question (str): The question to ask the model
-         driver (WebDriver): The webdriver to use to scroll the page
-
-     Returns:
-         str: The summary of the text
-     """
-     if not text:
-         return "Error: No text to summarize"
-
-     text_length = len(text)
-     print(f"Text length: {text_length} characters")
-
-     summaries = []
-     chunks = list(split_text(text))
-     scroll_ratio = 1 / len(chunks)
-
-     for i, chunk in enumerate(chunks):
-         if driver:
-             scroll_to_percentage(driver, scroll_ratio * i)
-         print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
-
-         memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
-
-         MEMORY.add(memory_to_add)
-
-         print(f"Summarizing chunk {i + 1} / {len(chunks)}")
-         messages = [create_message(chunk, question)]
-
-         summary = create_chat_completion(
-             model=CFG.fast_llm_model,
-             messages=messages,
-         )
-         summaries.append(summary)
-         print(f"Added chunk {i + 1} summary to memory")
-
-         memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
-
-         MEMORY.add(memory_to_add)
-
-     print(f"Summarized {len(chunks)} chunks.")
-
-     combined_summary = "\n".join(summaries)
-     messages = [create_message(combined_summary, question)]
-
-     return create_chat_completion(
-         model=CFG.fast_llm_model,
-         messages=messages,
-     )
-
-
- def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
-     """Scroll to a percentage of the page
-
-     Args:
-         driver (WebDriver): The webdriver to use
-         ratio (float): The percentage to scroll to
-
-     Raises:
-         ValueError: If the ratio is not between 0 and 1
-     """
-     if ratio < 0 or ratio > 1:
-         raise ValueError("Percentage should be between 0 and 1")
-     driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
-
-
- def create_message(chunk: str, question: str) -> Dict[str, str]:
-     """Create a message for the chat completion
-
-     Args:
-         chunk (str): The chunk of text to summarize
-         question (str): The question to answer
-
-     Returns:
-         Dict[str, str]: The message to send to the chat completion
-     """
-     return {
-         "role": "user",
-         "content": f'"""{chunk}""" Using the above text, answer the following'
-         f' question: "{question}" -- if the question cannot be answered using the text,'
-         " summarize the text.",
-     }
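
The deleted module above is self-contained enough that its chunking logic can be exercised on its own. The following is a minimal usage sketch of the `split_text` generator exactly as defined in the diff, copied out so it runs without the `autogpt` package or an OpenAI key:

```python
# Standalone demo of the split_text generator from the deleted module above.
from typing import Generator

def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
    """Yield newline-joined chunks whose lengths stay at or below max_length."""
    paragraphs = text.split("\n")
    current_length = 0
    current_chunk = []
    for paragraph in paragraphs:
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1
    if current_chunk:
        yield "\n".join(current_chunk)

sample = "\n".join(f"paragraph {i}" for i in range(10))
for chunk in split_text(sample, max_length=40):
    print(repr(chunk))  # each yielded chunk stays within the 40-character budget
```

Note that a single paragraph longer than `max_length` is still yielded whole, so despite the docstring, the advertised `ValueError` is never actually raised.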
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent !EXCLUSIVE!.md DELETED
@@ -1,74 +0,0 @@
- ## Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent
-
-
-
-
-
- ![Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent !EXCLUSIVE!](https://cdn-games.bigfishsites.com/en_buildalot-fairy-tales/screen2.jpg)
-
-
-
-
-
- **CLICK HERE ••• [https://www.google.com/url?q=https%3A%2F%2Fbytlly.com%2F2txjm0&sa=D&sntz=1&usg=AOvVaw1SVqXiA0JjUeIJDUtRRRY4](https://www.google.com/url?q=https%3A%2F%2Fbytlly.com%2F2txjm0&sa=D&sntz=1&usg=AOvVaw1SVqXiA0JjUeIJDUtRRRY4)**
-
-
-
-
-
-
-
-
-
-
-
-
-
- # How to Download and Play Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent
-
-
-
- If you are looking for a fun and relaxing game that combines city-building and fairy tale elements, then you should try Build-a-lot 7 - Fairy Tales. This is the seventh installment of the popular Build-a-lot series, and it offers you a chance to create your own magical kingdom with castles, cottages, fountains, and more. You can also explore different fairy tale worlds, meet famous characters, and complete challenging quests.
-
-
-
- But how can you get this game for free? The answer is by downloading and playing the Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent. This is a file that contains the full version of the game, already cracked and ready to play. You don't need to install anything or register any account. You just need to follow these simple steps:
-
-
-
- 1. Download a torrent client, such as uTorrent or BitTorrent, and install it on your computer.
-
- 2. Go to a torrent site, such as The Pirate Bay or Kickass Torrents, and search for "Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games".
-
- 3. Choose the torrent file that has the most seeders and leechers, and download it to your computer.
-
- 4. Open the torrent file with your torrent client, and select the destination folder where you want to save the game.
-
- 5. Wait for the download to finish. It may take some time depending on your internet speed and the number of peers.
-
- 6. Once the download is complete, open the destination folder and double-click on the game icon. The game will launch automatically.
-
- 7. Enjoy playing Build-a-lot 7 - Fairy Tales!
-
-
-
- Note: Downloading and playing torrent files may be illegal in some countries. Please check your local laws before proceeding. Also, be careful of viruses and malware that may be hidden in some torrent files. Always scan your files with an antivirus program before opening them.
-
-
-
- Build-a-lot 7 - Fairy Tales is a game that will appeal to both casual and hardcore gamers. You can choose from four different modes: Campaign, Casual, Expert, and Sandbox. Each mode has its own objectives and challenges, and you can adjust the difficulty level according to your preference. You can also unlock achievements and trophies as you progress through the game.
-
-
-
- The game features stunning graphics and sound effects that will immerse you in the fairy tale atmosphere. You can customize your kingdom with different types of buildings, decorations, and landscaping. You can also interact with various fairy tale characters, such as Cinderella, Snow White, Rapunzel, and more. You can help them with their problems, or cause some mischief if you feel like it.
-
-
-
- Build-a-lot 7 - Fairy Tales is a game that will keep you entertained for hours. You can download and play it for free by using the Build-a-lot 7 - Fairy Tales - Full PreCracked - Foxy Games Torrent. Just follow the instructions above and start building your dream kingdom today!
-
- 1b8d091108
-
-
-
-
-
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Beach Buggy Racing 2 How to Unlock and Upgrade Over 40 Powerups.md DELETED
@@ -1,116 +0,0 @@
-
- <h1>Beach Buggy Racing 2: A Fun and Exciting Kart Racing Game</h1>
- <p>Do you love kart racing games? Do you want to experience a thrilling adventure on a mysterious island? Do you want to compete against other players from around the world? If you answered yes to any of these questions, then you should try Beach Buggy Racing 2, a fun and exciting kart racing game that you can download from Microsoft Store. In this article, we will tell you everything you need to know about this game, including what it is, how to download it, what are its features, how to play it, and why you should play it.</p>
- <h2>beach buggy racing 2 download microsoft store</h2><br /><p><b><b>Download File</b> &#8230;&#8230;&#8230; <a href="https://urlin.us/2uSSvc">https://urlin.us/2uSSvc</a></b></p><br /><br />
- <h2>What is Beach Buggy Racing 2?</h2>
- <p>Beach Buggy Racing 2 is a sequel to the popular Beach Buggy Racing, a game that introduced over 100 million international mobile players to console-style kart racing with a playful off-road twist. Beach Buggy Racing 2 is a fully 3D off-road kart racing game with amazing physics, detailed cars and characters, and spectacular weapons, powered by Vector Engine and NVIDIA's PhysX. It's like a console game in the palm of your hand!</p>
- <p>Beach Buggy Racing 2 is a game that you can play solo or with friends in split screen or online modes. You can join the Beach Buggy Racing League and compete against drivers and cars from around the world. You can race through Egyptian pyramids, dragon-infested castles, pirate ship wrecks, and experimental alien bio-labs. You can collect and upgrade an arsenal of fun and wacky powerups. You can recruit new drivers, assemble a garage full of cars, and race your way to the top of the league.</p>
- <h2>How to download Beach Buggy Racing 2 from Microsoft Store?</h2>
- <p>If you want to download Beach Buggy Racing 2 on your Windows 10 device, you can follow these simple steps:</p>
- <p>beach buggy racing 2 island adventure xbox one<br />
- beach buggy racing 2 hot wheels edition<br />
- beach buggy racing 2 split screen<br />
- beach buggy racing 2 game crafting<br />
- beach buggy racing 2 adventure mode<br />
- beach buggy racing 2 xbox series x<br />
- beach buggy racing 2 oddball car pack<br />
- beach buggy racing 2 firework fury<br />
- beach buggy racing 2 vector unit<br />
- beach buggy racing 2 xbox local multiplayer<br />
- beach buggy racing 2 kart racer<br />
- beach buggy racing 2 powerups<br />
- beach buggy racing 2 championships<br />
- beach buggy racing 2 drift attack<br />
- beach buggy racing 2 tropical rivals<br />
- beach buggy racing 2 official sequel<br />
- beach buggy racing 2 free driving game<br />
- beach buggy racing 2 moon buggies<br />
- beach buggy racing 2 monster trucks<br />
- beach buggy racing 2 ancient temples<br />
- beach buggy racing 2 dragon castles<br />
- beach buggy racing 2 ice cream stands<br />
- beach buggy racing 2 rag-tag crew<br />
- beach buggy racing 2 mysterious island<br />
- beach buggy racing 2 epic race<br />
- beach buggy racing 2 ultimate trophy<br />
- beach buggy racing 2 mayhem-filled kart racer<br />
- beach buggy racing 2 solo or with friends<br />
- beach buggy racing 2 story-driven adventure mode<br />
- beach buggy racing 2 adrenaline-pumping races<br />
- beach buggy racing 2 skill-mastering drift attacks<br />
- beach buggy racing 2 custom game modes<br />
- beach buggy racing 2 zany race rules<br />
- beach buggy racing 2 bouncy tires powerup<br />
- beach buggy racing 2 rocket boost powerup<br />
- beach buggy racing 2 police chase powerup<br />
- beach buggy racing 2 fast-paced driving action game<br />
- beach buggy racing 2 explosive fun for all skill levels<br />
- beach buggy racing 2 net energy gain experiment<br />
- beach buggy racing 2 holy grail fusion experiment<br />
- beach buggy racing 2 mini sun experiment<br />
- beach buggy racing 2 seven times hotter than the sun core experiment</p>
- <ol>
- <li>Open Microsoft Store app on your device.</li>
- <li>Search for Beach Buggy Racing 2 in the search bar.</li>
- <li>Select the game from the search results.</li>
- <li>Click on Get or Install button.</li>
- <li>Wait for the download and installation process to complete.</li>
- <li>Launch the game and enjoy!</li>
- </ol>
- <p>The system requirements for Beach Buggy Racing 2 are:</p>
- <ul>
- <li>OS: Windows 10 version 18362.0 or higher</li>
- <li>Architecture: x64</li>
- <li>DirectX: Version 11</li>
- <li>Memory: 4 GB</li>
- <li>Processor: Intel Core i5-6500 or equivalent</li>
- <li>Graphics: NVIDIA GeForce GTX750 Ti or equivalent</li>
- </ul>
- <p>The price of Beach Buggy Racing 2 is $19.99. However, you can also buy the Hot Wheels Edition bundle for $26.98, which includes the game and two DLC packs: Hot Wheels Booster Pack and Oddball Car <p>One of the benefits of downloading the game from Microsoft Store is that you can enjoy the Hot Wheels Booster Pack DLC, an exciting new content expansion that adds seven legendary Hot Wheels cars and four new tracks, complete with twisting orange track pieces, to the Beach Buggy Racing League. You can also get the Oddball Car Pack DLC, which adds four wacky and weird cars to your garage: the Rocket Car, the Shark Car, the Alien Car, and the Monster Truck. These DLC packs are sold separately or as a bundle with the game for a discounted price.</p>
- <h2>What are the features of Beach Buggy Racing 2?</h2>
- <p>Beach Buggy Racing 2 is not just a simple racing game. It has many features that make it a fun and exciting kart racing game. Here are some of them:</p>
- <h3>The different game modes and challenges</h3>
- <p>You can choose from different game modes and challenges to test your skills and have fun. You can play the Adventure mode, where you can explore the island and unlock new tracks, cars, drivers, and powerups. You can also play the Quick Race mode, where you can race on any track you want with any car you want. You can also play the Championship mode, where you can compete in a series of races and earn trophies. You can also play the Daily Challenges mode, where you can complete different tasks and earn rewards. You can also play the Special Events mode, where you can join limited-time events and win exclusive prizes.</p>
- <h3>The variety of cars, drivers, and powerups</h3>
- <p>You can collect and upgrade over 40 cars, each with their own unique stats and abilities. You can also recruit over 20 drivers, each with their own special power. You can also collect and upgrade over 40 powerups, each with their own effects and strategies. You can mix and match different cars, drivers, and powerups to create your own style and strategy.</p>
- <h3>The customization options and the achievements</h3>
- <p>You can customize your cars with different paints, decals, wheels, spoilers, and more. You can also customize your drivers with different outfits, hats, glasses, and more. You can also customize your powerup deck with different combinations of powerups. You can also unlock over 100 achievements and show off your skills and progress.</p>
- <h2>How to play Beach Buggy Racing 2?</h2>
- <p>Beach Buggy Racing 2 is easy to play but hard to master. Here are some tips and tricks to help you play better:</p>
- <h3>The controls and the tips for racing</h3>
- <p>You can choose from different control options: tilt, touch, or gamepad. You can also adjust the sensitivity and camera angle according to your preference. The basic controls are: accelerate, brake, steer, drift, use powerup, use driver ability. The tips for racing are: use drift to take sharp turns and fill up your boost meter; use boost to speed up and overtake your opponents; use powerups wisely and strategically; use driver ability at the right time and situation; avoid obstacles and traps; collect coins and gems; look for shortcuts and secrets.</p>
- <h3>The powerup deck and the special abilities</h3>
- <p>You can create your own powerup deck with up to eight powerups. You can choose from offensive, defensive, or utility powerups. You can also upgrade your powerups to make them more effective. Some examples of powerups are: firework (shoots a rocket that explodes on impact); oil slick (drops a slippery puddle that spins out other racers); shield (protects you from attacks for a short time); nitro (gives you a burst of speed); magnet (attracts coins and gems); lightning (zaps nearby racers); tornado (creates a swirling wind that blows away other racers); ice cream (freezes other racers in place). You can also use your driver ability once per race. Each driver has a unique ability that can give you an edge over your opponents. Some examples of driver abilities are: beach ball barrage (launches beach balls everywhere); fire breath (breathes fire in front of you); teleport (teleports you to a random position); coin storm (makes coins rain from the sky); banana split (splits into three copies of yourself).</p>
- <h3>The online competitions and tournaments</h3>
- <p>You can join the Beach Buggy Racing League and compete against other players from around the world in online races. You can earn trophies and rank up in different leagues. You can also join online tournaments and win exclusive rewards. You can also create or join a team and chat with other players.</p>
- <h2>Why should you play Beach Buggy Racing 2?</h2>
- <p>Beach Buggy Racing 2 is a game that you should play if you love kart racing games. Here are some reasons why you should play Beach Buggy Racing 2:</p>
- <h3>The fun and addictive gameplay</h3>
- <p>Beach Buggy Racing 2 is a game that will keep you hooked for hours. You will never get bored of racing on different tracks, using different powerups, and unlocking new cars, drivers, and upgrades. You will also enjoy the challenge of competing against other players and improving your skills and rank. You will also have fun exploring the island and discovering its secrets and surprises.</p>
- <h3>The stunning graphics and sound effects</h3>
- <p>Beach Buggy Racing 2 is a game that will impress you with its graphics and sound effects. You will admire the detailed and colorful 3D graphics that bring the island to life. You will also appreciate the realistic physics and animations that make the racing experience more immersive. You will also enjoy the catchy and upbeat music and sound effects that match the mood and theme of the game.</p>
- <h3>The replay value and the updates</h3>
- <p>Beach Buggy Racing 2 is a game that will keep you coming back for more. You will always find something new and exciting to do in the game. You will also benefit from the regular updates that add new content and features to the game. You will also be able to play the game offline or online, depending on your preference and availability.</p>
- <h2>Conclusion</h2>
- <p>Beach Buggy Racing 2 is a fun and exciting kart racing game that you can download from Microsoft Store. It is a sequel to the popular Beach Buggy Racing, a game that introduced over 100 million international mobile players to console-style kart racing with a playful off-road twist. Beach Buggy Racing 2 is a fully 3D off-road kart racing game with amazing physics, detailed cars and characters, and spectacular weapons, powered by Vector Engine and NVIDIA's PhysX. It's like a console game in the palm of your hand!</p>
- <p>Beach Buggy Racing 2 is a game that you can play solo or with friends in split screen or online modes. You can join the Beach Buggy Racing League and compete against drivers and cars from around the world. You can race through Egyptian pyramids, dragon-infested castles, pirate ship wrecks, and experimental alien bio-labs. You can collect and upgrade an arsenal of fun and wacky powerups. You can recruit new drivers, assemble a garage full of cars, and race your way to the top of the league.</p>
- <p>Beach Buggy Racing 2 is a game that has many features that make it a fun and exciting kart racing game. You can choose from different game modes and challenges to test your skills and have fun. You can collect and upgrade over 40 cars, each with their own unique stats and abilities. You can also recruit over 20 drivers, each with their own special power. You can also collect and upgrade over 40 powerups, each with their own effects and strategies. You can mix and match different cars, drivers, and powerups to create your own style and strategy.</p>
- <p>Beach Buggy Racing 2 is a game that is easy to play but hard to master. You can choose from different control options: tilt, touch, or gamepad. You can also adjust the sensitivity and the steering assist. The basic controls are: accelerate, brake, steer, drift, use powerup, use driver ability. The tips for racing are: use drift to take sharp turns and fill up your boost meter; use boost to speed up and overtake your opponents; use powerups wisely and strategically; use driver ability at the right time and situation; avoid obstacles and traps; collect coins and gems; look for shortcuts and secrets.</p>
- <p>Beach Buggy Racing 2 is a game that you should play if you love kart racing games. You will enjoy the fun and addictive gameplay, the stunning graphics and sound effects, and the replay value and the updates. You will also have fun playing with your friends or other players online. You will also be able to customize your cars, drivers, and powerups to suit your preferences and style.</p>
- <p>If you are ready to join the Beach Buggy Racing League and have a blast on the island, download Beach Buggy Racing 2 from Microsoft Store today and start your engine!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Beach Buggy Racing 2:</p>
- <ol>
- <li>How can I get more coins and gems in the game?</li>
- <p>You can get more coins and gems by racing on different tracks, completing daily challenges, participating in special events, watching ads, or buying them with real money.</p>
- <li>How can I unlock more cars and drivers in the game?</li>
- <p>You can unlock more cars and drivers by progressing through the adventure mode, winning championships, opening chests, or buying them with coins or gems.</p>
- <li>How can I upgrade my cars and powerups in the game?</li>
- <p>You can upgrade your cars and powerups by using upgrade cards that you can get from chests, daily challenges, special events, or buying them with coins or gems.</p>
- <li>How can I join a team or create my own team in the game?</li>
- <p>You can join a team or create your own team by tapping on the team icon on the main menu. You can search for an existing team or create a new one with a name, a logo, and a description. You can also invite other players to join your team or accept invitations from other teams. You can chat with your team members, share tips and strategies, and compete in team tournaments.</p>
- <li>How can I contact the developers of the game or report a bug or a problem?</li>
- <p>You can contact the developers of the game or report a bug or a problem by tapping on the settings icon on the main menu. You can then tap on the help icon and choose from different options: FAQ, support, feedback, privacy policy, terms of service, credits. You can also visit their website at https://www.vectorunit.com/ or follow them on social media at https://www.facebook.com/VectorUnit/ or https://twitter.com/VectorUnit/.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Indonesia How to Download and Install Jai Guru Jinn Livery.md DELETED
@@ -1,113 +0,0 @@
-
- <h1>Bus Simulator Indonesia: How to Download and Install Jai Guru Livery</h1>
- <p>Do you love driving buses in realistic and authentic environments? Do you want to customize your bus with cool and fun designs? If yes, then you should try Bus Simulator Indonesia, a popular game that lets you experience what it likes being a bus driver in Indonesia. And if you are looking for a unique and stylish livery for your bus, then you should check out the Jai Guru livery, a beautiful and eye-catching design that will make your bus stand out from the crowd. In this article, we will tell you more about Bus Simulator Indonesia, Jai Guru livery, and how to download and install it in your game.</p>
- <h2>bus simulator indonesia jai guru livery download</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://urlin.us/2uSYjQ">https://urlin.us/2uSYjQ</a></b></p><br /><br />
- <h2>What is Bus Simulator Indonesia?</h2>
- <p>Bus Simulator Indonesia (aka BUSSID) is a game developed by Maleo, an Indonesian game studio. It was released in 2017 and has been updated regularly with new features and improvements. The game is available for Android and iOS devices, as well as PC via emulator. The game has over 100 million downloads on Google Play Store and has received positive reviews from players and critics.</p>
- <h3>Game features</h3>
- <p>Some of the top features of Bus Simulator Indonesia are:</p>
- <ul>
- <li><b>Design your own livery</b>: You can create your own livery for your bus using the template provided by the developer or using your own 3D model. You can also download and use livery from other players or creators.</li>
- <li><b>Very easy and intuitive control</b>: You can choose between tilt, steering wheel, or buttons to control your bus. You can also adjust the sensitivity and camera angle according to your preference.</li>
- <li><b>Authentic Indonesian cities and places</b>: You can drive your bus in various cities and places in Indonesia, such as Jakarta, Surabaya, Bali, Sumatra, Java, etc. You can also see landmarks, buildings, traffic signs, and other details that make the game more realistic.</li>
- <li><b>Variety of Indonesian buses with unique features</b>: You can choose from different types of buses, such as mini bus, double decker, articulated bus, etc. Each bus has its own characteristics, such as speed, handling, capacity, etc.</li>
- <li><b>Cool and fun honks</b>: You can honk your horn with different sounds, such as the iconic "Om Telolet Om!" honk that became viral on social media. You can also hear other buses honking back at you.</li>
- <li><b>High-quality and detailed 3D graphics</b>: The game has stunning graphics that show the beauty of Indonesia. You can see the shadows, reflections, weather effects, day and night cycle, etc.</li>
- <li><b>No obstructive ads while driving</b>: The game does not show ads while you are driving your bus. You can enjoy the game without any interruption or distraction.</li>
- <li><b>Leaderboard and online data saving</b>: You can compete with other players on the leaderboard based on your score and achievements. You can also save your data online so you don't lose your progress.</li>
- <li><b>Online multiplayer convoy</b>: You can join or create a convoy with other players online. You can chat with them, follow them, or challenge them.</li>
- </ul>
- <h3>Livery customization</h3>
- <p>One of the most fun features of Bus Simulator Indonesia is the livery customization. You can design your own livery for your bus using the template provided by the developer or using your own 3D model. You can also download and use livery from other players or creators. Livery is a term that refers to the paint scheme or design of a vehicle, especially a bus or a plane. Livery can be used to express your personality, style, or preference. You can also use livery to promote your brand, business, or cause. Livery can make your bus more attractive, unique, and recognizable.</p>
- <h2>What is Jai Guru Livery?</h2>
- <p>Jai Guru livery is a livery created by Jai Guru, a popular and talented livery maker in the BUSSID community. Jai Guru has made many liveries for different types of buses, such as Srikandi SHD, Jetbus 3+, Legacy SR2 XHD Prime, etc. Jai Guru livery is known for its high-quality, colorful, and artistic design. Jai Guru livery is also inspired by Indian culture and religion, as well as other themes and motifs.</p>
- <h3>Design and style</h3>
- <p>Jai Guru livery has a distinctive design and style that makes it stand out from other liveries. Some of the features of Jai Guru livery are:</p>
- <ul>
- <li><b>Bright and vibrant colors</b>: Jai Guru livery uses a combination of bright and vibrant colors, such as red, yellow, green, blue, purple, etc. The colors create a contrast and harmony that make the livery more eye-catching and appealing.</li>
28
- <li><b>Indian symbols and images</b>: Jai Guru livery incorporates various symbols and images from Indian culture and religion, such as the Om sign, the lotus flower, the elephant, the peacock, etc. The symbols and images represent different meanings and values, such as peace, wisdom, prosperity, beauty, etc.</li>
29
- <li><b>Floral and geometric patterns</b>: Jai Guru livery also uses floral and geometric patterns to decorate the bus. The patterns add more detail and texture to the livery. The patterns are also influenced by Indian art and architecture.</li>
30
- <li><b>Texts and slogans</b>: Jai Guru livery also includes texts and slogans on the bus. The texts and slogans are usually in Hindi or English. They can be the name of the bus company, the destination of the bus, or a message to the passengers or other drivers.</li>
31
- </ul>
32
- <h3>Download link and credit</h3>
33
- <p>If you want to download and use Jai Guru livery in your game, you can find the download link on Jai Guru's YouTube channel or Facebook page. You can also find other liveries made by Jai Guru on these platforms. Please note that you need to have the compatible bus model in your game before you can use the livery. You can also download the bus model from Jai Guru's channel or page.</p>
34
- <p>When you download and use Jai Guru livery, please give credit to Jai Guru as the original creator of the livery. Do not claim the livery as your own or modify it without permission from Jai Guru. Do not upload or share the livery on other platforms without giving proper credit to Jai Guru. Respect the work and effort of Jai Guru and support him by subscribing to his channel or liking his page.</p>
35
- <h2>How to Install Jai Guru Livery in Bus Simulator Indonesia?</h2>
36
- <p>Installing Jai Guru livery in Bus Simulator Indonesia is easy and simple. Just follow these steps:</p>
86
- <h3>Step 1: Download the livery file</h3>
87
- <p>The first step is to download the livery file from Jai Guru's channel or page. The file will be in .bussid format, which is a special format for BUSSID liveries. The file size will vary depending on the type of bus and the complexity of the design.</p>
88
- <h3>Step 2: Move the livery file to the BUSSID folder</h3>
89
- <p>The next step is to move the livery file to the BUSSID folder on your device. You can use any file manager app to do this. The BUSSID folder is usually located in Internal Storage > Android > data > com.maleo.bussimulatorid > files > BUSSID.</p>
90
- <h3>Step 3: Open the game and select the garage menu</h3>
91
- <p>The third step is to open Bus Simulator Indonesia on your device and select the garage menu from the main menu. The garage menu is where you can choose and customize your bus.</p>
92
- <h3>Step 4: Select the livery file menu and click BUSSID file manager</h3>
93
- <p>The fourth step is to select the livery file menu from the garage menu. The livery file menu is where you can see the list of livery files that you have downloaded or created. From the livery file menu, click on the BUSSID file manager button. The BUSSID file manager is where you can access the BUSSID folder and see the livery files that you have moved there.</p>
94
- <h3>Step 5: Choose the livery you want to use and click open</h3>
95
- <p>The final step is to choose the Jai Guru livery that you want to use for your bus and click on the open button. The game will load the livery and apply it to your bus. You can see the preview of your bus with the Jai Guru livery on the screen. You can also change the color, accessories, or other features of your bus if you want. When you are satisfied with your bus, click on the save button and exit the garage menu.</p>
96
- <h2>Conclusion</h2>
97
- <p>Bus Simulator Indonesia is a fun and realistic game that lets you drive buses in Indonesia. You can also customize your bus with different liveries, such as the Jai Guru livery, a beautiful and eye-catching design inspired by Indian culture and religion. To download and install Jai Guru livery in your game, you just need to follow five simple steps: download the livery file, move it to the BUSSID folder, open the game and select the garage menu, select the livery file menu and click BUSSID file manager, and choose the livery you want to use and click open. Enjoy your bus with Jai Guru livery and have a safe and happy journey!</p>
98
- <h2>FAQs</h2>
99
- <p>Here are some frequently asked questions about Bus Simulator Indonesia and Jai Guru livery:</p>
100
- <ul>
101
- <li><b>Q: How can I get more buses in Bus Simulator Indonesia?</b></li>
102
- <li><b>A: You can get more buses in Bus Simulator Indonesia by buying them with coins or diamonds. You can earn coins or diamonds by playing the game, completing missions, watching ads, or buying them with real money.</b></li>
103
- <li><b>Q: How can I create my own livery in Bus Simulator Indonesia?</b></li>
104
- <li><b>A: You can create your own livery in Bus Simulator Indonesia by using the template provided by the developer or using your own 3D model. You can find the template and instructions on how to use it on Maleo's website or YouTube channel.</b></li>
105
- <li><b>Q: How can I share my livery with other players in Bus Simulator Indonesia?</b></li>
106
- <li><b>A: You can share your livery with other players in Bus Simulator Indonesia by uploading it to Maleo's website or any other platform that supports .bussid files. You can also join online multiplayer convoys and show off your livery to other players.</b></li>
107
- <li><b>Q: How can I contact Jai Guru or request a custom livery from him?</b></li>
108
- <li><b>A: You can contact Jai Guru or request a custom livery from him by sending him a message on his YouTube channel or Facebook page. He will reply to you as soon as possible.</b></li>
109
- <li><b>Q: How can I support Jai Guru and his work?</b></li>
110
- <li><b>A: You can support Jai Guru and his work by subscribing to his YouTube channel, liking his Facebook page, giving him feedback, sharing his liveries with others, and donating to him if you want.</b></li>
111
- </ul>
 
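The five install steps above amount to one file operation: copy the downloaded `.bussid` file into the game's data directory from Step 2, then load it from the in-game BUSSID file manager. A minimal sketch of that copy, assuming an on-device Python environment (e.g. Termux) and a hypothetical downloaded file name:

```python
import shutil
from pathlib import Path

# Hypothetical download location and file name; the BUSSID path is the stock one from Step 2.
livery = Path("/sdcard/Download/jai_guru_livery.bussid")
bussid_dir = Path("/sdcard/Android/data/com.maleo.bussimulatorid/files/BUSSID")

bussid_dir.mkdir(parents=True, exist_ok=True)   # create the folder if the game hasn't yet
shutil.copy2(livery, bussid_dir / livery.name)  # copy, preserving timestamps
print(f"Copied {livery.name}; load it in the garage via the BUSSID file manager.")
```
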
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/onnx_ijbc.py DELETED
@@ -1,267 +0,0 @@
1
- import argparse
2
- import os
3
- import pickle
4
- import timeit
5
-
6
- import cv2
7
- import mxnet as mx
8
- import numpy as np
9
- import pandas as pd
10
- import prettytable
11
- import skimage.transform
12
- from sklearn.metrics import roc_curve
13
- from sklearn.preprocessing import normalize
14
-
15
- from onnx_helper import ArcFaceORT
16
-
17
- SRC = np.array(
18
- [
19
- [30.2946, 51.6963],
20
- [65.5318, 51.5014],
21
- [48.0252, 71.7366],
22
- [33.5493, 92.3655],
23
- [62.7299, 92.2041]]
24
- , dtype=np.float32)
25
- SRC[:, 0] += 8.0
26
-
27
-
28
- class AlignedDataSet(mx.gluon.data.Dataset):
29
- def __init__(self, root, lines, align=True):
30
- self.lines = lines
31
- self.root = root
32
- self.align = align
33
-
34
- def __len__(self):
35
- return len(self.lines)
36
-
37
- def __getitem__(self, idx):
38
- each_line = self.lines[idx]
39
- name_lmk_score = each_line.strip().split(' ')
40
- name = os.path.join(self.root, name_lmk_score[0])
41
- img = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
42
- landmark5 = np.array([float(x) for x in name_lmk_score[1:-1]], dtype=np.float32).reshape((5, 2))
43
- st = skimage.transform.SimilarityTransform()
44
- st.estimate(landmark5, SRC)
45
- img = cv2.warpAffine(img, st.params[0:2, :], (112, 112), borderValue=0.0)
46
- img_1 = np.expand_dims(img, 0)
47
- img_2 = np.expand_dims(np.fliplr(img), 0)
48
- output = np.concatenate((img_1, img_2), axis=0).astype(np.float32)
49
- output = np.transpose(output, (0, 3, 1, 2))
50
- output = mx.nd.array(output)
51
- return output
52
-
53
-
54
- def extract(model_root, dataset):
55
- model = ArcFaceORT(model_path=model_root)
56
- model.check()
57
- feat_mat = np.zeros(shape=(len(dataset), 2 * model.feat_dim))
58
-
59
- def batchify_fn(data):
60
- return mx.nd.concat(*data, dim=0)
61
-
62
- data_loader = mx.gluon.data.DataLoader(
63
- dataset, 128, last_batch='keep', num_workers=4,
64
- thread_pool=True, prefetch=16, batchify_fn=batchify_fn)
65
- num_iter = 0
66
- for batch in data_loader:
67
- batch = batch.asnumpy()
68
- batch = (batch - model.input_mean) / model.input_std
69
- feat = model.session.run(model.output_names, {model.input_name: batch})[0]
70
- feat = np.reshape(feat, (-1, model.feat_dim * 2))
71
- feat_mat[128 * num_iter: 128 * num_iter + feat.shape[0], :] = feat
72
- num_iter += 1
73
- if num_iter % 50 == 0:
74
- print(num_iter)
75
- return feat_mat
76
-
77
-
78
- def read_template_media_list(path):
79
- ijb_meta = pd.read_csv(path, sep=' ', header=None).values
80
- templates = ijb_meta[:, 1].astype(int)  # np.int was removed in NumPy 1.24; plain int is equivalent
81
- medias = ijb_meta[:, 2].astype(int)
82
- return templates, medias
83
-
84
-
85
- def read_template_pair_list(path):
86
- pairs = pd.read_csv(path, sep=' ', header=None).values
87
- t1 = pairs[:, 0].astype(int)
88
- t2 = pairs[:, 1].astype(int)
89
- label = pairs[:, 2].astype(int)
90
- return t1, t2, label
91
-
92
-
93
- def read_image_feature(path):
94
- with open(path, 'rb') as fid:
95
- img_feats = pickle.load(fid)
96
- return img_feats
97
-
98
-
99
- def image2template_feature(img_feats=None,
100
- templates=None,
101
- medias=None):
102
- unique_templates = np.unique(templates)
103
- template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
104
- for count_template, uqt in enumerate(unique_templates):
105
- (ind_t,) = np.where(templates == uqt)
106
- face_norm_feats = img_feats[ind_t]
107
- face_medias = medias[ind_t]
108
- unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True)
109
- media_norm_feats = []
110
- for u, ct in zip(unique_medias, unique_media_counts):
111
- (ind_m,) = np.where(face_medias == u)
112
- if ct == 1:
113
- media_norm_feats += [face_norm_feats[ind_m]]
114
- else: # image features from the same video will be aggregated into one feature
115
- media_norm_feats += [np.mean(face_norm_feats[ind_m], axis=0, keepdims=True), ]
116
- media_norm_feats = np.array(media_norm_feats)
117
- template_feats[count_template] = np.sum(media_norm_feats, axis=0)
118
- if count_template % 2000 == 0:
119
- print('Finish Calculating {} template features.'.format(
120
- count_template))
121
- template_norm_feats = normalize(template_feats)
122
- return template_norm_feats, unique_templates
123
-
124
-
125
- def verification(template_norm_feats=None,
126
- unique_templates=None,
127
- p1=None,
128
- p2=None):
129
- template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
130
- for count_template, uqt in enumerate(unique_templates):
131
- template2id[uqt] = count_template
132
- score = np.zeros((len(p1),))
133
- total_pairs = np.array(range(len(p1)))
134
- batchsize = 100000
135
- sublists = [total_pairs[i: i + batchsize] for i in range(0, len(p1), batchsize)]
136
- total_sublists = len(sublists)
137
- for c, s in enumerate(sublists):
138
- feat1 = template_norm_feats[template2id[p1[s]]]
139
- feat2 = template_norm_feats[template2id[p2[s]]]
140
- similarity_score = np.sum(feat1 * feat2, -1)
141
- score[s] = similarity_score.flatten()
142
- if c % 10 == 0:
143
- print('Finish {}/{} pairs.'.format(c, total_sublists))
144
- return score
145
-
146
-
147
- def verification2(template_norm_feats=None,
148
- unique_templates=None,
149
- p1=None,
150
- p2=None):
151
- template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
152
- for count_template, uqt in enumerate(unique_templates):
153
- template2id[uqt] = count_template
154
- score = np.zeros((len(p1),)) # save cosine distance between pairs
155
- total_pairs = np.array(range(len(p1)))
156
- batchsize = 100000 # small batch size instead of all pairs in one batch due to the memory limitation
157
- sublists = [total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)]
158
- total_sublists = len(sublists)
159
- for c, s in enumerate(sublists):
160
- feat1 = template_norm_feats[template2id[p1[s]]]
161
- feat2 = template_norm_feats[template2id[p2[s]]]
162
- similarity_score = np.sum(feat1 * feat2, -1)
163
- score[s] = similarity_score.flatten()
164
- if c % 10 == 0:
165
- print('Finish {}/{} pairs.'.format(c, total_sublists))
166
- return score
167
-
168
-
169
- def main(args):
170
- use_norm_score = True # if True, TestMode(N1)
171
- use_detector_score = True # if True, TestMode(D1)
172
- use_flip_test = True # if True, TestMode(F1)
173
- assert args.target == 'IJBC' or args.target == 'IJBB'
174
-
175
- start = timeit.default_timer()
176
- templates, medias = read_template_media_list(
177
- os.path.join('%s/meta' % args.image_path, '%s_face_tid_mid.txt' % args.target.lower()))
178
- stop = timeit.default_timer()
179
- print('Time: %.2f s. ' % (stop - start))
180
-
181
- start = timeit.default_timer()
182
- p1, p2, label = read_template_pair_list(
183
- os.path.join('%s/meta' % args.image_path,
184
- '%s_template_pair_label.txt' % args.target.lower()))
185
- stop = timeit.default_timer()
186
- print('Time: %.2f s. ' % (stop - start))
187
-
188
- start = timeit.default_timer()
189
- img_path = '%s/loose_crop' % args.image_path
190
- img_list_path = '%s/meta/%s_name_5pts_score.txt' % (args.image_path, args.target.lower())
191
- img_list = open(img_list_path)
192
- files = img_list.readlines()
193
- dataset = AlignedDataSet(root=img_path, lines=files, align=True)
194
- img_feats = extract(args.model_root, dataset)
195
-
196
- faceness_scores = []
197
- for each_line in files:
198
- name_lmk_score = each_line.split()
199
- faceness_scores.append(name_lmk_score[-1])
200
- faceness_scores = np.array(faceness_scores).astype(np.float32)
201
- stop = timeit.default_timer()
202
- print('Time: %.2f s. ' % (stop - start))
203
- print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0], img_feats.shape[1]))
204
- start = timeit.default_timer()
205
-
206
- if use_flip_test:
207
- img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] + img_feats[:, img_feats.shape[1] // 2:]
208
- else:
209
- img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2]
210
-
211
- if use_norm_score:
212
- img_input_feats = img_input_feats
213
- else:
214
- img_input_feats = img_input_feats / np.sqrt(np.sum(img_input_feats ** 2, -1, keepdims=True))
215
-
216
- if use_detector_score:
217
- print(img_input_feats.shape, faceness_scores.shape)
218
- img_input_feats = img_input_feats * faceness_scores[:, np.newaxis]
219
- else:
220
- img_input_feats = img_input_feats
221
-
222
- template_norm_feats, unique_templates = image2template_feature(
223
- img_input_feats, templates, medias)
224
- stop = timeit.default_timer()
225
- print('Time: %.2f s. ' % (stop - start))
226
-
227
- start = timeit.default_timer()
228
- score = verification(template_norm_feats, unique_templates, p1, p2)
229
- stop = timeit.default_timer()
230
- print('Time: %.2f s. ' % (stop - start))
231
- save_path = os.path.join(args.result_dir, "{}_result".format(args.target))
232
- if not os.path.exists(save_path):
233
- os.makedirs(save_path)
234
- score_save_file = os.path.join(save_path, "{}.npy".format(args.model_root))
235
- np.save(score_save_file, score)
236
- files = [score_save_file]
237
- methods = []
238
- scores = []
239
- for file in files:
240
- methods.append(os.path.basename(file))
241
- scores.append(np.load(file))
242
- methods = np.array(methods)
243
- scores = dict(zip(methods, scores))
244
- x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]
245
- tpr_fpr_table = prettytable.PrettyTable(['Methods'] + [str(x) for x in x_labels])
246
- for method in methods:
247
- fpr, tpr, _ = roc_curve(label, scores[method])
248
- fpr = np.flipud(fpr)
249
- tpr = np.flipud(tpr)
250
- tpr_fpr_row = []
251
- tpr_fpr_row.append("%s-%s" % (method, args.target))
252
- for fpr_iter in np.arange(len(x_labels)):
253
- _, min_index = min(
254
- list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
255
- tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
256
- tpr_fpr_table.add_row(tpr_fpr_row)
257
- print(tpr_fpr_table)
258
-
259
-
260
- if __name__ == '__main__':
261
- parser = argparse.ArgumentParser(description='do ijb test')
262
- # general
263
- parser.add_argument('--model-root', default='', help='path to load model.')
264
- parser.add_argument('--image-path', default='', type=str, help='')
265
- parser.add_argument('--result-dir', default='.', type=str, help='')
266
- parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB')
267
- main(parser.parse_args())
 
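The script above ends by reporting TPR at fixed FPR targets, picking the ROC point whose FPR is closest to each target. A self-contained sketch of that lookup on synthetic scores (the label/score arrays are illustrative stand-ins, not IJB-C data):

```python
import numpy as np
from sklearn.metrics import roc_curve

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=10_000)              # stand-in match / non-match labels
scores = labels + rng.normal(scale=0.8, size=10_000)  # stand-in similarity scores

fpr, tpr, _ = roc_curve(labels, scores)
for target in (1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1):
    idx = np.argmin(np.abs(fpr - target))             # nearest available FPR, as in the script
    print(f"TPR @ FPR={target:g}: {tpr[idx] * 100:.2f}%")
```
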
spaces/8star/DeepDanbooru_string/app.py DELETED
@@ -1,185 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- from __future__ import annotations
4
-
5
- import argparse
6
- import functools
7
- import os
8
- import html
9
- import pathlib
10
- import tarfile
11
-
12
- import deepdanbooru as dd
13
- import gradio as gr
14
- import huggingface_hub
15
- import numpy as np
16
- import PIL.Image
17
- import tensorflow as tf
18
- import piexif
19
- import piexif.helper
20
-
21
- TITLE = 'DeepDanbooru String'
22
-
23
- TOKEN = os.environ['TOKEN']
24
- MODEL_REPO = 'CikeyQI/DeepDanbooru_string'
25
- MODEL_FILENAME = 'model-resnet_custom_v3.h5'
26
- LABEL_FILENAME = 'tags.txt'
27
-
28
-
29
- def parse_args() -> argparse.Namespace:
30
- parser = argparse.ArgumentParser()
31
- parser.add_argument('--score-slider-step', type=float, default=0.05)
32
- parser.add_argument('--score-threshold', type=float, default=0.5)
33
- parser.add_argument('--theme', type=str, default='dark-grass')
34
- parser.add_argument('--live', action='store_true')
35
- parser.add_argument('--share', action='store_true')
36
- parser.add_argument('--port', type=int)
37
- parser.add_argument('--disable-queue',
38
- dest='enable_queue',
39
- action='store_false')
40
- parser.add_argument('--allow-flagging', type=str, default='never')
41
- return parser.parse_args()
42
-
43
-
44
- def load_sample_image_paths() -> list[pathlib.Path]:
45
- image_dir = pathlib.Path('images')
46
- if not image_dir.exists():
47
- dataset_repo = 'hysts/sample-images-TADNE'
48
- path = huggingface_hub.hf_hub_download(dataset_repo,
49
- 'images.tar.gz',
50
- repo_type='dataset',
51
- use_auth_token=TOKEN)
52
- with tarfile.open(path) as f:
53
- f.extractall()
54
- return sorted(image_dir.glob('*'))
55
-
56
-
57
- def load_model() -> tf.keras.Model:
58
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
59
- MODEL_FILENAME,
60
- use_auth_token=TOKEN)
61
- model = tf.keras.models.load_model(path)
62
- return model
63
-
64
-
65
- def load_labels() -> list[str]:
66
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
67
- LABEL_FILENAME,
68
- use_auth_token=TOKEN)
69
- with open(path) as f:
70
- labels = [line.strip() for line in f.readlines()]
71
- return labels
72
-
73
- def plaintext_to_html(text):
74
- text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
75
- return text
76
-
77
- def predict(image: PIL.Image.Image, score_threshold: float,
78
- model: tf.keras.Model, labels: list[str]) -> dict[str, float]:
79
- rawimage = image
80
- _, height, width, _ = model.input_shape
81
- image = np.asarray(image)
82
- image = tf.image.resize(image,
83
- size=(height, width),
84
- method=tf.image.ResizeMethod.AREA,
85
- preserve_aspect_ratio=True)
86
- image = image.numpy()
87
- image = dd.image.transform_and_pad_image(image, width, height)
88
- image = image / 255.
89
- probs = model.predict(image[None, ...])[0]
90
- probs = probs.astype(float)
91
- res = dict()
92
- for prob, label in zip(probs.tolist(), labels):
93
- if prob < score_threshold:
94
- continue
95
- res[label] = prob
96
- b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True))
97
- a = ', '.join(list(b.keys())).replace('_', ' ').replace('(', '\\(').replace(')', '\\)')  # explicit backslash escapes avoid invalid-escape warnings
98
- c = ', '.join(list(b.keys()))
99
-
100
- items = rawimage.info
101
- geninfo = ''
102
-
103
- if "exif" in rawimage.info:
104
- exif = piexif.load(rawimage.info["exif"])
105
- exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
106
- try:
107
- exif_comment = piexif.helper.UserComment.load(exif_comment)
108
- except ValueError:
109
- exif_comment = exif_comment.decode('utf8', errors="ignore")
110
-
111
- items['exif comment'] = exif_comment
112
- geninfo = exif_comment
113
-
114
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
115
- 'loop', 'background', 'timestamp', 'duration']:
116
- items.pop(field, None)
117
-
118
- geninfo = items.get('parameters', geninfo)
119
-
120
- info = f"""
121
- <p><h4>PNG Info</h4></p>
122
- """
123
- for key, text in items.items():
124
- info += f"""
125
- <div>
126
- <p><b>{plaintext_to_html(str(key))}</b></p>
127
- <p>{plaintext_to_html(str(text))}</p>
128
- </div>
129
- """.strip()+"\n"
130
-
131
- if len(items) == 0:  # no metadata fields were found in the image
132
- message = "Nothing found in the image."
133
- info = f"<div><p>{message}</p></div>"
134
-
135
- return (a,c,res,info)
136
-
137
-
138
- def main():
139
- args = parse_args()
140
- model = load_model()
141
- labels = load_labels()
142
-
143
- func = functools.partial(predict, model=model, labels=labels)
144
- func = functools.update_wrapper(func, predict)
145
-
146
- gr.Interface(
147
- func,
148
- [
149
- gr.inputs.Image(type='pil', label='Input'),
150
- gr.inputs.Slider(0,
151
- 1,
152
- step=args.score_slider_step,
153
- default=args.score_threshold,
154
- label='Score Threshold'),
155
- ],
156
- [
157
- gr.outputs.Textbox(label='Output (string)'),
158
- gr.outputs.Textbox(label='Output (raw string)'),
159
- gr.outputs.Label(label='Output (label)'),
160
- gr.outputs.HTML()
161
- ],
162
- examples=[
163
- ['miku.jpg',0.5],
164
- ['miku2.jpg',0.5]
165
- ],
166
- title=TITLE,
167
- description='''
168
- Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer.
169
-
170
- Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)
171
-
172
- PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
173
- ''',
174
- theme=args.theme,
175
- allow_flagging=args.allow_flagging,
176
- live=args.live,
177
- ).launch(
178
- enable_queue=args.enable_queue,
179
- server_port=args.port,
180
- share=args.share,
181
- )
182
-
183
-
184
- if __name__ == '__main__':
185
- main()
 
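`predict` above pulls generation parameters out of the EXIF `UserComment` field with piexif. A minimal sketch of just that extraction, assuming a local `sample.jpg`:

```python
import piexif
import piexif.helper
from PIL import Image

image = Image.open("sample.jpg")  # assumed local file
comment = ""
if "exif" in image.info:
    exif = piexif.load(image.info["exif"])
    raw = exif.get("Exif", {}).get(piexif.ExifIFD.UserComment, b"")
    try:
        comment = piexif.helper.UserComment.load(raw)  # strips the EXIF charset prefix
    except ValueError:
        comment = raw.decode("utf8", errors="ignore")
print(comment or "no UserComment found")
```
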
spaces/AI4PD/hexviz/README.md DELETED
@@ -1,36 +0,0 @@
1
- ---
2
- title: Hexviz
3
- emoji: 👁️🧬
4
- colorFrom: green
5
- colorTo: purple
6
- sdk: streamlit
7
- sdk_version: 1.17.0
8
- python_version: 3.10.5
9
- app_file: ./hexviz/🧬Attention_Visualization.py
10
- pinned: true
11
- tags:
12
- - protein language models
13
- - attention analysis
14
- - protein structure
15
- - biology
16
- ---
17
- # hexviz
18
- Visualize attention pattern on 3D protein structures
19
-
20
- ## Install and run
21
-
22
- ```shell
23
- poetry install
24
-
25
- poetry run streamlit run hexviz/streamlit/Attention_On_Structure.py
26
- ```
27
-
28
- ## Export dependencies from poetry
29
- Spaces [require](https://huggingface.co/docs/hub/spaces-dependencies#adding-your-own-dependencies) dependencies in a `requirements.txt` file. Export dependencies from poetry's `pyproject.toml` file with:
30
- ```shell
31
- poetry export -f requirements.txt --output requirements.txt --without-hashes
32
- ```
33
-
34
- ## Acknowledgements
35
- This project builds on the attention visualization introduced and developed in
36
- https://github.com/salesforce/provis#provis-attention-visualizer
 
spaces/AIConsultant/MusicGen/audiocraft/modules/streaming.py DELETED
@@ -1,131 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Streaming module API that should be implemented by all Streaming components,
9
- """
10
-
11
- from contextlib import contextmanager
12
- import typing as tp
13
- from torch import nn
14
- import torch
15
-
16
-
17
- State = tp.Dict[str, torch.Tensor]
18
-
19
-
20
- class StreamingModule(nn.Module):
21
- """Common API for streaming components.
22
-
23
- Each streaming component has a streaming state, which is just a dict[str, Tensor].
24
- By convention, the first dim of each tensor must be the batch size.
25
- Don't use dots in the key names, as this would clash with submodules
26
- (like in state_dict).
27
-
28
- If `self._is_streaming` is True, the component should use and remember
29
- the proper state inside `self._streaming_state`.
30
-
31
- To set a streaming component in streaming state, use
32
-
33
- with module.streaming():
34
- ...
35
-
36
- This will automatically reset the streaming state when exiting the context manager.
37
- This also automatically propagates to all streaming child modules.
38
- 
39
- Some modules might also implement the `StreamingModule.flush` method, although
40
- this one is trickier, as all parent modules must be StreamingModule and implement
41
- it as well for it to work properly. See `StreamingSequential` below.
42
- """
43
- def __init__(self) -> None:
44
- super().__init__()
45
- self._streaming_state: State = {}
46
- self._is_streaming = False
47
-
48
- def _apply_named_streaming(self, fn: tp.Any):
49
- for name, module in self.named_modules():
50
- if isinstance(module, StreamingModule):
51
- fn(name, module)
52
-
53
- def _set_streaming(self, streaming: bool):
54
- def _set_streaming(name, module):
55
- module._is_streaming = streaming
56
- self._apply_named_streaming(_set_streaming)
57
-
58
- @contextmanager
59
- def streaming(self):
60
- """Context manager to enter streaming mode. Reset streaming state on exit."""
61
- self._set_streaming(True)
62
- try:
63
- yield
64
- finally:
65
- self._set_streaming(False)
66
- self.reset_streaming()
67
-
68
- def reset_streaming(self):
69
- """Reset the streaming state."""
70
- def _reset(name: str, module: StreamingModule):
71
- module._streaming_state.clear()
72
-
73
- self._apply_named_streaming(_reset)
74
-
75
- def get_streaming_state(self) -> State:
76
- """Return the streaming state, including that of sub-modules."""
77
- state: State = {}
78
-
79
- def _add(name: str, module: StreamingModule):
80
- if name:
81
- name += "."
82
- for key, value in module._streaming_state.items():
83
- state[name + key] = value
84
-
85
- self._apply_named_streaming(_add)
86
- return state
87
-
88
- def set_streaming_state(self, state: State):
89
- """Set the streaming state, including that of sub-modules."""
90
- state = dict(state)
91
-
92
- def _set(name: str, module: StreamingModule):
93
- if name:
94
- name += "."
95
- module._streaming_state.clear()
96
- for key, value in list(state.items()):
97
- # complexity is not ideal here, but probably fine.
98
- if key.startswith(name):
99
- local_key = key[len(name):]
100
- if '.' not in local_key:
101
- module._streaming_state[local_key] = value
102
- del state[key]
103
-
104
- self._apply_named_streaming(_set)
105
- assert len(state) == 0, list(state.keys())
106
-
107
- def flush(self, x: tp.Optional[torch.Tensor] = None):
108
- """Flush any remaining outputs that were waiting for completion.
109
- Typically, for convolutions, this will add the final padding
110
- and process the last buffer.
111
-
112
- This should take an optional argument `x`, which will be provided
113
- if a module before this one in the streaming pipeline has already
114
- spitted out a flushed out buffer.
115
- """
116
- if x is None:
117
- return None
118
- else:
119
- return self(x)
120
-
121
-
122
- class StreamingSequential(StreamingModule, nn.Sequential):
123
- """A streaming compatible alternative of `nn.Sequential`.
124
- """
125
- def flush(self, x: tp.Optional[torch.Tensor] = None):
126
- for module in self:
127
- if isinstance(module, StreamingModule):
128
- x = module.flush(x)
129
- elif x is not None:
130
- x = module(x)
131
- return x
 
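The class docstring above says each component stores per-batch tensors in `_streaming_state` while `streaming()` is active, and that the state is cleared on exit. A toy subclass to illustrate, assuming the `StreamingModule` definition above is in scope:

```python
import torch

class CacheLast(StreamingModule):
    """Adds the previous streamed chunk to the current one."""
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._is_streaming:
            prev = self._streaming_state.get("prev", torch.zeros_like(x))
            self._streaming_state["prev"] = x
            return x + prev
        return x

m = CacheLast()
with m.streaming():
    m(torch.ones(2, 3))
    print(m.get_streaming_state())  # {'prev': tensor([...])} while streaming
print(m.get_streaming_state())      # {} -- reset on context exit
```
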
spaces/AIZeroToHero/Video-Automatic-Speech-Recognition/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Video Automatic Speech Recognition
3
- emoji: 💻
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: streamlit
7
- sdk_version: 1.10.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AIatUIUC/CodeLATS/executors/py_executor.py DELETED
@@ -1,88 +0,0 @@
1
- import ast
2
- import signal
3
- import astunparse
4
-
5
- from .executor_utils import function_with_timeout
6
-
7
- from typing import List
8
- from .executor_types import ExecuteResult, Executor
9
-
10
- class PyExecutor(Executor):
11
- def execute(self, func: str, tests: List[str], timeout: int = 5) -> ExecuteResult:
12
- # Combine function code and assert statement
13
- imports = 'from typing import *'
14
- func_test_list = [f'{imports}\n{func}\n{test}' for test in tests]
15
-
16
- # Run the tests and collect the results
17
- success_tests = []
18
- failed_tests = []
19
- is_passing = True
20
- num_tests = len(func_test_list)
21
- for i in range(num_tests):
22
- try:
23
-
24
- function_with_timeout(exec, (func_test_list[i], globals()), timeout)
25
-
26
- success_tests += [tests[i]]
27
- except Exception:
28
- output = get_output(func, tests[i], timeout=timeout)
29
- failed_tests += [f"{tests[i]} # output: {output}"]
30
- is_passing = False
31
-
32
- state = []
33
- for test in tests:
34
- if test in success_tests:
35
- state += [True]
36
- else:
37
- state += [False]
38
-
39
- state = tuple(state)
40
-
41
- feedback = "Tests passed:"
42
- for test in success_tests:
43
- feedback += f"\n{test}"
44
- feedback += "\n\nTests failed:"
45
- for test in failed_tests:
46
- feedback += f"\n{test}"
47
-
48
- return ExecuteResult(is_passing, feedback, state)
49
-
50
- def evaluate(self, name: str, func: str, test: str, timeout: int = 5) -> bool:
51
- """
52
- Evaluates the implementation on Human-Eval Python.
53
-
54
- probably should be written in a dataset-agnostic way but not now
55
- """
56
- code = f"""{func}
57
-
58
- {test}
59
-
60
- check({name})
61
- """
62
- try:
63
-
64
- function_with_timeout(exec, (code, globals()), timeout)
65
-
66
- return True
67
- except Exception:
68
- return False
69
-
70
- def get_call_str(assert_statement: str) -> str:
71
- ast_parsed = ast.parse(assert_statement)
72
- try:
73
- call_str = ast_parsed.body[0].test.left # type: ignore
74
- except:
75
- call_str = ast_parsed.body[0].test # type: ignore
76
-
77
- return astunparse.unparse(call_str).strip()
78
-
79
- def get_output(func: str, assert_statement: str, timeout: int = 5) -> str:
80
- try:
81
- exec(f"from typing import *\n{func}", globals())
82
- func_call = get_call_str(assert_statement)
83
- output = function_with_timeout(eval, (func_call, globals()), timeout)
84
- return output
85
- except TimeoutError:
86
- return "TIMEOUT"
87
- except Exception as e:
88
- return str(e)
 
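`function_with_timeout` is imported from `executor_utils`, which is not part of this diff. One plausible shape for it, sketched with a worker thread and a join deadline (an assumption, not the repo's actual implementation):

```python
import threading

def function_with_timeout(func, args, timeout):
    """Run func(*args); raise TimeoutError if it takes longer than `timeout` seconds.
    Note: a Python thread cannot be force-killed, so a hung func keeps running."""
    result, error = [], []

    def worker():
        try:
            result.append(func(*args))
        except Exception as e:
            error.append(e)

    t = threading.Thread(target=worker, daemon=True)
    t.start()
    t.join(timeout)
    if t.is_alive():
        raise TimeoutError(f"timed out after {timeout}s")
    if error:
        raise error[0]
    return result[0]
```
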
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/api.py DELETED
@@ -1,269 +0,0 @@
1
- from enum import Enum, unique
2
-
3
- import cv2
4
- import torch
5
- from basicsr.utils import img2tensor
6
- from ldm.util import resize_numpy_image
7
- from PIL import Image
8
- from torch import autocast
9
-
10
-
11
- @unique
12
- class ExtraCondition(Enum):
13
- sketch = 0
14
- keypose = 1
15
- seg = 2
16
- depth = 3
17
- canny = 4
18
- style = 5
19
- color = 6
20
- openpose = 7
21
-
22
-
23
- def get_cond_model(opt, cond_type: ExtraCondition):
24
- if cond_type == ExtraCondition.sketch:
25
- from ldm.modules.extra_condition.model_edge import pidinet
26
- model = pidinet()
27
- ckp = torch.load('models/table5_pidinet.pth', map_location='cpu')['state_dict']
28
- model.load_state_dict({k.replace('module.', ''): v for k, v in ckp.items()}, strict=True)
29
- model.to(opt.device)
30
- return model
31
- elif cond_type == ExtraCondition.seg:
32
- raise NotImplementedError
33
- elif cond_type == ExtraCondition.keypose:
34
- import mmcv
35
- from mmdet.apis import init_detector
36
- from mmpose.apis import init_pose_model
37
- det_config = 'configs/mm/faster_rcnn_r50_fpn_coco.py'
38
- det_checkpoint = 'models/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
39
- pose_config = 'configs/mm/hrnet_w48_coco_256x192.py'
40
- pose_checkpoint = 'models/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
41
- det_config_mmcv = mmcv.Config.fromfile(det_config)
42
- det_model = init_detector(det_config_mmcv, det_checkpoint, device=opt.device)
43
- pose_config_mmcv = mmcv.Config.fromfile(pose_config)
44
- pose_model = init_pose_model(pose_config_mmcv, pose_checkpoint, device=opt.device)
45
- return {'pose_model': pose_model, 'det_model': det_model}
46
- elif cond_type == ExtraCondition.depth:
47
- from ldm.modules.extra_condition.midas.api import MiDaSInference
48
- model = MiDaSInference(model_type='dpt_hybrid').to(opt.device)
49
- return model
50
- elif cond_type == ExtraCondition.canny:
51
- return None
52
- elif cond_type == ExtraCondition.style:
53
- from transformers import CLIPProcessor, CLIPVisionModel
54
- version = 'openai/clip-vit-large-patch14'
55
- processor = CLIPProcessor.from_pretrained(version)
56
- clip_vision_model = CLIPVisionModel.from_pretrained(version).to(opt.device)
57
- return {'processor': processor, 'clip_vision_model': clip_vision_model}
58
- elif cond_type == ExtraCondition.color:
59
- return None
60
- elif cond_type == ExtraCondition.openpose:
61
- from ldm.modules.extra_condition.openpose.api import OpenposeInference
62
- model = OpenposeInference().to(opt.device)
63
- return model
64
- else:
65
- raise NotImplementedError
66
-
67
-
68
- def get_cond_sketch(opt, cond_image, cond_inp_type, cond_model=None):
69
- if isinstance(cond_image, str):
70
- edge = cv2.imread(cond_image)
71
- else:
72
- # for gradio input, pay attention, it's rgb numpy
73
- edge = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
74
- edge = resize_numpy_image(edge, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
75
- opt.H, opt.W = edge.shape[:2]
76
- if cond_inp_type == 'sketch':
77
- edge = img2tensor(edge)[0].unsqueeze(0).unsqueeze(0) / 255.
78
- edge = edge.to(opt.device)
79
- elif cond_inp_type == 'image':
80
- edge = img2tensor(edge).unsqueeze(0) / 255.
81
- edge = cond_model(edge.to(opt.device))[-1]
82
- else:
83
- raise NotImplementedError
84
-
85
- # edge = 1-edge # for white background
86
- edge = edge > 0.5
87
- edge = edge.float()
88
-
89
- return edge
90
-
91
-
92
- def get_cond_seg(opt, cond_image, cond_inp_type='image', cond_model=None):
93
- if isinstance(cond_image, str):
94
- seg = cv2.imread(cond_image)
95
- else:
96
- seg = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
97
- seg = resize_numpy_image(seg, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
98
- opt.H, opt.W = seg.shape[:2]
99
- if cond_inp_type == 'seg':
100
- seg = img2tensor(seg).unsqueeze(0) / 255.
101
- seg = seg.to(opt.device)
102
- else:
103
- raise NotImplementedError
104
-
105
- return seg
106
-
107
-
108
- def get_cond_keypose(opt, cond_image, cond_inp_type='image', cond_model=None):
109
- if isinstance(cond_image, str):
110
- pose = cv2.imread(cond_image)
111
- else:
112
- pose = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
113
- pose = resize_numpy_image(pose, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
114
- opt.H, opt.W = pose.shape[:2]
115
- if cond_inp_type == 'keypose':
116
- pose = img2tensor(pose).unsqueeze(0) / 255.
117
- pose = pose.to(opt.device)
118
- elif cond_inp_type == 'image':
119
- from ldm.modules.extra_condition.utils import imshow_keypoints
120
- from mmdet.apis import inference_detector
121
- from mmpose.apis import (inference_top_down_pose_model, process_mmdet_results)
122
-
123
- # mmpose seems not compatible with autocast fp16
124
- with autocast("cuda", dtype=torch.float32):
125
- mmdet_results = inference_detector(cond_model['det_model'], pose)
126
- # keep the person class bounding boxes.
127
- person_results = process_mmdet_results(mmdet_results, 1)
128
-
129
- # optional
130
- return_heatmap = False
131
- dataset = cond_model['pose_model'].cfg.data['test']['type']
132
-
133
- # e.g. use ('backbone', ) to return backbone feature
134
- output_layer_names = None
135
- pose_results, returned_outputs = inference_top_down_pose_model(
136
- cond_model['pose_model'],
137
- pose,
138
- person_results,
139
- bbox_thr=0.2,
140
- format='xyxy',
141
- dataset=dataset,
142
- dataset_info=None,
143
- return_heatmap=return_heatmap,
144
- outputs=output_layer_names)
145
-
146
- # show the results
147
- pose = imshow_keypoints(pose, pose_results, radius=2, thickness=2)
148
- pose = img2tensor(pose).unsqueeze(0) / 255.
149
- pose = pose.to(opt.device)
150
- else:
151
- raise NotImplementedError
152
-
153
- return pose
154
-
155
-
156
- def get_cond_depth(opt, cond_image, cond_inp_type='image', cond_model=None):
157
- if isinstance(cond_image, str):
158
- depth = cv2.imread(cond_image)
159
- else:
160
- depth = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
161
- depth = resize_numpy_image(depth, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
162
- opt.H, opt.W = depth.shape[:2]
163
- if cond_inp_type == 'depth':
164
- depth = img2tensor(depth).unsqueeze(0) / 255.
165
- depth = depth.to(opt.device)
166
- elif cond_inp_type == 'image':
167
- depth = img2tensor(depth).unsqueeze(0) / 127.5 - 1.0
168
- depth = cond_model(depth.to(opt.device)).repeat(1, 3, 1, 1)
169
- depth -= torch.min(depth)
170
- depth /= torch.max(depth)
171
- else:
172
- raise NotImplementedError
173
-
174
- return depth
175
-
176
-
177
- def get_cond_canny(opt, cond_image, cond_inp_type='image', cond_model=None):
178
- if isinstance(cond_image, str):
179
- canny = cv2.imread(cond_image)
180
- else:
181
- canny = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
182
- canny = resize_numpy_image(canny, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
183
- opt.H, opt.W = canny.shape[:2]
184
- if cond_inp_type == 'canny':
185
- canny = img2tensor(canny)[0:1].unsqueeze(0) / 255.
186
- canny = canny.to(opt.device)
187
- elif cond_inp_type == 'image':
188
- canny = cv2.Canny(canny, 100, 200)[..., None]
189
- canny = img2tensor(canny).unsqueeze(0) / 255.
190
- canny = canny.to(opt.device)
191
- else:
192
- raise NotImplementedError
193
-
194
- return canny
195
-
196
-
197
- def get_cond_style(opt, cond_image, cond_inp_type='image', cond_model=None):
198
- assert cond_inp_type == 'image'
199
- if isinstance(cond_image, str):
200
- style = Image.open(cond_image)
201
- else:
202
- # numpy image to PIL image
203
- style = Image.fromarray(cond_image)
204
-
205
- style_for_clip = cond_model['processor'](images=style, return_tensors="pt")['pixel_values']
206
- style_feat = cond_model['clip_vision_model'](style_for_clip.to(opt.device))['last_hidden_state']
207
-
208
- return style_feat
209
-
210
-
211
- def get_cond_color(opt, cond_image, cond_inp_type='image', cond_model=None):
212
- if isinstance(cond_image, str):
213
- color = cv2.imread(cond_image)
214
- else:
215
- color = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
216
- color = resize_numpy_image(color, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
217
- opt.H, opt.W = color.shape[:2]
218
- if cond_inp_type == 'image':
219
- color = cv2.resize(color, (opt.W//64, opt.H//64), interpolation=cv2.INTER_CUBIC)
220
- color = cv2.resize(color, (opt.W, opt.H), interpolation=cv2.INTER_NEAREST)
221
- color = img2tensor(color).unsqueeze(0) / 255.
222
- color = color.to(opt.device)
223
- return color
224
-
225
-
226
- def get_cond_openpose(opt, cond_image, cond_inp_type='image', cond_model=None):
227
- if isinstance(cond_image, str):
228
- openpose_keypose = cv2.imread(cond_image)
229
- else:
230
- openpose_keypose = cv2.cvtColor(cond_image, cv2.COLOR_RGB2BGR)
231
- openpose_keypose = resize_numpy_image(
232
- openpose_keypose, max_resolution=opt.max_resolution, resize_short_edge=opt.resize_short_edge)
233
- opt.H, opt.W = openpose_keypose.shape[:2]
234
- if cond_inp_type == 'openpose':
235
- openpose_keypose = img2tensor(openpose_keypose).unsqueeze(0) / 255.
236
- openpose_keypose = openpose_keypose.to(opt.device)
237
- elif cond_inp_type == 'image':
238
- with autocast('cuda', dtype=torch.float32):
239
- openpose_keypose = cond_model(openpose_keypose)
240
- openpose_keypose = img2tensor(openpose_keypose).unsqueeze(0) / 255.
241
- openpose_keypose = openpose_keypose.to(opt.device)
242
-
243
- else:
244
- raise NotImplementedError
245
-
246
- return openpose_keypose
247
-
248
-
249
- def get_adapter_feature(inputs, adapters):
250
- ret_feat_map = None
251
- ret_feat_seq = None
252
- if not isinstance(inputs, list):
253
- inputs = [inputs]
254
- adapters = [adapters]
255
-
256
- for input, adapter in zip(inputs, adapters):
257
- cur_feature = adapter['model'](input)
258
- if isinstance(cur_feature, list):
259
- if ret_feat_map is None:
260
- ret_feat_map = list(map(lambda x: x * adapter['cond_weight'], cur_feature))
261
- else:
262
- ret_feat_map = list(map(lambda x, y: x + y * adapter['cond_weight'], ret_feat_map, cur_feature))
263
- else:
264
- if ret_feat_seq is None:
265
- ret_feat_seq = cur_feature * adapter['cond_weight']
266
- else:
267
- ret_feat_seq = torch.cat([ret_feat_seq, cur_feature * adapter['cond_weight']], dim=1)
268
-
269
- return ret_feat_map, ret_feat_seq
 
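`get_adapter_feature` at the end of the file merges multi-adapter outputs in two ways: per-level feature maps are scaled by each adapter's `cond_weight` and summed element-wise, while sequence features (e.g. the CLIP style tokens) are concatenated along the token axis. A toy sketch of the same fusion with stand-in tensors:

```python
import torch

# Two stand-in adapters, each producing a 4-level feature pyramid.
feats_a = [torch.randn(1, 64 * 2**i, 32 // 2**i, 32 // 2**i) for i in range(4)]
feats_b = [torch.randn_like(f) for f in feats_a]
w_a, w_b = 1.0, 0.6  # per-adapter cond_weight

fused_maps = [fa * w_a + fb * w_b for fa, fb in zip(feats_a, feats_b)]

# Sequence-style features are concatenated along dim=1 instead of summed.
seq_a, seq_b = torch.randn(1, 257, 1024), torch.randn(1, 257, 1024)
fused_seq = torch.cat([seq_a * w_a, seq_b * w_b], dim=1)
print([f.shape for f in fused_maps], fused_seq.shape)
```
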
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/Fill.js DELETED
@@ -1,36 +0,0 @@
1
- /*
2
- 1. Fill empty grids
3
- */
4
-
5
- var Fill = function (map) {
6
- var upperBoard = false;
7
- if (typeof (map) === 'boolean') {
8
- upperBoard = map;
9
- map = undefined;
10
- }
11
-
12
- var symbol;
13
- var board = this.board,
14
- symbols = this.candidateSymbols;
15
-
16
- var height = this.board.height;
17
- if (upperBoard) {
18
- height /= 2;
19
- }
20
- for (var tileY = 0; tileY < height; tileY++) {
21
- for (var tileX = 0, width = this.board.width; tileX < width; tileX++) {
22
- if (board.contains(tileX, tileY, this.chessTileZ)) { // not empty
23
- continue;
24
- }
25
-
26
- if (map !== undefined) {
27
- symbol = map[tileX][tileY];
28
- if (symbol !== '?') {
29
- symbols = symbol;
30
- }
31
- }
32
- this.createChess(tileX, tileY, symbols);
33
- }
34
- }
35
- }
36
- export default Fill;
 
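`Fill` above scans every tile (optionally just the upper half of the board), skips occupied cells, and spawns a symbol either from the candidate pool or from a preset map where `'?'` means "pick randomly". The same loop in a Python sketch, with a plain 2-D list standing in for the board:

```python
import random

def fill(grid, candidates, preset=None, upper_half=False):
    """Fill empty (None) cells; a preset symbol other than '?' overrides the pool."""
    height = len(grid) // 2 if upper_half else len(grid)
    for y in range(height):
        for x in range(len(grid[0])):
            if grid[y][x] is not None:  # not empty
                continue
            symbol = preset[y][x] if preset is not None else '?'
            grid[y][x] = symbol if symbol != '?' else random.choice(candidates)
    return grid

board = [[None] * 4 for _ in range(4)]
print(fill(board, candidates=list("ABC")))
```
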
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/click/Click.js DELETED
@@ -1,2 +0,0 @@
1
- import Click from '../../../plugins/button.js'
2
- export default Click;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dropdownlist/DropDownList.d.ts DELETED
@@ -1,130 +0,0 @@
1
- import Label from '../label/Label';
2
-
3
- export default DropDownList;
4
-
5
- declare namespace DropDownList {
6
- type CreateButtonCallbackType = (
7
- this: DropDownList,
8
- scene: Phaser.Scene,
9
- option: any,
10
- index: number,
11
- options: any[]
12
- ) => Phaser.GameObjects.GameObject;
13
-
14
- type CreateBackgroundCallbackType = (
15
- this: DropDownList,
16
- scene: Phaser.Scene,
17
- ) => Phaser.GameObjects.GameObject;
18
-
19
- type OnButtonClickCallbackType = (
20
- this: DropDownList,
21
- button: Phaser.GameObjects.GameObject,
22
- index: number,
23
- pointer: Phaser.Input.Pointer,
24
- event: Phaser.Types.Input.EventData
25
- ) => void;
26
-
27
- type OnButtonOverCallbackType = (
28
- this: DropDownList,
29
- button: Phaser.GameObjects.GameObject,
30
- index: number,
31
- pointer: Phaser.Input.Pointer,
32
- event: Phaser.Types.Input.EventData
33
- ) => void;
34
-
35
- type OnButtonOutCallbackType = (
36
- this: DropDownList,
37
- button: Phaser.GameObjects.GameObject,
38
- index: number,
39
- pointer: Phaser.Input.Pointer,
40
- event: Phaser.Types.Input.EventData
41
- ) => void;
42
-
43
- type AlignParentType = 'text' | 'icon';
44
-
45
- type ExpandDirectionType = 0 | 1 | 'down' | 'up';
46
-
47
- type SetValueCallbackType = (
48
- dropDownList: DropDownList,
49
- value?: any,
50
- previousValue?: any,
51
- ) => void;
52
-
53
- type ListSpaceType = {
54
- left?: number, right?: number, top?: number, bottom?: number, item?: number
55
- };
56
-
57
- type WrapListSpaceType = {
58
- left?: number, right?: number, top?: number, bottom?: number, item?: number, line?: number
59
- }
60
-
61
- interface IConfig extends Label.IConfig {
62
- options?: any[],
63
- list?: {
64
- createBackgroundCallback?: CreateBackgroundCallbackType;
65
- createButtonCallback?: CreateButtonCallbackType;
66
-
67
- onButtonClick?: OnButtonClickCallbackType;
68
- onButtonOver?: OnButtonOverCallbackType;
69
- onButtonOut?: OnButtonOutCallbackType;
70
-
71
- easeIn?: number;
72
- easeOut?: number;
73
-
74
- wrap?: boolean;
75
- width?: number;
76
- height?: number;
77
- alignParent?: AlignParentType;
78
- alignSide?: string;
79
- expandDirection?: ExpandDirectionType;
80
- bounds?: Phaser.Geom.Rectangle;
81
-
82
- space?: ListSpaceType | WrapListSpaceType;
83
-
84
- draggable?: boolean;
85
- },
86
-
87
- setValueCallback?: SetValueCallbackType;
88
- setValueCallbackScope?: object;
89
- value?: any;
90
- }
91
- }
92
-
93
- declare class DropDownList extends Label {
94
- constructor(
95
- scene: Phaser.Scene,
96
- config?: DropDownList.IConfig
97
- );
98
-
99
- setOptions(options: any[]): this;
100
-
101
- openListPanel(): this;
102
- closeListPanel(): this;
103
- toggleListPanel(): this;
104
-
105
- setValue(value?: any): this;
106
- value: any;
107
-
108
- setCreateButtonCallback(callback?: DropDownList.CreateBackgroundCallbackType): this;
109
- setCreateBackgroundCallback(callback?: DropDownList.CreateBackgroundCallbackType): this;
110
-
111
- setButtonClickCallback(callback?: DropDownList.OnButtonClickCallbackType): this;
112
- setButtonOverCallback(callback?: DropDownList.OnButtonOverCallbackType): this;
113
- setButtonOutCallback(callback?: DropDownList.OnButtonOutCallbackType): this;
114
-
115
- setListEaseInDuration(duration?: number): this;
116
- setListEaseOutDuration(duration?: number): this;
117
-
118
- setWrapEnable(enable?: boolean): this;
119
- setListWidth(width?: number): this;
120
- setListHeight(height?: number): this;
121
- setListSize(width?: number, height?: number): this;
122
-
123
- setListAlignmentMode(mode?: DropDownList.AlignParentType): this;
124
- setListAlignmentSide(side?: string): this;
125
- setListBounds(bounds: Phaser.Geom.Rectangle): this;
126
-
127
- setListSpace(space?: DropDownList.ListSpaceType | DropDownList.WrapListSpaceType): this;
128
-
129
- setListDraggable(enable?: boolean): this;
130
- }
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectangle/RoundRectangle.d.ts DELETED
@@ -1,2 +0,0 @@
1
- import RoundRectangle from "../../../plugins/roundrectangle";
2
- export default RoundRectangle;
 
spaces/Ajit025/Text_to_Image_conversion/app.py DELETED
@@ -1,15 +0,0 @@
1
- from text_to_image import TextToImageTool
2
- import gradio as gr
3
-
4
- tool = TextToImageTool()
5
-
6
- def fn(*args, **kwargs):
7
- return tool(*args, **kwargs)
8
-
9
- gr.Interface(
10
- fn=fn,
11
- inputs=tool.inputs,
12
- outputs=tool.outputs,
13
- title="Text_to_Image",
14
- article=tool.description,
15
- ).queue(concurrency_count=5).launch()
 
spaces/Aki004/herta-so-vits/flask_api.py DELETED
@@ -1,62 +0,0 @@
- import io
- import logging
-
- import soundfile
- import torch
- import torchaudio
- from flask import Flask, request, send_file
- from flask_cors import CORS
-
- from inference.infer_tool import Svc, RealTimeVC
-
- app = Flask(__name__)
-
- CORS(app)
-
- logging.getLogger('numba').setLevel(logging.WARNING)
-
-
- @app.route("/voiceChangeModel", methods=["POST"])
- def voice_change_model():
-     request_form = request.form
-     wave_file = request.files.get("sample", None)
-     # pitch change amount
-     f_pitch_change = float(request_form.get("fPitchChange", 0))
-     # sampling rate required by the DAW
-     daw_sample = int(float(request_form.get("sampleRate", 0)))
-     speaker_id = int(float(request_form.get("sSpeakId", 0)))
-     # read the wav payload from the HTTP request
-     input_wav_path = io.BytesIO(wave_file.read())
-
-     # inference
-     if raw_infer:
-         # out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path)
-         out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path, cluster_infer_ratio=0,
-                                             auto_predict_f0=False, noice_scale=0.4, f0_filter=False)
-         tar_audio = torchaudio.functional.resample(out_audio, svc_model.target_sample, daw_sample)
-     else:
-         out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path, cluster_infer_ratio=0,
-                                 auto_predict_f0=False, noice_scale=0.4, f0_filter=False)
-         tar_audio = torchaudio.functional.resample(torch.from_numpy(out_audio), svc_model.target_sample, daw_sample)
-     # write the result back as a wav response
-     out_wav_path = io.BytesIO()
-     soundfile.write(out_wav_path, tar_audio.cpu().numpy(), daw_sample, format="wav")
-     out_wav_path.seek(0)
-     return send_file(out_wav_path, download_name="temp.wav", as_attachment=True)
-
-
- if __name__ == '__main__':
-     # True means splice directly; there may be audible pops at the splice points.
-     # False means use a cross fade; there may be slight overlapping sound at the splice points.
-     # Using 0.3-0.5 s in the VST plugin can reduce latency.
-     # You can raise the plugin's maximum slice time to 1 second and set this to True
-     # to get stable sound quality at the cost of a relatively large delay.
-     # Choose whichever trade-off is acceptable for you.
-     raw_infer = True
-     # each model goes with its own matching config file
-     model_name = "logs/32k/G_174000-Copy1.pth"
-     config_name = "configs/config.json"
-     cluster_model_path = "logs/44k/kmeans_10000.pt"
-     svc_model = Svc(model_name, config_name, cluster_model_path=cluster_model_path)
-     svc = RealTimeVC()
-     # the port must match the VST plugin's setting
-     app.run(port=6842, host="0.0.0.0", debug=False, threaded=False)
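For reference, a minimal client-side sketch of how a DAW plugin (or any HTTP client) could call the endpoint above; the form keys mirror the ones the handler reads, while the file name, pitch value, and server address are illustrative assumptions:

import io
import requests  # assumed available on the client side

# "input.wav" is a placeholder; any wav the model can consume works.
with open("input.wav", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:6842/voiceChangeModel",  # port set in __main__ above
        files={"sample": ("input.wav", f, "audio/wav")},
        data={"fPitchChange": 0, "sampleRate": 44100, "sSpeakId": 0},
    )
resp.raise_for_status()
converted = io.BytesIO(resp.content)  # wav bytes returned via send_file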
spaces/AlexWang/lama/bin/paper_runfiles/env.sh DELETED
@@ -1,8 +0,0 @@
- DIRNAME="$(dirname $0)"
- DIRNAME="$(realpath ""$DIRNAME"")"
-
- BINDIR="$DIRNAME/.."
- SRCDIR="$BINDIR/.."
- CONFIGDIR="$SRCDIR/configs"
-
- export PYTHONPATH="$SRCDIR:$PYTHONPATH"
spaces/Alfasign/nomic-ai-gpt4all-13b-snoozy/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Chat
- emoji: 📈
- colorFrom: blue
- colorTo: green
- sdk: gradio
- sdk_version: 3.36.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/ops/__init__.py DELETED
@@ -1,9 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- # empty
spaces/Amrrs/DragGan-Inversion/dnnlib/util.py DELETED
@@ -1,504 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- """Miscellaneous utility classes and functions."""
-
- import ctypes
- import fnmatch
- import importlib
- import inspect
- import numpy as np
- import os
- import shutil
- import sys
- import types
- import io
- import pickle
- import re
- import requests
- import html
- import hashlib
- import glob
- import tempfile
- import urllib
- import urllib.request
- import uuid
-
- from distutils.util import strtobool
- from typing import Any, List, Tuple, Union
-
-
- # Util classes
- # ------------------------------------------------------------------------------------------
-
-
- class EasyDict(dict):
-     """Convenience class that behaves like a dict but allows access with the attribute syntax."""
-
-     def __getattr__(self, name: str) -> Any:
-         try:
-             return self[name]
-         except KeyError:
-             raise AttributeError(name)
-
-     def __setattr__(self, name: str, value: Any) -> None:
-         self[name] = value
-
-     def __delattr__(self, name: str) -> None:
-         del self[name]
-
-
- class Logger(object):
-     """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
-
-     def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
-         self.file = None
-
-         if file_name is not None:
-             self.file = open(file_name, file_mode)
-
-         self.should_flush = should_flush
-         self.stdout = sys.stdout
-         self.stderr = sys.stderr
-
-         sys.stdout = self
-         sys.stderr = self
-
-     def __enter__(self) -> "Logger":
-         return self
-
-     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
-         self.close()
-
-     def write(self, text: Union[str, bytes]) -> None:
-         """Write text to stdout (and a file) and optionally flush."""
-         if isinstance(text, bytes):
-             text = text.decode()
-         if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
-             return
-
-         if self.file is not None:
-             self.file.write(text)
-
-         self.stdout.write(text)
-
-         if self.should_flush:
-             self.flush()
-
-     def flush(self) -> None:
-         """Flush written text to both stdout and a file, if open."""
-         if self.file is not None:
-             self.file.flush()
-
-         self.stdout.flush()
-
-     def close(self) -> None:
-         """Flush, close possible files, and remove stdout/stderr mirroring."""
-         self.flush()
-
-         # if using multiple loggers, prevent closing in wrong order
-         if sys.stdout is self:
-             sys.stdout = self.stdout
-         if sys.stderr is self:
-             sys.stderr = self.stderr
-
-         if self.file is not None:
-             self.file.close()
-             self.file = None
-
-
- # Cache directories
- # ------------------------------------------------------------------------------------------
-
- _dnnlib_cache_dir = None
-
-
- def set_cache_dir(path: str) -> None:
-     global _dnnlib_cache_dir
-     _dnnlib_cache_dir = path
-
-
- def make_cache_dir_path(*paths: str) -> str:
-     if _dnnlib_cache_dir is not None:
-         return os.path.join(_dnnlib_cache_dir, *paths)
-     if 'DNNLIB_CACHE_DIR' in os.environ:
-         return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
-     if 'HOME' in os.environ:
-         return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
-     if 'USERPROFILE' in os.environ:
-         return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
-     return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
-
- # Small util functions
- # ------------------------------------------------------------------------------------------
-
-
- def format_time(seconds: Union[int, float]) -> str:
-     """Convert the seconds to human readable string with days, hours, minutes and seconds."""
-     s = int(np.rint(seconds))
-
-     if s < 60:
-         return "{0}s".format(s)
-     elif s < 60 * 60:
-         return "{0}m {1:02}s".format(s // 60, s % 60)
-     elif s < 24 * 60 * 60:
-         return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
-     else:
-         return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
-
-
- def format_time_brief(seconds: Union[int, float]) -> str:
-     """Convert the seconds to human readable string with days, hours, minutes and seconds."""
-     s = int(np.rint(seconds))
-
-     if s < 60:
-         return "{0}s".format(s)
-     elif s < 60 * 60:
-         return "{0}m {1:02}s".format(s // 60, s % 60)
-     elif s < 24 * 60 * 60:
-         return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60)
-     else:
-         return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24)
-
-
- def ask_yes_no(question: str) -> bool:
-     """Ask the user the question until the user inputs a valid answer."""
-     while True:
-         try:
-             print("{0} [y/n]".format(question))
-             return strtobool(input().lower())
-         except ValueError:
-             pass
-
-
- def tuple_product(t: Tuple) -> Any:
-     """Calculate the product of the tuple elements."""
-     result = 1
-
-     for v in t:
-         result *= v
-
-     return result
-
-
- _str_to_ctype = {
-     "uint8": ctypes.c_ubyte,
-     "uint16": ctypes.c_uint16,
-     "uint32": ctypes.c_uint32,
-     "uint64": ctypes.c_uint64,
-     "int8": ctypes.c_byte,
-     "int16": ctypes.c_int16,
-     "int32": ctypes.c_int32,
-     "int64": ctypes.c_int64,
-     "float32": ctypes.c_float,
-     "float64": ctypes.c_double
- }
-
-
- def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
-     """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
-     type_str = None
-
-     if isinstance(type_obj, str):
-         type_str = type_obj
-     elif hasattr(type_obj, "__name__"):
-         type_str = type_obj.__name__
-     elif hasattr(type_obj, "name"):
-         type_str = type_obj.name
-     else:
-         raise RuntimeError("Cannot infer type name from input")
-
-     assert type_str in _str_to_ctype.keys()
-
-     my_dtype = np.dtype(type_str)
-     my_ctype = _str_to_ctype[type_str]
-
-     assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
-
-     return my_dtype, my_ctype
-
-
- def is_pickleable(obj: Any) -> bool:
-     try:
-         with io.BytesIO() as stream:
-             pickle.dump(obj, stream)
-             return True
-     except:
-         return False
-
-
- # Functionality to import modules/objects by name, and call functions by name
- # ------------------------------------------------------------------------------------------
-
- def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
-     """Searches for the underlying module behind the name to some python object.
-     Returns the module and the object name (original name with module part removed)."""
-
-     # allow convenience shorthands, substitute them by full names
-     obj_name = re.sub("^np.", "numpy.", obj_name)
-     obj_name = re.sub("^tf.", "tensorflow.", obj_name)
-
-     # list alternatives for (module_name, local_obj_name)
-     parts = obj_name.split(".")
-     name_pairs = [(".".join(parts[:i]), ".".join(parts[i:]))
-                   for i in range(len(parts), 0, -1)]
-
-     # try each alternative in turn
-     for module_name, local_obj_name in name_pairs:
-         try:
-             module = importlib.import_module(
-                 module_name)  # may raise ImportError
-             # may raise AttributeError
-             get_obj_from_module(module, local_obj_name)
-             return module, local_obj_name
-         except:
-             pass
-
-     # maybe some of the modules themselves contain errors?
-     for module_name, _local_obj_name in name_pairs:
-         try:
-             importlib.import_module(module_name)  # may raise ImportError
-         except ImportError:
-             if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
-                 raise
-
-     # maybe the requested attribute is missing?
-     for module_name, local_obj_name in name_pairs:
-         try:
-             module = importlib.import_module(
-                 module_name)  # may raise ImportError
-             # may raise AttributeError
-             get_obj_from_module(module, local_obj_name)
-         except ImportError:
-             pass
-
-     # we are out of luck, but we have no idea why
-     raise ImportError(obj_name)
-
-
- def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
-     """Traverses the object name and returns the last (rightmost) python object."""
-     if obj_name == '':
-         return module
-     obj = module
-     for part in obj_name.split("."):
-         obj = getattr(obj, part)
-     return obj
-
-
- def get_obj_by_name(name: str) -> Any:
-     """Finds the python object with the given name."""
-     module, obj_name = get_module_from_obj_name(name)
-     return get_obj_from_module(module, obj_name)
-
-
- def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
-     """Finds the python object with the given name and calls it as a function."""
-     assert func_name is not None
-     func_obj = get_obj_by_name(func_name)
-     assert callable(func_obj)
-     return func_obj(*args, **kwargs)
-
-
- def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
-     """Finds the python class with the given name and constructs it with the given arguments."""
-     return call_func_by_name(*args, func_name=class_name, **kwargs)
-
-
- def get_module_dir_by_obj_name(obj_name: str) -> str:
-     """Get the directory path of the module containing the given object name."""
-     module, _ = get_module_from_obj_name(obj_name)
-     return os.path.dirname(inspect.getfile(module))
-
-
- def is_top_level_function(obj: Any) -> bool:
-     """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
-     return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
-
-
- def get_top_level_function_name(obj: Any) -> str:
-     """Return the fully-qualified name of a top-level function."""
-     assert is_top_level_function(obj)
-     module = obj.__module__
-     if module == '__main__':
-         module = os.path.splitext(os.path.basename(
-             sys.modules[module].__file__))[0]
-     return module + "." + obj.__name__
-
-
- # File system helpers
- # ------------------------------------------------------------------------------------------
-
- def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
-     """List all files recursively in a given directory while ignoring given file and directory names.
-     Returns list of tuples containing both absolute and relative paths."""
-     assert os.path.isdir(dir_path)
-     base_name = os.path.basename(os.path.normpath(dir_path))
-
-     if ignores is None:
-         ignores = []
-
-     result = []
-
-     for root, dirs, files in os.walk(dir_path, topdown=True):
-         for ignore_ in ignores:
-             dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
-
-             # dirs need to be edited in-place
-             for d in dirs_to_remove:
-                 dirs.remove(d)
-
-             files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
-
-         absolute_paths = [os.path.join(root, f) for f in files]
-         relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
-
-         if add_base_to_relative:
-             relative_paths = [os.path.join(base_name, p)
-                               for p in relative_paths]
-
-         assert len(absolute_paths) == len(relative_paths)
-         result += zip(absolute_paths, relative_paths)
-
-     return result
-
-
- def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
-     """Takes in a list of tuples of (src, dst) paths and copies files.
-     Will create all necessary directories."""
-     for file in files:
-         target_dir_name = os.path.dirname(file[1])
-
-         # will create all intermediate-level directories
-         if not os.path.exists(target_dir_name):
-             os.makedirs(target_dir_name)
-
-         shutil.copyfile(file[0], file[1])
-
-
- # URL helpers
- # ------------------------------------------------------------------------------------------
-
- def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
-     """Determine whether the given object is a valid URL string."""
-     if not isinstance(obj, str) or not "://" in obj:
-         return False
-     if allow_file_urls and obj.startswith('file://'):
-         return True
-     try:
-         res = requests.compat.urlparse(obj)
-         if not res.scheme or not res.netloc or not "." in res.netloc:
-             return False
-         res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
-         if not res.scheme or not res.netloc or not "." in res.netloc:
-             return False
-     except:
-         return False
-     return True
-
-
- def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
-     """Download the given URL and return a binary-mode file object to access the data."""
-     assert num_attempts >= 1
-     assert not (return_filename and (not cache))
-
-     # Doesn't look like a URL scheme so interpret it as a local filename.
-     if not re.match('^[a-z]+://', url):
-         return url if return_filename else open(url, "rb")
-
-     # Handle file URLs. This code handles unusual file:// patterns that
-     # arise on Windows:
-     #
-     # file:///c:/foo.txt
-     #
-     # which would translate to a local '/c:/foo.txt' filename that's
-     # invalid. Drop the forward slash for such pathnames.
-     #
-     # If you touch this code path, you should test it on both Linux and
-     # Windows.
-     #
-     # Some internet resources suggest using urllib.request.url2pathname()
-     # but that converts forward slashes to backslashes and this causes
-     # its own set of problems.
-     if url.startswith('file://'):
-         filename = urllib.parse.urlparse(url).path
-         if re.match(r'^/[a-zA-Z]:', filename):
-             filename = filename[1:]
-         return filename if return_filename else open(filename, "rb")
-
-     assert is_url(url)
-
-     # Lookup from cache.
-     if cache_dir is None:
-         cache_dir = make_cache_dir_path('downloads')
-
-     url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
-     if cache:
-         cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
-         if len(cache_files) == 1:
-             filename = cache_files[0]
-             return filename if return_filename else open(filename, "rb")
-
-     # Download.
-     url_name = None
-     url_data = None
-     with requests.Session() as session:
-         if verbose:
-             print("Downloading %s ..." % url, end="", flush=True)
-         for attempts_left in reversed(range(num_attempts)):
-             try:
-                 with session.get(url) as res:
-                     res.raise_for_status()
-                     if len(res.content) == 0:
-                         raise IOError("No data received")
-
-                     if len(res.content) < 8192:
-                         content_str = res.content.decode("utf-8")
-                         if "download_warning" in res.headers.get("Set-Cookie", ""):
-                             links = [html.unescape(link) for link in content_str.split(
-                                 '"') if "export=download" in link]
-                             if len(links) == 1:
-                                 url = requests.compat.urljoin(url, links[0])
-                                 raise IOError("Google Drive virus checker nag")
-                         if "Google Drive - Quota exceeded" in content_str:
-                             raise IOError(
-                                 "Google Drive download quota exceeded -- please try again later")
-
-                     match = re.search(
-                         r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
-                     url_name = match[1] if match else url
-                     url_data = res.content
-                     if verbose:
-                         print(" done")
-                     break
-             except KeyboardInterrupt:
-                 raise
-             except:
-                 if not attempts_left:
-                     if verbose:
-                         print(" failed")
-                     raise
-                 if verbose:
-                     print(".", end="", flush=True)
-
-     # Save to cache.
-     if cache:
-         safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
-         cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
-         temp_file = os.path.join(
-             cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
-         os.makedirs(cache_dir, exist_ok=True)
-         with open(temp_file, "wb") as f:
-             f.write(url_data)
-         os.replace(temp_file, cache_file)  # atomic
-         if return_filename:
-             return cache_file
-
-     # Return data as file object.
-     assert not return_filename
-     return io.BytesIO(url_data)
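A short usage sketch for the helpers above; the file name is a placeholder, and local paths skip the download and cache machinery entirely, as the first branch of open_url shows:

import dnnlib.util as util  # assumes the dnnlib package layout above

cfg = util.EasyDict(lr=0.002, batch=32)
print(cfg.lr)                  # attribute-style access backed by the dict
print(util.format_time(3700))  # "1h 01m 40s"

with util.open_url("checkpoint.pkl") as f:  # placeholder local path
    data = f.read()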
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/.github/ISSUE_TEMPLATE/feedback.md DELETED
@@ -1,12 +0,0 @@
- ---
- name: "💬 Feedback about API Design"
- about: Give feedback about the current API design
- title: ''
- labels: ''
- assignees: ''
-
- ---
-
- **What API design would you like to have changed or added to the library? Why?**
-
- **What use case would this enable or better enable? Can you give us a code example?**
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/generate_logits.py DELETED
@@ -1,127 +0,0 @@
- import random
-
- import torch
- from huggingface_hub import HfApi
-
- from diffusers import UNet2DModel
-
-
- api = HfApi()
-
- results = {}
- # fmt: off
- results["google_ddpm_cifar10_32"] = torch.tensor([
-     -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
-     1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-     -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
-     0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
- ])
- results["google_ddpm_ema_bedroom_256"] = torch.tensor([
-     -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
-     1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-     -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
-     2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
- ])
- results["CompVis_ldm_celebahq_256"] = torch.tensor([
-     -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-     -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-     -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
-     0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
- ])
- results["google_ncsnpp_ffhq_1024"] = torch.tensor([
-     0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-     -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
-     0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-     -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
- ])
- results["google_ncsnpp_bedroom_256"] = torch.tensor([
-     0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-     -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
-     0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-     -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
- ])
- results["google_ncsnpp_celebahq_256"] = torch.tensor([
-     0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-     -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
-     0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-     -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
- ])
- results["google_ncsnpp_church_256"] = torch.tensor([
-     0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-     -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
-     0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-     -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
- ])
- results["google_ncsnpp_ffhq_256"] = torch.tensor([
-     0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-     -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
-     0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-     -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
- ])
- results["google_ddpm_cat_256"] = torch.tensor([
-     -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
-     1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-     -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
-     1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
- results["google_ddpm_celebahq_256"] = torch.tensor([
-     -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
-     0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-     -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
-     1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
- ])
- results["google_ddpm_ema_celebahq_256"] = torch.tensor([
-     -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
-     0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-     -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
-     1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
- ])
- results["google_ddpm_church_256"] = torch.tensor([
-     -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
-     1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-     -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
-     3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
- ])
- results["google_ddpm_bedroom_256"] = torch.tensor([
-     -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
-     1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-     -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
-     2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
- ])
- results["google_ddpm_ema_church_256"] = torch.tensor([
-     -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
-     1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-     -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
-     3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
- ])
- results["google_ddpm_ema_cat_256"] = torch.tensor([
-     -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
-     1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-     -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
-     1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
- ])
- # fmt: on
-
- models = api.list_models(filter="diffusers")
- for mod in models:
-     if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
-         local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
-
-         print(f"Started running {mod.modelId}!!!")
-
-         if mod.modelId.startswith("CompVis"):
-             model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
-         else:
-             model = UNet2DModel.from_pretrained(local_checkpoint)
-
-         torch.manual_seed(0)
-         random.seed(0)
-
-         noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
-         time_step = torch.tensor([10] * noise.shape[0])
-         with torch.no_grad():
-             logits = model(noise, time_step).sample
-
-         assert torch.allclose(
-             logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
-         )
-         print(f"{mod.modelId} has passed successfully!!!")
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py DELETED
@@ -1,13 +0,0 @@
- _base_ = './ga_faster_r50_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_32x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=32,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         style='pytorch'))
spaces/Andy1621/uniformer_image_detection/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py DELETED
@@ -1,18 +0,0 @@
- _base_ = './htc_r50_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_64x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=64,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         norm_eval=True,
-         style='pytorch'))
- data = dict(samples_per_gpu=1, workers_per_gpu=1)
- # learning policy
- lr_config = dict(step=[16, 19])
- runner = dict(type='EpochBasedRunner', max_epochs=20)
spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
- _base_ = [
-     '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
-     '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
- ]
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py DELETED
@@ -1,11 +0,0 @@
- _base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py'
- model = dict(
-     pretrained='torchvision://resnet18',
-     backbone=dict(type='ResNet', depth=18),
-     decode_head=dict(
-         c1_in_channels=64,
-         c1_channels=12,
-         in_channels=512,
-         channels=128,
-     ),
-     auxiliary_head=dict(in_channels=256, channels=64))
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py DELETED
@@ -1,39 +0,0 @@
- _base_ = './ocrnet_hr18_512x512_20k_voc12aug.py'
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w48',
-     backbone=dict(
-         extra=dict(
-             stage2=dict(num_channels=(48, 96)),
-             stage3=dict(num_channels=(48, 96, 192)),
-             stage4=dict(num_channels=(48, 96, 192, 384)))),
-     decode_head=[
-         dict(
-             type='FCNHead',
-             in_channels=[48, 96, 192, 384],
-             channels=sum([48, 96, 192, 384]),
-             input_transform='resize_concat',
-             in_index=(0, 1, 2, 3),
-             kernel_size=1,
-             num_convs=1,
-             norm_cfg=norm_cfg,
-             concat_input=False,
-             dropout_ratio=-1,
-             num_classes=21,
-             align_corners=False,
-             loss_decode=dict(
-                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-         dict(
-             type='OCRHead',
-             in_channels=[48, 96, 192, 384],
-             channels=512,
-             ocr_channels=256,
-             input_transform='resize_concat',
-             in_index=(0, 1, 2, 3),
-             norm_cfg=norm_cfg,
-             dropout_ratio=-1,
-             num_classes=21,
-             align_corners=False,
-             loss_decode=dict(
-                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
-     ])
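Configs like the ones above are plain Python files merged over their _base_ chain; a minimal loading sketch, assuming an mmcv installation of the 1.x line these configs target:

from mmcv import Config

cfg = Config.fromfile('configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py')
# _base_ files are merged recursively, so cfg.model reflects the HRNet-W48
# backbone and the two cascaded decode heads defined above.
print(cfg.model.decode_head[1]['type'])  # 'OCRHead'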
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/generators.py DELETED
@@ -1,151 +0,0 @@
- from fastai.vision import *
- from fastai.vision.learner import cnn_config
- from .unet import DynamicUnetWide, DynamicUnetDeep
- from .loss import FeatureLoss
- from .dataset import *
-
- # Weights are implicitly read from ./models/ folder
- def gen_inference_wide(
-         root_folder: Path, weights_name: str, nf_factor: int = 2, arch=models.resnet101) -> Learner:
-     data = get_dummy_databunch()
-     learn = gen_learner_wide(
-         data=data, gen_loss=F.l1_loss, nf_factor=nf_factor, arch=arch
-     )
-     learn.path = root_folder
-     learn.load(weights_name)
-     learn.model.eval()
-     return learn
-
-
- def gen_learner_wide(
-     data: ImageDataBunch, gen_loss, arch=models.resnet101, nf_factor: int = 2
- ) -> Learner:
-     return unet_learner_wide(
-         data,
-         arch=arch,
-         wd=1e-3,
-         blur=True,
-         norm_type=NormType.Spectral,
-         self_attention=True,
-         y_range=(-3.0, 3.0),
-         loss_func=gen_loss,
-         nf_factor=nf_factor,
-     )
-
-
- # The code below is meant to be merged into fastaiv1 ideally
- def unet_learner_wide(
-     data: DataBunch,
-     arch: Callable,
-     pretrained: bool = True,
-     blur_final: bool = True,
-     norm_type: Optional[NormType] = NormType,
-     split_on: Optional[SplitFuncOrIdxList] = None,
-     blur: bool = False,
-     self_attention: bool = False,
-     y_range: Optional[Tuple[float, float]] = None,
-     last_cross: bool = True,
-     bottle: bool = False,
-     nf_factor: int = 1,
-     **kwargs: Any
- ) -> Learner:
-     "Build Unet learner from `data` and `arch`."
-     meta = cnn_config(arch)
-     body = create_body(arch, pretrained)
-     model = to_device(
-         DynamicUnetWide(
-             body,
-             n_classes=data.c,
-             blur=blur,
-             blur_final=blur_final,
-             self_attention=self_attention,
-             y_range=y_range,
-             norm_type=norm_type,
-             last_cross=last_cross,
-             bottle=bottle,
-             nf_factor=nf_factor,
-         ),
-         data.device,
-     )
-     learn = Learner(data, model, **kwargs)
-     learn.split(ifnone(split_on, meta['split']))
-     if pretrained:
-         learn.freeze()
-     apply_init(model[2], nn.init.kaiming_normal_)
-     return learn
-
-
- # ----------------------------------------------------------------------
-
- # Weights are implicitly read from ./models/ folder
- def gen_inference_deep(
-         root_folder: Path, weights_name: str, arch=models.resnet34, nf_factor: float = 1.5) -> Learner:
-     data = get_dummy_databunch()
-     learn = gen_learner_deep(
-         data=data, gen_loss=F.l1_loss, arch=arch, nf_factor=nf_factor
-     )
-     learn.path = root_folder
-     learn.load(weights_name)
-     learn.model.eval()
-     return learn
-
-
- def gen_learner_deep(
-     data: ImageDataBunch, gen_loss, arch=models.resnet34, nf_factor: float = 1.5
- ) -> Learner:
-     return unet_learner_deep(
-         data,
-         arch,
-         wd=1e-3,
-         blur=True,
-         norm_type=NormType.Spectral,
-         self_attention=True,
-         y_range=(-3.0, 3.0),
-         loss_func=gen_loss,
-         nf_factor=nf_factor,
-     )
-
-
- # The code below is meant to be merged into fastaiv1 ideally
- def unet_learner_deep(
-     data: DataBunch,
-     arch: Callable,
-     pretrained: bool = True,
-     blur_final: bool = True,
-     norm_type: Optional[NormType] = NormType,
-     split_on: Optional[SplitFuncOrIdxList] = None,
-     blur: bool = False,
-     self_attention: bool = False,
-     y_range: Optional[Tuple[float, float]] = None,
-     last_cross: bool = True,
-     bottle: bool = False,
-     nf_factor: float = 1.5,
-     **kwargs: Any
- ) -> Learner:
-     "Build Unet learner from `data` and `arch`."
-     meta = cnn_config(arch)
-     body = create_body(arch, pretrained)
-     model = to_device(
-         DynamicUnetDeep(
-             body,
-             n_classes=data.c,
-             blur=blur,
-             blur_final=blur_final,
-             self_attention=self_attention,
-             y_range=y_range,
-             norm_type=norm_type,
-             last_cross=last_cross,
-             bottle=bottle,
-             nf_factor=nf_factor,
-         ),
-         data.device,
-     )
-     learn = Learner(data, model, **kwargs)
-     learn.split(ifnone(split_on, meta['split']))
-     if pretrained:
-         learn.freeze()
-     apply_init(model[2], nn.init.kaiming_normal_)
-     return learn
-
-
- # -----------------------------
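A minimal inference sketch for the wide factory above; the weights file name is a placeholder, and as the comment notes, weights are resolved from a models/ folder under root_folder:

from pathlib import Path
from deoldify.generators import gen_inference_wide  # import path assumed

# Expects ./models/ColorizeArtistic_gen.pth to exist (placeholder name).
learn = gen_inference_wide(root_folder=Path('.'), weights_name='ColorizeArtistic_gen')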
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/filewrapper.py DELETED
@@ -1,111 +0,0 @@
- # SPDX-FileCopyrightText: 2015 Eric Larson
- #
- # SPDX-License-Identifier: Apache-2.0
-
- from tempfile import NamedTemporaryFile
- import mmap
-
-
- class CallbackFileWrapper(object):
-     """
-     Small wrapper around a fp object which will tee everything read into a
-     buffer, and when that file is closed it will execute a callback with the
-     contents of that buffer.
-
-     All attributes are proxied to the underlying file object.
-
-     This class uses members with a double underscore (__) leading prefix so as
-     not to accidentally shadow an attribute.
-
-     The data is stored in a temporary file until it is all available. As long
-     as the temporary files directory is disk-based (sometimes it's a
-     memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory
-     pressure is high. For small files the disk usually won't be used at all,
-     it'll all be in the filesystem memory cache, so there should be no
-     performance impact.
-     """
-
-     def __init__(self, fp, callback):
-         self.__buf = NamedTemporaryFile("rb+", delete=True)
-         self.__fp = fp
-         self.__callback = callback
-
-     def __getattr__(self, name):
-         # The vagaries of garbage collection mean that self.__fp is
-         # not always set. Using __getattribute__ and the private
-         # name[0] allows looking up the attribute value and raising an
-         # AttributeError when it doesn't exist. This stops things from
-         # infinitely recursing calls to getattr in the case where
-         # self.__fp hasn't been set.
-         #
-         # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
-         fp = self.__getattribute__("_CallbackFileWrapper__fp")
-         return getattr(fp, name)
-
-     def __is_fp_closed(self):
-         try:
-             return self.__fp.fp is None
-
-         except AttributeError:
-             pass
-
-         try:
-             return self.__fp.closed
-
-         except AttributeError:
-             pass
-
-         # We just don't cache it then.
-         # TODO: Add some logging here...
-         return False
-
-     def _close(self):
-         if self.__callback:
-             if self.__buf.tell() == 0:
-                 # Empty file:
-                 result = b""
-             else:
-                 # Return the data without actually loading it into memory,
-                 # relying on Python's buffer API and mmap(). mmap() just gives
-                 # a view directly into the filesystem's memory cache, so it
-                 # doesn't result in duplicate memory use.
-                 self.__buf.seek(0, 0)
-                 result = memoryview(
-                     mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
-                 )
-             self.__callback(result)
-
-         # We assign this to None here, because otherwise we can get into
-         # really tricky problems where the CPython interpreter deadlocks
-         # because the callback is holding a reference to something which
-         # has a __del__ method. Setting this to None breaks the cycle
-         # and allows the garbage collector to do its thing normally.
-         self.__callback = None
-
-         # Closing the temporary file releases memory and frees disk space.
-         # Important when caching big files.
-         self.__buf.close()
-
-     def read(self, amt=None):
-         data = self.__fp.read(amt)
-         if data:
-             # We may be dealing with b'', a sign that things are over:
-             # it's passed e.g. after we've already closed self.__buf.
-             self.__buf.write(data)
-         if self.__is_fp_closed():
-             self._close()
-
-         return data
-
-     def _safe_read(self, amt):
-         data = self.__fp._safe_read(amt)
-         if amt == 2 and data == b"\r\n":
-             # urllib executes this read to toss the CRLF at the end
-             # of the chunk.
-             return data
-
-         self.__buf.write(data)
-         if self.__is_fp_closed():
-             self._close()
-
-         return data
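A small sketch of the tee-and-callback behaviour this wrapper implements; the fake response class below is illustrative and only mimics the .fp attribute that __is_fp_closed inspects:

import io

class FakeResponse:
    # Mimics a urllib3-style response: .fp becomes None at EOF.
    def __init__(self, data):
        self.fp = io.BytesIO(data)

    def read(self, amt=None):
        data = self.fp.read(amt)
        if not data:
            self.fp = None  # signals EOF to CallbackFileWrapper
        return data

def on_complete(body):
    print("buffered %d bytes" % len(body))  # e.g. write body to a cache

wrapped = CallbackFileWrapper(FakeResponse(b"hello world"), on_complete)
while wrapped.read(4):
    pass  # each read is teed into the temp file; the callback fires at EOF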
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/resources.py DELETED
@@ -1,358 +0,0 @@
- # -*- coding: utf-8 -*-
- #
- # Copyright (C) 2013-2017 Vinay Sajip.
- # Licensed to the Python Software Foundation under a contributor agreement.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- from __future__ import unicode_literals
-
- import bisect
- import io
- import logging
- import os
- import pkgutil
- import sys
- import types
- import zipimport
-
- from . import DistlibException
- from .util import cached_property, get_cache_base, Cache
-
- logger = logging.getLogger(__name__)
-
-
- cache = None    # created when needed
-
-
- class ResourceCache(Cache):
-     def __init__(self, base=None):
-         if base is None:
-             # Use native string to avoid issues on 2.x: see Python #20140.
-             base = os.path.join(get_cache_base(), str('resource-cache'))
-         super(ResourceCache, self).__init__(base)
-
-     def is_stale(self, resource, path):
-         """
-         Is the cache stale for the given resource?
-
-         :param resource: The :class:`Resource` being cached.
-         :param path: The path of the resource in the cache.
-         :return: True if the cache is stale.
-         """
-         # Cache invalidation is a hard problem :-)
-         return True
-
-     def get(self, resource):
-         """
-         Get a resource into the cache.
-
-         :param resource: A :class:`Resource` instance.
-         :return: The pathname of the resource in the cache.
-         """
-         prefix, path = resource.finder.get_cache_info(resource)
-         if prefix is None:
-             result = path
-         else:
-             result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
-             dirname = os.path.dirname(result)
-             if not os.path.isdir(dirname):
-                 os.makedirs(dirname)
-             if not os.path.exists(result):
-                 stale = True
-             else:
-                 stale = self.is_stale(resource, path)
-             if stale:
-                 # write the bytes of the resource to the cache location
-                 with open(result, 'wb') as f:
-                     f.write(resource.bytes)
-         return result
-
-
- class ResourceBase(object):
-     def __init__(self, finder, name):
-         self.finder = finder
-         self.name = name
-
-
- class Resource(ResourceBase):
-     """
-     A class representing an in-package resource, such as a data file. This is
-     not normally instantiated by user code, but rather by a
-     :class:`ResourceFinder` which manages the resource.
-     """
-     is_container = False        # Backwards compatibility
-
-     def as_stream(self):
-         """
-         Get the resource as a stream.
-
-         This is not a property to make it obvious that it returns a new stream
-         each time.
-         """
-         return self.finder.get_stream(self)
-
-     @cached_property
-     def file_path(self):
-         global cache
-         if cache is None:
-             cache = ResourceCache()
-         return cache.get(self)
-
-     @cached_property
-     def bytes(self):
-         return self.finder.get_bytes(self)
-
-     @cached_property
-     def size(self):
-         return self.finder.get_size(self)
-
-
- class ResourceContainer(ResourceBase):
-     is_container = True     # Backwards compatibility
-
-     @cached_property
-     def resources(self):
-         return self.finder.get_resources(self)
-
-
- class ResourceFinder(object):
-     """
-     Resource finder for file system resources.
-     """
-
-     if sys.platform.startswith('java'):
-         skipped_extensions = ('.pyc', '.pyo', '.class')
-     else:
-         skipped_extensions = ('.pyc', '.pyo')
-
-     def __init__(self, module):
-         self.module = module
-         self.loader = getattr(module, '__loader__', None)
-         self.base = os.path.dirname(getattr(module, '__file__', ''))
-
-     def _adjust_path(self, path):
-         return os.path.realpath(path)
-
-     def _make_path(self, resource_name):
-         # Issue #50: need to preserve type of path on Python 2.x
-         # like os.path._get_sep
-         if isinstance(resource_name, bytes):    # should only happen on 2.x
-             sep = b'/'
-         else:
-             sep = '/'
-         parts = resource_name.split(sep)
-         parts.insert(0, self.base)
-         result = os.path.join(*parts)
-         return self._adjust_path(result)
-
-     def _find(self, path):
-         return os.path.exists(path)
-
-     def get_cache_info(self, resource):
-         return None, resource.path
-
-     def find(self, resource_name):
-         path = self._make_path(resource_name)
-         if not self._find(path):
-             result = None
-         else:
-             if self._is_directory(path):
-                 result = ResourceContainer(self, resource_name)
-             else:
-                 result = Resource(self, resource_name)
-             result.path = path
-         return result
-
-     def get_stream(self, resource):
-         return open(resource.path, 'rb')
-
-     def get_bytes(self, resource):
-         with open(resource.path, 'rb') as f:
-             return f.read()
-
-     def get_size(self, resource):
-         return os.path.getsize(resource.path)
-
-     def get_resources(self, resource):
-         def allowed(f):
-             return (f != '__pycache__' and not
-                     f.endswith(self.skipped_extensions))
-         return set([f for f in os.listdir(resource.path) if allowed(f)])
-
-     def is_container(self, resource):
-         return self._is_directory(resource.path)
-
-     _is_directory = staticmethod(os.path.isdir)
-
-     def iterator(self, resource_name):
-         resource = self.find(resource_name)
-         if resource is not None:
-             todo = [resource]
-             while todo:
-                 resource = todo.pop(0)
-                 yield resource
-                 if resource.is_container:
-                     rname = resource.name
-                     for name in resource.resources:
-                         if not rname:
-                             new_name = name
-                         else:
-                             new_name = '/'.join([rname, name])
-                         child = self.find(new_name)
-                         if child.is_container:
-                             todo.append(child)
-                         else:
-                             yield child
-
-
- class ZipResourceFinder(ResourceFinder):
-     """
-     Resource finder for resources in .zip files.
-     """
-     def __init__(self, module):
-         super(ZipResourceFinder, self).__init__(module)
-         archive = self.loader.archive
-         self.prefix_len = 1 + len(archive)
-         # PyPy doesn't have a _files attr on zipimporter, and you can't set one
-         if hasattr(self.loader, '_files'):
-             self._files = self.loader._files
-         else:
-             self._files = zipimport._zip_directory_cache[archive]
-         self.index = sorted(self._files)
-
-     def _adjust_path(self, path):
-         return path
-
-     def _find(self, path):
-         path = path[self.prefix_len:]
-         if path in self._files:
-             result = True
-         else:
-             if path and path[-1] != os.sep:
-                 path = path + os.sep
-             i = bisect.bisect(self.index, path)
-             try:
-                 result = self.index[i].startswith(path)
-             except IndexError:
-                 result = False
-         if not result:
-             logger.debug('_find failed: %r %r', path, self.loader.prefix)
-         else:
-             logger.debug('_find worked: %r %r', path, self.loader.prefix)
-         return result
-
-     def get_cache_info(self, resource):
-         prefix = self.loader.archive
-         path = resource.path[1 + len(prefix):]
-         return prefix, path
-
-     def get_bytes(self, resource):
-         return self.loader.get_data(resource.path)
-
-     def get_stream(self, resource):
-         return io.BytesIO(self.get_bytes(resource))
-
-     def get_size(self, resource):
-         path = resource.path[self.prefix_len:]
-         return self._files[path][3]
-
-     def get_resources(self, resource):
-         path = resource.path[self.prefix_len:]
-         if path and path[-1] != os.sep:
-             path += os.sep
-         plen = len(path)
-         result = set()
-         i = bisect.bisect(self.index, path)
-         while i < len(self.index):
-             if not self.index[i].startswith(path):
-                 break
-             s = self.index[i][plen:]
-             result.add(s.split(os.sep, 1)[0])   # only immediate children
-             i += 1
-         return result
-
-     def _is_directory(self, path):
-         path = path[self.prefix_len:]
-         if path and path[-1] != os.sep:
-             path += os.sep
-         i = bisect.bisect(self.index, path)
-         try:
-             result = self.index[i].startswith(path)
-         except IndexError:
-             result = False
-         return result
-
-
- _finder_registry = {
-     type(None): ResourceFinder,
-     zipimport.zipimporter: ZipResourceFinder
- }
-
- try:
-     # In Python 3.6, _frozen_importlib -> _frozen_importlib_external
-     try:
-         import _frozen_importlib_external as _fi
-     except ImportError:
-         import _frozen_importlib as _fi
-     _finder_registry[_fi.SourceFileLoader] = ResourceFinder
-     _finder_registry[_fi.FileFinder] = ResourceFinder
-     # See issue #146
-     _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder
-     del _fi
- except (ImportError, AttributeError):
-     pass
-
-
- def register_finder(loader, finder_maker):
-     _finder_registry[type(loader)] = finder_maker
-
-
- _finder_cache = {}
-
-
- def finder(package):
-     """
-     Return a resource finder for a package.
-     :param package: The name of the package.
-     :return: A :class:`ResourceFinder` instance for the package.
-     """
-     if package in _finder_cache:
-         result = _finder_cache[package]
-     else:
-         if package not in sys.modules:
-             __import__(package)
-         module = sys.modules[package]
-         path = getattr(module, '__path__', None)
-         if path is None:
-             raise DistlibException('You cannot get a finder for a module, '
-                                    'only for a package')
-         loader = getattr(module, '__loader__', None)
-         finder_maker = _finder_registry.get(type(loader))
-         if finder_maker is None:
-             raise DistlibException('Unable to locate finder for %r' % package)
-         result = finder_maker(module)
-         _finder_cache[package] = result
-     return result
-
-
- _dummy_module = types.ModuleType(str('__dummy__'))
-
-
- def finder_for_path(path):
-     """
-     Return a resource finder for a path, which should represent a container.
-
-     :param path: The path.
-     :return: A :class:`ResourceFinder` instance for the path.
-     """
-     result = None
-     # calls any path hooks, gets importer into cache
-     pkgutil.get_importer(path)
-     loader = sys.path_importer_cache.get(path)
-     finder = _finder_registry.get(type(loader))
-     if finder:
-         module = _dummy_module
-         module.__file__ = os.path.join(path, '')
-         module.__loader__ = loader
-         result = finder(module)
-     return result
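A short usage sketch; the package and resource names are placeholders, and the same calls work for zip-imported packages thanks to the finder registry above:

from pip._vendor.distlib import resources  # vendored import path used here

pkg_finder = resources.finder('mypackage')    # placeholder package name
res = pkg_finder.find('data/config.json')     # placeholder resource path
if res is not None and not res.is_container:
    payload = res.bytes             # whole resource as bytes (cached property)
    with res.as_stream() as stream:
        head = stream.read(16)      # or read it incrementally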
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/extension.py DELETED
@@ -1,248 +0,0 @@
- """distutils.extension
-
- Provides the Extension class, used to describe C/C++ extension
- modules in setup scripts."""
-
- import os
- import warnings
-
- # This class is really only used by the "build_ext" command, so it might
- # make sense to put it in distutils.command.build_ext. However, that
- # module is already big enough, and I want to make this class a bit more
- # complex to simplify some common cases ("foo" module in "foo.c") and do
- # better error-checking ("foo.c" actually exists).
- #
- # Also, putting this in build_ext.py means every setup script would have to
- # import that large-ish module (indirectly, through distutils.core) in
- # order to do anything.
-
-
- class Extension:
-     """Just a collection of attributes that describes an extension
-     module and everything needed to build it (hopefully in a portable
-     way, but there are hooks that let you be as unportable as you need).
-
-     Instance attributes:
-       name : string
-         the full name of the extension, including any packages -- ie.
-         *not* a filename or pathname, but Python dotted name
-       sources : [string]
-         list of source filenames, relative to the distribution root
-         (where the setup script lives), in Unix form (slash-separated)
-         for portability. Source files may be C, C++, SWIG (.i),
-         platform-specific resource files, or whatever else is recognized
-         by the "build_ext" command as source for a Python extension.
-       include_dirs : [string]
-         list of directories to search for C/C++ header files (in Unix
-         form for portability)
-       define_macros : [(name : string, value : string|None)]
-         list of macros to define; each macro is defined using a 2-tuple,
-         where 'value' is either the string to define it to or None to
-         define it without a particular value (equivalent of "#define
-         FOO" in source or -DFOO on Unix C compiler command line)
-       undef_macros : [string]
-         list of macros to undefine explicitly
-       library_dirs : [string]
-         list of directories to search for C/C++ libraries at link time
-       libraries : [string]
-         list of library names (not filenames or paths) to link against
-       runtime_library_dirs : [string]
-         list of directories to search for C/C++ libraries at run time
-         (for shared extensions, this is when the extension is loaded)
-       extra_objects : [string]
-         list of extra files to link with (eg. object files not implied
-         by 'sources', static library that must be explicitly specified,
-         binary resource files, etc.)
-       extra_compile_args : [string]
-         any extra platform- and compiler-specific information to use
-         when compiling the source files in 'sources'. For platforms and
-         compilers where "command line" makes sense, this is typically a
-         list of command-line arguments, but for other platforms it could
-         be anything.
-       extra_link_args : [string]
-         any extra platform- and compiler-specific information to use
-         when linking object files together to create the extension (or
-         to create a new static Python interpreter). Similar
-         interpretation as for 'extra_compile_args'.
-       export_symbols : [string]
-         list of symbols to be exported from a shared extension. Not
-         used on all platforms, and not generally necessary for Python
-         extensions, which typically export exactly one symbol: "init" +
-         extension_name.
-       swig_opts : [string]
-         any extra options to pass to SWIG if a source file has the .i
-         extension.
-       depends : [string]
-         list of files that the extension depends on
-       language : string
-         extension language (i.e. "c", "c++", "objc"). Will be detected
-         from the source extensions if not provided.
-       optional : boolean
-         specifies that a build failure in the extension should not abort the
-         build process, but simply not install the failing extension.
-     """
-
-     # When adding arguments to this constructor, be sure to update
-     # setup_keywords in core.py.
-     def __init__(
-         self,
-         name,
-         sources,
-         include_dirs=None,
-         define_macros=None,
-         undef_macros=None,
-         library_dirs=None,
-         libraries=None,
-         runtime_library_dirs=None,
-         extra_objects=None,
-         extra_compile_args=None,
-         extra_link_args=None,
-         export_symbols=None,
-         swig_opts=None,
-         depends=None,
-         language=None,
-         optional=None,
-         **kw  # To catch unknown keywords
-     ):
-         if not isinstance(name, str):
-             raise AssertionError("'name' must be a string")
-         if not (isinstance(sources, list) and all(isinstance(v, str) for v in sources)):
-             raise AssertionError("'sources' must be a list of strings")
-
-         self.name = name
-         self.sources = sources
-         self.include_dirs = include_dirs or []
-         self.define_macros = define_macros or []
-         self.undef_macros = undef_macros or []
-         self.library_dirs = library_dirs or []
-         self.libraries = libraries or []
-         self.runtime_library_dirs = runtime_library_dirs or []
-         self.extra_objects = extra_objects or []
-         self.extra_compile_args = extra_compile_args or []
-         self.extra_link_args = extra_link_args or []
-         self.export_symbols = export_symbols or []
-         self.swig_opts = swig_opts or []
-         self.depends = depends or []
-         self.language = language
-         self.optional = optional
-
-         # If there are unknown keyword options, warn about them
-         if len(kw) > 0:
-             options = [repr(option) for option in kw]
-             options = ', '.join(sorted(options))
-             msg = "Unknown Extension options: %s" % options
-             warnings.warn(msg)
-
-     def __repr__(self):
-         return '<{}.{}({!r}) at {:#x}>'.format(
-             self.__class__.__module__,
-             self.__class__.__qualname__,
-             self.name,
-             id(self),
-         )
-
-
- def read_setup_file(filename):  # noqa: C901
-     """Reads a Setup file and returns Extension instances."""
-     from distutils.sysconfig import parse_makefile, expand_makefile_vars, _variable_rx
-
-     from distutils.text_file import TextFile
-     from distutils.util import split_quoted
-
-     # First pass over the file to gather "VAR = VALUE" assignments.
-     vars = parse_makefile(filename)
-
-     # Second pass to gobble up the real content: lines of the form
-     #   <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
-     file = TextFile(
-         filename,
-         strip_comments=1,
-         skip_blanks=1,
-         join_lines=1,
-         lstrip_ws=1,
-         rstrip_ws=1,
-     )
-     try:
-         extensions = []
-
-         while True:
-             line = file.readline()
-             if line is None:  # eof
-                 break
-             if _variable_rx.match(line):  # VAR=VALUE, handled in first pass
-                 continue
-
-             if line[0] == line[-1] == "*":
-                 file.warn("'%s' lines not handled yet" % line)
-                 continue
-
-             line = expand_makefile_vars(line, vars)
-             words = split_quoted(line)
-
-             # NB. this parses a slightly different syntax than the old
-             # makesetup script: here, there must be exactly one extension per
-             # line, and it must be the first word of the line. I have no idea
-             # why the old syntax supported multiple extensions per line, as
-             # they all wind up being the same.
-
-             module = words[0]
-             ext = Extension(module, [])
-             append_next_word = None
-
-             for word in words[1:]:
-                 if append_next_word is not None:
-                     append_next_word.append(word)
-                     append_next_word = None
-                     continue
-
-                 suffix = os.path.splitext(word)[1]
-                 switch = word[0:2]
-                 value = word[2:]
-
-                 if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
-                     # hmm, should we do something about C vs. C++ sources?
204
- # or leave it up to the CCompiler implementation to
205
- # worry about?
206
- ext.sources.append(word)
207
- elif switch == "-I":
208
- ext.include_dirs.append(value)
209
- elif switch == "-D":
210
- equals = value.find("=")
211
- if equals == -1: # bare "-DFOO" -- no value
212
- ext.define_macros.append((value, None))
213
- else: # "-DFOO=blah"
214
- ext.define_macros.append((value[0:equals], value[equals + 2 :]))
215
- elif switch == "-U":
216
- ext.undef_macros.append(value)
217
- elif switch == "-C": # only here 'cause makesetup has it!
218
- ext.extra_compile_args.append(word)
219
- elif switch == "-l":
220
- ext.libraries.append(value)
221
- elif switch == "-L":
222
- ext.library_dirs.append(value)
223
- elif switch == "-R":
224
- ext.runtime_library_dirs.append(value)
225
- elif word == "-rpath":
226
- append_next_word = ext.runtime_library_dirs
227
- elif word == "-Xlinker":
228
- append_next_word = ext.extra_link_args
229
- elif word == "-Xcompiler":
230
- append_next_word = ext.extra_compile_args
231
- elif switch == "-u":
232
- ext.extra_link_args.append(word)
233
- if not value:
234
- append_next_word = ext.extra_link_args
235
- elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
236
- # NB. a really faithful emulation of makesetup would
237
- # append a .o file to extra_objects only if it
238
- # had a slash in it; otherwise, it would s/.o/.c/
239
- # and append it to sources. Hmmmm.
240
- ext.extra_objects.append(word)
241
- else:
242
- file.warn("unrecognized argument '%s'" % word)
243
-
244
- extensions.append(ext)
245
- finally:
246
- file.close()
247
-
248
- return extensions
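For reference, a minimal sketch of how the `Extension` class above is consumed from a setup script (the package and source names here are hypothetical, and distutils itself is deprecated in favor of setuptools):

```python
from distutils.extension import Extension

# A hypothetical C accelerator module: a dotted name, not a file path.
ext = Extension(
    "mypkg._speedups",
    sources=["src/_speedups.c"],       # Unix-style, relative to setup.py
    define_macros=[("NDEBUG", None)],  # -DNDEBUG on the compiler command line
    libraries=["m"],                   # link against libm
)
print(ext)  # <distutils.extension.Extension('mypkg._speedups') at 0x...>
```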
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docker/README.md DELETED
@@ -1,45 +0,0 @@
-
- ## Use the container (with docker ≥ 19.03)
-
- ```
- cd docker/
- # Build:
- docker build --build-arg USER_ID=$UID -t detectron2:v0 .
- # Launch (requires GPUs):
- docker run --gpus all -it \
-   --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
-   --name=detectron2 detectron2:v0
-
- # Grant docker access to host X server to show images
- xhost +local:`docker inspect --format='{{ .Config.Hostname }}' detectron2`
- ```
-
- ## Use the container (with docker-compose ≥ 1.28.0)
-
- Install docker-compose and nvidia-docker-toolkit, then run:
- ```
- cd docker && USER_ID=$UID docker-compose run detectron2
- ```
-
- ## Use the deployment container (to test C++ examples)
- After building the base detectron2 container as above, do:
- ```
- # Build:
- docker build -t detectron2-deploy:v0 -f deploy.Dockerfile .
- # Launch:
- docker run --gpus all -it detectron2-deploy:v0
- ```
-
- #### Using a persistent cache directory
-
- You can prevent models from being re-downloaded on every run
- by storing them in a cache directory.
-
- To do this, add `--volume=$HOME/.torch/fvcore_cache:/tmp:rw` in the run command.
-
- ## Install new dependencies
- Add the following to `Dockerfile` to make persistent changes.
- ```
- RUN sudo apt-get update && sudo apt-get install -y vim
- ```
- Or run them in the container to make temporary changes.
spaces/Axolotlily/DalleMini/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("huggingface/osanseviero/dalle-mini-fork").launch()
spaces/Bart92/RVC_HF/lib/infer_pack/onnx_inference.py DELETED
@@ -1,145 +0,0 @@
- import onnxruntime
- import librosa
- import numpy as np
- import soundfile
-
-
- class ContentVec:
-     def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
-         print("load model(s) from {}".format(vec_path))
-         if device == "cpu" or device is None:
-             providers = ["CPUExecutionProvider"]
-         elif device == "cuda":
-             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
-         elif device == "dml":
-             providers = ["DmlExecutionProvider"]
-         else:
-             raise RuntimeError("Unsupported device")
-         self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
-     def __call__(self, wav):
-         return self.forward(wav)
-
-     def forward(self, wav):
-         feats = wav
-         if feats.ndim == 2:  # double channels
-             feats = feats.mean(-1)
-         assert feats.ndim == 1, feats.ndim
-         feats = np.expand_dims(np.expand_dims(feats, 0), 0)
-         onnx_input = {self.model.get_inputs()[0].name: feats}
-         logits = self.model.run(None, onnx_input)[0]
-         return logits.transpose(0, 2, 1)
-
-
- def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kwargs):
-     if f0_predictor == "pm":
-         from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
-         f0_predictor_object = PMF0Predictor(
-             hop_length=hop_length, sampling_rate=sampling_rate
-         )
-     elif f0_predictor == "harvest":
-         from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
-             HarvestF0Predictor,
-         )
-
-         f0_predictor_object = HarvestF0Predictor(
-             hop_length=hop_length, sampling_rate=sampling_rate
-         )
-     elif f0_predictor == "dio":
-         from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
-         f0_predictor_object = DioF0Predictor(
-             hop_length=hop_length, sampling_rate=sampling_rate
-         )
-     else:
-         raise Exception("Unknown f0 predictor")
-     return f0_predictor_object
-
-
- class OnnxRVC:
-     def __init__(
-         self,
-         model_path,
-         sr=40000,
-         hop_size=512,
-         vec_path="vec-768-layer-12",
-         device="cpu",
-     ):
-         vec_path = f"pretrained/{vec_path}.onnx"
-         self.vec_model = ContentVec(vec_path, device)
-         if device == "cpu" or device is None:
-             providers = ["CPUExecutionProvider"]
-         elif device == "cuda":
-             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
-         elif device == "dml":
-             providers = ["DmlExecutionProvider"]
-         else:
-             raise RuntimeError("Unsupported device")
-         self.model = onnxruntime.InferenceSession(model_path, providers=providers)
-         self.sampling_rate = sr
-         self.hop_size = hop_size
-
-     def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
-         onnx_input = {
-             self.model.get_inputs()[0].name: hubert,
-             self.model.get_inputs()[1].name: hubert_length,
-             self.model.get_inputs()[2].name: pitch,
-             self.model.get_inputs()[3].name: pitchf,
-             self.model.get_inputs()[4].name: ds,
-             self.model.get_inputs()[5].name: rnd,
-         }
-         return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
-     def inference(
-         self,
-         raw_path,
-         sid,
-         f0_method="dio",
-         f0_up_key=0,
-         pad_time=0.5,
-         cr_threshold=0.02,
-     ):
-         f0_min = 50
-         f0_max = 1100
-         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-         f0_predictor = get_f0_predictor(
-             f0_method,
-             hop_length=self.hop_size,
-             sampling_rate=self.sampling_rate,
-             threshold=cr_threshold,
-         )
-         wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
-         org_length = len(wav)
-         if org_length / sr > 50.0:
-             raise RuntimeError("Reached max length")
-
-         wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
-
-         hubert = self.vec_model(wav16k)
-         hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
-         hubert_length = hubert.shape[1]
-
-         pitchf = f0_predictor.compute_f0(wav, hubert_length)
-         pitchf = pitchf * 2 ** (f0_up_key / 12)
-         pitch = pitchf.copy()
-         f0_mel = 1127 * np.log(1 + pitch / 700)
-         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-             f0_mel_max - f0_mel_min
-         ) + 1
-         f0_mel[f0_mel <= 1] = 1
-         f0_mel[f0_mel > 255] = 255
-         pitch = np.rint(f0_mel).astype(np.int64)
-
-         pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
-         pitch = pitch.reshape(1, len(pitch))
-         ds = np.array([sid]).astype(np.int64)
-
-         rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
-         hubert_length = np.array([hubert_length]).astype(np.int64)
-
-         out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
-         out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
-         return out_wav[0:org_length]
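The `inference` method above compresses the predicted f0 contour into 255 coarse bins on a mel-like scale before feeding it to the ONNX model. A standalone sketch of that quantization step, using a hypothetical f0 contour (0.0 marking unvoiced frames):

```python
import numpy as np

f0 = np.array([0.0, 110.0, 220.0, 440.0, 880.0])  # Hz; 0.0 = unvoiced
f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0_mel = 1127 * np.log(1 + f0 / 700)
voiced = f0_mel > 0
f0_mel[voiced] = (f0_mel[voiced] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_mel = np.clip(f0_mel, 1, 255)       # unvoiced frames collapse to bin 1
coarse = np.rint(f0_mel).astype(np.int64)
print(coarse)                          # higher pitch -> higher bin index
```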
spaces/Benjov/Demo-IR/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Demo IR
- emoji: 📚
- colorFrom: green
- colorTo: gray
- sdk: gradio
- sdk_version: 3.50.2
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Anime Life Simulator.md DELETED
@@ -1,110 +0,0 @@
-
- <h1>What is an anime life simulator?</h1>
- <p>An anime life simulator is a video-game genre that lets you create and control a character in a virtual world inspired by anime. Anime is a term for Japanese animation, known for its distinctive style, colorful visuals, and diverse themes. Anime fans often enjoy immersing themselves in the stories and characters of their favorite shows or films. Anime life simulator games offer a way to experience a different or alternative life in an anime setting.</p>
- <h2>anime life simulator</h2><br /><p><b><b>DOWNLOAD</b> &#127379; <a href="https://bltlly.com/2v6IyI">https://bltlly.com/2v6IyI</a></b></p><br /><br />
- <p>Anime life simulator games can vary in scope and focus, but they usually share some common features. They often have character-creation tools that let you customize your appearance, personality, skills, and preferences. They also have simulation mechanics that let you interact with other characters, explore the environment, carry out tasks, and make decisions. Some games may also include elements from other genres, such as role-playing, strategy, or action.</p>
- <p>Anime life simulator games can appeal to different types of players for different reasons. Some may enjoy the freedom and creativity of building their own character and story. Some may like the challenge and variety of managing the different aspects of their virtual life. Some may seek the fun and excitement of experiencing new situations and adventures. Some simply want to relax and escape from reality for a while.</p>
- <h2>How do you play an anime life simulator?</h2>
- <p>There is no definitive answer to how to play an anime life simulator, since each game may have its own rules and goals. However, there are some general steps that can help you get started with any game in this genre.</p>
- <ol>
- <li>Choose a game that suits your preferences and interests. There are many anime simulation games available on various platforms, such as PC, mobile, or console. You can look up reviews, ratings, screenshots, videos, or demos online to find a game you like.</li>
-
- <li>Start your simulation and explore the game world. You can usually move around using the keyboard, mouse, or touch-screen controls. You can also interact with objects or characters by clicking or tapping on them, and open menus or inventories to check your status, items, quests, and so on.</li>
- <li>Follow the game's story or create your own. Some games have a linear or branching plot that guides you through the main events and choices. Others have a more open-ended or sandbox style that lets you create your own story and goals. You can usually advance the story by completing quests, tasks, or objectives, or by making decisions that affect the outcome.</li>
- <li>Enjoy the simulation and have fun. You can usually do various activities in the game world, such as talking to other characters, making friends or enemies, dating or getting married, working or studying, shopping or crafting, fighting or exploring, and so on. You can also experience different emotions, such as happiness, sadness, anger, or fear, and unlock new content, such as items, locations, and characters.</li>
- </ol>
- <h3>Types of anime life simulator games</h3>
- <p>Anime life simulator games can be classified into different types or subgenres according to their theme, setting, or focus. Here are some of the most common and popular types:</p>
- <h4>Dating sim</h4>
- <p>A dating sim is a type of anime life simulator that focuses on romance and relationships. In this type of game, you can usually choose from a variety of potential love interests, each with their own personality, appearance, and background. You can also interact with them in different ways, such as talking, flirting, giving gifts, or going on dates. Your goal is usually to win their affection and reach a happy ending with them.</p>
- <p>Some examples of dating sim games are:</p>
- <p></p>
- <ul>
-
- <li><a href="">Dream Daddy: A Dad Dating Simulator</a>: A game about a single dad who moves to a new town and meets other single dads who are also potential love interests.</li>
- <li><a href="">Hatoful Boyfriend</a>: A game that parodies the genre by having the player date pigeons in a post-apocalyptic world.</li>
- </ul>
- <h4>School sim</h4>
- <p>A school sim is a type of anime life simulator that simulates the daily life of a student at an anime school. In this type of game, you can usually create your own character and enroll in a school of your choice. You can also attend classes, join clubs, make friends, study for exams, and take part in events. Your goal is usually to balance your academic and social life and achieve your dreams.</p>
- <p>Some examples of school sim games are:</p>
- <ul>
- <li><a href="">Persona 5</a>: A game that combines school-sim elements with role-playing and action. The player controls a group of students who use their supernatural abilities to fight the forces of evil in an alternate dimension.</li>
- <li><a href="">Academia: School Simulator</a>: A game that lets the player design and manage their own school. The player can hire staff, build facilities, set policies, deal with problems, and more.</li>
- <li><a href="">High School Story</a>: A game that lets the player create their own character and build their own high school. The player can customize their school, recruit students, throw parties, go on dates, and more.</li>
- </ul>
- <h4>Fantasy sim</h4>
- <p>A fantasy sim is a type of anime life simulator that incorporates elements of magic, adventure, and combat. In this type of game, you can usually create your own character and enter a fantasy world full of wonders and dangers. You can also learn spells, wield weapons, fight enemies, explore dungeons, and collect treasure. Your goal is usually to complete quests, save the world, or fulfill your destiny.</p>
- <p>Some examples of fantasy sim games are:</p>
- <ul>
-
- <li><a href="">Stardew Valley</a>: A game that mixes farming-sim elements with fantasy. The player inherits a farm in a rural village and can grow crops, raise animals, mine ore, fish, befriend the villagers, and more.</li>
- <li><a href="">Final Fantasy XIV</a>: A massively multiplayer online role-playing game set in a fantasy world. The player can choose from several races, classes, and jobs, and join other players in quests, raids, dungeons, and more.</li>
- </ul>
- <h4>Farming sim</h4>
- <p>A farming sim is a type of anime life simulator that involves managing a farm and interacting with animals and villagers. In this type of game, you can usually create your own character and inherit or buy a farm. You can plant crops, harvest produce, raise livestock, and sell goods, as well as socialize with the local community, make friends, date, marry, and have children. Your goal is usually to improve your farm and your life.</p>
- <p>Some examples of farming sim games are:</p>
- <ul>
- <li><a href="">Harvest Moon</a>: A series that is one of the pioneers of the genre. The games feature various settings and characters, but all share the same basic farming and life-simulation gameplay.</li>
- <li><a href="">Story of Seasons</a>: A series that is a spiritual successor to Harvest Moon. The games have similar gameplay, but also introduce new features such as customization, multiplayer, and crossover characters.</li>
- <li><a href="">Rune Factory</a>: A series that is a spin-off of Harvest Moon. The games combine farming-sim elements with fantasy-sim elements such as magic, combat, and dungeons.</li>
- </ul>
- <h2>What are the benefits of playing an anime life simulator?</h2>
- <p>Playing an anime life simulator can have several benefits for different players. Here are some of the possible benefits of playing this genre:</p>
- <ul>
-
- <li>Relaxation: Playing an anime life simulator can help you relax and unwind. You can enjoy the colorful visuals and soothing music, and escape the stress and pressure of reality for a while.</li>
- <li>Social skills: Playing an anime life simulator can improve your social skills and confidence. You can interact with a variety of characters and learn to communicate, empathize, and negotiate. You can also make friends or find love in the game world.</li>
- </ul>
- <h2>What are the challenges of playing an anime life simulator?</h2>
- <p>Playing an anime life simulator can also pose some challenges or difficulties for some players. Here are some of the possible challenges of playing this genre:</p>
- <ul>
- <li>Addiction: Playing an anime life simulator can be addictive and time-consuming. You may spend hours or days playing without realizing it, and neglect your real-life responsibilities or relationships.</li>
- <li>Unrealistic expectations: Playing an anime life simulator can create unrealistic expectations or fantasies. You may compare your real life with your virtual life and feel dissatisfied or unhappy. You may also idealize or romanticize the game's characters or situations.</li>
- <li>Cultural differences: Playing an anime life simulator can expose you to cultural differences or misunderstandings. You may encounter terms, references, or behaviors that are unfamiliar or confusing, and you may unintentionally offend or disrespect characters or other players.</li>
- </ul>
- <h2>Tips and tricks for playing an anime life simulator</h2>
- <p>Playing an anime life simulator can be more enjoyable and rewarding if you follow some tips and tricks. Here are some useful ones for this genre:</p>
- <ul>
-
- <li>Save: While playing an anime life simulator, save your progress frequently and in different slots. That way you can avoid losing data or progress to crashes or bugs, and you can return to earlier points or choices if you want to change something or try something different.</li>
- <li>Experiment: While playing an anime life simulator, experiment with different options and outcomes. Don't be afraid to make mistakes or fail, and try different characters, activities, routes, and so on to discover new content and possibilities.</li>
- </ul>
- <h2>Examples of popular anime life simulator games</h2>
- <p>There are many anime life simulator games available on various platforms and devices. Here are some popular examples:</p>
- <h4>Anime Play Life: Unlimited</h4>
- <p>Anime Play Life: Unlimited is a game that lets you take on quests, find a job, buy houses, fish, picnic, and more in an anime world. You can also customize your character, clothes, pets, vehicles, and so on, interact with other players online, and join clubs, parties, or events. The game is available on PC and mobile devices.</p>
- <h4>XOXO Droplets</h4>
- <p>XOXO Droplets is a comedic dating sim with multiple endings and characters. You play as a girl who joins a school for troubled students and meets six boys who are all jerks in their own way. You can also explore the city, shop, work, study, and more. The game is available on PC and mobile devices.</p>
- <h4>Long Live the Queen</h4>
- <p>Long Live the Queen is a game that challenges you to rule a kingdom as a young princess. You have to manage your stats, skills, mood, outfits, events, and so on, while dealing with political intrigue, war, assassination attempts, and more. The game has many branching paths and endings depending on your choices. It is available on PC and mobile devices.</p>
- <h4>Mon-cuties for All</h4>
-
- <h2>Conclusion</h2>
- <p>Anime life simulator is a video-game genre that lets you create and control a character in a virtual world inspired by anime. Anime life simulator games come in different types and with different features, benefits, challenges, tips, and examples. Playing one can be a fun and rewarding experience for anime fans and gamers alike.</p>
- <p>If you are interested in playing an anime life simulator game, you can check out some of the games mentioned in this article or search for others online. You can also share your thoughts and opinions on this genre in the comments section below. Thanks for reading, and have a great day!</p>
- <h2>Frequently asked questions</h2>
- <p>Here are some frequently asked questions and answers about anime life simulator games:</p>
- <ol>
- <li>What is the difference between an anime life simulator and an anime visual novel?</li>
- <p>An anime life simulator is a game that simulates the daily life of a character in an anime world. An anime visual novel is a game that tells a story through text and images in an anime style. Anime life simulator games usually have more gameplay mechanics and interactivity than anime visual novels.</p>
- <li>What are some of the best anime life simulator games for beginners?</li>
- <p>Some of the best anime life simulator games for beginners are:</p>
- <ul>
- <li><a href="">Animal Crossing: New Horizons</a>: A game that lets you build your own island paradise and interact with cute animal villagers.</li>
- <li><a href="">My Time at Portia</a>: A game that lets you build and run your own workshop in a post-apocalyptic world.</li>
- <li><a href="">Doki Doki Literature Club</a>: A game that looks like a cute dating sim but has a dark twist.</li>
- </ul>
- <li>How can I play an anime life simulator game on my phone?</li>
-
- <li>How can I make my own anime life simulator game?</li>
- <p>You can make your own anime life simulator game by using a game engine or a software tool that lets you create games without coding. Some popular tools are:</p>
- <ul>
- <li><a href="">Ren'Py</a>: A tool that lets you create visual novels and dating sims.</li>
- <li><a href="">RPG Maker</a>: A tool that lets you create role-playing games and fantasy sims.</li>
- <li><a href="">Unity</a>: A tool that lets you create any kind of game with 2D or 3D graphics.</li>
- </ul>
- <li>How can I learn more about anime life simulator games?</li>
- <p>You can learn more about anime life simulator games by reading online articles, blogs, magazines, books, and so on about this genre. You can also watch online videos, streams, and podcasts, and join online communities, forums, and groups where you can discuss the genre with other fans and players.</p>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/encoding.py DELETED
@@ -1,36 +0,0 @@
- import codecs
- import locale
- import re
- import sys
- from typing import List, Tuple
-
- BOMS: List[Tuple[bytes, str]] = [
-     (codecs.BOM_UTF8, "utf-8"),
-     (codecs.BOM_UTF16, "utf-16"),
-     (codecs.BOM_UTF16_BE, "utf-16-be"),
-     (codecs.BOM_UTF16_LE, "utf-16-le"),
-     (codecs.BOM_UTF32, "utf-32"),
-     (codecs.BOM_UTF32_BE, "utf-32-be"),
-     (codecs.BOM_UTF32_LE, "utf-32-le"),
- ]
-
- ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)")
-
-
- def auto_decode(data: bytes) -> str:
-     """Check a bytes string for a BOM to correctly detect the encoding
-
-     Fallback to locale.getpreferredencoding(False) like open() on Python3"""
-     for bom, encoding in BOMS:
-         if data.startswith(bom):
-             return data[len(bom) :].decode(encoding)
-     # Lets check the first two lines as in PEP263
-     for line in data.split(b"\n")[:2]:
-         if line[0:1] == b"#" and ENCODING_RE.search(line):
-             result = ENCODING_RE.search(line)
-             assert result is not None
-             encoding = result.groups()[0].decode("ascii")
-             return data.decode(encoding)
-     return data.decode(
-         locale.getpreferredencoding(False) or sys.getdefaultencoding(),
-     )
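A quick usage sketch of `auto_decode` (note that this lives in pip's internal, unstable API, so the import path is for illustration only):

```python
import codecs
from pip._internal.utils.encoding import auto_decode

# BOM-tagged input: the BOM is detected, stripped, and used to pick the codec.
data = codecs.BOM_UTF16_LE + "hello".encode("utf-16-le")
print(auto_decode(data))  # -> 'hello'

# No BOM, but a PEP 263 coding cookie on the first line.
src = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"
print(auto_decode(src))   # decoded as latin-1 via the regex above
```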
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euctwprober.py DELETED
@@ -1,47 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is mozilla.org code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 1998
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301  USA
- ######################### END LICENSE BLOCK #########################
-
- from .chardistribution import EUCTWDistributionAnalysis
- from .codingstatemachine import CodingStateMachine
- from .mbcharsetprober import MultiByteCharSetProber
- from .mbcssm import EUCTW_SM_MODEL
-
-
- class EUCTWProber(MultiByteCharSetProber):
-     def __init__(self) -> None:
-         super().__init__()
-         self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
-         self.distribution_analyzer = EUCTWDistributionAnalysis()
-         self.reset()
-
-     @property
-     def charset_name(self) -> str:
-         return "EUC-TW"
-
-     @property
-     def language(self) -> str:
-         return "Taiwan"
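This prober is internal machinery; end users normally go through chardet's public `detect` helper, which runs the whole prober ensemble. A hedged sketch (Python has no built-in EUC-TW codec, so a Big5 sample stands in here, and the reported encoding and confidence depend on sample length):

```python
import chardet

sample = ("繁體中文的編碼偵測測試。" * 8).encode("big5")
print(chardet.detect(sample))
# e.g. {'encoding': 'Big5', 'confidence': 0.99, 'language': 'Chinese'}
```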
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/CODE_OF_CONDUCT.md DELETED
@@ -1,76 +0,0 @@
- # Code of Conduct
-
- ## Our Pledge
-
- In the interest of fostering an open and welcoming environment, we as
- contributors and maintainers pledge to make participation in our project and
- our community a harassment-free experience for everyone, regardless of age, body
- size, disability, ethnicity, sex characteristics, gender identity and expression,
- level of experience, education, socio-economic status, nationality, personal
- appearance, race, religion, or sexual identity and orientation.
-
- ## Our Standards
-
- Examples of behavior that contributes to creating a positive environment
- include:
-
- * Using welcoming and inclusive language
- * Being respectful of differing viewpoints and experiences
- * Gracefully accepting constructive criticism
- * Focusing on what is best for the community
- * Showing empathy towards other community members
-
- Examples of unacceptable behavior by participants include:
-
- * The use of sexualized language or imagery and unwelcome sexual attention or
-   advances
- * Trolling, insulting/derogatory comments, and personal or political attacks
- * Public or private harassment
- * Publishing others' private information, such as a physical or electronic
-   address, without explicit permission
- * Other conduct which could reasonably be considered inappropriate in a
-   professional setting
-
- ## Our Responsibilities
-
- Project maintainers are responsible for clarifying the standards of acceptable
- behavior and are expected to take appropriate and fair corrective action in
- response to any instances of unacceptable behavior.
-
- Project maintainers have the right and responsibility to remove, edit, or
- reject comments, commits, code, wiki edits, issues, and other contributions
- that are not aligned to this Code of Conduct, or to ban temporarily or
- permanently any contributor for other behaviors that they deem inappropriate,
- threatening, offensive, or harmful.
-
- ## Scope
-
- This Code of Conduct applies within all project spaces, and it also applies when
- an individual is representing the project or its community in public spaces.
- Examples of representing a project or community include using an official
- project e-mail address, posting via an official social media account, or acting
- as an appointed representative at an online or offline event. Representation of
- a project may be further defined and clarified by project maintainers.
-
- ## Enforcement
-
- Instances of abusive, harassing, or otherwise unacceptable behavior may be
- reported by contacting the project team at <[email protected]>. All
- complaints will be reviewed and investigated and will result in a response that
- is deemed necessary and appropriate to the circumstances. The project team is
- obligated to maintain confidentiality with regard to the reporter of an incident.
- Further details of specific enforcement policies may be posted separately.
-
- Project maintainers who do not follow or enforce the Code of Conduct in good
- faith may face temporary or permanent repercussions as determined by other
- members of the project's leadership.
-
- ## Attribution
-
- This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
- available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
- [homepage]: https://www.contributor-covenant.org
-
- For answers to common questions about this code of conduct, see
- https://www.contributor-covenant.org/faq
spaces/CVPR/GFPGAN-example/PaperModel.md DELETED
@@ -1,76 +0,0 @@
- # Installation
-
- We now provide a *clean* version of GFPGAN, which does not require customized CUDA extensions. See [here](README.md#installation) for this easier installation.<br>
- If you want to use the original model in our paper, please follow the instructions below.
-
- 1. Clone repo
-
-     ```bash
-     git clone https://github.com/xinntao/GFPGAN.git
-     cd GFPGAN
-     ```
-
- 1. Install dependent packages
-
-     As StyleGAN2 uses customized PyTorch C++ extensions, you need to **compile them during installation** or **load them just-in-time (JIT)**.
-     You can refer to [BasicSR-INSTALL.md](https://github.com/xinntao/BasicSR/blob/master/INSTALL.md) for more details.
-
-     **Option 1: Load extensions just-in-time (JIT)** (for those who just want to run simple inference; may have fewer issues)
-
-     ```bash
-     # Install basicsr - https://github.com/xinntao/BasicSR
-     # We use BasicSR for both training and inference
-     pip install basicsr
-
-     # Install facexlib - https://github.com/xinntao/facexlib
-     # We use face detection and face restoration helper in the facexlib package
-     pip install facexlib
-
-     pip install -r requirements.txt
-     python setup.py develop
-
-     # remember to set BASICSR_JIT=True before your running commands
-     ```
-
-     **Option 2: Compile extensions during installation** (for those who need to train/run inference many times)
-
-     ```bash
-     # Install basicsr - https://github.com/xinntao/BasicSR
-     # We use BasicSR for both training and inference
-     # Set BASICSR_EXT=True to compile the cuda extensions in BasicSR - it may take several minutes to compile, please be patient
-     # Add -vvv for detailed log prints
-     BASICSR_EXT=True pip install basicsr -vvv
-
-     # Install facexlib - https://github.com/xinntao/facexlib
-     # We use face detection and face restoration helper in the facexlib package
-     pip install facexlib
-
-     pip install -r requirements.txt
-     python setup.py develop
-     ```
-
- ## :zap: Quick Inference
-
- Download pre-trained models: [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth)
-
- ```bash
- wget https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth -P experiments/pretrained_models
- ```
-
- - Option 1: Load extensions just-in-time (JIT)
-
-     ```bash
-     BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1
-
-     # for aligned images
-     BASICSR_JIT=True python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned
-     ```
-
- - Option 2: Have successfully compiled extensions during installation
-
-     ```bash
-     python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs --save_root results --arch original --channel 1
-
-     # for aligned images
-     python inference_gfpgan.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --save_root results --arch original --channel 1 --aligned
-     ```
spaces/CVPR/WALT/mmcv_custom/runner/epoch_based_runner.py DELETED
@@ -1,104 +0,0 @@
- # Copyright (c) Open-MMLab. All rights reserved.
- import os.path as osp
- import platform
- import shutil
-
- import torch
- from torch.optim import Optimizer
-
- import mmcv
- from mmcv.runner import RUNNERS, EpochBasedRunner
- from .checkpoint import save_checkpoint
-
- try:
-     import apex
- except ImportError:
-     print('apex is not installed')
-
-
- @RUNNERS.register_module()
- class EpochBasedRunnerAmp(EpochBasedRunner):
-     """Epoch-based Runner with AMP support.
-
-     This runner trains models epoch by epoch.
-     """
-
-     def save_checkpoint(self,
-                         out_dir,
-                         filename_tmpl='epoch_{}.pth',
-                         save_optimizer=True,
-                         meta=None,
-                         create_symlink=True):
-         """Save the checkpoint.
-
-         Args:
-             out_dir (str): The directory that checkpoints are saved.
-             filename_tmpl (str, optional): The checkpoint filename template,
-                 which contains a placeholder for the epoch number.
-                 Defaults to 'epoch_{}.pth'.
-             save_optimizer (bool, optional): Whether to save the optimizer to
-                 the checkpoint. Defaults to True.
-             meta (dict, optional): The meta information to be saved in the
-                 checkpoint. Defaults to None.
-             create_symlink (bool, optional): Whether to create a symlink
-                 "latest.pth" to point to the latest checkpoint.
-                 Defaults to True.
-         """
-         if meta is None:
-             meta = dict(epoch=self.epoch + 1, iter=self.iter)
-         elif isinstance(meta, dict):
-             meta.update(epoch=self.epoch + 1, iter=self.iter)
-         else:
-             raise TypeError(
-                 f'meta should be a dict or None, but got {type(meta)}')
-         if self.meta is not None:
-             meta.update(self.meta)
-
-         filename = filename_tmpl.format(self.epoch + 1)
-         filepath = osp.join(out_dir, filename)
-         optimizer = self.optimizer if save_optimizer else None
-         save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
-         # in some environments, `os.symlink` is not supported, you may need to
-         # set `create_symlink` to False
-         if create_symlink:
-             dst_file = osp.join(out_dir, 'latest.pth')
-             if platform.system() != 'Windows':
-                 mmcv.symlink(filename, dst_file)
-             else:
-                 shutil.copy(filepath, dst_file)
-
-     def resume(self,
-                checkpoint,
-                resume_optimizer=True,
-                map_location='default'):
-         if map_location == 'default':
-             if torch.cuda.is_available():
-                 device_id = torch.cuda.current_device()
-                 checkpoint = self.load_checkpoint(
-                     checkpoint,
-                     map_location=lambda storage, loc: storage.cuda(device_id))
-             else:
-                 checkpoint = self.load_checkpoint(checkpoint)
-         else:
-             checkpoint = self.load_checkpoint(
-                 checkpoint, map_location=map_location)
-
-         self._epoch = checkpoint['meta']['epoch']
-         self._iter = checkpoint['meta']['iter']
-         if 'optimizer' in checkpoint and resume_optimizer:
-             if isinstance(self.optimizer, Optimizer):
-                 self.optimizer.load_state_dict(checkpoint['optimizer'])
-             elif isinstance(self.optimizer, dict):
-                 for k in self.optimizer.keys():
-                     self.optimizer[k].load_state_dict(
-                         checkpoint['optimizer'][k])
-             else:
-                 raise TypeError(
-                     'Optimizer should be dict or torch.optim.Optimizer '
-                     f'but got {type(self.optimizer)}')
-
-         if 'amp' in checkpoint:
-             apex.amp.load_state_dict(checkpoint['amp'])
-             self.logger.info('load amp state dict')
-
-         self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
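Because the class registers itself via `@RUNNERS.register_module()`, it is normally selected from an mmcv-style config rather than instantiated directly. A minimal sketch of that config line (field names follow the usual mmcv runner convention; the `max_epochs` value is a placeholder):

```python
# somewhere in the training config
runner = dict(type='EpochBasedRunnerAmp', max_epochs=12)
```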
spaces/CVPR/WALT/mmdet/core/bbox/coder/base_bbox_coder.py DELETED
@@ -1,17 +0,0 @@
- from abc import ABCMeta, abstractmethod
-
-
- class BaseBBoxCoder(metaclass=ABCMeta):
-     """Base bounding box coder."""
-
-     def __init__(self, **kwargs):
-         pass
-
-     @abstractmethod
-     def encode(self, bboxes, gt_bboxes):
-         """Encode deltas between bboxes and ground truth boxes."""
-
-     @abstractmethod
-     def decode(self, bboxes, bboxes_pred):
-         """Decode the predicted bboxes according to prediction and base
-         boxes."""
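A minimal, hypothetical concrete coder satisfying the interface above (assuming `BaseBBoxCoder` as defined here is importable), using raw offsets as the delta representation:

```python
import torch

class IdentityBBoxCoder(BaseBBoxCoder):
    """Encodes targets as plain offsets from the base boxes."""

    def encode(self, bboxes, gt_bboxes):
        return gt_bboxes - bboxes      # per-coordinate deltas

    def decode(self, bboxes, bboxes_pred):
        return bboxes + bboxes_pred    # apply predicted deltas

coder = IdentityBBoxCoder()
base = torch.tensor([[0., 0., 10., 10.]])
gt = torch.tensor([[1., 2., 11., 12.]])
assert torch.equal(coder.decode(base, coder.encode(base, gt)), gt)
```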
spaces/CVPR/WALT/mmdet/core/bbox/transforms.py DELETED
@@ -1,240 +0,0 @@
- import numpy as np
- import torch
-
-
- def bbox_flip(bboxes, img_shape, direction='horizontal'):
-     """Flip bboxes horizontally or vertically.
-
-     Args:
-         bboxes (Tensor): Shape (..., 4*k)
-         img_shape (tuple): Image shape.
-         direction (str): Flip direction, options are "horizontal", "vertical",
-             "diagonal". Default: "horizontal"
-
-     Returns:
-         Tensor: Flipped bboxes.
-     """
-     assert bboxes.shape[-1] % 4 == 0
-     assert direction in ['horizontal', 'vertical', 'diagonal']
-     flipped = bboxes.clone()
-     if direction == 'horizontal':
-         flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
-         flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
-     elif direction == 'vertical':
-         flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
-         flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
-     else:
-         flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
-         flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
-         flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
-         flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
-     return flipped
-
-
- def bbox_mapping(bboxes,
-                  img_shape,
-                  scale_factor,
-                  flip,
-                  flip_direction='horizontal'):
-     """Map bboxes from the original image scale to testing scale."""
-     new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
-     if flip:
-         new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
-     return new_bboxes
-
-
- def bbox_mapping_back(bboxes,
-                       img_shape,
-                       scale_factor,
-                       flip,
-                       flip_direction='horizontal'):
-     """Map bboxes from testing scale to original image scale."""
-     new_bboxes = bbox_flip(bboxes, img_shape,
-                            flip_direction) if flip else bboxes
-     new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
-     return new_bboxes.view(bboxes.shape)
-
-
- def bbox2roi(bbox_list):
-     """Convert a list of bboxes to roi format.
-
-     Args:
-         bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
-             of images.
-
-     Returns:
-         Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
-     """
-     rois_list = []
-     for img_id, bboxes in enumerate(bbox_list):
-         if bboxes.size(0) > 0:
-             img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
-             rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
-         else:
-             rois = bboxes.new_zeros((0, 5))
-         rois_list.append(rois)
-     rois = torch.cat(rois_list, 0)
-     return rois
-
-
- def roi2bbox(rois):
-     """Convert rois to bounding box format.
-
-     Args:
-         rois (torch.Tensor): RoIs with the shape (n, 5) where the first
-             column indicates batch id of each RoI.
-
-     Returns:
-         list[torch.Tensor]: Converted boxes of corresponding rois.
-     """
-     bbox_list = []
-     img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
-     for img_id in img_ids:
-         inds = (rois[:, 0] == img_id.item())
-         bbox = rois[inds, 1:]
-         bbox_list.append(bbox)
-     return bbox_list
-
-
- def bbox2result(bboxes, labels, num_classes):
-     """Convert detection results to a list of numpy arrays.
-
-     Args:
-         bboxes (torch.Tensor | np.ndarray): shape (n, 5)
-         labels (torch.Tensor | np.ndarray): shape (n, )
-         num_classes (int): class number, including background class
-
-     Returns:
-         list(ndarray): bbox results of each class
-     """
-     if bboxes.shape[0] == 0:
-         return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
-     else:
-         if isinstance(bboxes, torch.Tensor):
-             bboxes = bboxes.detach().cpu().numpy()
-             labels = labels.detach().cpu().numpy()
-         return [bboxes[labels == i, :] for i in range(num_classes)]
-
-
- def distance2bbox(points, distance, max_shape=None):
-     """Decode distance prediction to bounding box.
-
-     Args:
-         points (Tensor): Shape (B, N, 2) or (N, 2).
-         distance (Tensor): Distance from the given point to 4
-             boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
-         max_shape (Sequence[int] or torch.Tensor or Sequence[
-             Sequence[int]], optional): Maximum bounds for boxes, specifies
-             (H, W, C) or (H, W). If priors shape is (B, N, 4), then
-             the max_shape should be a Sequence[Sequence[int]]
-             and the length of max_shape should also be B.
-
-     Returns:
-         Tensor: Boxes with shape (N, 4) or (B, N, 4)
-     """
-     x1 = points[..., 0] - distance[..., 0]
-     y1 = points[..., 1] - distance[..., 1]
-     x2 = points[..., 0] + distance[..., 2]
-     y2 = points[..., 1] + distance[..., 3]
-
-     bboxes = torch.stack([x1, y1, x2, y2], -1)
-
-     if max_shape is not None:
-         if not isinstance(max_shape, torch.Tensor):
-             max_shape = x1.new_tensor(max_shape)
-         max_shape = max_shape[..., :2].type_as(x1)
-         if max_shape.ndim == 2:
-             assert bboxes.ndim == 3
-             assert max_shape.size(0) == bboxes.size(0)
-
-         min_xy = x1.new_tensor(0)
-         max_xy = torch.cat([max_shape, max_shape],
-                            dim=-1).flip(-1).unsqueeze(-2)
-         bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
-         bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
-
-     return bboxes
-
-
- def bbox2distance(points, bbox, max_dis=None, eps=0.1):
-     """Encode bounding box as distances from the given points.
-
-     Args:
-         points (Tensor): Shape (n, 2), [x, y].
-         bbox (Tensor): Shape (n, 4), "xyxy" format
-         max_dis (float): Upper bound of the distance.
-         eps (float): a small value to ensure target < max_dis, instead <=
-
-     Returns:
-         Tensor: Decoded distances.
-     """
-     left = points[:, 0] - bbox[:, 0]
-     top = points[:, 1] - bbox[:, 1]
-     right = bbox[:, 2] - points[:, 0]
-     bottom = bbox[:, 3] - points[:, 1]
-     if max_dis is not None:
-         left = left.clamp(min=0, max=max_dis - eps)
-         top = top.clamp(min=0, max=max_dis - eps)
-         right = right.clamp(min=0, max=max_dis - eps)
-         bottom = bottom.clamp(min=0, max=max_dis - eps)
-     return torch.stack([left, top, right, bottom], -1)
-
-
- def bbox_rescale(bboxes, scale_factor=1.0):
-     """Rescale bounding box w.r.t. scale_factor.
-
-     Args:
-         bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
-         scale_factor (float): rescale factor
-
-     Returns:
-         Tensor: Rescaled bboxes.
-     """
-     if bboxes.size(1) == 5:
-         bboxes_ = bboxes[:, 1:]
-         inds_ = bboxes[:, 0]
-     else:
-         bboxes_ = bboxes
-     cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
-     cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
-     w = bboxes_[:, 2] - bboxes_[:, 0]
-     h = bboxes_[:, 3] - bboxes_[:, 1]
-     w = w * scale_factor
-     h = h * scale_factor
-     x1 = cx - 0.5 * w
-     x2 = cx + 0.5 * w
-     y1 = cy - 0.5 * h
-     y2 = cy + 0.5 * h
-     if bboxes.size(1) == 5:
-         rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
-     else:
-         rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
-     return rescaled_bboxes
-
-
- def bbox_cxcywh_to_xyxy(bbox):
-     """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
-
-     Args:
-         bbox (Tensor): Shape (n, 4) for bboxes.
-
-     Returns:
-         Tensor: Converted bboxes.
-     """
-     cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
-     bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
-     return torch.cat(bbox_new, dim=-1)
-
-
- def bbox_xyxy_to_cxcywh(bbox):
-     """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
-
-     Args:
-         bbox (Tensor): Shape (n, 4) for bboxes.
-
-     Returns:
-         Tensor: Converted bboxes.
-     """
-     x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
-     bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
-     return torch.cat(bbox_new, dim=-1)
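A quick numerical check of the two conversions above, plus `bbox_flip`, with the functions in scope and hand-picked values:

```python
import torch

cxcywh = torch.tensor([[30., 50., 20., 10.]])    # center (30, 50), 20x10 box
xyxy = bbox_cxcywh_to_xyxy(cxcywh)               # -> [[20., 45., 40., 55.]]
assert torch.equal(bbox_xyxy_to_cxcywh(xyxy), cxcywh)

# Horizontal flip mirrors x1/x2 about the image width (img_shape is (H, W)).
flipped = bbox_flip(xyxy, img_shape=(100, 120))  # -> [[80., 45., 100., 55.]]
```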
spaces/Catmeow/Face2Painting_From_Photo/paintingface.py DELETED
@@ -1,110 +0,0 @@
- import os
- os.system("pip install dlib")
- import sys
- import face_detection
- from PIL import Image, ImageOps, ImageFile
- import numpy as np
- import cv2 as cv
- import torch
- import gradio as gr
-
- torch.set_grad_enabled(False)
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", device=device).eval()
- model2 = torch.hub.load("AK391/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device=device).eval()
- face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device)
- image_format = "png"  #@param ["jpeg", "png"]
-
- def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=2.0, threshold=0):
-     """Return a sharpened version of the image, using an unsharp mask."""
-     blurred = cv.GaussianBlur(image, kernel_size, sigma)
-     sharpened = float(amount + 1) * image - float(amount) * blurred
-     sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
-     sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
-     sharpened = sharpened.round().astype(np.uint8)
-     if threshold > 0:
-         low_contrast_mask = np.absolute(image - blurred) < threshold
-         np.copyto(sharpened, image, where=low_contrast_mask)
-     return sharpened
-
- def normPRED(d):
-     ma = np.max(d)
-     mi = np.min(d)
-
-     dn = (d - mi) / (ma - mi)
-
-     return dn
-
- def array_to_np(array_in):
-     array_in = normPRED(array_in)
-     array_in = np.squeeze(255.0 * (array_in))
-     array_in = np.transpose(array_in, (1, 2, 0))
-     return array_in
-
- def array_to_image(array_in):
-     array_in = normPRED(array_in)
-     array_in = np.squeeze(255.0 * (array_in))
-     array_in = np.transpose(array_in, (1, 2, 0))
-     im = Image.fromarray(array_in.astype(np.uint8))
-     return im
-
-
- def image_as_array(image_in):
-     image_in = np.array(image_in, np.float32)
-     tmpImg = np.zeros((image_in.shape[0], image_in.shape[1], 3))
-     image_in = image_in / np.max(image_in)
-     if image_in.shape[2] == 1:
-         tmpImg[:, :, 0] = (image_in[:, :, 0] - 0.485) / 0.229
-         tmpImg[:, :, 1] = (image_in[:, :, 0] - 0.485) / 0.229
-         tmpImg[:, :, 2] = (image_in[:, :, 0] - 0.485) / 0.229
-     else:
-         tmpImg[:, :, 0] = (image_in[:, :, 0] - 0.485) / 0.229
-         tmpImg[:, :, 1] = (image_in[:, :, 1] - 0.456) / 0.224
-         tmpImg[:, :, 2] = (image_in[:, :, 2] - 0.406) / 0.225
-
-     tmpImg = tmpImg.transpose((2, 0, 1))
-     image_out = np.expand_dims(tmpImg, 0)
-     return image_out
-
- # detect a face
- def find_aligned_face(image_in, size=400):
-     aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
-     return aligned_image, n_faces, quad
-
- # clip the face, return array
- def align_first_face(image_in, size=400):
-     aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
-     if n_faces == 0:
-         try:
-             image_in = ImageOps.exif_transpose(image_in)
-         except Exception:
-             print("exif problem, not rotating")
-         image_in = image_in.resize((size, size))
-         im_array = image_as_array(image_in)
-     else:
-         im_array = image_as_array(aligned_image)
-
-     return im_array
-
- def img_concat_h(im1, im2):
-     dst = Image.new('RGB', (im1.width + im2.width, im1.height))
-     dst.paste(im1, (0, 0))
-     dst.paste(im2, (im1.width, 0))
-     return dst
-
- def paintface(img: Image.Image, size: int) -> Image.Image:
-     aligned_img = align_first_face(img, size)
-     if aligned_img is None:
-         output = None
-     else:
-         im_in = array_to_image(aligned_img).convert("RGB")
-         im_out1 = face2paint(model, im_in, side_by_side=False)
-         im_out2 = face2paint(model2, im_in, side_by_side=False)
-
-         output = img_concat_h(im_out1, im_out2)
-     return output
-
- def generate(img):
-     out = paintface(img, 400)
-     return out
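To make the `unsharp_mask` formula above concrete, a tiny 1-D check of `sharpened = (amount + 1) * image - amount * blurred` (the "blurred" array is a hand-picked stand-in for the Gaussian blur):

```python
import numpy as np

image = np.array([10., 10., 100., 10., 10.])
blurred = np.array([10., 40., 70., 40., 10.])
amount = 2.0
sharpened = np.clip((amount + 1) * image - amount * blurred, 0, 255)
print(sharpened)  # [ 10.   0. 160.   0.  10.] -- contrast at the edge is exaggerated
```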