parquet-converter commited on
Commit
2957772
·
1 Parent(s): b4b3e78

Update parquet files (step 70 of 476)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyDesk for Mac A Free and Secure Remote Desktop Solution.md +0 -54
  2. spaces/1gistliPinn/ChatGPT4/Examples/Control Systems Engineering By Nagrath And Gopal 5th Edition Free Free Download.md +0 -8
  3. spaces/1gistliPinn/ChatGPT4/Examples/Counter Strike 1.6 Maps Free Download AA Dima - Why You Should Play on This Map.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/DMIFIT Tool And HPBQ138EXE BEST.md +0 -42
  5. spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Panduan Penggunaan Canon Eos 600d Bahasa Indonesia.md +0 -6
  6. spaces/1phancelerku/anime-remove-background/Download GTA Vice City 5 and Experience the 80s in HD.md +0 -79
  7. spaces/1phancelerku/anime-remove-background/Download Garena Bed Wars APK for Android - Team up and destroy your enemies beds in this PVP game.md +0 -143
  8. spaces/1phancelerku/anime-remove-background/Download Warpath Jurassic Park for Android and Unleash Your Inner Dinosaur.md +0 -67
  9. spaces/1phancelerku/anime-remove-background/Dragon Ball Z Budokai Tenkaichi 3 PC Version How to Get and Install It.md +0 -173
  10. spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_objects.py +0 -617
  11. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r50.py +0 -26
  12. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/losses.py +0 -42
  13. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/train.py +0 -141
  14. spaces/A00001/bingothoo/src/components/ui/dropdown-menu.tsx +0 -128
  15. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/model.py +0 -913
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/__init__.py +0 -0
  17. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/model.py +0 -182
  18. spaces/Alpaca233/SadTalker/app.py +0 -80
  19. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/buffer.cpp +0 -87
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/pndm.md +0 -35
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/depth2img.md +0 -40
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/dance_diffusion/__init__.py +0 -0
  23. spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py +0 -6
  24. spaces/Andy1621/uniformer_image_segmentation/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py +0 -2
  25. spaces/Ankush05/Newcode/getvalues.py +0 -87
  26. spaces/Artrajz/vits-simple-api/bert_vits2/bert/chinese-roberta-wwm-ext-large/README.md +0 -57
  27. spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/tokenizer.py +0 -197
  28. spaces/Axolotlily/DalleMini/README.md +0 -13
  29. spaces/BAAI/AltDiffusion-m9/ui_functions.py +0 -240
  30. spaces/Balalaxmi/JarvisAIchatbox/app.py +0 -34
  31. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/utils.py +0 -121
  32. spaces/Benson/text-generation/Examples/Cmo Descargar Msica A Una Unidad USB De Youtube.md +0 -147
  33. spaces/Benson/text-generation/Examples/Descargar Agente Zabbix Para Windows Server 2019.md +0 -93
  34. spaces/Benson/text-generation/Examples/Descargar Carx Calle Apk.md +0 -46
  35. spaces/Benson/text-generation/Examples/Descargar Coche Escuela De Conduccin 2017 Mod Apk.md +0 -97
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py +0 -94
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/windows_support.py +0 -29
  38. spaces/Billyosoro/ESRGAN/scripts/generate_multiscale_DF2K.py +0 -48
  39. spaces/Boadiwaa/Recipes/openai/api_resources/abstract/deletable_api_resource.py +0 -24
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/CONTRIBUTING.md +0 -52
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/shape_spec.py +0 -20
  42. spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/style.css +0 -19
  43. spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/bitwise_operators.h +0 -338
  44. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/replace.h +0 -44
  45. spaces/CVPR/WALT/mmdet/models/backbones/darknet.py +0 -199
  46. spaces/CVPR/WALT/mmdet/models/losses/cross_entropy_loss.py +0 -216
  47. spaces/CikeyQI/meme-api/meme_generator/memes/hold_tight/__init__.py +0 -18
  48. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/__init__.py +0 -0
  49. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/README.md +0 -90
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/ttGlyphPen.py +0 -335
spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyDesk for Mac A Free and Secure Remote Desktop Solution.md DELETED
@@ -1,54 +0,0 @@
1
- <br />
2
- <h1>How to Download and Use AnyDesk for Free on Mac</h1>
3
- <p>AnyDesk is a fast and secure remote desktop application that allows you to access and control any computer or device from anywhere. You can use AnyDesk for various purposes, such as remote support, online collaboration, file transfer, screen sharing, and more. AnyDesk is compatible with multiple platforms, including Windows, macOS, Linux, Android, iOS, and Chrome OS. In this article, we will show you how to download and use AnyDesk for free on Mac.</p>
4
- <h2>anydesk free mac download</h2><br /><p><b><b>Download</b> >>>>> <a href="https://byltly.com/2uKx4h">https://byltly.com/2uKx4h</a></b></p><br /><br />
5
- <h2>How to Download AnyDesk for Free on Mac</h2>
6
- <p>Downloading AnyDesk for free on Mac is easy and quick. Here are the steps you need to follow:</p>
7
- <ul>
8
- <li><b>Step 1: Go to https://anydesk.com/en/downloads/mac-os</b></li>
9
- <li><b>Step 2: Click on the green Download Now button.</b></li>
10
- <li><b>Step 3: Wait for the download to complete and open the .dmg file.</b></li>
11
- <li><b>Step 4: Drag and drop the AnyDesk icon to the Applications folder.</b></li>
12
- <li><b>Step 5: Launch AnyDesk from the Applications folder or the Launchpad.</b></li>
13
- </ul>
14
- <h2>How to Use AnyDesk for Free on Mac</h2>
15
- <p>Using AnyDesk for free on Mac is simple and intuitive. Here are some of the basic functions you can perform with AnyDesk:</p>
16
- <ul>
17
- <li><b>To access a remote computer or device:</b></li>
18
- <ul>
19
- <li>Launch AnyDesk on your Mac and enter the AnyDesk address or alias of the remote computer or device in the Remote Desk field.</li>
20
- <li>Click on Connect and wait for the remote user to accept your request.</li>
21
- <li>You can now see and control the remote screen as if you were sitting in front of it.</li>
22
- <li>You can also use the toolbar at the top of the screen to access various options, such as chat, file transfer, audio, video, settings, and more.</li>
23
- </ul>
24
- <li><b>To allow access to your Mac from a remote computer or device:</b></li>
25
- <ul>
26
- <li>Launch AnyDesk on your Mac and note down your AnyDesk address or alias displayed on the main window.</li>
27
- <li>Share your AnyDesk address or alias with the remote user who wants to access your Mac.</li>
28
- <li>Wait for an incoming connection request and accept it by clicking on Accept.</li>
29
- <li>You can now see a green border around your screen indicating that your Mac is being accessed remotely.</li>
30
- <li>You can also use the toolbar at the bottom of the screen to access various options, such as chat, file transfer, audio, video, settings, and more.</li>
31
- </ul>
32
- </ul>
33
- <h2>Conclusion</h2>
34
- <p>AnyDesk is a powerful and reliable remote desktop application that can help you work and collaborate remotely with ease and security. You can download and use AnyDesk for free on Mac by following the steps above. You can also upgrade to a paid plan if you need more features and customization options. To learn more about AnyDesk, you can visit their official website or their online help center.</p><h2>How to Uninstall AnyDesk from Mac</h2>
35
- <p>If you want to uninstall AnyDesk from your Mac, you can follow these steps:</p>
36
- <ul>
37
- <li><b>Step 1: Quit AnyDesk if it is running on your Mac.</b></li>
38
- <li><b>Step 2: Open the Finder and go to the Applications folder.</b></li>
39
- <li><b>Step 3: Locate the AnyDesk icon and drag it to the Trash.</b></li>
40
- <li><b>Step 4: Empty the Trash to delete AnyDesk completely from your Mac.</b></li>
41
- </ul>
42
- <p>Note: This method may not remove all the files and folders associated with AnyDesk from your Mac. If you want to delete them manually, you can use a third-party app like AppCleaner or Finder's search function to find and remove them. Alternatively, you can use a terminal command to delete them. However, this method is not recommended for beginners as it may cause damage to your system if done incorrectly.</p>
43
- <p></p>
44
- <h2>How to Update AnyDesk on Mac</h2>
45
- <p>If you want to update AnyDesk on your Mac, you can follow these steps:</p>
46
- <ul>
47
- <li><b>Step 1: Launch AnyDesk on your Mac and click on the menu icon at the top left corner of the main window.</b></li>
48
- <li><b>Step 2: Select Check for Updates from the drop-down menu.</b></li>
49
- <li><b>Step 3: If there is a new version available, click on Download and Install.</b></li>
50
- <li><b>Step 4: Wait for the download and installation to complete and restart AnyDesk.</b></li>
51
- </ul>
52
- <p>Note: You can also enable automatic updates for AnyDesk by going to Settings > General > Updates and checking the box next to Automatically check for updates. This way, AnyDesk will notify you when there is a new version available and install it for you.</p> ddb901b051<br />
53
- <br />
54
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Control Systems Engineering By Nagrath And Gopal 5th Edition Free Free Download.md DELETED
@@ -1,8 +0,0 @@
1
-
2
- <p>Control System Engineering Book PDF By Nagnath & Gopal is useful for all those students who are studying Control System By Nagnath & Gopal Download Control System Engineering Book PDF Free You will be able to complete the preparation of Control System This book for 2 courses Provides an integrated treatment of constant time and signal processing, frequency domain analysis, modulation and demodulation techniques, DSP-based circuits and control engineering concepts for control systems. Written by the senior scholars of IIT, NIT and other reputed industry Institutes. Covering all the topics of control systems for the beginner engineer and professionals. Download Thesis Sample and start your career as an engineer with engineering degree from Top institutions and universities. or call +91-8370011001/8370077001 for more info </p>
3
- <p> This book is also helpful for engineering examination and this book is also helpful for IES GATE PSU examination and for various other government jobs examination download this book for solving the questions here you should understand one thing that the book is covering all the the communication channel concept and controlling of the communication system.</p>
4
- <h2>Control Systems Engineering By Nagrath And Gopal 5th Edition Free Download</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://imgfil.com/2uxXxB">https://imgfil.com/2uxXxB</a></b></p><br /><br />
5
- <p>Although the paper is concerned with four-port relay systems, it is expected that the results may be extended to general real transfer functions. The author hopes that the results will be of use in the further study of control and synchronization problems</p>
6
- <p>control system engineering : a technical handbook may provide you with answers to the questions about this topic that you are interested in. For example, if you want to learn more about control systems engineering, you may want to see what further information you can get from this article. So, if you want to gain more about this topic, read on.</p> 899543212b<br />
7
- <br />
8
- <br />
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Counter Strike 1.6 Maps Free Download AA Dima - Why You Should Play on This Map.md DELETED
@@ -1,6 +0,0 @@
1
-
2
- <p>Now go to where the files where all downloaded:<br/> C:\Program Files\Valve\HLDServer\cstrike<br/>Find the file named server.cfg To open click on it and chose select a program and then find notepad. Get used to notepad, it comes in handy for a HLDS server and many more computer tasks.<br/><br/>Your server.cfg file will contain some CVARs for customizing your server. Copy and paste the following, its long, and over write the original text in server.cfg. These CVARs offer more customization of your server!<br/><br/><strong>CODE</strong> Don't Copy this line.<br/>// Use this file to configure your DEDICATED server. <br/>// This config file is executed on server start.<br/>// This is a comment<br/><br/>//GENERAL<br/>// default server name. Change to "Bob's Server", etc.<br/>hostname "Counter-Strike 1.6 Server"<br/><br/>//sv_lan 0=Public/LAN, 1=LAN Default: 0 <br/>sv_lan 0<br/><br/>// sv_contact Contact email for server admin<br/>sv_contact "[email protected]"<br/><br/>// sv_region - The region of the world to report the server in.<br/>// -1 World<br/>// 0 US East coast<br/>// 1 US West coast<br/>// 2 South America<br/>// 3 Europe<br/>// 4 Asia<br/>// 5 Australia<br/>// 6 Middle East<br/>// 7 Africa<br/>sv_region 0<br/><br/>//ROUND<br/>// mp_buytime - The amount of time to allow purchasing weapons/equipment on round start<br/>mp_buytime 0.45<br/><br/>// mp_c4timer - How long before the c4 explodes<br/>mp_c4timer 45<br/><br/>// mp_timelimit - How long each map should be played before switching levels<br/>mp_timelimit 25<br/><br/>// mp_freezetime - How long players are unable to move during round starts<br/>mp_freezetime 5<br/><br/>//mp_roundtime How much time in minutes does a round last. Default: 5 <br/>mp_roundtime 5<br/><br/>// mp_startmoney - Specify how much money players start off with<br/>mp_startmoney 800<br/><br/>//mp_friendlyfire Turn on/off friendlyfire. Default: Off<br/>mp_friendlyfire 0<br/><br/>//mp_footsteps Turn on/off footsteps. 
Default: On<br/>mp_footsteps 1<br/><br/>//mp_flashlight Turn on/off the ability for clients to use flashlight. Default: Off<br/>mp_flashlight 0<br/><br/>//mp_fraglimit Amount of frags a player can exceed before changing maps. Default: 0 <br/>mp_fraglimit 0<br/><br/>//mp_maxrounds Amount of round to play before server changes maps. Default: 0 <br/>mp_maxrounds 0<br/><br/>//mp_winlimit Max number of rounds one team can win before server changes maps. Default: 0 <br/>mp_winlimit 0<br/><br/>// mp_spawnprotectiontime Time in seconds to Kick players who team-kill after round restart. Default: 5 <br/>mp_spawnprotectiontime 5<br/><br/>// mp_autoteambalance Force clients to auto-join the opposite team if they are not balanced. Default: On<br/>mp_autoteambalance 1<br/><br/>//mp_limitteams Max # of players 1 team can have over another. Default: 2 <br/>mp_limitteams 2<br/><br/>//mp_autokick Kick idle/team-killing players. Default Off<br/>mp_autokick 0<br/><br/>//mp_tkpunish Punish TK'ers on next round? Default: On<br/>mp_tkpunish 1<br/><br/>//mp_hostagepenalty How many hostages a Terrorist can kill before being kicked, 0 to disable. Default: 5 <br/>mp_hostagepenalty 5<br/><br/>// disable autoaim<br/>sv_aim 0<br/><br/>// sv_cheats - Whether to allow game cheat commands to be used by clients. 0 = off | 1 = on<br/>sv_cheats 0<br/><br/>//VOICE-CHATTING<br/>//sv_voiceenable Allow clients to use mic. Default: 1 <br/>sv_voiceenable 1<br/><br/>//sv_alltalk Players can hear all other players, no team restrictions. Default: Off<br/>sv_alltalk 0<br/><br/>//sv_voicecodec Specifies which voice codec DLL to use in a game. Set to the name of the DLL without the extension.. Default:voice_speex <br/>sv_voicecodec voice_speex<br/><br/>//sv_voicequality the bps of the voice.<br/>//1-2400bps<br/>//2-6000bps-DEFAULT<br/>//3-8000bps<br/>//4-11200bps<br/>//5-1520bps<br/>sv_voicequality 2<br/><br/>//mp_chattime amount of time in seconds players can chat after the game is over. 
Lower value = faster map load change. Default: 10 <br/>mp_chattime 10<br/><br/>//RATES-SPEEDS<br/>//sv_gravity World Gravity Default: 800<br/>sv_gravity 800<br/><br/>//sv_maxvelocity Maximum speed any ballistically moving object is allowed to attain per axis. Default: 3500 <br/>sv_maxvelocity 3500<br/><br/>//sv_maxspeed Maximum speed a player can move. Default: 320 <br/>sv_maxspeed 320<br/><br/>//CLEINT CVARS<br/>//decalfrequency Amount of time in seconds a player can spray their decal. Default: 10 <br/>decalfrequency 10<br/><br/>//sv_consistency Force cleints to pass consistency check for critical files before joining server? Default: 0<br/>sv_consistency 0<br/><br/>//sv_timeout After this many seconds without a message from a client, the client is dropped. Default: 65 <br/>sv_timeout 65<br/><br/>//mp_playerid Controls what information player see in the status bar: 0 all names; 1 team names; 2 no names. Default: 0 <br/>mp_playerid 0<br/><br/>// sv_pausable - Whether to allow clients to pause the server. 0 = off | 1 = on<br/>sv_pausable 0<br/><br/>//sv_allowupload Allow clients to upload their custom decals to the server. Default: 1 <br/>sv_allowupload 1<br/><br/>//sv_allowdownload Allow clients to downnload files. Default: 1 <br/>sv_allowdownload 1<br/><br/>//sv_unlag Enables player lag compensation. Default: 1 <br/>sv_unlag 1<br/><br/>//SPECTATING<br/>//mp_allowspectators Allow spectators on the server. Default: 1 <br/>mp_allowspectators 1<br/><br/>//mp_forcecamera Force dead players to first person mode, effectively disabling freelook. Default: Off<br/>mp_forcecamera 0<br/><br/>//sv_hltv Enables HLTV on the server. Default: 0 <br/>sv_hltv 0<br/><br/>//BANDWIDTH RATES<br/>//sv_minrate Min bandwidth rate allowed on server. Default: 0 (unlimited) <br/>sv_minrate 0<br/><br/>// sv_maxrate - The maximum bandwidth rate the server is allowed to transmit to clients<br/>sv_maxrate 10000<br/><br/>//sv_maxupdaterate Maximum updates per second that the server will allow. 
Default: 60 <br/>sv_maxupdaterate 60<br/><br/>//sv_minupdaterate Minimum updates per second that the server will allow. Default: 10 <br/>sv_minupdaterate 10<br/><br/>//sys_ticrate Max FPS (1000 Max) the server is to render<br/>sys_ticrate 200<br/><br/>//SERVER LOGGING<br/>// log Enable server logging? Default: Off <br/>log off<br/><br/>//sv_logbans Log server bans in the server logs. Default: 0 <br/>sv_logbans 0<br/><br/>// sv_logecho Echo log information to the console. Default: 1 <br/>sv_logecho 1<br/><br/>// sv_logfile Log server information in the log file. Default: 1 <br/>sv_logfile 1<br/><br/>//sv_log_onefile Log server information to only one file. Default: 0 <br/>sv_log_onefile 0<br/><br/>//sv_logsdir Folder in the game directory where server logs will be stored.<br/><br/>//RECON<br/>//rcon_password Set rcon passsword. Leave blank to disable rcon <br/>rcon_password ""<br/><br/>//sv_rcon_banpenalty Number of minutes to ban users who fail rcon authentication. Default: 0 <br/>sv_rcon_banpenalty 0<br/><br/>//sv_rcon_maxfailures Max number of times a user can fail rcon authentication before being banned. Default: 10 <br/>sv_rcon_maxfailures 10<br/><br/>//sv_rcon_minfailures Number of times a user can fail rcon authentication in sv_rcon_minfailuretime before being banned. Default: 5 <br/>sv_rcon_minfailures 5<br/><br/>//sv_rcon_minfailuretime Number of seconds to track failed rcon authentications. Default: 30 <br/>sv_rcon_minfailuretime 30<br/><br/>// lists of banned players.<br/>// load ban files<br/>exec listip.cfg<br/>exec banned.cfg<br/><br/><strong> END OF CODE:</strong> Don't copy this line.<br/><br/>Now save and look though all the CVARs. They are all explained and most of them you will not need to change, but you can. It is recommended that you change the servers name and most of the subsection labeled <strong>ROUND</strong>. Make sure to change the location to match up with you servers location! It is under <strong>GENERAL</strong>.<br/></p>
3
- <p>Now the part you have been waiting for. Starting the server.<br/><br/>1. Go to C:\Program Files\Valve\HLServer<br/>2. Find Program called HLDS<br/>3. RIGHT click on it and create a short cut. Drag the shot cut to the desktop.<br/>4. RIGHT click on the short cut and go to properties.<br/>5. In the target field add the following<br/> "C:\Program Files\Valve\HLServer\hlds.exe" -console -game cstrike -ip 192.168.254.253 -port 27015 +maxplayers 12 +map cs_assault<br/>This will start the server in the Command Prompt from earlier, conserves system resources. The game will be counter strike with the IP address of 192.168.254.253. You need to change this to match your IP address from ipconfig /all. There will be an maximum of 12 players and the starting map will be cs_assault. <br/><br/><strong>Note about Max players:</strong><br/>Steam recommends the following:<br/># 128k uplink = 4 players<br/># 256k uplink = 7 players<br/># 320k uplink = 9 players<br/># 512k uplink = 14 players<br/># 768k uplink = 21 players<br/># 1024k uplink = 28 player<br/># 1140k uplink = 32 players<br/><br/>A general rule of thumb is 35.5 kbs per player. What does this mean? This is you Internet speed. Preferably you upload speed. To find you speed go to DSL Reports and test with a test server nearest you. Use the results to determine your max players.<br/><br/>Now you can click apply and then OK. To start the server just double click the icon and it will start automatically. Success! <br/><br/>To join your server simply start up the computer you will be playing on. Start Steam and find 'Servers' click on favorites tab and add the IP address of you server to 'add a server' Don't forget to add :27015 after you IP address. Mine looks like this. 192.168.254.253:27015 Now just connect to your server.<br/><br/>To get others to join you will need to complete a few more steps.<br/><br/>Extra: You may want to have your server start automatically when Windows Starts. Doing this is easy! 
Go to Start -> All Programs -> Startup and copy the desktop short cut here. You will also want to do the same with Windows Media Player. By having media player running in the background you can boost FPS significantly and you don't even have to be playing a song! What have you got to lose.<br/></p>
4
- <h2>counter strike 1.6 maps free download aa dima</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash; <a href="https://imgfil.com/2uxXOl">https://imgfil.com/2uxXOl</a></b></p><br /><br /> aaccfb2cb3<br />
5
- <br />
6
- <br />
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/DMIFIT Tool And HPBQ138EXE BEST.md DELETED
@@ -1,42 +0,0 @@
1
- <h2>DMIFIT Tool And HPBQ138EXE</h2><br /><p><b><b>Download Zip</b> &raquo;&raquo;&raquo; <a href="https://imgfil.com/2uxXDm">https://imgfil.com/2uxXDm</a></b></p><br /><br />
2
- <br />
3
- tool are the result of the hard work of RescoSoft Inc. Therefore, we have to hope that the quality of these software tools will be as good as those of other software tools from RescoSoft Inc.
4
-
5
- Verdict: Like all the good software tools, the quality of RescoSoft tools for repair your HP printer is great. While the installation of RescoSoft tools can be a little tough, the usage of these software tools is a breeze.
6
-
7
- 7. Hp Repair Solution
8
-
9
- What Hp Repair Solution is?
10
-
11
- Hp Repair Solution is a great tool for HP printer repair. Like all the other tools from RescoSoft Inc, this tool is easy to use.
12
-
13
- How to use Hp Repair Solution?
14
-
15
- All you have to do is to install Hp Repair Solution from RescoSoft Inc to your Windows system. Once the installation is complete, you can run the tool and it will repair all the bugs present in your HP printer.
16
-
17
- Once you are done with the repair, you can uninstall Hp Repair Solution from your PC.
18
-
19
- Verdict: The great thing about Hp Repair Solution is that the tool automatically scans the printer and finds all the bugs present in your HP printer.
20
-
21
- 6. HP Software
22
-
23
- What is HP Software?
24
-
25
- HP Software is a wonderful tool for the repair of your HP printer. It is a combination of several tools that can help you to repair your printer automatically. It comes with all the tools required to repair your printer, such as the hpbq138exe tool, hpbq138dum tool, dmifit tool, hpbq138exee tool, hpbq138exeu tool, hpbq138exeo tool, hpbq138exeq tool, and hpbq138exea tool. This combination of tools will repair your HP printer automatically.
26
-
27
- How to use HP Software?
28
-
29
- You can download and install HP Software from RescoSoft Inc. Once the installation is complete, you can open the software and it will prompt you to repair your printer.
30
-
31
- Once you are done with the repair, you can uninstall HP Software from your PC.
32
-
33
- Verdict: The tools bundled in HP Software are very useful. These tools will help you to repair the printer automatically.
34
-
35
- 5. HP Printer Setup
36
-
37
- What is HP Printer Setup?
38
-
39
- HP Printer Setup is a small tool that can help you to setup your 4fefd39f24<br />
40
- <br />
41
- <br />
42
- <p></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Download UPD Panduan Penggunaan Canon Eos 600d Bahasa Indonesia.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>download panduan penggunaan canon eos 600d bahasa indonesia</h2><br /><p><b><b>Download Zip</b> &#127379; <a href="https://imgfil.com/2uy1hk">https://imgfil.com/2uy1hk</a></b></p><br /><br />
2
- <br />
3
- Download Canon EOS 650D EOS Rebel T4i PDF Manual User Guide. ... Indonesia Buku Panduan Canon Eos 600d Bahasa Indonesia Home ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download GTA Vice City 5 and Experience the 80s in HD.md DELETED
@@ -1,79 +0,0 @@
1
- <br />
2
- <h1>GTA Vice City Download 5: How to Play the Classic Game on Your PC</h1>
3
- <p>Do you miss the nostalgia of playing GTA Vice City, one of the most popular and iconic games of all time? Do you want to relive the adventures of Tommy Vercetti, a former mobster who tries to take over the criminal underworld of Vice City in the 1980s? Do you want to enjoy the amazing soundtrack, graphics, and gameplay of GTA Vice City on your PC in 2023? If you answered yes to any of these questions, then this article is for you. In this article, we will show you how to download GTA Vice City on your PC using two different methods. We will also tell you why you should play GTA Vice City in 2023 and what makes it such a great game. So, without further ado, let's get started!</p>
4
- <h2>gta vice city download 5</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash; <a href="https://jinyurl.com/2uNOka">https://jinyurl.com/2uNOka</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <h3>What is GTA Vice City?</h3>
7
- <p>GTA Vice City is an open-world action-adventure game developed by Rockstar North and published by Rockstar Games in 2002. It is the sixth installment in the Grand Theft Auto series and the first one to be set in a fictional city based on Miami, Florida. The game follows the story of Tommy Vercetti, who is sent by his boss Sonny Forelli to Vice City to establish a foothold for their organization. However, things go wrong when Tommy is ambushed by rival gangs and loses both the money and the drugs he was supposed to deliver. Tommy then sets out to find out who betrayed him and to take over the city by any means necessary.</p>
8
- <h3>Why should you play GTA Vice City in 2023?</h3>
9
- <p>GTA Vice City is a game that has stood the test of time and remains one of the most beloved and influential games ever made. Here are some of the reasons why you should play GTA Vice City in 2023:</p>
10
- <ul>
11
- <li><b>It has a captivating story and memorable characters.</b> GTA Vice City has a well-written and engaging story that keeps you hooked from start to finish. You will meet many colorful and interesting characters along the way, such as Lance Vance, Ken Rosenberg, Ricardo Diaz, Phil Cassidy, Avery Carrington, Umberto Robina, and many more. You will also encounter many references and parodies of popular movies, TV shows, celebrities, and events from the 1980s.</li>
12
- <li><b>It has a stunning soundtrack and atmosphere.</b> GTA Vice City has one of the best soundtracks in gaming history, featuring over 80 songs from various genres such as pop, rock, hip hop, disco, soul, reggae, metal, and more. You can listen to these songs on various radio stations while driving around the city or in certain locations. The game also has a great atmosphere that captures the vibe and style of the 1980s, with neon lights, palm trees, beaches, skyscrapers, cars, fashion, and culture.</li>
13
- <li><b>It has a fun and diverse gameplay.</b> GTA Vice City has a gameplay that offers you a lot of freedom and variety. You can explore the city at your own pace, either on foot or by using various vehicles such as cars, motorcycles, boats, helicopters, and planes. You can also complete various missions that advance the main story or give you extra rewards. You can also participate in many side activities such as rampages, races, stunts, robberies, property management, and more. You can also use a variety of weapons and items to fight against enemies or cause chaos in the city.</li>
14
- </ul>
15
- <h2>How to download GTA Vice City on your PC</h2>
16
- <p>If you want to play GTA Vice City on your PC in 2023, you have two options: you can either buy the game from the official Rockstar Games website or you can use an emulator like BlueStacks to play the mobile version of GTA Vice City on your PC. Here are the steps for each option:</p>
17
- <h3>Option 1: Buy the game from Rockstar Games website</h3>
18
- <p>This is the easiest and most reliable way to download GTA Vice City on your PC. All you need is a stable internet connection and a valid credit card or PayPal account. Here are the steps:</p>
19
- <h4>Step 1: Visit the Rockstar Games website and create an account</h4>
20
- <p>Go to <a href="">https://www.rockstargames.com/</a> and click on the Sign In button at the top right corner of the screen. If you already have an account, enter your email and password and click on Sign In. If you don't have an account, click on Create a New Account and follow the instructions to create one.</p>
21
- <h4>Step 2: Go to the Downloads section and find GTA Vice City</h4>
22
- <p>Once you are signed in, click on the Downloads button at the top of the screen. You will see a list of games that are available for download from Rockstar Games. Scroll down until you find GTA Vice City and click on it.</p>
23
- <h4>Step 3: Click on Buy Now and complete the payment process</h4>
24
- <p>You will see a page with the details of GTA Vice City, such as the price, the system requirements, and the screenshots. Click on the Buy Now button and choose your preferred payment method. You can pay with a credit card or PayPal. Follow the instructions to complete the payment process.</p>
25
- <h4>Step 4: Download and install the game on your PC</h4>
26
- <p>After you have completed the payment process, you will receive an email with a link to download GTA Vice City on your PC. Click on the link and follow the instructions to download and install the game on your PC. You will need about 1 GB of free space on your hard drive to install GTA Vice City.</p>
27
- <p>gta vice city definitive edition steam download<br />
28
- gta vice city rockstar games official site<br />
29
- gta vice city 1980s story and gameplay<br />
30
- gta vice city system requirements and compatibility<br />
31
- gta vice city free download for windows 10<br />
32
- gta vice city updated graphics and controls<br />
33
- gta vice city beach and urban sprawl map<br />
34
- gta vice city tommy vercetti's rise to power<br />
35
- gta vice city soundtrack and radio stations<br />
36
- gta vice city cheats and mods for pc<br />
37
- gta vice city trilogy bundle discount<br />
38
- gta vice city reviews and ratings<br />
39
- gta vice city online multiplayer mode<br />
40
- gta vice city tips and tricks for beginners<br />
41
- gta vice city comparison with gta 5<br />
42
- gta vice city best missions and side quests<br />
43
- gta vice city hidden packages and collectibles<br />
44
- gta vice city vehicles and weapons list<br />
45
- gta vice city characters and voice actors<br />
46
- gta vice city easter eggs and secrets<br />
47
- gta vice city walkthrough and guide<br />
48
- gta vice city save game files download<br />
49
- gta vice city patch and update notes<br />
50
- gta vice city achievements and trophies<br />
51
- gta vice city fan art and wallpapers</p>
52
- <h3>Option 2: Use an emulator like BlueStacks to play the mobile version of GTA Vice City on your PC</h3>
53
- <p>This is another way to download GTA Vice City on your PC, but it requires some extra steps and software. You will need to use an emulator like BlueStacks, which is a program that allows you to run Android apps on your PC. You will also need to buy the mobile version of GTA Vice City from the Google Play Store, which costs $4.99. Here are the steps:</p>
54
- <h4>Step 1: Download and install BlueStacks on your PC</h4>
55
- <p>Go to <a href="">https://www.bluestacks.com/</a> and click on the Download BlueStacks button. Follow the instructions to download and install BlueStacks on your PC. You will need about 5 GB of free space on your hard drive to install BlueStacks.</p>
56
- <h4>Step 2: Launch BlueStacks and sign in with your Google account</h4>
57
- <p>After you have installed BlueStacks, launch it from your desktop or start menu. You will see a window with various options and features. Click on the Google Play Store icon at the bottom right corner of the window. You will be asked to sign in with your Google account. If you already have one, enter your email and password and click on Sign In. If you don't have one, click on Create a New Account and follow the instructions to create one.</p>
58
- <h4>Step 3: Search for GTA Vice City in the Google Play Store and install it</h4>
59
- <p>Once you are signed in, you will see a page with various apps and games that are available for download from the Google Play Store. Type GTA Vice City in the search bar and press Enter. You will see a page with the details of GTA Vice City, such as the price, the rating, the reviews, and the screenshots. Click on the Install button and follow the instructions to buy and install GTA Vice City on your PC.</p>
60
- <h4>Step 4: Enjoy playing GTA Vice City on your PC with BlueStacks features</h4>
61
- <p>After you have installed GTA Vice City, you can launch it from the BlueStacks home screen. You will see a window with the game running on your PC. You can use your mouse and keyboard to control the game, or you can customize the controls according to your preference. You can also use the BlueStacks features to enhance your gaming experience, such as recording your gameplay, taking screenshots, streaming your gameplay, and more.</p>
62
- <h2>Conclusion</h2>
63
- <p>GTA Vice City is a classic game that deserves to be played by every gamer who loves open-world action-adventure games. It has a captivating story, a stunning soundtrack, and a fun and diverse gameplay that will keep you entertained for hours. In this article, we have shown you how to download GTA Vice City on your PC using two different methods: buying the game from the Rockstar Games website or using an emulator like BlueStacks to play the mobile version of GTA Vice City on your PC. Both methods are easy and reliable, and you can choose the one that suits you best. We hope you have enjoyed this article and found it helpful. Now, go ahead and download GTA Vice City on your PC and have fun!</p>
64
- <p>If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you!</p>
65
- <h3>FAQs</h3>
66
- <ul>
67
- <li><b>Q: How much does GTA Vice City cost on PC?</b></li>
68
- <li>A: GTA Vice City costs $9.99 on the Rockstar Games website and $4.99 on the Google Play Store.</li>
69
- <li><b>Q: What are the system requirements for GTA Vice City on PC?</b></li>
70
- <li>A: The minimum system requirements for GTA Vice City on PC are: Windows XP/Vista/7/8/10, 800 MHz Intel Pentium III or AMD Athlon processor, 128 MB of RAM, 32 MB video card with DirectX 9.0 compatible drivers, 915 MB of free hard disk space, and DirectX 9.0 compatible sound card.</li>
71
- <li><b>Q: Can I play GTA Vice City online with other players?</b></li>
72
- <li>A: GTA Vice City does not have an official online multiplayer mode, but there are some unofficial mods that allow you to play GTA Vice City online with other players. One of them is <a href="">https://www.mtasa.com/</a>, which is a free mod for GTA San Andreas that also supports GTA Vice City.</li>
73
- <li><b>Q: Can I play GTA Vice City on other devices besides PC?</b></li>
74
- <li>A: Yes, you can play GTA Vice City on other devices besides PC. GTA Vice City is also available for PlayStation 2, PlayStation 3, PlayStation 4, Xbox, Xbox 360, Xbox One, iOS, Android, Mac OS X, and Fire OS.</li>
75
- <li><b>Q: What are some of the best cheats for GTA Vice City?</b></li>
76
- <li>A: There are many cheats for GTA Vice City that can make the game more fun and easy. Some of them are: THUGSTOOLS (all weapons), ASPIRINE (full health), PRECIOUSPROTECTION (full armor), LEAVEMEALONE (lower wanted level), PANZER (spawn a tank), GETTHEREFAST (spawn a Sabre Turbo), GETTHEREVERYFASTINDEED (spawn a Hotring Racer), GETTHEREAMAZINGLYFAST (spawn a Hotring Racer 2), FANNYMAGNET (women follow you), BIGBANG (blow up all cars), SEAWAYS (cars can drive on water), COMEFLYWITHME (cars can fly), ICANTTAKEITANYMORE (commit suicide), and LIFEISPASSINGMEBY (speed up time).</li>
77
- </ul></p> 401be4b1e0<br />
78
- <br />
79
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Garena Bed Wars APK for Android - Team up and destroy your enemies beds in this PVP game.md DELETED
@@ -1,143 +0,0 @@
1
- <br />
2
- <h1>Garena Bed Wars APK Download: How to Play the Ultimate Sandbox Game on Your Android Device</h1>
3
- <p>Are you looking for a fun and exciting game that lets you unleash your creativity and imagination? Do you want to play with your friends or other players from around the world in a team-based PVP game? If you answered yes, then you should try out <strong>Garena Bed Wars</strong>, the ultimate sandbox game that has attracted millions of players in <strong>Garena Blockman GO</strong>.</p>
4
- <h2>garena bed wars apk download</h2><br /><p><b><b>Download File</b> &#10038;&#10038;&#10038; <a href="https://jinyurl.com/2uNN9L">https://jinyurl.com/2uNN9L</a></b></p><br /><br />
5
- <p>Garena Bed Wars is a game where you have to protect your bed at your own base while using all the tools you have at your disposal to destroy the opponents' beds and become the final victor. You can collect resources, buy equipment, build bridges, attack enemies, and cooperate with your teammates in this thrilling and addictive game. You can also customize your character, choose from different maps and modes, and enjoy countless minigames from different genres.</p>
6
- <p>If you want to play Garena Bed Wars on your Android device, you will need to download and install the <strong>Garena Bed Wars APK</strong> file, which is a modified version of the original game that allows you to access all the features and benefits without any restrictions. In this article, we will show you how to download and install Garena Bed Wars APK on your Android device, how to play Garena Bed Wars as a beginner, and how to win Garena Bed Wars with tips and tricks from pro players. Let's get started!</p>
7
- <h2>How to Play Garena Bed Wars: A Beginner's Guide</h2>
8
- <p>If you are new to Garena Bed Wars, you might feel overwhelmed by the rules and mechanics of the game. Don't worry, we are here to help you learn the basics of how to play Garena Bed Wars in a few simple steps.</p>
9
- <h3>How to join a game and choose a team</h3>
10
- <p>When you launch Garena Bed Wars, you will see a list of available games that you can join. You can also create your own game by tapping on the "+" icon at the top right corner of the screen. You can choose from different maps, such as Sky Island, Desert Island, Snow Island, etc., and different modes, such as Solo, Duo, Squad, etc. You can also set a password for your game if you want to play with your friends only.</p>
11
- <p>garena bed wars android game free download<br />
12
- garena bed wars latest version apk<br />
13
- garena bed wars pvp game download<br />
14
- garena blockman go bed wars apk<br />
15
- garena bed wars mod apk unlimited money<br />
16
- garena bed wars online multiplayer game<br />
17
- garena bed wars apk for pc<br />
18
- garena bed wars hack apk download<br />
19
- garena bed wars adventures apk<br />
20
- garena bed wars new update apk<br />
21
- garena bed wars apk pure<br />
22
- garena bed wars offline game download<br />
23
- garena bed wars tips and tricks<br />
24
- garena bed wars apk mirror<br />
25
- garena bed wars cheats and codes<br />
26
- garena bed wars gameplay video download<br />
27
- garena bed wars apk old version<br />
28
- garena bed wars review and rating<br />
29
- garena bed wars apk xapk<br />
30
- garena bed wars strategy guide<br />
31
- garena bed wars apk combo<br />
32
- garena bed wars skins and costumes<br />
33
- garena bed wars apk no ads<br />
34
- garena bed wars best team and weapons<br />
35
- garena bed wars apk obb download<br />
36
- garena bed wars how to play and win<br />
37
- garena bed wars apk mod menu<br />
38
- garena bed wars maps and modes<br />
39
- garena bed wars apk uptodown<br />
40
- garena bed wars support and contact<br />
41
- garena bed wars apk rexdl<br />
42
- garena bed wars rewards and achievements<br />
43
- garena bed wars apk data download<br />
44
- garena bed wars system requirements and compatibility<br />
45
- garena bed wars apk mob.org<br />
46
- garena bed wars terms of service and privacy policy<br />
47
- garena bed wars apk apkpure.com<br />
48
- garena bed wars news and updates<br />
49
- garena bed wars apk revdl.com<br />
50
- garena bed wars faq and help center</p>
51
- <p>Once you join or create a game, you will be taken to the lobby where you can choose your team. There are four teams in each game: Red, Blue, Green, and Yellow. You can tap on the team icon at the bottom of the screen to join or switch teams. You can also chat with other players in the lobby or invite your friends by tapping on the "Invite" button at the top left corner of the screen.</p>
52
- <h3>How to collect resources and buy equipment</h3>
53
- <p>When the game starts, you will spawn on your island with your teammates. Each island has its own base with a bed. As long as the bed is not destroyed, you and your teammates can be revived if you die. Therefore, protecting your bed is the most important task in the game.</p>
54
- <p>To protect your bed, you will need to collect resources and buy equipment. There are three types of resources in the game: iron, gold, and diamonds. Iron and gold are generated by the generators on your island. You can use them to buy items from the shop, such as blocks, weapons, armor, tools, etc. Diamonds are generated by the generators on the middle island. You can use them to buy upgrades from the team shop, such as sharpness, protection, haste, etc.</p>
55
- <p>To collect resources, you will need to go to the generators and pick up the items that are dropped on the ground. You can also get resources by killing enemies or breaking their beds. To buy equipment, you will need to go to the shop or the team shop and tap on the item you want to buy. You can also use the quick buy menu at the bottom of the screen to buy items faster.</p>
56
- <h3>How to build bridges and attack enemies</h3>
57
- <p>To reach other islands and attack enemies, you will need to build bridges. You can use blocks that you buy from the shop to place them on the ground and create a path. You can also use ender pearls or launch pads to teleport or jump to other islands.</p>
58
- <p>To attack enemies, you will need to use weapons that you buy from the shop, such as swords, bows, axes, etc. You can also use items that have special effects, such as fireballs, TNT, snowballs, etc. Your goal is to break their beds and kill them before they respawn. You can also use traps that you buy from the team shop, such as alarm trap, counter-offensive trap, etc., to defend your island from invaders.</p>
59
- <h3>How to protect your bed and survive</h3>
60
- <p>To protect your bed, you will need to cover it with blocks that you buy from the shop. You can use different types of blocks, such as wool, wood, stone, etc., to make it harder for enemies to break your bed. You can also use items that have special effects, such as water buckets, obsidian blocks, iron golems, etc., to enhance your defense.</p>
61
- <p>To survive, you will need to avoid falling into the void or getting killed by enemies. You can use armor that you buy from the shop, such as leather armor, chainmail armor, iron armor, etc., to reduce the damage you take. You can also use items that have special effects, such as golden apples, potions, invisibility cloaks, etc., to heal yourself or gain an advantage.</p>
62
- <h2>How to Win Garena Bed Wars: Tips and Tricks from Pro Players</h2>
63
- <p>If you want to win Garena Bed Wars more often and become a pro player, you will need to learn some tips and tricks that can help you improve your skills and performance in the game. Here are some of them:</p>
64
- <h3>How to use the best strategies and tactics for different maps and modes</h3>
65
- <p>Each map and mode in Garena Bed Wars has its own characteristics and challenges that require different strategies and tactics. For example:</p>
66
- <ul>
67
- <li>In Sky Island map, you can use ender pearls or launch pads to move around quickly and surprise your enemies.</li>
68
- <li>In Desert Island map, you can use fireballs or TNT to destroy the sand bridges and cut off your enemies' access.</li>
69
- <li>In Snow Island map, you can use snowballs or ice bridges to knock off your enemies or create shortcuts.</li>
70
- <li>In Solo mode, you can focus on collecting diamonds and upgrading your equipment as soon as possible.</li>
71
- <li>In Duo mode, you can coordinate with your partner and split up tasks such as collecting resources, buying items, building bridges, etc.</li>
72
- <li>In Squad mode, you can communicate with your teammates and assign roles such as defender, attacker, supporter, etc.</li>
73
- </ul>
74
- <h3>How to cooperate with your teammates and communicate effectively</h3>
75
- <p>Garena Bed Wars is a team-based game that requires cooperation and communication among teammates. You can use the chat function or the voice chat function to communicate with your teammates and share information, such as enemy locations, resource status, attack plans, etc. You can also use the team signals or the emoticons to express your emotions or intentions, such as happy, angry, sad, etc.</p>
76
- <p>When cooperating with your teammates, you should follow some basic etiquette and rules, such as:</p>
77
- <ul>
78
- <li>Respect your teammates and do not insult or troll them.</li>
79
- <li>Listen to your teammates and do not ignore or contradict them.</li>
80
- <li>Help your teammates and do not abandon or betray them.</li>
81
- <li>Share your resources and do not hog or waste them.</li>
82
- <li>Follow your team's strategy and do not act on your own or sabotage it.</li>
83
- </ul>
84
- <h3>How to avoid common mistakes and deal with hackers</h3>
85
- <p>Garena Bed Wars is a game that requires skill and strategy, but also luck and chance. Sometimes, you might make some common mistakes that can cost you the game, such as:</p>
86
- <ul>
87
- <li>Leaving your bed unprotected or poorly defended.</li>
88
- <li>Rushing to attack without proper equipment or backup.</li>
89
- <li>Being too greedy or reckless and exposing yourself to danger.</li>
90
- <li>Being too passive or timid and missing opportunities to strike.</li>
91
- <li>Being too predictable or repetitive and allowing your enemies to counter you.</li>
92
- </ul>
93
- <p>To avoid these mistakes, you should always be aware of your surroundings and your enemies' actions. You should also learn from your mistakes and improve your skills and strategies. You can also watch some videos or streams of pro players and learn from their tips and tricks.</p>
94
- <p>Sometimes, you might encounter some hackers who use cheats or hacks to gain an unfair advantage in the game, such as flying, speed hacking, aimbotting, etc. These hackers can ruin the fun and balance of the game and make you lose unfairly. To deal with hackers, you should report them to the developers by tapping on the "Report" button at the end of the game. You can also block them by tapping on their name and choosing "Block". You can also avoid playing with hackers by joining games that have anti-cheat systems or moderators.</p>
95
- <h2>Conclusion</h2>
96
- <p>Garena Bed Wars is a game that offers endless fun and excitement for players who love sandbox games and PVP games. You can play with your friends or other players from around the world in a team-based game where you have to protect your bed and destroy the opponents' beds. You can also enjoy various features and benefits by downloading and installing Garena Bed Wars APK on your Android device.</p>
97
- <p>If you want to play Garena Bed Wars on your Android device, you can follow these steps:</p>
98
- <ol>
99
- <li>Download Garena Bed Wars APK from a trusted source by clicking on this link: [Garena Bed Wars APK Download].</li>
100
- <li>Allow unknown sources on your device by going to Settings > Security > Unknown Sources.</li>
101
- <li>Install Garena Bed Wars APK by tapping on the file and following the instructions.</li>
102
- <li>Launch Garena Bed Wars and enjoy the game!</li>
103
- </ol>
104
- <p>We hope this article has helped you learn how to play Garena Bed Wars on your Android device. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you!</p>
105
- <h2>FAQs</h2>
106
- <h3>What are the minimum requirements to play Garena Bed Wars on Android?</h3>
107
- <p>The minimum requirements to play Garena Bed Wars on Android are:</p>
108
- <ul>
109
- <li>Android 4.4 or higher</li>
110
- <li>2 GB of RAM or higher</li>
111
- <li>500 MB of free storage space or higher</li>
112
- <li>A stable internet connection</li>
113
- </ul>
114
- <h3>Is Garena Bed Wars safe and legal to download and play?</h3>
115
- <p>Garena Bed Wars is safe and legal to download and play as long as you download it from a trusted source, such as [Garena Blockman GO] or [Garena Bed Wars APK Download]. However, you should be careful of downloading Garena Bed Wars from unknown sources, as they might contain viruses or malware that can harm your device or steal your personal information. You should also avoid using cheats or hacks that can get you banned from the game or cause other problems.</p>
116
- <h3>How can I update Garena Bed Wars to the latest version?</h3>
117
- <p>You can update Garena Bed Wars to the latest version by following these steps:</p>
118
- <ol>
119
- <li>Go to [Garena Blockman GO] or [Garena Bed Wars APK Download] and check if there is a new version available.</li>
120
- <li>If there is a new version, download it and install it on your device.</li>
121
- <li>Launch Garena Bed Wars and enjoy the new features and improvements.</li>
122
- </ol>
123
- <p>You can also enable the auto-update function on your device by going to Settings > Apps > Garena Bed Wars > Auto-update. This way, you will always have the latest version of Garena Bed Wars on your device.</p>
124
- <h3>How can I contact the developers of Garena Bed Wars for support or feedback?</h3>
125
- <p>If you have any issues or suggestions regarding Garena Bed Wars, you can contact the developers of Garena Bed Wars by using one of these methods:</p>
126
- <ul>
127
- <li>Email: You can send an email to [[email protected]] and describe your problem or idea in detail. You should also attach some screenshots or videos if possible.</li>
128
- <li>Facebook: You can visit the official Facebook page of Garena Blockman GO at [https://www.facebook.com/Blockmango-608882679545354/] and leave a message or comment.</li>
129
- <li>Discord: You can join the official Discord server of Garena Blockman GO at [https://discord.gg/8fJ9Z7F] and chat with other players or moderators.</li>
130
- </ul>
131
- <p>The developers of Garena Bed Wars are always happy to hear from their players and will try their best to solve your problems or implement your suggestions.</p>
132
- <h3>How can I play Garena Bed Wars with my friends?</h3>
133
- <p>If you want to play Garena Bed Wars with your friends, you can follow these steps:</p>
134
- <ol>
135
- <li>Add your friends as contacts by tapping on the "Contacts" button at the bottom of the screen and entering their usernames or IDs.</li>
136
- <li>Create a game by tapping on the "+" icon at the top right corner of the screen and choosing a map and a mode.</li>
137
- <li>Set a password for your game by tapping on the "Password" button at the top left corner of the screen and entering a code.</li>
138
- <li>Invite your friends by tapping on the "Invite" button at the top left corner of the screen and selecting your contacts.</li>
139
- <li>Wait for your friends to join your game and choose a team.</li>
140
- <li>Start the game by tapping on the "Start" button at the bottom of the screen and have fun!</li>
141
- </ol></p> 401be4b1e0<br />
142
- <br />
143
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Warpath Jurassic Park for Android and Unleash Your Inner Dinosaur.md DELETED
@@ -1,67 +0,0 @@
1
- <br />
2
- <h1>Jurassic Park Warpath: How to Download and Play on Android</h1>
3
- <h2>Introduction</h2>
4
- <p>If you are a fan of dinosaurs and fighting games, you might be interested in Jurassic Park Warpath, a classic PlayStation game that lets you control various prehistoric creatures and battle against each other in different locations from the Jurassic Park movies. But did you know that you can also play this game on your Android device? In this article, we will show you how to download and play Jurassic Park Warpath on Android, as well as some tips and tricks to help you enjoy this game even more.</p>
5
- <h3>What is Jurassic Park Warpath?</h3>
6
- <p>Jurassic Park Warpath is a fighting video game released on the PlayStation console in 1999. It is a spin-off of the films Jurassic Park and The Lost World: Jurassic Park, and features 14 playable dinosaurs, each with their own moves, animations, and sounds. The game has four modes: arcade, versus, survival, and exhibition. In arcade mode, you choose a dinosaur and fight through eight stages against different opponents. In versus mode, you can play against another player or the computer. In survival mode, you have to defeat as many enemies as possible without dying. In exhibition mode, you can watch two dinosaurs fight without any input from you.</p>
7
- <h2>jurassic park warpath download android</h2><br /><p><b><b>Download File</b> &#10037;&#10037;&#10037; <a href="https://jinyurl.com/2uNTgy">https://jinyurl.com/2uNTgy</a></b></p><br /><br />
8
- <h3>Why play Jurassic Park Warpath on Android?</h3>
9
- <p>Playing Jurassic Park Warpath on Android has several advantages over playing it on the original PlayStation. First of all, you don't need to buy or own a PlayStation console or a physical copy of the game. You can simply download an emulator and the game file from the internet for free. Second, you can play the game anytime and anywhere, as long as you have your Android device with you. You don't need to plug in any wires or cables, or worry about battery life. Third, you can customize the game settings to your liking, such as changing the graphics quality, the sound volume, the controller layout, and more. You can also save and load your progress at any point in the game.</p>
10
- <h2>How to download Jurassic Park Warpath for Android</h2>
11
- <h3>Step 1: Download an emulator</h3>
12
- <p>An emulator is a software that allows you to run games from other platforms on your device. To play Jurassic Park Warpath on Android, you need an emulator that can run PlayStation games. There are many emulators available for Android, but one of the most popular and reliable ones is ePSXe. You can download ePSXe from the Google Play Store or from its official website . After downloading ePSXe, install it on your device and open it.</p>
13
- <h3>Step 2: Download the game file</h3>
14
- <p>The game file is the data that contains the actual game content. To play Jurassic Park Warpath on Android, you need to download the game file in ISO or BIN format. There are many websites that offer free downloads of PlayStation games, but one of the most trusted ones is Internet Archive . You can search for "Warpath Jurassic Park" on Internet Archive and download the file that has "USA" in its name. After downloading the file, save it in a folder that you can easily access.</p>
15
- <h3>Step 3: Install and run the game</h3>
16
- <p>Now that you have both the emulator and the game file, you are ready to install and run the game. To do this , and War of the Monsters . You can also check out our website for more recommendations on dinosaur and fighting games.</p>
17
- <p>jurassic park warpath android apk<br />
18
- jurassic park warpath psx iso download<br />
19
- jurassic park warpath emulator android<br />
20
- jurassic park warpath free download for android<br />
21
- jurassic park warpath rom android<br />
22
- jurassic park warpath game download android<br />
23
- jurassic park warpath ps1 android<br />
24
- jurassic park warpath online download android<br />
25
- jurassic park warpath mod apk android<br />
26
- jurassic park warpath cheats android download<br />
27
- jurassic park warpath full version download android<br />
28
- jurassic park warpath playstation download android<br />
29
- jurassic park warpath mobile download android<br />
30
- jurassic park warpath hack apk android<br />
31
- jurassic park warpath fighting game download android<br />
32
- jurassic park warpath dinosaurs download android<br />
33
- jurassic park warpath iso file download android<br />
34
- jurassic park warpath eboot download android<br />
35
- jurassic park warpath ppsspp android download<br />
36
- jurassic park warpath rar download android<br />
37
- jurassic park warpath zip download android<br />
38
- jurassic park warpath bin download android<br />
39
- jurassic park warpath cue download android<br />
40
- jurassic park warpath torrent download android<br />
41
- jurassic park warpath direct download android<br />
42
- jurassic park warpath mega download android<br />
43
- jurassic park warpath mediafire download android<br />
44
- jurassic park warpath google drive download android<br />
45
- jurassic park warpath zippyshare download android<br />
46
- jurassic park warpath 4shared download android<br />
47
- jurassic park warpath coolrom download android<br />
48
- jurassic park warpath freeroms download android<br />
49
- jurassic park warpath loveroms download android<br />
50
- jurassic park warpath romhustler download android<br />
51
- jurassic park warpath emuparadise download android<br />
52
- jurassic park warpath cdromance download android<br />
53
- jurassic park warpath nicoblog download android<br />
54
- jurassic park warpath vimm's lair download android<br />
55
- jurassic park warpath archive.org download android<br />
56
- jurassic park warpath reddit download android<br />
57
- how to download jurassic park warpath on android<br />
58
- where to download jurassic park warpath for android<br />
59
- best site to download jurassic park warpath for android<br />
60
- easiest way to download jurassic park warpath on android<br />
61
- fastest way to download jurassic park warpath on android<br />
62
- safest way to download jurassic park warpath on android<br />
63
- legal way to download jurassic park warpath on android<br />
64
- no survey download jurassic park warpath for android<br />
65
- no virus download jurassic park warpath for android</p> 401be4b1e0<br />
66
- <br />
67
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Dragon Ball Z Budokai Tenkaichi 3 PC Version How to Get and Install It.md DELETED
@@ -1,173 +0,0 @@
1
-
2
- <h1>How to Download Dragon Ball Z Budokai Tenkaichi 3 on PC</h1>
3
- <p>Dragon Ball Z Budokai Tenkaichi 3 is one of the most popular and beloved games in the Dragon Ball franchise. It features over 150 characters, 30 stages, and a variety of game modes that will keep you entertained for hours. But what if you want to play it on your PC instead of your PlayStation 2 or Wii console? Well, you're in luck, because in this article, we will show you how to download and play Dragon Ball Z Budokai Tenkaichi 3 on PC using a free emulator called Dolphin. We will also give you some tips and tricks to enhance your gaming experience. So, without further ado, let's get started!</p>
4
- <h2>how to download dragon ball z budokai tenkaichi 3 on pc</h2><br /><p><b><b>Download File</b> &#10145; <a href="https://jinyurl.com/2uNOXj">https://jinyurl.com/2uNOXj</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <h3>What is Dragon Ball Z Budokai Tenkaichi 3?</h3>
7
- <p>Dragon Ball Z Budokai Tenkaichi 3 is a fighting game based on the anime and manga series Dragon Ball. It was developed by Spike and published by Atari for the PlayStation 2 and Wii in 2007. It is the third and final installment in the Budokai Tenkaichi series, which is also known as Dragon Ball Z: Sparking! in Japan.</p>
8
- <p>The game follows the events of the Dragon Ball story from the Saiyan Saga to the Kid Buu Saga, as well as some original scenarios and what-if scenarios. You can choose from over 150 playable characters, each with their own unique moves, transformations, and abilities. You can also customize your characters with various items and skills that affect their stats and performance.</p>
9
- <p>The game offers several modes of play, such as Story Mode, where you can relive the epic battles of the series; Dragon History, where you can explore different timelines and scenarios; Ultimate Battle, where you can test your skills against various opponents; World Tournament, where you can compete for prizes and glory; Duel, where you can fight against a friend or the CPU; Training, where you can practice your moves and combos; Evolution Z, where you can edit your character's attributes; Data Center, where you can view your records and achievements; Replay, where you can watch your saved replays; and Options, where you can adjust the game settings.</p>
10
- <h3>Why play it on PC?</h3>
11
- <p>Dragon Ball Z Budokai Tenkaichi 3 is a great game that deserves to be played by every fan of the series. However, not everyone has access to a PlayStation 2 or Wii console, or maybe they just prefer to play games on their PC. That's why playing it on PC using an emulator is a good option for many reasons:</p>
12
- <ul>
13
- <li>You can enjoy the game in high-definition graphics and sound quality, thanks to the emulator's enhancements and features.</li>
14
- <li>You can use any controller or keyboard that suits your preference and comfort.</li>
15
- <li>You can save your progress anytime and anywhere, thanks to the emulator's save states and memory cards.</li>
16
- <li>You can unlock all the characters and stages without having to complete the game or use cheat codes.</li>
17
- <li>You can use cheats and mods to modify the game according to your liking.</li>
18
- <li>You can play online with other players using netplay or LAN.</li>
19
- <li>You can avoid any issues or errors that may <h2>Requirements and Preparations</h2>
20
- <h3>System Requirements</h3>
21
- <p>Before you download and play Dragon Ball Z Budokai Tenkaichi 3 on PC, you need to make sure that your PC meets the minimum system requirements for running the emulator and the game. Here are the recommended system requirements for Dolphin Emulator:</p>
22
- <table>
23
- <tr>
24
- <th>Component</th>
25
- <th>Minimum</th>
26
- <th>Recommended</th>
27
- </tr>
28
- <tr>
29
- <td>Operating System</td>
30
- <td>Windows 7 (x64) or above, macOS 10.10 or above, Linux</td>
31
- <td>Windows 10 (x64), macOS 10.13 or above, Linux</td>
32
- </tr>
33
- <tr>
34
- <td>Processor (CPU)</td>
35
- <td>A CPU with SSE2 support. A modern CPU with a high single-thread performance rating.</td>
36
- <td>An Intel Core i5-4670K or AMD Ryzen 5 3600 or better.</td>
37
- </tr>
38
- <tr>
39
- <td>Memory (RAM)</td>
40
- <td>2 GB or more</td>
41
- <td>4 GB or more</td>
42
- </tr>
43
- <tr>
44
- <td>Graphics Card (GPU)</td>
45
- <td>A GPU that supports DirectX 11.1 or OpenGL 4.4.</td>
46
- <td>A GPU that supports Vulkan, DirectX 12, or Metal.</td>
47
- </tr>
48
- <tr>
49
- <td>Storage Space</td>
50
- <td>At least 5 GB of free space for the emulator and the game files.</td>
51
- <td>At least 10 GB of free space for the emulator and the game files.</td>
52
- </tr>
53
- <tr>
54
- <td>Controller or Keyboard</td>
55
- <td>A compatible controller or keyboard that can be mapped to the emulator.</td>
56
- <td>A PlayStation 2 or Wii controller with an adapter, or a controller that mimics their layout.</td>
57
- </tr>
58
- </table>
59
- <h3>Emulator and Game Files</h3>
60
- <p>The emulator that we will use to play Dragon Ball Z Budokai Tenkaichi 3 on PC is Dolphin Emulator, which is a free and open-source software that can run games for the Nintendo GameCube and Wii consoles. You can download the latest version of Dolphin Emulator from its official website or from its GitHub page. You can choose between the stable version, which is more stable and tested, or the development version, which is more updated and has more features, but may have some bugs and issues.</p>
61
- <p>The game file that we will use to play Dragon Ball Z Budokai Tenkaichi 3 on PC is an ISO file, which is a disc image of the original game disc. You can either rip your own game disc using a DVD drive and a software like ImgBurn, or you can download an ISO file from a reputable source online. However, downloading an ISO file may be illegal in some countries, so we do not condone or encourage piracy. Please only download an ISO file if you own a legal copy of the game.</p>
62
- <p>how to install dragon ball z budokai tenkaichi 3 on pc<br />
63
- dragon ball z budokai tenkaichi 3 pc download free full version<br />
64
- dragon ball z budokai tenkaichi 3 pc game download<br />
65
- how to play dragon ball z budokai tenkaichi 3 on pc with dolphin emulator<br />
66
- dragon ball z budokai tenkaichi 3 pc download utorrent<br />
67
- dragon ball z budokai tenkaichi 3 pc system requirements<br />
68
- dragon ball z budokai tenkaichi 3 pc download original version<br />
69
- how to get dragon ball z budokai tenkaichi 3 on pc for free<br />
70
- dragon ball z budokai tenkaichi 3 pc download highly compressed<br />
71
- dragon ball z budokai tenkaichi 3 pc iso download<br />
72
- how to run dragon ball z budokai tenkaichi 3 on pc<br />
73
- dragon ball z budokai tenkaichi 3 pc download windows 10<br />
74
- dragon ball z budokai tenkaichi 3 pc gameplay<br />
75
- how to configure dolphin emulator for dragon ball z budokai tenkaichi 3 on pc<br />
76
- dragon ball z budokai tenkaichi 3 pc download rar<br />
77
- how to use ps2 controller for dragon ball z budokai tenkaichi 3 on pc<br />
78
- dragon ball z budokai tenkaichi 3 pc mods download<br />
79
- how to fix lag in dragon ball z budokai tenkaichi 3 on pc<br />
80
- dragon ball z budokai tenkaichi 3 pc download no survey<br />
81
- how to unlock all characters in dragon ball z budokai tenkaichi 3 on pc<br />
82
- dragon ball z budokai tenkaichi 3 pc cheats codes<br />
83
- how to save game in dragon ball z budokai tenkaichi 3 on pc<br />
84
- dragon ball z budokai tenkaichi 3 pc online multiplayer<br />
85
- how to change language in dragon ball z budokai tenkaichi 3 on pc<br />
86
- dragon ball z budokai tenkaichi 3 pc download ocean of games<br />
87
- how to do fusion in dragon ball z budokai tenkaichi 3 on pc<br />
88
- dragon ball z budokai tenkaichi 3 pc keyboard controls<br />
89
- how to transform in dragon ball z budokai tenkaichi 3 on pc<br />
90
- dragon ball z budokai tenkaichi 3 pc download softonic<br />
91
- how to use ultimate attacks in dragon ball z budokai tenkaichi 3 on pc<br />
92
- dragon ball z budokai tenkaichi 3 pc requirements test<br />
93
- how to play story mode in dragon ball z budokai tenkaichi 3 on pc<br />
94
- dragon ball z budokai tenkaichi 3 pc download apunkagames<br />
95
- how to do potara fusion in dragon ball z budokai tenkaichi 3 on pc<br />
96
- dragon ball z budokai tenkaichi 3 pc best settings<br />
97
- how to play vs mode in dragon ball z budokai tenkaichi 3 on pc<br />
98
- dragon ball z budokai tenkaichi 3 pc download mega.nz<br />
99
- how to do team battle in dragon ball z budokai tenkaichi 3 on pc<br />
100
- dragon ball z budokai tenkaichi 3 pc download zip file<br />
101
- how to do special moves in dragon ball z budokai tenkaichi 3 on pc<br />
102
- dragon ball z budokai tenkaichi 3 pc steam<br />
103
- how to play tournament mode in dragon ball z budokai tenkaichi 3 on pc<br />
104
- dragon ball z budokai tenkaichi 3 pc download google drive<br />
105
- how to do super saiyan in dragon ball z budokai tenkaichi 3 on pc<br />
106
- dragon ball z budokai tenkaichi 3 pc crack download<br />
107
- how to play survival mode in dragon ball z budokai tenkaichi 3 on pc</p>
108
- <h3>Controller and Keyboard Settings</h3>
109
- <p>To play Dragon Ball Z Budokai Tenkaichi 3 on PC, you will need a controller or a keyboard that can be configured to the emulator. You can use any controller or keyboard that is compatible with your PC, but we recommend using a PlayStation 2 or Wii controller with an adapter, or a controller that mimics their layout, such as a Logitech F310 or an Xbox One controller. This is because the game was designed for these controllers, and using them will give you a more authentic and comfortable experience.</p>
110
- <p>To configure your controller or keyboard to the emulator, you will need to go to the Controllers menu in Dolphin Emulator and select the appropriate device for each port. For example, if you want to use a PlayStation 2 controller for Port 1, you will need to select Emulated Wii Remote for Port 1, and then click Configure. Then, you will need to map each button and axis of your controller to the corresponding input of the Wii Remote. You can also adjust the sensitivity and deadzone of each input if needed. You can also save your configuration as a profile for future use.</p>
111
- <p>If you want to use a keyboard for Port 1, you will need to select Emulated Wii Remote for Port 1, and then click Configure. Then, you will need to map each key of your keyboard to the corresponding input of the Wii Remote. You can also adjust the sensitivity and deadzone of each input if needed. You can also save your configuration as a profile for future use.</p>
112
- <p>You can also configure other ports if you want to play with multiple players or use other devices. For example, if you want to use a GameCube controller for Port 2, you will need to select Standard Controller for Port 2, and then click Configure. Then, you will need to map each button and axis of your controller to the corresponding input of the GameCube controller. You can also adjust the sensitivity and deadzone of each input if needed. You can also save your configuration as a profile for future use.</p>
113
- <h2>Steps to Download and Play Dragon Ball Z Budokai Tenkaichi 3 on PC</h2>
114
- <h3>Step 1: Download and Install Dolphin Emulator</h3>
115
- <p>The first step to download and play Dragon Ball Z Budokai Tenkaichi 3 on PC is to download and install Dolphin Emulator on your PC. You can download the latest version of Dolphin Emulator from its official website or from its GitHub page. You can choose between the stable version or the development version, depending on your preference. Once you have downloaded the emulator, you will need to extract it to a folder of your choice using a software like WinRAR or 7-Zip. Then, you will need to run the Dolphin.exe file to launch the emulator.</p>
116
- <h3>Step 2: Download and Extract Dragon Ball Z Budokai Tenkaichi 3 ISO File</h3>
117
- <p>The second step to download and play Dragon Ball Z Budokai Tenkaichi 3 on PC is to download and extract the Dragon Ball Z Budokai Tenkaichi 3 ISO file on your PC. You can either rip your own game disc using a DVD drive and a software like ImgBurn, or you can download an ISO file from a reputable source online. However, downloading an ISO file may be illegal in some countries, so we do not condone or encourage piracy. Please only download an ISO file if you own a legal copy of the game.</p>
118
- <p>Once you have downloaded the ISO file, you will need to extract it to a folder of your choice using a software like WinRAR or 7-Zip. Then, you will need to move the ISO file to a folder where you can easily access it from the emulator.</p>
119
- <h3>Step 3: Configure Dolphin Emulator Settings</h3>
120
- <p>The third step to download and play Dragon Ball Z Budokai Tenkaichi 3 on PC is to configure Dolphin Emulator settings to optimize the game performance and quality. You can access the settings menu by clicking on the Options tab in Dolphin Emulator. Here are some of the settings that you can adjust:</p>
121
- <ul>
122
- <li>General: Here, you can change the language, theme, interface, and hotkeys of the emulator.</li>
123
- <li>Graphics: Here, you can change the video backend, resolution, aspect ratio, fullscreen mode, anti-aliasing, anisotropic filtering, enhancements, hacks, and advanced options of the emulator.</li>
124
- <li>Audio: Here, you can change the audio backend, volume, latency, stretching, and DSP options of the emulator.</li>
125
- <li>Controllers: Here, you can configure your controller or keyboard settings for each port.</li>
126
- <li>Paths: Here, you can add or remove folders where the emulator will search for game files.</li>
127
- <li>Config: Here, you can change the general, interface, audio, gamecube, wii, advanced, and debug options of the emulator.</li>
128
- </ul>
129
- <p>You can experiment with different settings to find the best ones for your PC and game. However, here are some recommended settings that work well for most users:</p>
130
- <ul>
131
- <li>Graphics: Video Backend - Vulkan (if supported by your GPU), Resolution - Native (640x528) or higher (up to 4x), Aspect Ratio - Auto or Force 16:9 (if you want widescreen), Fullscreen Mode - On (if you want fullscreen), Anti-Aliasing - None or MSAA 2x (if your GPU can handle it), Anisotropic Filtering - 1x or higher (up to 16x), Enhancements - Scaled EFB Copy (On), Force Texture Filtering (On), Disable Fog (Off), Per-Pixel Lighting (Off), Widescreen Hack (On if you want widescreen), Hacks - Skip EFB Access from CPU (Off), Ignore Format Changes (On), Store EFB Copies to Texture Only (On), Texture Cache Accuracy (Fast), External Frame Buffer (Disable), Fast Depth Calculation (On), Disable Bounding Box (On).</li>
132
- <li>Audio: Audio Backend - Cubeb or XAudio2 (depending on your OS), Volume - 100% or lower (depending on your preference), Latency - Low or Medium (depending on your CPU), Stretching - Off or Low (depending on your preference), DSP Options - DSP HLE Emulation (Fast) or DSP LLE Recompiler (Accurate).</li>
133
- <li>Controllers: Configure your controller or keyboard settings according to your preference and comfort.</li>
134
- <li>Paths: Add the folder where you have the Dragon Ball Z Budokai Tenkaichi 3 ISO file.</li>
135
- <li>Config: General - Enable Dual Core (On), Enable Idle Skipping (On), JIT Recompiler (Recommended), Interface - Use Panic Handlers (Off), Audio - DSP Emulator Engine (DSP HLE Emulation or DSP LLE Recompiler), GameCube - Device Settings (None for all ports), Wii - Aspect Ratio (16:9 if you want widescreen), Advanced - CPU Clock Override (100% or higher if your CPU can handle it), Debug - Enable CPU Clock Override (On if you want to use CPU Clock Override).</li>
136
- </ul>
137
- <h3>Step 4: Load the Game and Enjoy</h3>
138
- <p>The final step to download and play Dragon Ball Z Budokai Tenkaichi 3 on PC is to load the game and enjoy it. You can load the game by clicking on the Open button in Dolphin Emulator and browsing to the folder where you have the ISO file. Alternatively, you can drag and drop the ISO file to the Dolphin Emulator window. The game will start automatically and you will see the title screen. You can use your controller or keyboard to navigate the menus and select your game mode. You can also access the emulator's menu by pressing Esc or F1 on your keyboard. From there, you can save or load your progress, change your settings, take screenshots, record videos, and more.</p>
139
- <h2>Tips and Tricks for Playing Dragon Ball Z Budokai Tenkaichi 3 on PC</h2>
140
- <h3>How to Unlock All Characters and Stages</h3>
141
- <p>One of the best features of Dragon Ball Z Budokai Tenkaichi 3 is the huge roster of characters and stages that you can choose from. However, not all of them are available from the start. You will need to unlock them by completing certain tasks or using cheat codes. Here are some ways to unlock all characters and stages:</p>
142
- <ul>
143
- <li>Complete Story Mode: By completing Story Mode, you will unlock most of the characters and stages in the game. You will also unlock Dragon History, where you can play different scenarios and what-if stories.</li>
144
- <li>Complete Ultimate Battle: By completing Ultimate Battle, you will unlock some of the hidden characters and stages in the game. You will also unlock Sim Dragon, where you can create your own team of fighters and compete against other teams.</li>
145
- <li>Use Cheat Codes: By using cheat codes, you can unlock all characters and stages in the game instantly. However, this may affect your game experience and achievements. To use cheat codes, you will need to enable cheats in Dolphin Emulator's settings, and then download a cheat file for Dragon Ball Z Budokai Tenkaichi 3 from a reliable source online. Then, you will need to place the cheat file in the Cheats folder of Dolphin Emulator, and then activate the cheats in the game properties.</li>
146
- </ul>
147
- <h3>How to Use Cheats and Mods</h3>
148
- <p>Besides unlocking all characters and stages, you can also use cheats and mods to modify the game according to your liking. You can use cheats to change your character's stats, abilities, costumes, transformations, and more. You can also use mods to add new characters, stages, music, sound effects, graphics, and more. Here are some ways to use cheats and mods:</p>
149
- <ul>
150
- <li>Use Cheat Codes: As mentioned above, you can use cheat codes to modify the game by enabling cheats in Dolphin Emulator's settings, downloading a cheat file for Dragon Ball Z Budokai Tenkaichi 3 from a reliable source online, placing it in the Cheats folder of Dolphin Emulator, and activating it in the game properties.</li>
151
- <li>Use Mods: To use mods, you will need to download a mod file for Dragon Ball Z Budokai Tenkaichi 3 from a reputable source online, such as YouTube or Reddit. Then, you will need to extract it to a folder of your choice using a software like WinRAR or 7-Zip. Then, you will need to replace or add the mod files to the ISO file of Dragon Ball Z Budokai Tenkaichi 3 using a software like Wii Backup Manager or WiiScrubber. Then, you will need to load the modified ISO file in Dolphin Emulator and enjoy the mod.</li>
152
- </ul>
153
- <h3>How to Fix Common Issues and Errors</h3>
154
- <p>While playing Dragon Ball Z Budokai Tenkaichi 3 on PC using Dolphin Emulator, you may encounter some issues or errors that may affect your game performance or quality. Here are some common issues and errors that you may face and how to fix them:</p>
155
- <ul>
156
- <li>Black Screen or Freezing: This may happen if your PC does not meet the system requirements for running the emulator or the game, or if your emulator or game settings are not optimal. To fix this, you can try the following solutions: - Lower your resolution, anti-aliasing, anisotropic filtering, and enhancements settings in the emulator's graphics menu. - Disable any cheats or mods that may be causing conflicts or errors in the game. - Update your video drivers and DirectX or Vulkan libraries to the latest versions. - Run the emulator and the game as administrator and in compatibility mode for Windows 7 or 8. - Check your ISO file for any corruption or damage using a software like WinMD5 or HashMyFiles.</li>
157
- <li>Slowdown or Lag: This may happen if your PC is not powerful enough to run the emulator or the game at full speed, or if your emulator or game settings are too high. To fix this, you can try the following solutions: - Enable Dual Core and Idle Skipping in the emulator's general settings menu. - Enable JIT Recompiler and Disable Bounding Box in the emulator's advanced settings menu. - Enable CPU Clock Override and set it to a higher percentage in the emulator's debug settings menu. - Lower your resolution, anti-aliasing, anisotropic filtering, and enhancements settings in the emulator's graphics menu. - Disable any cheats or mods that may be slowing down the game.</li>
158
- <li>Audio Issues: This may happen if your audio settings are not compatible with the game, or if your PC's sound card or speakers are not working properly. To fix this, you can try the following solutions: - Change your audio backend to Cubeb or XAudio2 in the emulator's audio settings menu. - Lower your latency and enable stretching in the emulator's audio settings menu. - Use DSP HLE Emulation or DSP LLE Recompiler in the emulator's audio settings menu. - Update your sound drivers and codecs to the latest versions. - Check your sound card and speakers for any defects or malfunctions.</li>
159
- <li>Controller Issues: This may happen if your controller is not configured correctly to the emulator, or if your controller is not working properly. To fix this, you can try the following solutions: - Configure your controller settings for each port in the emulator's controllers menu. - Adjust the sensitivity and deadzone of each input in the emulator's controllers menu. - Save your controller configuration as a profile in the emulator's controllers menu. - Update your controller drivers and firmware to the latest versions. - Check your controller for any defects or malfunctions.</li>
160
- </ul>
161
- <h2>Conclusion</h2>
162
- <p>Dragon Ball Z Budokai Tenkaichi 3 is a fantastic game that every fan of Dragon Ball should play. However, if you don't have a PlayStation 2 or Wii console, you can still enjoy it on your PC using Dolphin Emulator. In this article, we have shown you how to download and play Dragon Ball Z Budokai Tenkaichi 3 on PC using Dolphin Emulator. We have also given you some tips and tricks to unlock all characters and stages, use cheats and mods, and fix common issues and errors. We hope that this article has been helpful and informative for you. Now, go ahead and have fun playing Dragon Ball Z Budokai Tenkaichi 3 on PC!</p>
163
- <h2>FAQs</h2>
164
- <p>Here are some frequently asked questions about playing Dragon Ball Z Budokai Tenkaichi 3 on PC:</p>
165
- <ul>
166
- <li>Q: Is Dolphin Emulator safe and legal to use? - A: Yes, Dolphin Emulator is safe and legal to use, as long as you download it from its official website or GitHub page, and as long as you don't use it for piracy or illegal activities.</li>
167
- <li>Q: Is Dragon Ball Z Budokai Tenkaichi 3 compatible with Dolphin Emulator? - A: Yes, Dragon Ball Z Budokai Tenkaichi 3 is compatible with Dolphin Emulator, both for the PlayStation 2 and Wii versions. However, some minor glitches or bugs may occur depending on your PC and game settings.</li>
168
- <li>Q: How can I play Dragon Ball Z Budokai Tenkaichi 3 online with other players? - A: You can play Dragon Ball Z Budokai Tenkaichi 3 online with other players using netplay or LAN in Dolphin Emulator. To use netplay, you will need to join or host a netplay session with other players who have the same version of Dolphin Emulator and Dragon Ball Z Budokai Tenkaichi 3 as you. To use LAN, you will need to connect your PC with other PCs that have Dolphin Emulator and Dragon Ball Z Budokai Tenkaichi 3 installed on them.</li>
169
- <li>Q: How can I improve my game performance and quality? - A: You can improve your game performance and quality by adjusting your emulator and game settings according to your PC's specifications and preferences. You can also update your PC's drivers and libraries to the latest versions. You can also use cheats and mods to enhance your game features and graphics.</li>
170
- <li>Q: Where can I find more information and support for playing Dragon Ball Z Budokai Tenkaichi 3 on PC? - A: You can find more information and support for playing Dragon Ball Z Budokai Tenkaichi 3 on PC by visiting the official website and forums of Dolphin Emulator, or by searching online for guides, tutorials, videos, and reviews from other users and experts.</li>
171
- </ul></p> 401be4b1e0<br />
172
- <br />
173
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1toTree/lora_test/ppdiffusers/utils/dummy_paddle_objects.py DELETED
@@ -1,617 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # This file is autogenerated by the command `make fix-copies`, do not edit.
17
- # flake8: noqa
18
-
19
- from . import DummyObject, requires_backends
20
-
21
-
22
- class ModelMixin(metaclass=DummyObject):
23
- _backends = ["paddle"]
24
-
25
- def __init__(self, *args, **kwargs):
26
- requires_backends(self, ["paddle"])
27
-
28
- @classmethod
29
- def from_config(cls, *args, **kwargs):
30
- requires_backends(cls, ["paddle"])
31
-
32
- @classmethod
33
- def from_pretrained(cls, *args, **kwargs):
34
- requires_backends(cls, ["paddle"])
35
-
36
-
37
- class AutoencoderKL(metaclass=DummyObject):
38
- _backends = ["paddle"]
39
-
40
- def __init__(self, *args, **kwargs):
41
- requires_backends(self, ["paddle"])
42
-
43
- @classmethod
44
- def from_config(cls, *args, **kwargs):
45
- requires_backends(cls, ["paddle"])
46
-
47
- @classmethod
48
- def from_pretrained(cls, *args, **kwargs):
49
- requires_backends(cls, ["paddle"])
50
-
51
-
52
- class PriorTransformer(metaclass=DummyObject):
53
- _backends = ["paddle"]
54
-
55
- def __init__(self, *args, **kwargs):
56
- requires_backends(self, ["paddle"])
57
-
58
- @classmethod
59
- def from_config(cls, *args, **kwargs):
60
- requires_backends(cls, ["paddle"])
61
-
62
- @classmethod
63
- def from_pretrained(cls, *args, **kwargs):
64
- requires_backends(cls, ["paddle"])
65
-
66
-
67
- class Transformer2DModel(metaclass=DummyObject):
68
- _backends = ["paddle"]
69
-
70
- def __init__(self, *args, **kwargs):
71
- requires_backends(self, ["paddle"])
72
-
73
- @classmethod
74
- def from_config(cls, *args, **kwargs):
75
- requires_backends(cls, ["paddle"])
76
-
77
- @classmethod
78
- def from_pretrained(cls, *args, **kwargs):
79
- requires_backends(cls, ["paddle"])
80
-
81
-
82
- class UNet1DModel(metaclass=DummyObject):
83
- _backends = ["paddle"]
84
-
85
- def __init__(self, *args, **kwargs):
86
- requires_backends(self, ["paddle"])
87
-
88
- @classmethod
89
- def from_config(cls, *args, **kwargs):
90
- requires_backends(cls, ["paddle"])
91
-
92
- @classmethod
93
- def from_pretrained(cls, *args, **kwargs):
94
- requires_backends(cls, ["paddle"])
95
-
96
-
97
- class UNet2DConditionModel(metaclass=DummyObject):
98
- _backends = ["paddle"]
99
-
100
- def __init__(self, *args, **kwargs):
101
- requires_backends(self, ["paddle"])
102
-
103
- @classmethod
104
- def from_config(cls, *args, **kwargs):
105
- requires_backends(cls, ["paddle"])
106
-
107
- @classmethod
108
- def from_pretrained(cls, *args, **kwargs):
109
- requires_backends(cls, ["paddle"])
110
-
111
-
112
- class UNet2DModel(metaclass=DummyObject):
113
- _backends = ["paddle"]
114
-
115
- def __init__(self, *args, **kwargs):
116
- requires_backends(self, ["paddle"])
117
-
118
- @classmethod
119
- def from_config(cls, *args, **kwargs):
120
- requires_backends(cls, ["paddle"])
121
-
122
- @classmethod
123
- def from_pretrained(cls, *args, **kwargs):
124
- requires_backends(cls, ["paddle"])
125
-
126
-
127
- class VQModel(metaclass=DummyObject):
128
- _backends = ["paddle"]
129
-
130
- def __init__(self, *args, **kwargs):
131
- requires_backends(self, ["paddle"])
132
-
133
- @classmethod
134
- def from_config(cls, *args, **kwargs):
135
- requires_backends(cls, ["paddle"])
136
-
137
- @classmethod
138
- def from_pretrained(cls, *args, **kwargs):
139
- requires_backends(cls, ["paddle"])
140
-
141
-
142
- def get_constant_schedule(*args, **kwargs):
143
- requires_backends(get_constant_schedule, ["paddle"])
144
-
145
-
146
- def get_constant_schedule_with_warmup(*args, **kwargs):
147
- requires_backends(get_constant_schedule_with_warmup, ["paddle"])
148
-
149
-
150
- def get_cosine_schedule_with_warmup(*args, **kwargs):
151
- requires_backends(get_cosine_schedule_with_warmup, ["paddle"])
152
-
153
-
154
- def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
155
- requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["paddle"])
156
-
157
-
158
- def get_linear_schedule_with_warmup(*args, **kwargs):
159
- requires_backends(get_linear_schedule_with_warmup, ["paddle"])
160
-
161
-
162
- def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
163
- requires_backends(get_polynomial_decay_schedule_with_warmup, ["paddle"])
164
-
165
-
166
- def get_scheduler(*args, **kwargs):
167
- requires_backends(get_scheduler, ["paddle"])
168
-
169
-
170
- class DiffusionPipeline(metaclass=DummyObject):
171
- _backends = ["paddle"]
172
-
173
- def __init__(self, *args, **kwargs):
174
- requires_backends(self, ["paddle"])
175
-
176
- @classmethod
177
- def from_config(cls, *args, **kwargs):
178
- requires_backends(cls, ["paddle"])
179
-
180
- @classmethod
181
- def from_pretrained(cls, *args, **kwargs):
182
- requires_backends(cls, ["paddle"])
183
-
184
-
185
- class DanceDiffusionPipeline(metaclass=DummyObject):
186
- _backends = ["paddle"]
187
-
188
- def __init__(self, *args, **kwargs):
189
- requires_backends(self, ["paddle"])
190
-
191
- @classmethod
192
- def from_config(cls, *args, **kwargs):
193
- requires_backends(cls, ["paddle"])
194
-
195
- @classmethod
196
- def from_pretrained(cls, *args, **kwargs):
197
- requires_backends(cls, ["paddle"])
198
-
199
-
200
- class DDIMPipeline(metaclass=DummyObject):
201
- _backends = ["paddle"]
202
-
203
- def __init__(self, *args, **kwargs):
204
- requires_backends(self, ["paddle"])
205
-
206
- @classmethod
207
- def from_config(cls, *args, **kwargs):
208
- requires_backends(cls, ["paddle"])
209
-
210
- @classmethod
211
- def from_pretrained(cls, *args, **kwargs):
212
- requires_backends(cls, ["paddle"])
213
-
214
-
215
- class DDPMPipeline(metaclass=DummyObject):
216
- _backends = ["paddle"]
217
-
218
- def __init__(self, *args, **kwargs):
219
- requires_backends(self, ["paddle"])
220
-
221
- @classmethod
222
- def from_config(cls, *args, **kwargs):
223
- requires_backends(cls, ["paddle"])
224
-
225
- @classmethod
226
- def from_pretrained(cls, *args, **kwargs):
227
- requires_backends(cls, ["paddle"])
228
-
229
-
230
- class KarrasVePipeline(metaclass=DummyObject):
231
- _backends = ["paddle"]
232
-
233
- def __init__(self, *args, **kwargs):
234
- requires_backends(self, ["paddle"])
235
-
236
- @classmethod
237
- def from_config(cls, *args, **kwargs):
238
- requires_backends(cls, ["paddle"])
239
-
240
- @classmethod
241
- def from_pretrained(cls, *args, **kwargs):
242
- requires_backends(cls, ["paddle"])
243
-
244
-
245
- class LDMPipeline(metaclass=DummyObject):
246
- _backends = ["paddle"]
247
-
248
- def __init__(self, *args, **kwargs):
249
- requires_backends(self, ["paddle"])
250
-
251
- @classmethod
252
- def from_config(cls, *args, **kwargs):
253
- requires_backends(cls, ["paddle"])
254
-
255
- @classmethod
256
- def from_pretrained(cls, *args, **kwargs):
257
- requires_backends(cls, ["paddle"])
258
-
259
-
260
- class LDMSuperResolutionPipeline(metaclass=DummyObject):
261
- _backends = ["paddle"]
262
-
263
- def __init__(self, *args, **kwargs):
264
- requires_backends(self, ["paddle"])
265
-
266
- @classmethod
267
- def from_config(cls, *args, **kwargs):
268
- requires_backends(cls, ["paddle"])
269
-
270
- @classmethod
271
- def from_pretrained(cls, *args, **kwargs):
272
- requires_backends(cls, ["paddle"])
273
-
274
-
275
- class KDPM2AncestralDiscreteScheduler(metaclass=DummyObject):
276
- _backends = ["paddle"]
277
-
278
- def __init__(self, *args, **kwargs):
279
- requires_backends(self, ["paddle"])
280
-
281
- @classmethod
282
- def from_config(cls, *args, **kwargs):
283
- requires_backends(cls, ["paddle"])
284
-
285
- @classmethod
286
- def from_pretrained(cls, *args, **kwargs):
287
- requires_backends(cls, ["paddle"])
288
-
289
-
290
- class KDPM2DiscreteScheduler(metaclass=DummyObject):
291
- _backends = ["paddle"]
292
-
293
- def __init__(self, *args, **kwargs):
294
- requires_backends(self, ["paddle"])
295
-
296
- @classmethod
297
- def from_config(cls, *args, **kwargs):
298
- requires_backends(cls, ["paddle"])
299
-
300
- @classmethod
301
- def from_pretrained(cls, *args, **kwargs):
302
- requires_backends(cls, ["paddle"])
303
-
304
-
305
- class PNDMPipeline(metaclass=DummyObject):
306
- _backends = ["paddle"]
307
-
308
- def __init__(self, *args, **kwargs):
309
- requires_backends(self, ["paddle"])
310
-
311
- @classmethod
312
- def from_config(cls, *args, **kwargs):
313
- requires_backends(cls, ["paddle"])
314
-
315
- @classmethod
316
- def from_pretrained(cls, *args, **kwargs):
317
- requires_backends(cls, ["paddle"])
318
-
319
-
320
- class RePaintPipeline(metaclass=DummyObject):
321
- _backends = ["paddle"]
322
-
323
- def __init__(self, *args, **kwargs):
324
- requires_backends(self, ["paddle"])
325
-
326
- @classmethod
327
- def from_config(cls, *args, **kwargs):
328
- requires_backends(cls, ["paddle"])
329
-
330
- @classmethod
331
- def from_pretrained(cls, *args, **kwargs):
332
- requires_backends(cls, ["paddle"])
333
-
334
-
335
- class ScoreSdeVePipeline(metaclass=DummyObject):
336
- _backends = ["paddle"]
337
-
338
- def __init__(self, *args, **kwargs):
339
- requires_backends(self, ["paddle"])
340
-
341
- @classmethod
342
- def from_config(cls, *args, **kwargs):
343
- requires_backends(cls, ["paddle"])
344
-
345
- @classmethod
346
- def from_pretrained(cls, *args, **kwargs):
347
- requires_backends(cls, ["paddle"])
348
-
349
-
350
- class ScoreSdeVpPipeline(metaclass=DummyObject):
351
- _backends = ["paddle"]
352
-
353
- def __init__(self, *args, **kwargs):
354
- requires_backends(self, ["paddle"])
355
-
356
- @classmethod
357
- def from_config(cls, *args, **kwargs):
358
- requires_backends(cls, ["paddle"])
359
-
360
- @classmethod
361
- def from_pretrained(cls, *args, **kwargs):
362
- requires_backends(cls, ["paddle"])
363
-
364
-
365
- class DDIMScheduler(metaclass=DummyObject):
366
- _backends = ["paddle"]
367
-
368
- def __init__(self, *args, **kwargs):
369
- requires_backends(self, ["paddle"])
370
-
371
- @classmethod
372
- def from_config(cls, *args, **kwargs):
373
- requires_backends(cls, ["paddle"])
374
-
375
- @classmethod
376
- def from_pretrained(cls, *args, **kwargs):
377
- requires_backends(cls, ["paddle"])
378
-
379
-
380
- class DDPMScheduler(metaclass=DummyObject):
381
- _backends = ["paddle"]
382
-
383
- def __init__(self, *args, **kwargs):
384
- requires_backends(self, ["paddle"])
385
-
386
- @classmethod
387
- def from_config(cls, *args, **kwargs):
388
- requires_backends(cls, ["paddle"])
389
-
390
- @classmethod
391
- def from_pretrained(cls, *args, **kwargs):
392
- requires_backends(cls, ["paddle"])
393
-
394
-
395
- class DPMSolverMultistepScheduler(metaclass=DummyObject):
396
- _backends = ["paddle"]
397
-
398
- def __init__(self, *args, **kwargs):
399
- requires_backends(self, ["paddle"])
400
-
401
- @classmethod
402
- def from_config(cls, *args, **kwargs):
403
- requires_backends(cls, ["paddle"])
404
-
405
- @classmethod
406
- def from_pretrained(cls, *args, **kwargs):
407
- requires_backends(cls, ["paddle"])
408
-
409
-
410
- class DPMSolverSinglestepScheduler(metaclass=DummyObject):
411
- _backends = ["paddle"]
412
-
413
- def __init__(self, *args, **kwargs):
414
- requires_backends(self, ["paddle"])
415
-
416
- @classmethod
417
- def from_config(cls, *args, **kwargs):
418
- requires_backends(cls, ["paddle"])
419
-
420
- @classmethod
421
- def from_pretrained(cls, *args, **kwargs):
422
- requires_backends(cls, ["paddle"])
423
-
424
-
425
- class EulerAncestralDiscreteScheduler(metaclass=DummyObject):
426
- _backends = ["paddle"]
427
-
428
- def __init__(self, *args, **kwargs):
429
- requires_backends(self, ["paddle"])
430
-
431
- @classmethod
432
- def from_config(cls, *args, **kwargs):
433
- requires_backends(cls, ["paddle"])
434
-
435
- @classmethod
436
- def from_pretrained(cls, *args, **kwargs):
437
- requires_backends(cls, ["paddle"])
438
-
439
-
440
- class PreconfigEulerAncestralDiscreteScheduler(metaclass=DummyObject):
441
- _backends = ["paddle"]
442
-
443
- def __init__(self, *args, **kwargs):
444
- requires_backends(self, ["paddle"])
445
-
446
- @classmethod
447
- def from_config(cls, *args, **kwargs):
448
- requires_backends(cls, ["paddle"])
449
-
450
- @classmethod
451
- def from_pretrained(cls, *args, **kwargs):
452
- requires_backends(cls, ["paddle"])
453
-
454
-
455
- class EulerDiscreteScheduler(metaclass=DummyObject):
456
- _backends = ["paddle"]
457
-
458
- def __init__(self, *args, **kwargs):
459
- requires_backends(self, ["paddle"])
460
-
461
- @classmethod
462
- def from_config(cls, *args, **kwargs):
463
- requires_backends(cls, ["paddle"])
464
-
465
- @classmethod
466
- def from_pretrained(cls, *args, **kwargs):
467
- requires_backends(cls, ["paddle"])
468
-
469
-
470
- class HeunDiscreteScheduler(metaclass=DummyObject):
471
- _backends = ["paddle"]
472
-
473
- def __init__(self, *args, **kwargs):
474
- requires_backends(self, ["paddle"])
475
-
476
- @classmethod
477
- def from_config(cls, *args, **kwargs):
478
- requires_backends(cls, ["paddle"])
479
-
480
- @classmethod
481
- def from_pretrained(cls, *args, **kwargs):
482
- requires_backends(cls, ["paddle"])
483
-
484
-
485
- class IPNDMScheduler(metaclass=DummyObject):
486
- _backends = ["paddle"]
487
-
488
- def __init__(self, *args, **kwargs):
489
- requires_backends(self, ["paddle"])
490
-
491
- @classmethod
492
- def from_config(cls, *args, **kwargs):
493
- requires_backends(cls, ["paddle"])
494
-
495
- @classmethod
496
- def from_pretrained(cls, *args, **kwargs):
497
- requires_backends(cls, ["paddle"])
498
-
499
-
500
- class KarrasVeScheduler(metaclass=DummyObject):
501
- _backends = ["paddle"]
502
-
503
- def __init__(self, *args, **kwargs):
504
- requires_backends(self, ["paddle"])
505
-
506
- @classmethod
507
- def from_config(cls, *args, **kwargs):
508
- requires_backends(cls, ["paddle"])
509
-
510
- @classmethod
511
- def from_pretrained(cls, *args, **kwargs):
512
- requires_backends(cls, ["paddle"])
513
-
514
-
515
- class PNDMScheduler(metaclass=DummyObject):
516
- _backends = ["paddle"]
517
-
518
- def __init__(self, *args, **kwargs):
519
- requires_backends(self, ["paddle"])
520
-
521
- @classmethod
522
- def from_config(cls, *args, **kwargs):
523
- requires_backends(cls, ["paddle"])
524
-
525
- @classmethod
526
- def from_pretrained(cls, *args, **kwargs):
527
- requires_backends(cls, ["paddle"])
528
-
529
-
530
- class RePaintScheduler(metaclass=DummyObject):
531
- _backends = ["paddle"]
532
-
533
- def __init__(self, *args, **kwargs):
534
- requires_backends(self, ["paddle"])
535
-
536
- @classmethod
537
- def from_config(cls, *args, **kwargs):
538
- requires_backends(cls, ["paddle"])
539
-
540
- @classmethod
541
- def from_pretrained(cls, *args, **kwargs):
542
- requires_backends(cls, ["paddle"])
543
-
544
-
545
- class SchedulerMixin(metaclass=DummyObject):
546
- _backends = ["paddle"]
547
-
548
- def __init__(self, *args, **kwargs):
549
- requires_backends(self, ["paddle"])
550
-
551
- @classmethod
552
- def from_config(cls, *args, **kwargs):
553
- requires_backends(cls, ["paddle"])
554
-
555
- @classmethod
556
- def from_pretrained(cls, *args, **kwargs):
557
- requires_backends(cls, ["paddle"])
558
-
559
-
560
- class ScoreSdeVeScheduler(metaclass=DummyObject):
561
- _backends = ["paddle"]
562
-
563
- def __init__(self, *args, **kwargs):
564
- requires_backends(self, ["paddle"])
565
-
566
- @classmethod
567
- def from_config(cls, *args, **kwargs):
568
- requires_backends(cls, ["paddle"])
569
-
570
- @classmethod
571
- def from_pretrained(cls, *args, **kwargs):
572
- requires_backends(cls, ["paddle"])
573
-
574
-
575
- class UnCLIPScheduler(metaclass=DummyObject):
576
- _backends = ["paddle"]
577
-
578
- def __init__(self, *args, **kwargs):
579
- requires_backends(self, ["paddle"])
580
-
581
- @classmethod
582
- def from_config(cls, *args, **kwargs):
583
- requires_backends(cls, ["paddle"])
584
-
585
- @classmethod
586
- def from_pretrained(cls, *args, **kwargs):
587
- requires_backends(cls, ["paddle"])
588
-
589
-
590
- class VQDiffusionScheduler(metaclass=DummyObject):
591
- _backends = ["paddle"]
592
-
593
- def __init__(self, *args, **kwargs):
594
- requires_backends(self, ["paddle"])
595
-
596
- @classmethod
597
- def from_config(cls, *args, **kwargs):
598
- requires_backends(cls, ["paddle"])
599
-
600
- @classmethod
601
- def from_pretrained(cls, *args, **kwargs):
602
- requires_backends(cls, ["paddle"])
603
-
604
-
605
- class EMAModel(metaclass=DummyObject):
606
- _backends = ["paddle"]
607
-
608
- def __init__(self, *args, **kwargs):
609
- requires_backends(self, ["paddle"])
610
-
611
- @classmethod
612
- def from_config(cls, *args, **kwargs):
613
- requires_backends(cls, ["paddle"])
614
-
615
- @classmethod
616
- def from_pretrained(cls, *args, **kwargs):
617
- requires_backends(cls, ["paddle"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r50.py DELETED
@@ -1,26 +0,0 @@
1
- from easydict import EasyDict as edict
2
-
3
- # make training faster
4
- # our RAM is 256G
5
- # mount -t tmpfs -o size=140G tmpfs /train_tmp
6
-
7
- config = edict()
8
- config.loss = "cosface"
9
- config.network = "r50"
10
- config.resume = False
11
- config.output = None
12
- config.embedding_size = 512
13
- config.sample_rate = 1.0
14
- config.fp16 = True
15
- config.momentum = 0.9
16
- config.weight_decay = 5e-4
17
- config.batch_size = 128
18
- config.lr = 0.1 # batch size is 512
19
-
20
- config.rec = "/train_tmp/glint360k"
21
- config.num_classes = 360232
22
- config.num_image = 17091657
23
- config.num_epoch = 20
24
- config.warmup_epoch = -1
25
- config.decay_epoch = [8, 12, 15, 18]
26
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/losses.py DELETED
@@ -1,42 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
-
5
- def get_loss(name):
6
- if name == "cosface":
7
- return CosFace()
8
- elif name == "arcface":
9
- return ArcFace()
10
- else:
11
- raise ValueError()
12
-
13
-
14
- class CosFace(nn.Module):
15
- def __init__(self, s=64.0, m=0.40):
16
- super(CosFace, self).__init__()
17
- self.s = s
18
- self.m = m
19
-
20
- def forward(self, cosine, label):
21
- index = torch.where(label != -1)[0]
22
- m_hot = torch.zeros(index.size()[0], cosine.size()[1], device=cosine.device)
23
- m_hot.scatter_(1, label[index, None], self.m)
24
- cosine[index] -= m_hot
25
- ret = cosine * self.s
26
- return ret
27
-
28
-
29
- class ArcFace(nn.Module):
30
- def __init__(self, s=64.0, m=0.5):
31
- super(ArcFace, self).__init__()
32
- self.s = s
33
- self.m = m
34
-
35
- def forward(self, cosine: torch.Tensor, label):
36
- index = torch.where(label != -1)[0]
37
- m_hot = torch.zeros(index.size()[0], cosine.size()[1], device=cosine.device)
38
- m_hot.scatter_(1, label[index, None], self.m)
39
- cosine.acos_()
40
- cosine[index] += m_hot
41
- cosine.cos_().mul_(self.s)
42
- return cosine
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/train.py DELETED
@@ -1,141 +0,0 @@
1
- import argparse
2
- import logging
3
- import os
4
-
5
- import torch
6
- import torch.distributed as dist
7
- import torch.nn.functional as F
8
- import torch.utils.data.distributed
9
- from torch.nn.utils import clip_grad_norm_
10
-
11
- import losses
12
- from backbones import get_model
13
- from dataset import MXFaceDataset, SyntheticDataset, DataLoaderX
14
- from partial_fc import PartialFC
15
- from utils.utils_amp import MaxClipGradScaler
16
- from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
17
- from utils.utils_config import get_config
18
- from utils.utils_logging import AverageMeter, init_logging
19
-
20
-
21
- def main(args):
22
- cfg = get_config(args.config)
23
- try:
24
- world_size = int(os.environ['WORLD_SIZE'])
25
- rank = int(os.environ['RANK'])
26
- dist.init_process_group('nccl')
27
- except KeyError:
28
- world_size = 1
29
- rank = 0
30
- dist.init_process_group(backend='nccl', init_method="tcp://127.0.0.1:12584", rank=rank, world_size=world_size)
31
-
32
- local_rank = args.local_rank
33
- torch.cuda.set_device(local_rank)
34
- os.makedirs(cfg.output, exist_ok=True)
35
- init_logging(rank, cfg.output)
36
-
37
- if cfg.rec == "synthetic":
38
- train_set = SyntheticDataset(local_rank=local_rank)
39
- else:
40
- train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
41
-
42
- train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
43
- train_loader = DataLoaderX(
44
- local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size,
45
- sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True)
46
- backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).to(local_rank)
47
-
48
- if cfg.resume:
49
- try:
50
- backbone_pth = os.path.join(cfg.output, "backbone.pth")
51
- backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
52
- if rank == 0:
53
- logging.info("backbone resume successfully!")
54
- except (FileNotFoundError, KeyError, IndexError, RuntimeError):
55
- if rank == 0:
56
- logging.info("resume fail, backbone init successfully!")
57
-
58
- backbone = torch.nn.parallel.DistributedDataParallel(
59
- module=backbone, broadcast_buffers=False, device_ids=[local_rank])
60
- backbone.train()
61
- margin_softmax = losses.get_loss(cfg.loss)
62
- module_partial_fc = PartialFC(
63
- rank=rank, local_rank=local_rank, world_size=world_size, resume=cfg.resume,
64
- batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes,
65
- sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output)
66
-
67
- opt_backbone = torch.optim.SGD(
68
- params=[{'params': backbone.parameters()}],
69
- lr=cfg.lr / 512 * cfg.batch_size * world_size,
70
- momentum=0.9, weight_decay=cfg.weight_decay)
71
- opt_pfc = torch.optim.SGD(
72
- params=[{'params': module_partial_fc.parameters()}],
73
- lr=cfg.lr / 512 * cfg.batch_size * world_size,
74
- momentum=0.9, weight_decay=cfg.weight_decay)
75
-
76
- num_image = len(train_set)
77
- total_batch_size = cfg.batch_size * world_size
78
- cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
79
- cfg.total_step = num_image // total_batch_size * cfg.num_epoch
80
-
81
- def lr_step_func(current_step):
82
- cfg.decay_step = [x * num_image // total_batch_size for x in cfg.decay_epoch]
83
- if current_step < cfg.warmup_step:
84
- return current_step / cfg.warmup_step
85
- else:
86
- return 0.1 ** len([m for m in cfg.decay_step if m <= current_step])
87
-
88
- scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
89
- optimizer=opt_backbone, lr_lambda=lr_step_func)
90
- scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
91
- optimizer=opt_pfc, lr_lambda=lr_step_func)
92
-
93
- for key, value in cfg.items():
94
- num_space = 25 - len(key)
95
- logging.info(": " + key + " " * num_space + str(value))
96
-
97
- val_target = cfg.val_targets
98
- callback_verification = CallBackVerification(2000, rank, val_target, cfg.rec)
99
- callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size, world_size, None)
100
- callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
101
-
102
- loss = AverageMeter()
103
- start_epoch = 0
104
- global_step = 0
105
- grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None
106
- for epoch in range(start_epoch, cfg.num_epoch):
107
- train_sampler.set_epoch(epoch)
108
- for step, (img, label) in enumerate(train_loader):
109
- global_step += 1
110
- features = F.normalize(backbone(img))
111
- x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)
112
- if cfg.fp16:
113
- features.backward(grad_amp.scale(x_grad))
114
- grad_amp.unscale_(opt_backbone)
115
- clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
116
- grad_amp.step(opt_backbone)
117
- grad_amp.update()
118
- else:
119
- features.backward(x_grad)
120
- clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
121
- opt_backbone.step()
122
-
123
- opt_pfc.step()
124
- module_partial_fc.update()
125
- opt_backbone.zero_grad()
126
- opt_pfc.zero_grad()
127
- loss.update(loss_v, 1)
128
- callback_logging(global_step, loss, epoch, cfg.fp16, scheduler_backbone.get_last_lr()[0], grad_amp)
129
- callback_verification(global_step, backbone)
130
- scheduler_backbone.step()
131
- scheduler_pfc.step()
132
- callback_checkpoint(global_step, backbone, module_partial_fc)
133
- dist.destroy_process_group()
134
-
135
-
136
- if __name__ == "__main__":
137
- torch.backends.cudnn.benchmark = True
138
- parser = argparse.ArgumentParser(description='PyTorch ArcFace Training')
139
- parser.add_argument('config', type=str, help='py config file')
140
- parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
141
- main(parser.parse_args())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/components/ui/dropdown-menu.tsx DELETED
@@ -1,128 +0,0 @@
1
- 'use client'
2
-
3
- import * as React from 'react'
4
- import * as DropdownMenuPrimitive from '@radix-ui/react-dropdown-menu'
5
-
6
- import { cn } from '@/lib/utils'
7
-
8
- const DropdownMenu = DropdownMenuPrimitive.Root
9
-
10
- const DropdownMenuTrigger = DropdownMenuPrimitive.Trigger
11
-
12
- const DropdownMenuGroup = DropdownMenuPrimitive.Group
13
-
14
- const DropdownMenuPortal = DropdownMenuPrimitive.Portal
15
-
16
- const DropdownMenuSub = DropdownMenuPrimitive.Sub
17
-
18
- const DropdownMenuRadioGroup = DropdownMenuPrimitive.RadioGroup
19
-
20
- const DropdownMenuSubContent = React.forwardRef<
21
- React.ElementRef<typeof DropdownMenuPrimitive.SubContent>,
22
- React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.SubContent>
23
- >(({ className, ...props }, ref) => (
24
- <DropdownMenuPrimitive.SubContent
25
- ref={ref}
26
- className={cn(
27
- 'z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow-md animate-in data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1',
28
- className
29
- )}
30
- {...props}
31
- />
32
- ))
33
- DropdownMenuSubContent.displayName =
34
- DropdownMenuPrimitive.SubContent.displayName
35
-
36
- const DropdownMenuContent = React.forwardRef<
37
- React.ElementRef<typeof DropdownMenuPrimitive.Content>,
38
- React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Content>
39
- >(({ className, sideOffset = 4, ...props }, ref) => (
40
- <DropdownMenuPrimitive.Portal>
41
- <DropdownMenuPrimitive.Content
42
- ref={ref}
43
- sideOffset={sideOffset}
44
- className={cn(
45
- 'z-50 min-w-[8rem] overflow-hidden rounded-md border bg-popover p-1 text-popover-foreground shadow animate-in data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2',
46
- className
47
- )}
48
- {...props}
49
- />
50
- </DropdownMenuPrimitive.Portal>
51
- ))
52
- DropdownMenuContent.displayName = DropdownMenuPrimitive.Content.displayName
53
-
54
- const DropdownMenuItem = React.forwardRef<
55
- React.ElementRef<typeof DropdownMenuPrimitive.Item>,
56
- React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Item> & {
57
- inset?: boolean
58
- }
59
- >(({ className, inset, ...props }, ref) => (
60
- <DropdownMenuPrimitive.Item
61
- ref={ref}
62
- className={cn(
63
- 'relative flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none transition-colors focus:bg-accent focus:text-accent-foreground data-[disabled]:pointer-events-none data-[disabled]:opacity-50',
64
- inset && 'pl-8',
65
- className
66
- )}
67
- {...props}
68
- />
69
- ))
70
- DropdownMenuItem.displayName = DropdownMenuPrimitive.Item.displayName
71
-
72
- const DropdownMenuLabel = React.forwardRef<
73
- React.ElementRef<typeof DropdownMenuPrimitive.Label>,
74
- React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Label> & {
75
- inset?: boolean
76
- }
77
- >(({ className, inset, ...props }, ref) => (
78
- <DropdownMenuPrimitive.Label
79
- ref={ref}
80
- className={cn(
81
- 'px-2 py-1.5 text-sm font-semibold',
82
- inset && 'pl-8',
83
- className
84
- )}
85
- {...props}
86
- />
87
- ))
88
- DropdownMenuLabel.displayName = DropdownMenuPrimitive.Label.displayName
89
-
90
- const DropdownMenuSeparator = React.forwardRef<
91
- React.ElementRef<typeof DropdownMenuPrimitive.Separator>,
92
- React.ComponentPropsWithoutRef<typeof DropdownMenuPrimitive.Separator>
93
- >(({ className, ...props }, ref) => (
94
- <DropdownMenuPrimitive.Separator
95
- ref={ref}
96
- className={cn('-mx-1 my-1 h-px bg-muted', className)}
97
- {...props}
98
- />
99
- ))
100
- DropdownMenuSeparator.displayName = DropdownMenuPrimitive.Separator.displayName
101
-
102
- const DropdownMenuShortcut = ({
103
- className,
104
- ...props
105
- }: React.HTMLAttributes<HTMLSpanElement>) => {
106
- return (
107
- <span
108
- className={cn('ml-auto text-xs tracking-widest opacity-60', className)}
109
- {...props}
110
- />
111
- )
112
- }
113
- DropdownMenuShortcut.displayName = 'DropdownMenuShortcut'
114
-
115
- export {
116
- DropdownMenu,
117
- DropdownMenuTrigger,
118
- DropdownMenuContent,
119
- DropdownMenuItem,
120
- DropdownMenuLabel,
121
- DropdownMenuSeparator,
122
- DropdownMenuShortcut,
123
- DropdownMenuGroup,
124
- DropdownMenuPortal,
125
- DropdownMenuSub,
126
- DropdownMenuSubContent,
127
- DropdownMenuRadioGroup
128
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/model.py DELETED
@@ -1,913 +0,0 @@
1
- """ CLAP Model
2
-
3
- Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
- Adapted to the Audio Task.
5
- """
6
-
7
- from collections import OrderedDict
8
- from dataclasses import dataclass
9
- from email.mime import audio
10
- from typing import Tuple, Union, Callable, Optional
11
-
12
- import numpy as np
13
- import torch
14
- import torch.nn.functional as F
15
- from torch import nn
16
-
17
- from .timm_model import TimmModel
18
- import logging
19
- from .utils import freeze_batch_norm_2d
20
-
21
- from .pann_model import create_pann_model
22
- from .htsat import create_htsat_model
23
- from transformers import BertModel, RobertaModel, BartModel
24
- from transformers.tokenization_utils_base import BatchEncoding
25
-
26
-
27
- class MLPLayers(nn.Module):
28
- def __init__(self, units=[512, 512, 512], nonlin=nn.ReLU(), dropout=0.1):
29
- super(MLPLayers, self).__init__()
30
- self.nonlin = nonlin
31
- self.dropout = dropout
32
-
33
- sequence = []
34
- for u0, u1 in zip(units[:-1], units[1:]):
35
- sequence.append(nn.Linear(u0, u1))
36
- sequence.append(self.nonlin)
37
- sequence.append(nn.Dropout(self.dropout))
38
- sequence = sequence[:-2]
39
-
40
- self.sequential = nn.Sequential(*sequence)
41
-
42
- def forward(self, X):
43
- X = self.sequential(X)
44
- return X
45
-
46
-
47
- class Bottleneck(nn.Module):
48
- expansion = 4
49
-
50
- def __init__(self, inplanes, planes, stride=1):
51
- super().__init__()
52
-
53
- # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
54
- self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
55
- self.bn1 = nn.BatchNorm2d(planes)
56
-
57
- self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
58
- self.bn2 = nn.BatchNorm2d(planes)
59
-
60
- self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
61
-
62
- self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
63
- self.bn3 = nn.BatchNorm2d(planes * self.expansion)
64
-
65
- self.relu = nn.ReLU(inplace=True)
66
- self.downsample = None
67
- self.stride = stride
68
-
69
- if stride > 1 or inplanes != planes * Bottleneck.expansion:
70
- # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
71
- self.downsample = nn.Sequential(
72
- OrderedDict(
73
- [
74
- ("-1", nn.AvgPool2d(stride)),
75
- (
76
- "0",
77
- nn.Conv2d(
78
- inplanes,
79
- planes * self.expansion,
80
- 1,
81
- stride=1,
82
- bias=False,
83
- ),
84
- ),
85
- ("1", nn.BatchNorm2d(planes * self.expansion)),
86
- ]
87
- )
88
- )
89
-
90
- def forward(self, x: torch.Tensor):
91
- identity = x
92
-
93
- out = self.relu(self.bn1(self.conv1(x)))
94
- out = self.relu(self.bn2(self.conv2(out)))
95
- out = self.avgpool(out)
96
- out = self.bn3(self.conv3(out))
97
-
98
- if self.downsample is not None:
99
- identity = self.downsample(x)
100
-
101
- out += identity
102
- out = self.relu(out)
103
- return out
104
-
105
-
106
- class AttentionPool2d(nn.Module):
107
- def __init__(
108
- self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
109
- ):
110
- super().__init__()
111
- self.positional_embedding = nn.Parameter(
112
- torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
113
- )
114
- self.k_proj = nn.Linear(embed_dim, embed_dim)
115
- self.q_proj = nn.Linear(embed_dim, embed_dim)
116
- self.v_proj = nn.Linear(embed_dim, embed_dim)
117
- self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
118
- self.num_heads = num_heads
119
-
120
- def forward(self, x):
121
- x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
122
- 2, 0, 1
123
- ) # NCHW -> (HW)NC
124
- x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
125
- x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
126
- x, _ = F.multi_head_attention_forward(
127
- query=x,
128
- key=x,
129
- value=x,
130
- embed_dim_to_check=x.shape[-1],
131
- num_heads=self.num_heads,
132
- q_proj_weight=self.q_proj.weight,
133
- k_proj_weight=self.k_proj.weight,
134
- v_proj_weight=self.v_proj.weight,
135
- in_proj_weight=None,
136
- in_proj_bias=torch.cat(
137
- [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
138
- ),
139
- bias_k=None,
140
- bias_v=None,
141
- add_zero_attn=False,
142
- dropout_p=0,
143
- out_proj_weight=self.c_proj.weight,
144
- out_proj_bias=self.c_proj.bias,
145
- use_separate_proj_weight=True,
146
- training=self.training,
147
- need_weights=False,
148
- )
149
-
150
- return x[0]
151
-
152
-
153
- class ModifiedResNet(nn.Module):
154
- """
155
- A ResNet class that is similar to torchvision's but contains the following changes:
156
- - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
157
- - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
158
- - The final pooling layer is a QKV attention instead of an average pool
159
- """
160
-
161
- def __init__(self, layers, output_dim, heads, image_size=224, width=64):
162
- super().__init__()
163
- self.output_dim = output_dim
164
- self.image_size = image_size
165
-
166
- # the 3-layer stem
167
- self.conv1 = nn.Conv2d(
168
- 3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
169
- )
170
- self.bn1 = nn.BatchNorm2d(width // 2)
171
- self.conv2 = nn.Conv2d(
172
- width // 2, width // 2, kernel_size=3, padding=1, bias=False
173
- )
174
- self.bn2 = nn.BatchNorm2d(width // 2)
175
- self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
176
- self.bn3 = nn.BatchNorm2d(width)
177
- self.avgpool = nn.AvgPool2d(2)
178
- self.relu = nn.ReLU(inplace=True)
179
-
180
- # residual layers
181
- self._inplanes = width # this is a *mutable* variable used during construction
182
- self.layer1 = self._make_layer(width, layers[0])
183
- self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
184
- self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
185
- self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
186
-
187
- embed_dim = width * 32 # the ResNet feature dimension
188
- self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
189
-
190
- self.init_parameters()
191
-
192
- def _make_layer(self, planes, blocks, stride=1):
193
- layers = [Bottleneck(self._inplanes, planes, stride)]
194
-
195
- self._inplanes = planes * Bottleneck.expansion
196
- for _ in range(1, blocks):
197
- layers.append(Bottleneck(self._inplanes, planes))
198
-
199
- return nn.Sequential(*layers)
200
-
201
- def init_parameters(self):
202
- if self.attnpool is not None:
203
- std = self.attnpool.c_proj.in_features**-0.5
204
- nn.init.normal_(self.attnpool.q_proj.weight, std=std)
205
- nn.init.normal_(self.attnpool.k_proj.weight, std=std)
206
- nn.init.normal_(self.attnpool.v_proj.weight, std=std)
207
- nn.init.normal_(self.attnpool.c_proj.weight, std=std)
208
-
209
- for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
210
- for name, param in resnet_block.named_parameters():
211
- if name.endswith("bn3.weight"):
212
- nn.init.zeros_(param)
213
-
214
- def lock(self, unlocked_groups=0, freeze_bn_stats=False):
215
- assert (
216
- unlocked_groups == 0
217
- ), "partial locking not currently supported for this model"
218
- for param in self.parameters():
219
- param.requires_grad = False
220
- if freeze_bn_stats:
221
- freeze_batch_norm_2d(self)
222
-
223
- def stem(self, x):
224
- for conv, bn in [
225
- (self.conv1, self.bn1),
226
- (self.conv2, self.bn2),
227
- (self.conv3, self.bn3),
228
- ]:
229
- x = self.relu(bn(conv(x)))
230
- x = self.avgpool(x)
231
- return x
232
-
233
- def forward(self, x):
234
- x = self.stem(x)
235
- x = self.layer1(x)
236
- x = self.layer2(x)
237
- x = self.layer3(x)
238
- x = self.layer4(x)
239
- x = self.attnpool(x)
240
-
241
- return x
242
-
243
-
244
- class LayerNorm(nn.LayerNorm):
245
- """Subclass torch's LayerNorm to handle fp16."""
246
-
247
- def forward(self, x: torch.Tensor):
248
- orig_type = x.dtype
249
- x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
250
- return x.to(orig_type)
251
-
252
-
253
- class QuickGELU(nn.Module):
254
- # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
255
- def forward(self, x: torch.Tensor):
256
- return x * torch.sigmoid(1.702 * x)
257
-
258
-
259
- class ResidualAttentionBlock(nn.Module):
260
- def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
261
- super().__init__()
262
-
263
- self.attn = nn.MultiheadAttention(d_model, n_head)
264
- self.ln_1 = LayerNorm(d_model)
265
- self.mlp = nn.Sequential(
266
- OrderedDict(
267
- [
268
- ("c_fc", nn.Linear(d_model, d_model * 4)),
269
- ("gelu", act_layer()),
270
- ("c_proj", nn.Linear(d_model * 4, d_model)),
271
- ]
272
- )
273
- )
274
- self.ln_2 = LayerNorm(d_model)
275
-
276
- def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
277
- return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
278
-
279
- def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
280
- x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
281
- x = x + self.mlp(self.ln_2(x))
282
- return x
283
-
284
-
285
- class Transformer(nn.Module):
286
- def __init__(
287
- self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
288
- ):
289
- super().__init__()
290
- self.width = width
291
- self.layers = layers
292
- self.resblocks = nn.ModuleList(
293
- [
294
- ResidualAttentionBlock(width, heads, act_layer=act_layer)
295
- for _ in range(layers)
296
- ]
297
- )
298
-
299
- def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
300
- for r in self.resblocks:
301
- x = r(x, attn_mask=attn_mask)
302
- return x
303
-
304
-
305
- class VisualTransformer(nn.Module):
306
- def __init__(
307
- self,
308
- image_size: int,
309
- patch_size: int,
310
- width: int,
311
- layers: int,
312
- heads: int,
313
- output_dim: int,
314
- act_layer: Callable = nn.GELU,
315
- ):
316
- super().__init__()
317
- self.image_size = image_size
318
- self.output_dim = output_dim
319
- self.conv1 = nn.Conv2d(
320
- in_channels=3,
321
- out_channels=width,
322
- kernel_size=patch_size,
323
- stride=patch_size,
324
- bias=False,
325
- )
326
-
327
- scale = width**-0.5
328
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
329
- self.positional_embedding = nn.Parameter(
330
- scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
331
- )
332
- self.ln_pre = LayerNorm(width)
333
-
334
- self.text_branch = Transformer(width, layers, heads, act_layer=act_layer)
335
-
336
- self.ln_post = LayerNorm(width)
337
- self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
338
-
339
- def lock(self, unlocked_groups=0, freeze_bn_stats=False):
340
- assert (
341
- unlocked_groups == 0
342
- ), "partial locking not currently supported for this model"
343
- for param in self.parameters():
344
- param.requires_grad = False
345
-
346
- def forward(self, x: torch.Tensor):
347
- x = self.conv1(x) # shape = [*, width, grid, grid]
348
- x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
349
- x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
350
- x = torch.cat(
351
- [
352
- self.class_embedding.to(x.dtype)
353
- + torch.zeros(
354
- x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
355
- ),
356
- x,
357
- ],
358
- dim=1,
359
- ) # shape = [*, grid ** 2 + 1, width]
360
- x = x + self.positional_embedding.to(x.dtype)
361
- x = self.ln_pre(x)
362
-
363
- x = x.permute(1, 0, 2) # NLD -> LND
364
- x = self.text_branch(x)
365
- x = x.permute(1, 0, 2) # LND -> NLD
366
-
367
- x = self.ln_post(x[:, 0, :])
368
-
369
- if self.proj is not None:
370
- x = x @ self.proj
371
-
372
- return x
373
-
374
-
375
- @dataclass
376
- class CLAPVisionCfg:
377
- layers: Union[Tuple[int, int, int, int], int] = 12
378
- width: int = 768
379
- patch_size: int = 16
380
- image_size: Union[Tuple[int, int], int] = 224
381
- timm_model_name: str = (
382
- None # a valid model name overrides layers, width, patch_size
383
- )
384
- timm_model_pretrained: bool = (
385
- False # use (imagenet) pretrained weights for named model
386
- )
387
- timm_pool: str = (
388
- "avg" # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
389
- )
390
- timm_proj: str = (
391
- "linear" # linear projection for timm model output ('linear', 'mlp', '')
392
- )
393
-
394
-
395
- # Audio Config Class
396
- @dataclass
397
- class CLAPAudioCfp:
398
- model_type: str = "PANN"
399
- model_name: str = "Cnn14"
400
- sample_rate: int = 48000
401
- # Param
402
- audio_length: int = 1024
403
- window_size: int = 1024
404
- hop_size: int = 1024
405
- fmin: int = 50
406
- fmax: int = 14000
407
- class_num: int = 527
408
- mel_bins: int = 64
409
- clip_samples: int = 480000
410
-
411
-
412
- @dataclass
413
- class CLAPTextCfg:
414
- context_length: int
415
- vocab_size: int
416
- width: int
417
- heads: int
418
- layers: int
419
- model_type: str
420
-
421
-
422
- class CLAP(nn.Module):
423
- def __init__(
424
- self,
425
- embed_dim: int,
426
- audio_cfg: CLAPAudioCfp,
427
- text_cfg: CLAPTextCfg,
428
- quick_gelu: bool = False,
429
- enable_fusion: bool = False,
430
- fusion_type: str = 'None',
431
- joint_embed_shape: int = 512,
432
- mlp_act: str = 'relu',
433
- ):
434
- super().__init__()
435
- if isinstance(audio_cfg, dict):
436
- audio_cfg = CLAPAudioCfp(**audio_cfg)
437
- if isinstance(text_cfg, dict):
438
- text_cfg = CLAPTextCfg(**text_cfg)
439
-
440
- self.audio_cfg = audio_cfg
441
- self.text_cfg = text_cfg
442
- self.enable_fusion = enable_fusion
443
- self.fusion_type = fusion_type
444
- self.joint_embed_shape = joint_embed_shape
445
- self.mlp_act = mlp_act
446
-
447
-
448
- self.context_length = text_cfg.context_length
449
-
450
- # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
451
- # memory efficient in recent PyTorch releases (>= 1.10).
452
- # NOTE: timm models always use native GELU regardless of quick_gelu flag.
453
- act_layer = QuickGELU if quick_gelu else nn.GELU
454
-
455
- if mlp_act == 'relu':
456
- mlp_act_layer = nn.ReLU()
457
- elif mlp_act == 'gelu':
458
- mlp_act_layer = nn.GELU()
459
- else:
460
- raise NotImplementedError
461
-
462
- # audio branch
463
- # audio branch parameters
464
- if audio_cfg.model_type == "PANN":
465
- self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)
466
- elif audio_cfg.model_type == "HTSAT":
467
- self.audio_branch = create_htsat_model(audio_cfg, enable_fusion, fusion_type)
468
- else:
469
- logging.error(f"Model config for {audio_cfg.model_type} not found")
470
- raise RuntimeError(f"Model config for {audio_cfg.model_type} not found.")
471
-
472
-
473
- # text branch
474
- # text branch parameters
475
- if text_cfg.model_type == "transformer":
476
- self.text_branch = Transformer(
477
- width=text_cfg.width,
478
- layers=text_cfg.layers,
479
- heads=text_cfg.heads,
480
- act_layer=act_layer,
481
- )
482
- self.vocab_size = text_cfg.vocab_size
483
- self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
484
- self.positional_embedding = nn.Parameter(
485
- torch.empty(self.context_length, text_cfg.width)
486
- )
487
- self.ln_final = LayerNorm(text_cfg.width)
488
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
489
- self.joint_embed_shape,
490
- self.joint_embed_shape], dropout=0.1)
491
- self.text_projection = nn.Sequential(
492
- nn.Linear(text_cfg.width, self.joint_embed_shape),
493
- mlp_act_layer,
494
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
495
- )
496
- elif text_cfg.model_type == "bert":
497
- self.text_branch = BertModel.from_pretrained("bert-base-uncased")
498
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
499
- self.joint_embed_shape,
500
- self.joint_embed_shape], dropout=0.1)
501
- self.text_projection = nn.Sequential(
502
- nn.Linear(768, self.joint_embed_shape),
503
- mlp_act_layer,
504
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
505
- )
506
- elif text_cfg.model_type == "roberta":
507
- self.text_branch = RobertaModel.from_pretrained('roberta-base')
508
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
509
- self.joint_embed_shape,
510
- self.joint_embed_shape], dropout=0.1)
511
- self.text_projection = nn.Sequential(
512
- nn.Linear(768, self.joint_embed_shape),
513
- mlp_act_layer,
514
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
515
- )
516
- elif text_cfg.model_type == "bart":
517
- self.text_branch = BartModel.from_pretrained('facebook/bart-base')
518
- self.text_transform = MLPLayers(units=[self.joint_embed_shape,
519
- self.joint_embed_shape,
520
- self.joint_embed_shape], dropout=0.1)
521
- self.text_projection = nn.Sequential(
522
- nn.Linear(768, self.joint_embed_shape),
523
- mlp_act_layer,
524
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
525
- )
526
- else:
527
- logging.error(f"Model config for {text_cfg.model_type} not found")
528
- raise RuntimeError(f"Model config for {text_cfg.model_type} not found.")
529
- self.text_branch_type = text_cfg.model_type
530
- # text branch parameters
531
-
532
- # audio branch parameters
533
- self.audio_transform = MLPLayers(units=[self.joint_embed_shape,
534
- self.joint_embed_shape,
535
- self.joint_embed_shape], dropout=0.1)
536
-
537
- # below here is text branch parameters
538
-
539
- # ============================================================================================================
540
- self.audio_projection = nn.Sequential(
541
- nn.Linear(embed_dim, self.joint_embed_shape),
542
- mlp_act_layer,
543
- nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
544
- )
545
-
546
- self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
547
- self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
548
- self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
549
-
550
- self.init_text_branch_parameters()
551
-
552
- def init_text_branch_parameters(self):
553
- if self.text_branch_type == "transformer":
554
- nn.init.normal_(self.token_embedding.weight, std=0.02)
555
- nn.init.normal_(self.positional_embedding, std=0.01)
556
- proj_std = (self.text_branch.width**-0.5) * (
557
- (2 * self.text_branch.layers) ** -0.5
558
- )
559
- attn_std = self.text_branch.width**-0.5
560
- fc_std = (2 * self.text_branch.width) ** -0.5
561
- for block in self.text_branch.resblocks:
562
- nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
563
- nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
564
- nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
565
- nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
566
- if self.text_branch_type == "bert" or self.text_branch_type == "roberta":
567
- width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]
568
- elif self.text_branch_type == "bart":
569
- width = self.text_branch.shared.weight.shape[-1]
570
- else:
571
- width = self.text_branch.width
572
- nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))
573
- nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))
574
-
575
- # deprecated
576
- # if hasattr(self.visual, 'init_parameters'):
577
- # self.visual.init_parameters()
578
-
579
- # if self.text_projection is not None:
580
- # nn.init.normal_(self.text_projection, std=width**-0.5)
581
-
582
- def build_attention_mask(self):
583
- # lazily create causal attention mask, with full attention between the vision tokens
584
- # pytorch uses additive attention mask; fill with -inf
585
- mask = torch.empty(self.context_length, self.context_length)
586
- mask.fill_(float("-inf"))
587
- mask.triu_(1) # zero out the lower diagonal
588
- return mask
589
-
590
- def encode_audio(self, audio, device):
591
- return self.audio_branch(audio, mixup_lambda=None, device=device) # mix lambda needs to add
592
-
593
- # def list_of_dict_of_tensor2dict_of_tensor(self, x, device):
594
- # tmp = {}
595
- # for k in x[0].keys():
596
- # tmp[k] = []
597
- # for i in range(len(x)):
598
- # tmp[k].append(x[i][k][:77])
599
- # for k in x[0].keys():
600
- # tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)
601
- # return tmp
602
-
603
- def encode_text(self, text, device):
604
- if self.text_branch_type == "transformer":
605
- text = text.to(device=device, non_blocking=True)
606
- x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
607
-
608
- x = x + self.positional_embedding
609
- x = x.permute(1, 0, 2) # NLD -> LND
610
- x = self.text_branch(x, attn_mask=self.attn_mask)
611
- x = x.permute(1, 0, 2) # LND -> NLD
612
- x = self.ln_final(x)
613
-
614
- # x.shape = [batch_size, n_ctx, transformer.width]
615
- # take features from the eot embedding (eot_token is the highest number in each sequence)
616
- x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
617
- elif self.text_branch_type == "bert":
618
- # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)
619
- # text = BatchEncoding(text)
620
- x = self.text_branch(
621
- input_ids=text["input_ids"].to(device=device, non_blocking=True),
622
- attention_mask=text["attention_mask"].to(
623
- device=device, non_blocking=True
624
- ),
625
- token_type_ids=text["token_type_ids"].to(
626
- device=device, non_blocking=True
627
- ),
628
- )["pooler_output"]
629
- x = self.text_projection(x)
630
- elif self.text_branch_type == "roberta":
631
- x = self.text_branch(
632
- input_ids=text["input_ids"].to(device=device, non_blocking=True),
633
- attention_mask=text["attention_mask"].to(
634
- device=device, non_blocking=True
635
- ),
636
- )["pooler_output"]
637
-
638
- x = self.text_projection(x)
639
- elif self.text_branch_type == "bart":
640
- x = torch.mean(self.text_branch(
641
- input_ids=text["input_ids"].to(device=device, non_blocking=True),
642
- attention_mask=text["attention_mask"].to(
643
- device=device, non_blocking=True
644
- ),
645
- )["encoder_last_hidden_state"],axis=1)
646
- x = self.text_projection(x)
647
- else:
648
- logging.error(f"Model type {self.text_branch_type} not found")
649
- raise RuntimeError(f"Model type {self.text_branch_type} not found.")
650
- return x
651
-
652
- def forward(self, audio, text, device=None):
653
- """Forward audio and text into the CLAP
654
-
655
- Parameters
656
- ----------
657
- audio: torch.Tensor (batch_size, audio_length)
658
- the time-domain audio input / the batch of mel_spec and longer list.
659
- text: torch.Tensor () // need to add
660
- the text token input
661
- """
662
- if device is None:
663
- if audio is not None:
664
- device = audio.device
665
- elif text is not None:
666
- device = text.device
667
- if audio is None and text is None:
668
- # a hack to get the logit scale
669
- return self.logit_scale_a.exp(), self.logit_scale_t.exp()
670
- elif audio is None:
671
- return self.encode_text(text, device=device)
672
- elif text is None:
673
- return self.audio_projection(self.encode_audio(audio, device=device)["embedding"])
674
- audio_features = self.audio_projection(self.encode_audio(audio, device=device)["embedding"])
675
- audio_features = F.normalize(audio_features, dim=-1)
676
-
677
- text_features = self.encode_text(
678
- text, device=device
679
- )
680
- # print("text_features", text_features)
681
- # print("text_features.shape", text_features.shape)
682
- # print("text_features.type", type(text_features))
683
- text_features = F.normalize(text_features, dim=-1)
684
-
685
- audio_features_mlp = self.audio_transform(audio_features)
686
- text_features_mlp = self.text_transform(text_features)
687
- # Four outputs: audio features (basic & MLP), text features (basic & MLP)
688
- return (
689
- audio_features,
690
- text_features,
691
- audio_features_mlp,
692
- text_features_mlp,
693
- self.logit_scale_a.exp(),
694
- self.logit_scale_t.exp(),
695
- )
696
-
697
- def get_logit_scale(self):
698
- return self.logit_scale_a.exp(), self.logit_scale_t.exp()
699
-
700
- def get_textual_embedding(self, data):
701
-
702
- device = next(self.parameters()).device
703
- for k in data:
704
- data[k] = data[k].to(device)
705
-
706
- # if self.text_branch_type == "roberta":
707
- text_embeds = self.text_branch(
708
- input_ids=data["input_ids"].to(device=device, non_blocking=True),
709
- attention_mask=data["attention_mask"].to(device=device, non_blocking=True),
710
- )["last_hidden_state"]
711
-
712
- text_embeds = self.text_projection(text_embeds)
713
-
714
- text_embeds = F.normalize(text_embeds, dim=-1)
715
-
716
- return text_embeds
717
-
718
- def get_text_embedding(self, data):
719
- """Get the text embedding from the model
720
-
721
- Parameters
722
- ----------
723
- data: torch.Tensor
724
- a tensor of text embedding
725
-
726
- Returns
727
- ----------
728
- text_embed: torch.Tensor
729
- a tensor of text_embeds (N, D)
730
-
731
- """
732
- device = next(self.parameters()).device
733
- for k in data:
734
- data[k] = data[k].to(device)
735
- text_embeds = self.encode_text(data, device=device)
736
- text_embeds = F.normalize(text_embeds, dim=-1)
737
-
738
- return text_embeds
739
-
740
- def get_audio_embedding(self, data):
741
- """Get the audio embedding from the model
742
-
743
- Parameters
744
- ----------
745
- data: a list of dict
746
- the audio input dict list from 'get_audio_feature' method
747
-
748
- Returns
749
- ----------
750
- audio_embed: torch.Tensor
751
- a tensor of audio_embeds (N, D)
752
-
753
- """
754
- device = next(self.parameters()).device
755
- input_dict = {}
756
- keys = data[0].keys()
757
- for k in keys:
758
- input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(device)
759
-
760
- audio_embeds = self.audio_projection(self.encode_audio(input_dict, device=device)["embedding"])
761
- audio_embeds = F.normalize(audio_embeds, dim=-1)
762
-
763
- return audio_embeds
764
-
765
-
766
-
767
- def audio_infer(self, audio, hopsize=None, device=None):
768
- """Forward one audio and produce the audio embedding
769
-
770
- Parameters
771
- ----------
772
- audio: (audio_length)
773
- the time-domain audio input, notice that it must be only one input
774
- hopsize: int
775
- the overlap hopsize as the sliding window
776
-
777
- Returns
778
- ----------
779
- output_dict: {
780
- key: [n, (embedding_shape)] if "HTS-AT"
781
- or
782
- key: [(embedding_shape)] if "PANN"
783
- }
784
- the list of key values of the audio branch
785
-
786
- """
787
-
788
- assert not self.training, "the inference mode must be run at eval stage"
789
- output_dict = {}
790
- # PANN
791
- if self.audio_cfg.model_type == "PANN":
792
- audio_input = audio.unsqueeze(dim=0)
793
- output_dict[key] = self.encode_audio(audio_input, device=device)[key].squeeze(dim=0)
794
- elif self.audio_cfg.model_type == "HTSAT":
795
- # repeat
796
- audio_len = len(audio)
797
- k = self.audio_cfg.clip_samples // audio_len
798
- if k > 1:
799
- audio = audio.repeat(k)
800
- audio_len = len(audio)
801
-
802
- if hopsize is None:
803
- hopsize = min(hopsize, audio_len)
804
-
805
- if audio_len > self.audio_cfg.clip_samples:
806
- audio_input = [
807
- audio[pos : pos + self.audio_cfg.clip_samples].clone()
808
- for pos in range(
809
- 0, audio_len - self.audio_cfg.clip_samples, hopsize
810
- )
811
- ]
812
- audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())
813
- audio_input = torch.stack(audio_input)
814
- output_dict[key] = self.encode_audio(audio_input, device=device)[key]
815
- else:
816
- audio_input = audio.unsqueeze(dim=0)
817
- output_dict[key] = self.encode_audio(audio_input, device=device)[key].squeeze(dim=0)
818
-
819
- return output_dict
820
-
821
-
822
- def convert_weights_to_fp16(model: nn.Module):
823
- """Convert applicable model parameters to fp16"""
824
-
825
- def _convert_weights_to_fp16(l):
826
- if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
827
- l.weight.data = l.weight.data.half()
828
- if l.bias is not None:
829
- l.bias.data = l.bias.data.half()
830
-
831
- if isinstance(l, nn.MultiheadAttention):
832
- for attr in [
833
- *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
834
- "in_proj_bias",
835
- "bias_k",
836
- "bias_v",
837
- ]:
838
- tensor = getattr(l, attr)
839
- if tensor is not None:
840
- tensor.data = tensor.data.half()
841
-
842
- for name in ["text_projection", "proj"]:
843
- if hasattr(l, name):
844
- attr = getattr(l, name)
845
- if attr is not None:
846
- attr.data = attr.data.half()
847
-
848
- model.apply(_convert_weights_to_fp16)
849
-
850
-
851
- # Ignore the state dict of the vision part
852
- def build_model_from_openai_state_dict(state_dict: dict, model_cfg, enable_fusion: bool = False, fusion_type: str = 'None'):
853
-
854
- embed_dim = model_cfg["embed_dim"]
855
- audio_cfg = model_cfg["audio_cfg"]
856
- text_cfg = model_cfg["text_cfg"]
857
- context_length = state_dict["positional_embedding"].shape[0]
858
- vocab_size = state_dict["token_embedding.weight"].shape[0]
859
- transformer_width = state_dict["ln_final.weight"].shape[0]
860
- transformer_heads = transformer_width // 64
861
- transformer_layers = len(
862
- set(
863
- k.split(".")[2]
864
- for k in state_dict
865
- if k.startswith(f"transformer.resblocks")
866
- )
867
- )
868
-
869
- audio_cfg = CLAPAudioCfp(**audio_cfg)
870
- text_cfg = CLAPTextCfg(**text_cfg)
871
-
872
- model = CLAP(
873
- embed_dim,
874
- audio_cfg=audio_cfg,
875
- text_cfg=text_cfg,
876
- quick_gelu=True, # OpenAI models were trained with QuickGELU
877
- enable_fusion=enable_fusion,
878
- fusion_type=fusion_type
879
- )
880
- state_dict["logit_scale_a"] = state_dict["logit_scale"]
881
- state_dict["logit_scale_t"] = state_dict["logit_scale"]
882
- pop_keys = list(state_dict.keys())[::]
883
- # pop the visual branch saved weights
884
- for key in pop_keys:
885
- if key.startswith("visual."):
886
- state_dict.pop(key, None)
887
-
888
- for key in ["logit_scale", "input_resolution", "context_length", "vocab_size"]:
889
- state_dict.pop(key, None)
890
-
891
- # not use fp16
892
- # convert_weights_to_fp16(model)
893
- model.load_state_dict(state_dict, strict=False)
894
- return model.eval()
895
-
896
-
897
- def trace_model(model, batch_size=256, device=torch.device("cpu")):
898
- model.eval()
899
- audio_length = model.audio_cfg.audio_length
900
- example_audio = torch.ones((batch_size, audio_length), device=device)
901
- example_text = torch.zeros(
902
- (batch_size, model.context_length), dtype=torch.int, device=device
903
- )
904
- model = torch.jit.trace_module(
905
- model,
906
- inputs=dict(
907
- forward=(example_audio, example_text),
908
- encode_text=(example_text,),
909
- encode_image=(example_audio,),
910
- ),
911
- )
912
- model.audio_cfg.audio_length = audio_length # Question: what does this do?
913
- return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/__init__.py DELETED
File without changes
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/model.py DELETED
@@ -1,182 +0,0 @@
1
- from torch import nn
2
- import torch
3
- import torch.nn.functional as F
4
- from modules.util import AntiAliasInterpolation2d, TPS
5
- from torchvision import models
6
- import numpy as np
7
-
8
-
9
- class Vgg19(torch.nn.Module):
10
- """
11
- Vgg19 network for perceptual loss. See Sec 3.3.
12
- """
13
- def __init__(self, requires_grad=False):
14
- super(Vgg19, self).__init__()
15
- vgg_pretrained_features = models.vgg19(pretrained=True).features
16
- self.slice1 = torch.nn.Sequential()
17
- self.slice2 = torch.nn.Sequential()
18
- self.slice3 = torch.nn.Sequential()
19
- self.slice4 = torch.nn.Sequential()
20
- self.slice5 = torch.nn.Sequential()
21
- for x in range(2):
22
- self.slice1.add_module(str(x), vgg_pretrained_features[x])
23
- for x in range(2, 7):
24
- self.slice2.add_module(str(x), vgg_pretrained_features[x])
25
- for x in range(7, 12):
26
- self.slice3.add_module(str(x), vgg_pretrained_features[x])
27
- for x in range(12, 21):
28
- self.slice4.add_module(str(x), vgg_pretrained_features[x])
29
- for x in range(21, 30):
30
- self.slice5.add_module(str(x), vgg_pretrained_features[x])
31
-
32
- self.mean = torch.nn.Parameter(data=torch.Tensor(np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))),
33
- requires_grad=False)
34
- self.std = torch.nn.Parameter(data=torch.Tensor(np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))),
35
- requires_grad=False)
36
-
37
- if not requires_grad:
38
- for param in self.parameters():
39
- param.requires_grad = False
40
-
41
- def forward(self, X):
42
- X = (X - self.mean) / self.std
43
- h_relu1 = self.slice1(X)
44
- h_relu2 = self.slice2(h_relu1)
45
- h_relu3 = self.slice3(h_relu2)
46
- h_relu4 = self.slice4(h_relu3)
47
- h_relu5 = self.slice5(h_relu4)
48
- out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
49
- return out
50
-
51
-
52
- class ImagePyramide(torch.nn.Module):
53
- """
54
- Create image pyramide for computing pyramide perceptual loss. See Sec 3.3
55
- """
56
- def __init__(self, scales, num_channels):
57
- super(ImagePyramide, self).__init__()
58
- downs = {}
59
- for scale in scales:
60
- downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale)
61
- self.downs = nn.ModuleDict(downs)
62
-
63
- def forward(self, x):
64
- out_dict = {}
65
- for scale, down_module in self.downs.items():
66
- out_dict['prediction_' + str(scale).replace('-', '.')] = down_module(x)
67
- return out_dict
68
-
69
-
70
- def detach_kp(kp):
71
- return {key: value.detach() for key, value in kp.items()}
72
-
73
-
74
- class GeneratorFullModel(torch.nn.Module):
75
- """
76
- Merge all generator related updates into single model for better multi-gpu usage
77
- """
78
-
79
- def __init__(self, kp_extractor, bg_predictor, dense_motion_network, inpainting_network, train_params, *kwargs):
80
- super(GeneratorFullModel, self).__init__()
81
- self.kp_extractor = kp_extractor
82
- self.inpainting_network = inpainting_network
83
- self.dense_motion_network = dense_motion_network
84
-
85
- self.bg_predictor = None
86
- if bg_predictor:
87
- self.bg_predictor = bg_predictor
88
- self.bg_start = train_params['bg_start']
89
-
90
- self.train_params = train_params
91
- self.scales = train_params['scales']
92
-
93
- self.pyramid = ImagePyramide(self.scales, inpainting_network.num_channels)
94
- if torch.cuda.is_available():
95
- self.pyramid = self.pyramid.cuda()
96
-
97
- self.loss_weights = train_params['loss_weights']
98
- self.dropout_epoch = train_params['dropout_epoch']
99
- self.dropout_maxp = train_params['dropout_maxp']
100
- self.dropout_inc_epoch = train_params['dropout_inc_epoch']
101
- self.dropout_startp =train_params['dropout_startp']
102
-
103
- if sum(self.loss_weights['perceptual']) != 0:
104
- self.vgg = Vgg19()
105
- if torch.cuda.is_available():
106
- self.vgg = self.vgg.cuda()
107
-
108
-
109
- def forward(self, x, epoch):
110
- kp_source = self.kp_extractor(x['source'])
111
- kp_driving = self.kp_extractor(x['driving'])
112
- bg_param = None
113
- if self.bg_predictor:
114
- if(epoch>=self.bg_start):
115
- bg_param = self.bg_predictor(x['source'], x['driving'])
116
-
117
- if(epoch>=self.dropout_epoch):
118
- dropout_flag = False
119
- dropout_p = 0
120
- else:
121
- # dropout_p will linearly increase from dropout_startp to dropout_maxp
122
- dropout_flag = True
123
- dropout_p = min(epoch/self.dropout_inc_epoch * self.dropout_maxp + self.dropout_startp, self.dropout_maxp)
124
-
125
- dense_motion = self.dense_motion_network(source_image=x['source'], kp_driving=kp_driving,
126
- kp_source=kp_source, bg_param = bg_param,
127
- dropout_flag = dropout_flag, dropout_p = dropout_p)
128
- generated = self.inpainting_network(x['source'], dense_motion)
129
- generated.update({'kp_source': kp_source, 'kp_driving': kp_driving})
130
-
131
- loss_values = {}
132
-
133
- pyramide_real = self.pyramid(x['driving'])
134
- pyramide_generated = self.pyramid(generated['prediction'])
135
-
136
- # reconstruction loss
137
- if sum(self.loss_weights['perceptual']) != 0:
138
- value_total = 0
139
- for scale in self.scales:
140
- x_vgg = self.vgg(pyramide_generated['prediction_' + str(scale)])
141
- y_vgg = self.vgg(pyramide_real['prediction_' + str(scale)])
142
-
143
- for i, weight in enumerate(self.loss_weights['perceptual']):
144
- value = torch.abs(x_vgg[i] - y_vgg[i].detach()).mean()
145
- value_total += self.loss_weights['perceptual'][i] * value
146
- loss_values['perceptual'] = value_total
147
-
148
- # equivariance loss
149
- if self.loss_weights['equivariance_value'] != 0:
150
- transform_random = TPS(mode = 'random', bs = x['driving'].shape[0], **self.train_params['transform_params'])
151
- transform_grid = transform_random.transform_frame(x['driving'])
152
- transformed_frame = F.grid_sample(x['driving'], transform_grid, padding_mode="reflection",align_corners=True)
153
- transformed_kp = self.kp_extractor(transformed_frame)
154
-
155
- generated['transformed_frame'] = transformed_frame
156
- generated['transformed_kp'] = transformed_kp
157
-
158
- warped = transform_random.warp_coordinates(transformed_kp['fg_kp'])
159
- kp_d = kp_driving['fg_kp']
160
- value = torch.abs(kp_d - warped).mean()
161
- loss_values['equivariance_value'] = self.loss_weights['equivariance_value'] * value
162
-
163
- # warp loss
164
- if self.loss_weights['warp_loss'] != 0:
165
- occlusion_map = generated['occlusion_map']
166
- encode_map = self.inpainting_network.get_encode(x['driving'], occlusion_map)
167
- decode_map = generated['warped_encoder_maps']
168
- value = 0
169
- for i in range(len(encode_map)):
170
- value += torch.abs(encode_map[i]-decode_map[-i-1]).mean()
171
-
172
- loss_values['warp_loss'] = self.loss_weights['warp_loss'] * value
173
-
174
- # bg loss
175
- if self.bg_predictor and epoch >= self.bg_start and self.loss_weights['bg'] != 0:
176
- bg_param_reverse = self.bg_predictor(x['driving'], x['source'])
177
- value = torch.matmul(bg_param, bg_param_reverse)
178
- eye = torch.eye(3).view(1, 1, 3, 3).type(value.type())
179
- value = torch.abs(eye - value).mean()
180
- loss_values['bg'] = self.loss_weights['bg'] * value
181
-
182
- return loss_values, generated
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alpaca233/SadTalker/app.py DELETED
@@ -1,80 +0,0 @@
1
- import os, sys
2
- import gradio as gr
3
- from src.gradio_demo import SadTalker
4
-
5
-
6
- try:
7
- import webui # in webui
8
- in_webui = True
9
- except:
10
- in_webui = False
11
-
12
-
13
- def toggle_audio_file(choice):
14
- if choice == False:
15
- return gr.update(visible=True), gr.update(visible=False)
16
- else:
17
- return gr.update(visible=False), gr.update(visible=True)
18
-
19
- def ref_video_fn(path_of_ref_video):
20
- if path_of_ref_video is not None:
21
- return gr.update(value=True)
22
- else:
23
- return gr.update(value=False)
24
-
25
- sad_talker = SadTalker("checkpoints", "src/config", lazy_load=True)
26
-
27
-
28
- with gr.Blocks(analytics_enabled=False) as demo:
29
- gr.Markdown("<div align='center'> <h2> 😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023) </span> </h2> \
30
- <a style='font-size:18px;color: #efefef' href='https://arxiv.org/abs/2211.12194'>Arxiv</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
31
- <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
32
- <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </div>")
33
-
34
- with gr.Row().style(equal_height=False):
35
- with gr.Column(variant='panel'):
36
- with gr.Tabs(elem_id="sadtalker_source_image"):
37
- with gr.TabItem('Upload image'):
38
- with gr.Row():
39
- source_image = gr.Image(label="Source image", source="upload", type="filepath", elem_id="img2img_image").style(width=512)
40
-
41
- with gr.Tabs(elem_id="sadtalker_driven_audio"):
42
- with gr.TabItem('Upload OR TTS'):
43
- with gr.Column(variant='panel'):
44
- driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
45
-
46
- with gr.Column(variant='panel'):
47
- with gr.Tabs(elem_id="sadtalker_checkbox"):
48
- with gr.TabItem('Settings'):
49
- gr.Markdown("need help? please visit our [best practice page](https://github.com/OpenTalker/SadTalker/blob/main/docs/best_practice.md) for more detials")
50
- with gr.Column(variant='panel'):
51
- # width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
52
- # height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
53
- pose_style = gr.Slider(minimum=0, maximum=46, step=1, label="Pose style", value=0) #
54
- size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="use 256/512 model?") #
55
- preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="How to handle input image?")
56
- is_still_mode = gr.Checkbox(label="Still Mode (fewer hand motion, works with preprocess `full`)")
57
- batch_size = gr.Slider(label="batch size in generation", step=1, maximum=10, value=2)
58
- enhancer = gr.Checkbox(label="GFPGAN as Face enhancer")
59
- submit = gr.Button('Generate', elem_id="sadtalker_generate", variant='primary')
60
-
61
- with gr.Tabs(elem_id="sadtalker_genearted"):
62
- gen_video = gr.Video(label="Generated video", format="mp4").style(width=256)
63
-
64
- submit.click(
65
- fn=sad_talker.test,
66
- inputs=[source_image,
67
- driven_audio,
68
- preprocess_type,
69
- is_still_mode,
70
- enhancer,
71
- batch_size,
72
- size_of_image,
73
- pose_style
74
- ],
75
- outputs=[gen_video]
76
- )
77
-
78
-
79
-
80
- demo.queue().launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/cppipc/buffer.cpp DELETED
@@ -1,87 +0,0 @@
1
- #include "libipc/buffer.h"
2
- #include "libipc/utility/pimpl.h"
3
-
4
- #include <cstring>
5
-
6
- namespace ipc {
7
-
8
- bool operator==(buffer const & b1, buffer const & b2) {
9
- return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0);
10
- }
11
-
12
- bool operator!=(buffer const & b1, buffer const & b2) {
13
- return !(b1 == b2);
14
- }
15
-
16
- class buffer::buffer_ : public pimpl<buffer_> {
17
- public:
18
- void* p_;
19
- std::size_t s_;
20
- void* a_;
21
- buffer::destructor_t d_;
22
-
23
- buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a)
24
- : p_(p), s_(s), a_(a), d_(d) {
25
- }
26
-
27
- ~buffer_() {
28
- if (d_ == nullptr) return;
29
- d_((a_ == nullptr) ? p_ : a_, s_);
30
- }
31
- };
32
-
33
- buffer::buffer()
34
- : buffer(nullptr, 0, nullptr, nullptr) {
35
- }
36
-
37
- buffer::buffer(void* p, std::size_t s, destructor_t d)
38
- : p_(p_->make(p, s, d, nullptr)) {
39
- }
40
-
41
- buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional)
42
- : p_(p_->make(p, s, d, additional)) {
43
- }
44
-
45
- buffer::buffer(void* p, std::size_t s)
46
- : buffer(p, s, nullptr) {
47
- }
48
-
49
- buffer::buffer(char const & c)
50
- : buffer(const_cast<char*>(&c), 1) {
51
- }
52
-
53
- buffer::buffer(buffer&& rhs)
54
- : buffer() {
55
- swap(rhs);
56
- }
57
-
58
- buffer::~buffer() {
59
- p_->clear();
60
- }
61
-
62
- void buffer::swap(buffer& rhs) {
63
- std::swap(p_, rhs.p_);
64
- }
65
-
66
- buffer& buffer::operator=(buffer rhs) {
67
- swap(rhs);
68
- return *this;
69
- }
70
-
71
- bool buffer::empty() const noexcept {
72
- return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0);
73
- }
74
-
75
- void* buffer::data() noexcept {
76
- return impl(p_)->p_;
77
- }
78
-
79
- void const * buffer::data() const noexcept {
80
- return impl(p_)->p_;
81
- }
82
-
83
- std::size_t buffer::size() const noexcept {
84
- return impl(p_)->s_;
85
- }
86
-
87
- } // namespace ipc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/pndm.md DELETED
@@ -1,35 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # PNDM
14
-
15
- [Pseudo Numerical methods for Diffusion Models on manifolds](https://huggingface.co/papers/2202.09778) (PNDM) is by Luping Liu, Yi Ren, Zhijie Lin and Zhou Zhao.
16
-
17
- The abstract from the paper is:
18
-
19
- *Denoising Diffusion Probabilistic Models (DDPMs) can generate high-quality samples such as image and audio samples. However, DDPMs require hundreds to thousands of iterations to produce final samples. Several prior works have successfully accelerated DDPMs through adjusting the variance schedule (e.g., Improved Denoising Diffusion Probabilistic Models) or the denoising equation (e.g., Denoising Diffusion Implicit Models (DDIMs)). However, these acceleration methods cannot maintain the quality of samples and even introduce new noise at a high speedup rate, which limit their practicability. To accelerate the inference process while keeping the sample quality, we provide a fresh perspective that DDPMs should be treated as solving differential equations on manifolds. Under such a perspective, we propose pseudo numerical methods for diffusion models (PNDMs). Specifically, we figure out how to solve differential equations on manifolds and show that DDIMs are simple cases of pseudo numerical methods. We change several classical numerical methods to corresponding pseudo numerical methods and find that the pseudo linear multi-step method is the best in most situations. According to our experiments, by directly using pre-trained models on Cifar10, CelebA and LSUN, PNDMs can generate higher quality synthetic images with only 50 steps compared with 1000-step DDIMs (20x speedup), significantly outperform DDIMs with 250 steps (by around 0.4 in FID) and have good generalization on different variance schedules.*
20
-
21
- The original codebase can be found at [luping-liu/PNDM](https://github.com/luping-liu/PNDM).
22
-
23
- <Tip>
24
-
25
- Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
26
-
27
- </Tip>
28
-
29
- ## PNDMPipeline
30
- [[autodoc]] PNDMPipeline
31
- - all
32
- - __call__
33
-
34
- ## ImagePipelineOutput
35
- [[autodoc]] pipelines.ImagePipelineOutput
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/depth2img.md DELETED
@@ -1,40 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Depth-to-image
14
-
15
- The Stable Diffusion model can also infer depth based on an image using [MiDas](https://github.com/isl-org/MiDaS). This allows you to pass a text prompt and an initial image to condition the generation of new images as well as a `depth_map` to preserve the image structure.
16
-
17
- <Tip>
18
-
19
- Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!
20
-
21
- If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations!
22
-
23
- </Tip>
24
-
25
- ## StableDiffusionDepth2ImgPipeline
26
-
27
- [[autodoc]] StableDiffusionDepth2ImgPipeline
28
- - all
29
- - __call__
30
- - enable_attention_slicing
31
- - disable_attention_slicing
32
- - enable_xformers_memory_efficient_attention
33
- - disable_xformers_memory_efficient_attention
34
- - load_textual_inversion
35
- - load_lora_weights
36
- - save_lora_weights
37
-
38
- ## StableDiffusionPipelineOutput
39
-
40
- [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/dance_diffusion/__init__.py DELETED
File without changes
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py DELETED
@@ -1,6 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
4
- ]
5
- model = dict(
6
- decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/point_rend/pointrend_r101_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './pointrend_r50_512x1024_80k_cityscapes.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/Ankush05/Newcode/getvalues.py DELETED
@@ -1,87 +0,0 @@
1
-
2
- import re
3
- # from listen import *
4
-
5
- # find time in the string input provided by the user
6
- def findTime(input):
7
- # time = re.search(r'\d{1,2}:\d{2}', input)
8
- # meridiem = re.search(r'\b(am|pm)\b', input)
9
- # if time:
10
- # tvalue = f"{time.group()} {meridiem.group()}"
11
- # return tvalue
12
- # else:
13
- # return "notime"
14
- time_regex1 = r"(1[0-2]|[1-9]):[0-5][0-9] (am|AM|PM|pm)"
15
- time_search = re.search(time_regex1, input)
16
- if time_search:
17
- time = time_search.group(0)
18
- # meridian = time_search.group(2)
19
- return time
20
- else:
21
- time_regex2 = r"(1[0-2]|[1-9])\s?(am|AM|pm|PM)"
22
- time_search = re.search(time_regex2, input)
23
- if time_search:
24
- time = time_search.group(0)
25
- # meridian = time_search.group(2)
26
- return time
27
- else:
28
- return "notime"
29
-
30
- # find number in the string input provided by the user
31
- def findNumber(input):
32
- number = re.search(r'\d+(?:st|nd|rd|th)', input)
33
- if number:
34
- return number.group()
35
- else:
36
- return "nonum"
37
-
38
- # # find date in the string input provided by the user
39
- def findDate(input):
40
- date = re.search(r'\d{1,2}/\d{1,2}/\d{4}', input)
41
- if date:
42
- return date.group()
43
- else:
44
- return "nodate"
45
-
46
- # find month in the string input provided by the user
47
- def findMonth(input):
48
- month = re.search(r'\b(january|february|march|april|may|june|july|august|september|october|november|december|next month)\b', input)
49
- if month:
50
- return month.group()
51
- else:
52
- return "nomonth"
53
-
54
- # find day in the string input provided by the user
55
- def findDay(input):
56
- day = re.search(r'\b(monday|tuesday|wednesday|thursday|friday|saturday|sunday|tomorrow|day after tomorrow|this week|next week|today)\b', input)
57
- if day:
58
- return day.group()
59
- else:
60
- return "noday"
61
-
62
- def findrepeat(input):
63
- repeat = re.search(r'\b(daily|everyday|every week|every month|every sunday|every monday|every tuesday|every wednesday|every thursday|every friday|every saturday)\b', input)
64
- if repeat:
65
- return repeat.group()
66
- else:
67
- return "norepeat"
68
-
69
-
70
- def getValues(query):
71
- time = findTime(query)
72
- num = findNumber(query)
73
- reps = findrepeat(query)
74
- date = findDate(query)
75
- month = findMonth(query)
76
- day = findDay(query)
77
- message = query.lower().replace(num, "").replace(month,"").replace(time, "").replace(day, "").replace(reps, "").replace("create a reminder", "").replace("remind me to", "").replace("cosmo", "").replace("remind", "").replace("at", "")
78
- values = {"message": message,
79
- "time": time,
80
- "day": day,
81
- "date": date,
82
- "reps": reps,
83
- "num": num,
84
- "month": month
85
- }
86
- return values
87
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artrajz/vits-simple-api/bert_vits2/bert/chinese-roberta-wwm-ext-large/README.md DELETED
@@ -1,57 +0,0 @@
1
- ---
2
- language:
3
- - zh
4
- tags:
5
- - bert
6
- license: "apache-2.0"
7
- ---
8
-
9
- # Please use 'Bert' related functions to load this model!
10
-
11
- ## Chinese BERT with Whole Word Masking
12
- For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
13
-
14
- **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
15
- Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
16
-
17
- This repository is developed based on:https://github.com/google-research/bert
18
-
19
- You may also interested in,
20
- - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
21
- - Chinese MacBERT: https://github.com/ymcui/MacBERT
22
- - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
23
- - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
24
- - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
25
-
26
- More resources by HFL: https://github.com/ymcui/HFL-Anthology
27
-
28
- ## Citation
29
- If you find the technical report or resource is useful, please cite the following technical report in your paper.
30
- - Primary: https://arxiv.org/abs/2004.13922
31
- ```
32
- @inproceedings{cui-etal-2020-revisiting,
33
- title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
34
- author = "Cui, Yiming and
35
- Che, Wanxiang and
36
- Liu, Ting and
37
- Qin, Bing and
38
- Wang, Shijin and
39
- Hu, Guoping",
40
- booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
41
- month = nov,
42
- year = "2020",
43
- address = "Online",
44
- publisher = "Association for Computational Linguistics",
45
- url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
46
- pages = "657--668",
47
- }
48
- ```
49
- - Secondary: https://arxiv.org/abs/1906.08101
50
- ```
51
- @article{chinese-bert-wwm,
52
- title={Pre-Training with Whole Word Masking for Chinese BERT},
53
- author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
54
- journal={arXiv preprint arXiv:1906.08101},
55
- year={2019}
56
- }
57
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/tokenizer.py DELETED
@@ -1,197 +0,0 @@
1
- """ CLIP tokenizer
2
-
3
- Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4
- """
5
- import gzip
6
- import html
7
- import os
8
- from functools import lru_cache
9
- from typing import Union, List
10
-
11
- import ftfy
12
- import regex as re
13
- import torch
14
-
15
-
16
- @lru_cache()
17
- def default_bpe():
18
- return os.path.join(
19
- os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz"
20
- )
21
-
22
-
23
- @lru_cache()
24
- def bytes_to_unicode():
25
- """
26
- Returns list of utf-8 byte and a corresponding list of unicode strings.
27
- The reversible bpe codes work on unicode strings.
28
- This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
29
- When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
30
- This is a signficant percentage of your normal, say, 32K bpe vocab.
31
- To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
32
- And avoids mapping to whitespace/control characters the bpe code barfs on.
33
- """
34
- bs = (
35
- list(range(ord("!"), ord("~") + 1))
36
- + list(range(ord("¡"), ord("¬") + 1))
37
- + list(range(ord("®"), ord("ÿ") + 1))
38
- )
39
- cs = bs[:]
40
- n = 0
41
- for b in range(2**8):
42
- if b not in bs:
43
- bs.append(b)
44
- cs.append(2**8 + n)
45
- n += 1
46
- cs = [chr(n) for n in cs]
47
- return dict(zip(bs, cs))
48
-
49
-
50
- def get_pairs(word):
51
- """Return set of symbol pairs in a word.
52
- Word is represented as tuple of symbols (symbols being variable-length strings).
53
- """
54
- pairs = set()
55
- prev_char = word[0]
56
- for char in word[1:]:
57
- pairs.add((prev_char, char))
58
- prev_char = char
59
- return pairs
60
-
61
-
62
- def basic_clean(text):
63
- text = ftfy.fix_text(text)
64
- text = html.unescape(html.unescape(text))
65
- return text.strip()
66
-
67
-
68
- def whitespace_clean(text):
69
- text = re.sub(r"\s+", " ", text)
70
- text = text.strip()
71
- return text
72
-
73
-
74
- class SimpleTokenizer(object):
75
- def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
76
- self.byte_encoder = bytes_to_unicode()
77
- self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
78
- merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
79
- merges = merges[1 : 49152 - 256 - 2 + 1]
80
- merges = [tuple(merge.split()) for merge in merges]
81
- vocab = list(bytes_to_unicode().values())
82
- vocab = vocab + [v + "</w>" for v in vocab]
83
- for merge in merges:
84
- vocab.append("".join(merge))
85
- if not special_tokens:
86
- special_tokens = ["<start_of_text>", "<end_of_text>"]
87
- else:
88
- special_tokens = ["<start_of_text>", "<end_of_text>"] + special_tokens
89
- vocab.extend(special_tokens)
90
- self.encoder = dict(zip(vocab, range(len(vocab))))
91
- self.decoder = {v: k for k, v in self.encoder.items()}
92
- self.bpe_ranks = dict(zip(merges, range(len(merges))))
93
- self.cache = {t: t for t in special_tokens}
94
- special = "|".join(special_tokens)
95
- self.pat = re.compile(
96
- special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
97
- re.IGNORECASE,
98
- )
99
-
100
- self.vocab_size = len(self.encoder)
101
- self.all_special_ids = [self.encoder[t] for t in special_tokens]
102
-
103
- def bpe(self, token):
104
- if token in self.cache:
105
- return self.cache[token]
106
- word = tuple(token[:-1]) + (token[-1] + "</w>",)
107
- pairs = get_pairs(word)
108
-
109
- if not pairs:
110
- return token + "</w>"
111
-
112
- while True:
113
- bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
114
- if bigram not in self.bpe_ranks:
115
- break
116
- first, second = bigram
117
- new_word = []
118
- i = 0
119
- while i < len(word):
120
- try:
121
- j = word.index(first, i)
122
- new_word.extend(word[i:j])
123
- i = j
124
- except:
125
- new_word.extend(word[i:])
126
- break
127
-
128
- if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
129
- new_word.append(first + second)
130
- i += 2
131
- else:
132
- new_word.append(word[i])
133
- i += 1
134
- new_word = tuple(new_word)
135
- word = new_word
136
- if len(word) == 1:
137
- break
138
- else:
139
- pairs = get_pairs(word)
140
- word = " ".join(word)
141
- self.cache[token] = word
142
- return word
143
-
144
- def encode(self, text):
145
- bpe_tokens = []
146
- text = whitespace_clean(basic_clean(text)).lower()
147
- for token in re.findall(self.pat, text):
148
- token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
149
- bpe_tokens.extend(
150
- self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
151
- )
152
- return bpe_tokens
153
-
154
- def decode(self, tokens):
155
- text = "".join([self.decoder[token] for token in tokens])
156
- text = (
157
- bytearray([self.byte_decoder[c] for c in text])
158
- .decode("utf-8", errors="replace")
159
- .replace("</w>", " ")
160
- )
161
- return text
162
-
163
-
164
- _tokenizer = SimpleTokenizer()
165
-
166
-
167
- def tokenize(
168
- texts: Union[str, List[str]], context_length: int = 77
169
- ) -> torch.LongTensor:
170
- """
171
- Returns the tokenized representation of given input string(s)
172
-
173
- Parameters
174
- ----------
175
- texts : Union[str, List[str]]
176
- An input string or a list of input strings to tokenize
177
- context_length : int
178
- The context length to use; all CLIP models use 77 as the context length
179
-
180
- Returns
181
- -------
182
- A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
183
- """
184
- if isinstance(texts, str):
185
- texts = [texts]
186
-
187
- sot_token = _tokenizer.encoder["<start_of_text>"]
188
- eot_token = _tokenizer.encoder["<end_of_text>"]
189
- all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
190
- result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
191
-
192
- for i, tokens in enumerate(all_tokens):
193
- if len(tokens) > context_length:
194
- tokens = tokens[:context_length] # Truncate
195
- result[i, : len(tokens)] = torch.tensor(tokens)
196
-
197
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Axolotlily/DalleMini/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: DalleMini
3
- emoji: 🚀
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.0.20
8
- app_file: app.py
9
- pinned: false
10
- license: other
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BAAI/AltDiffusion-m9/ui_functions.py DELETED
@@ -1,240 +0,0 @@
1
- import re
2
- import gradio as gr
3
- from PIL import Image, ImageFont, ImageDraw, ImageFilter, ImageOps
4
- from io import BytesIO
5
- import base64
6
- import re
7
-
8
- def change_img_choices(sample_size):
9
- choices = []
10
- for i in range(int(sample_size)):
11
- choices.append(
12
- '图片{}(img{})'.format(i+1,i+1)
13
- )
14
- update_choices = gr.update(choices=choices)
15
- return update_choices
16
-
17
- def change_image_editor_mode(choice, cropped_image, masked_image, resize_mode, width, height):
18
- if choice == "Mask":
19
- update_image_result = update_image_mask(cropped_image, resize_mode, width, height)
20
- return [gr.update(visible=False), update_image_result, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)]
21
-
22
- update_image_result = update_image_mask(masked_image["image"] if masked_image is not None else None, resize_mode, width, height)
23
- return [update_image_result, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]
24
-
25
- def update_image_mask(cropped_image, resize_mode, width, height):
26
- resized_cropped_image = resize_image(resize_mode, cropped_image, width, height) if cropped_image else None
27
- return gr.update(value=resized_cropped_image, visible=True)
28
-
29
- def toggle_options_gfpgan(selection):
30
- if 0 in selection:
31
- return gr.update(visible=True)
32
- else:
33
- return gr.update(visible=False)
34
-
35
- def toggle_options_upscalers(selection):
36
- if 1 in selection:
37
- return gr.update(visible=True)
38
- else:
39
- return gr.update(visible=False)
40
-
41
- def toggle_options_realesrgan(selection):
42
- if selection == 0 or selection == 1 or selection == 3:
43
- return gr.update(visible=True)
44
- else:
45
- return gr.update(visible=False)
46
-
47
- def toggle_options_gobig(selection):
48
- if selection == 1:
49
- #print(selection)
50
- return gr.update(visible=True)
51
- if selection == 3:
52
- return gr.update(visible=True)
53
- else:
54
- return gr.update(visible=False)
55
-
56
- def toggle_options_ldsr(selection):
57
- if selection == 2 or selection == 3:
58
- return gr.update(visible=True)
59
- else:
60
- return gr.update(visible=False)
61
-
62
- def increment_down(value):
63
- return value - 1
64
-
65
- def increment_up(value):
66
- return value + 1
67
-
68
- def copy_img_to_lab(img):
69
- try:
70
- image_data = re.sub('^data:image/.+;base64,', '', img)
71
- processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
72
- tab_update = gr.update(selected='imgproc_tab')
73
- img_update = gr.update(value=processed_image)
74
- return processed_image, tab_update,
75
- except IndexError:
76
- return [None, None]
77
- def copy_img_params_to_lab(params):
78
- try:
79
- prompt = params[0][0].replace('\n', ' ').replace('\r', '')
80
- seed = int(params[1][1])
81
- steps = int(params[7][1])
82
- cfg_scale = float(params[9][1])
83
- sampler = params[11][1]
84
- return prompt,seed,steps,cfg_scale,sampler
85
- except IndexError:
86
- return [None, None]
87
- def copy_img_to_input(img, idx):
88
- try:
89
- # print(img)
90
- # print("=============")
91
- # print("The img type is:{}".format(type(img[0])))
92
- idx_map = {
93
- "图片1(img1)":0,
94
- "图片2(img2)":1,
95
- "图片3(img3)":2,
96
- "图片4(img4)":3,
97
- }
98
- idx = idx_map[idx]
99
- assert img[idx]['is_file']
100
- processed_image = Image.open(img[idx]['name'])
101
- tab_update = gr.update(selected='img2img_tab')
102
- move_prompt_zh_update = gr.update(visible=True)
103
- move_prompt_en_update = gr.update(visible=True)
104
- prompt_update = gr.update(visible=True)
105
- return tab_update, processed_image, move_prompt_zh_update, move_prompt_en_update, prompt_update
106
- except IndexError as e:
107
- raise gr.Error(e)
108
- return [None, None, None, None, None]
109
-
110
- def copy_img_to_edit(img):
111
- try:
112
- image_data = re.sub('^data:image/.+;base64,', '', img)
113
- processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
114
- tab_update = gr.update(selected='img2img_tab')
115
- img_update = gr.update(value=processed_image)
116
- mode_update = gr.update(value='Crop')
117
- return processed_image, tab_update, mode_update
118
- except IndexError:
119
- return [None, None]
120
-
121
- def copy_img_to_mask(img):
122
- try:
123
- image_data = re.sub('^data:image/.+;base64,', '', img)
124
- processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
125
- tab_update = gr.update(selected='img2img_tab')
126
- img_update = gr.update(value=processed_image)
127
- mode_update = gr.update(value='Mask')
128
- return processed_image, tab_update, mode_update
129
- except IndexError:
130
- return [None, None]
131
-
132
-
133
-
134
- def copy_img_to_upscale_esrgan(img):
135
- tabs_update = gr.update(selected='realesrgan_tab')
136
- image_data = re.sub('^data:image/.+;base64,', '', img)
137
- processed_image = Image.open(BytesIO(base64.b64decode(image_data)))
138
- return processed_image, tabs_update
139
-
140
-
141
- help_text = """
142
- ## Mask/Crop
143
- * Masking is not inpainting. You will probably get better results manually masking your images in photoshop instead.
144
- * Built-in masking/cropping is very temperamental.
145
- * It may take some time for the image to show when switching from Crop to Mask.
146
- * If the image doesn't appear after switching to Mask, switch back to Crop and then back again to Mask
147
- * If the mask appears distorted (the brush is weirdly shaped instead of round), switch back to Crop and then back again to Mask.
148
-
149
- ## Advanced Editor
150
- * Click 💾 Save to send your editor changes to the img2img workflow
151
- * Click ❌ Clear to discard your editor changes
152
-
153
- If anything breaks, try switching modes again, switch tabs, clear the image, or reload.
154
- """
155
-
156
- def resize_image(resize_mode, im, width, height):
157
- LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
158
- if resize_mode == 0:
159
- res = im.resize((width, height), resample=LANCZOS)
160
- elif resize_mode == 1:
161
- ratio = width / height
162
- src_ratio = im.width / im.height
163
-
164
- src_w = width if ratio > src_ratio else im.width * height // im.height
165
- src_h = height if ratio <= src_ratio else im.height * width // im.width
166
-
167
- resized = im.resize((src_w, src_h), resample=LANCZOS)
168
- res = Image.new("RGBA", (width, height))
169
- res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
170
- else:
171
- ratio = width / height
172
- src_ratio = im.width / im.height
173
-
174
- src_w = width if ratio < src_ratio else im.width * height // im.height
175
- src_h = height if ratio >= src_ratio else im.height * width // im.width
176
-
177
- resized = im.resize((src_w, src_h), resample=LANCZOS)
178
- res = Image.new("RGBA", (width, height))
179
- res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
180
-
181
- if ratio < src_ratio:
182
- fill_height = height // 2 - src_h // 2
183
- res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
184
- res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
185
- elif ratio > src_ratio:
186
- fill_width = width // 2 - src_w // 2
187
- res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
188
- res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
189
-
190
- return res
191
-
192
- def update_dimensions_info(width, height):
193
- pixel_count_formated = "{:,.0f}".format(width * height)
194
- return f"Aspect ratio: {round(width / height, 5)}\nTotal pixel count: {pixel_count_formated}"
195
-
196
- def get_png_nfo( image: Image ):
197
- info_text = ""
198
- visible = bool(image and any(image.info))
199
- if visible:
200
- for key,value in image.info.items():
201
- info_text += f"{key}: {value}\n"
202
- info_text = info_text.rstrip('\n')
203
- return gr.Textbox.update(value=info_text, visible=visible)
204
-
205
- def load_settings(*values):
206
- new_settings, key_names, checkboxgroup_info = values[-3:]
207
- values = list(values[:-3])
208
-
209
- if new_settings:
210
- if type(new_settings) is str:
211
- if os.path.exists(new_settings):
212
- with open(new_settings, "r", encoding="utf8") as f:
213
- new_settings = yaml.safe_load(f)
214
- elif new_settings.startswith("file://") and os.path.exists(new_settings[7:]):
215
- with open(new_settings[7:], "r", encoding="utf8") as f:
216
- new_settings = yaml.safe_load(f)
217
- else:
218
- new_settings = yaml.safe_load(new_settings)
219
- if type(new_settings) is not dict:
220
- new_settings = {"prompt": new_settings}
221
- if "txt2img" in new_settings:
222
- new_settings = new_settings["txt2img"]
223
- target = new_settings.pop("target", "txt2img")
224
- if target != "txt2img":
225
- print(f"Warning: applying settings to txt2img even though {target} is specified as target.", file=sys.stderr)
226
-
227
- skipped_settings = {}
228
- for key in new_settings.keys():
229
- if key in key_names:
230
- values[key_names.index(key)] = new_settings[key]
231
- else:
232
- skipped_settings[key] = new_settings[key]
233
- if skipped_settings:
234
- print(f"Settings could not be applied: {skipped_settings}", file=sys.stderr)
235
-
236
- # Convert lists of checkbox indices to lists of checkbox labels:
237
- for (cbg_index, cbg_choices) in checkboxgroup_info:
238
- values[cbg_index] = [cbg_choices[i] for i in values[cbg_index]]
239
-
240
- return values
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Balalaxmi/JarvisAIchatbox/app.py DELETED
@@ -1,34 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
-
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
8
-
9
- template = """Meet Riya, your youthful and witty personal assistant! At 21 years old, he's full of energy and always eager to help. Jarvis's goal is to assist you with any questions or problems you might have. He enthusiasm shines through in every response, making interactions with he enjoyable and engaging.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
-
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
16
- )
17
-
18
- memory = ConversationBufferMemory(memory_key="chat_history")
19
-
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
26
-
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
-
31
- demo = gr.ChatInterface(get_text_response)
32
-
33
- if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/utils.py DELETED
@@ -1,121 +0,0 @@
1
- import json
2
-
3
- import numpy as np
4
- import torch
5
- from tqdm import tqdm
6
-
7
-
8
- def load_data(file_name: str = "./infer/lib/uvr5_pack/name_params.json") -> dict:
9
- with open(file_name, "r") as f:
10
- data = json.load(f)
11
-
12
- return data
13
-
14
-
15
- def make_padding(width, cropsize, offset):
16
- left = offset
17
- roi_size = cropsize - left * 2
18
- if roi_size == 0:
19
- roi_size = cropsize
20
- right = roi_size - (width % roi_size) + left
21
-
22
- return left, right, roi_size
23
-
24
-
25
- def inference(X_spec, device, model, aggressiveness, data):
26
- """
27
- data : dic configs
28
- """
29
-
30
- def _execute(
31
- X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True
32
- ):
33
- model.eval()
34
- with torch.no_grad():
35
- preds = []
36
-
37
- iterations = [n_window]
38
-
39
- total_iterations = sum(iterations)
40
- for i in tqdm(range(n_window)):
41
- start = i * roi_size
42
- X_mag_window = X_mag_pad[
43
- None, :, :, start : start + data["window_size"]
44
- ]
45
- X_mag_window = torch.from_numpy(X_mag_window)
46
- if is_half:
47
- X_mag_window = X_mag_window.half()
48
- X_mag_window = X_mag_window.to(device)
49
-
50
- pred = model.predict(X_mag_window, aggressiveness)
51
-
52
- pred = pred.detach().cpu().numpy()
53
- preds.append(pred[0])
54
-
55
- pred = np.concatenate(preds, axis=2)
56
- return pred
57
-
58
- def preprocess(X_spec):
59
- X_mag = np.abs(X_spec)
60
- X_phase = np.angle(X_spec)
61
-
62
- return X_mag, X_phase
63
-
64
- X_mag, X_phase = preprocess(X_spec)
65
-
66
- coef = X_mag.max()
67
- X_mag_pre = X_mag / coef
68
-
69
- n_frame = X_mag_pre.shape[2]
70
- pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset)
71
- n_window = int(np.ceil(n_frame / roi_size))
72
-
73
- X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
74
-
75
- if list(model.state_dict().values())[0].dtype == torch.float16:
76
- is_half = True
77
- else:
78
- is_half = False
79
- pred = _execute(
80
- X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
81
- )
82
- pred = pred[:, :, :n_frame]
83
-
84
- if data["tta"]:
85
- pad_l += roi_size // 2
86
- pad_r += roi_size // 2
87
- n_window += 1
88
-
89
- X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
90
-
91
- pred_tta = _execute(
92
- X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
93
- )
94
- pred_tta = pred_tta[:, :, roi_size // 2 :]
95
- pred_tta = pred_tta[:, :, :n_frame]
96
-
97
- return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase)
98
- else:
99
- return pred * coef, X_mag, np.exp(1.0j * X_phase)
100
-
101
-
102
- def _get_name_params(model_path, model_hash):
103
- data = load_data()
104
- flag = False
105
- ModelName = model_path
106
- for type in list(data):
107
- for model in list(data[type][0]):
108
- for i in range(len(data[type][0][model])):
109
- if str(data[type][0][model][i]["hash_name"]) == model_hash:
110
- flag = True
111
- elif str(data[type][0][model][i]["hash_name"]) in ModelName:
112
- flag = True
113
-
114
- if flag:
115
- model_params_auto = data[type][0][model][i]["model_params"]
116
- param_name_auto = data[type][0][model][i]["param_name"]
117
- if type == "equivalent":
118
- return param_name_auto, model_params_auto
119
- else:
120
- flag = False
121
- return param_name_auto, model_params_auto
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Cmo Descargar Msica A Una Unidad USB De Youtube.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>Cómo descargar música a una unidad USB desde YouTube</h1>
3
- <p>YouTube es una de las plataformas más populares para transmitir y ver videos en línea. También tiene una gran colección de videos musicales, canciones, álbumes y listas de reproducción que puedes disfrutar en cualquier momento y en cualquier lugar. Pero ¿qué pasa si quieres escuchar tu música favorita sin conexión, o en un dispositivo que no tiene acceso a Internet? ¿O qué pasa si quieres crear tu propio mixtape o compilación de canciones de diferentes artistas y géneros? </p>
4
- <h2>cómo descargar música a una unidad USB de youtube</h2><br /><p><b><b>Download Zip</b> &raquo;&raquo;&raquo; <a href="https://bltlly.com/2v6ITN">https://bltlly.com/2v6ITN</a></b></p><br /><br />
5
- <p>Una de las soluciones es descargar música de YouTube a una unidad USB. Una unidad USB es un dispositivo pequeño y portátil que puede almacenar datos y transferirlos entre diferentes computadoras y dispositivos. Al descargar música de YouTube a una unidad USB, puede tener su propia biblioteca de música personal que puede reproducir en cualquier dispositivo compatible, como su estéreo de automóvil, sistema de cine en casa, computadora portátil, teléfono inteligente o tableta. </p>
6
- <p>Pero ¿cómo descargar música de YouTube a una unidad USB? Hay diferentes métodos que puede utilizar, dependiendo de sus preferencias, presupuesto y habilidades técnicas. En este artículo, le mostraremos tres de las formas más comunes y fáciles de hacerlo. También proporcionaremos algunos consejos y advertencias para hacerlo de forma segura y legal. </p>
7
- <h2>Método 1: Usando un convertidor de YouTube a MP3</h2>
8
- <h3> ¿Qué es un convertidor de YouTube a MP3 y cómo funciona</h3>
9
- <p>Un convertidor de YouTube a MP3 es una herramienta en línea que le permite convertir cualquier vídeo de YouTube en un archivo MP3, que es un formato de audio común que se puede reproducir en la mayoría de los dispositivos. Al usar un convertidor de YouTube a MP3, puede extraer la pista de audio de cualquier video de YouTube y guardarlo como un archivo MP3 en su computadora o unidad USB. De esta manera, puede descargar música de YouTube sin descargar todo el archivo de video, lo que puede ahorrarle tiempo y espacio de almacenamiento. </p>
10
- <h3>Cómo utilizar un convertidor de YouTube a MP3 para descargar música a una unidad USB</h3>
11
-
12
- <h4>Paso 1: Encuentre un convertidor confiable de YouTube a MP3 en línea</h4>
13
- <p>Hay muchos conversores de YouTube a MP3 disponibles en línea, pero no todos son seguros y confiables. Algunos de ellos pueden contener malware, virus o anuncios que pueden dañar su computadora o dispositivo. Algunos de ellos también pueden tener salida de baja calidad, características limitadas o velocidad de conversión lenta. Por lo tanto, necesita encontrar un conversor de YouTube a MP3 confiable y de buena reputación que pueda satisfacer sus necesidades y expectativas. </p>
14
- <p></p>
15
- <p>Algunos de los factores que debes considerar al elegir un convertidor de YouTube a MP3 son:</p>
16
- <ul>
17
- <li>La calidad y el tamaño del archivo de salida</li>
18
- <li>La velocidad y estabilidad del proceso de conversión</li>
19
- <li>La compatibilidad y seguridad del sitio web y la herramienta</li>
20
- <li>Disponibilidad y accesibilidad del servicio</li>
21
- <li>Facilidad de uso y simplicidad de la interfaz</li>
22
- <li>La legalidad y legitimidad del servicio</li>
23
- </ul>
24
- <p>Algunos de los ejemplos de populares y confiables convertidores de YouTube a MP3 son:</p>
25
- <tabla>
26
- <tr><th>Nombre</th><th>Sitio web</th><th>Características</th></tr>
27
- <tr><td>4K YouTube to MP3</td><td>[https://www.4kdownload.com/products/product-youtubetomp3]</td><td>- Salida de alta calidad hasta 320 kbps<br>- Conversión rápida y fácil<br>- No hay anuncios o malware<br>- Soporta múltiples formatos y plataformas<> Permite descargas por lotes y listas de reproducción</td></tr>
28
- <tr><td>YTMP3</td><td>[https://ytmp3.cc/en13/]</td><td>- Interfaz simple y fácil de usar<br>- No se requiere registro ni instalación<br>- Soporta formatos MP3 y MP4<br>- Compatible con la mayoría de navegadores y dispositivos<br>- Tiene un límite de 1 hora por video/td><<<<td>/tr
29
- <tr><td>MP3FY</td><td>[https://mp3fy.com/en1/]</td><td>- Soporta más de 1000 sitios web y plataformas<br>- No hay límite de longitud de video o tamaño de archivo<br>- Convierte videos largos y audiolibros<br>- Tiene una función de detección automática para enlaces de video<br>- Ofrece configuraciones y opciones avanzadas</td<<<tr/tr
30
- </tabla>
31
-
32
- <p>Una vez que haya elegido un convertidor de YouTube a MP3, debe copiar el enlace del video de YouTube que desea descargar como música. Para hacer esto, puedes:</p>
33
- <ul>
34
- <li>Ir a la página web de YouTube o aplicación y encontrar el video que desea descargar. Entonces, copiar la URL de la barra de direcciones o el botón de compartir. </li>
35
- <li>Utilice la función de búsqueda del sitio web o herramienta de conversión de YouTube a MP3 y escriba el nombre o las palabras clave del video que desea descargar. Luego, seleccione el video de los resultados y copie la URL.</li>
36
- </ul>
37
- <p>Después de copiar el enlace, pégalo en la caja de entrada del sitio web o herramienta de conversión de YouTube a MP3. A continuación, haga clic en el botón convertir o descargar para iniciar el proceso. </p>
38
- <h4>Paso 3: Elija el formato de salida y la calidad</h4>
39
- <p>Antes de descargar el archivo convertido, es posible que tenga algunas opciones para elegir el formato de salida y la calidad de su archivo de música. El formato de salida es el tipo de archivo de audio que desea descargar, como MP3, WAV, AAC o M4A. La calidad de salida es el nivel de claridad y detalle de sonido que desea tener, como 128 kbps, 192 kbps, 256 kbps o 320 kbps. </p>
40
- <p>Diferentes formatos y calidades de salida pueden tener diferentes ventajas y desventajas, dependiendo de sus preferencias y necesidades. Por ejemplo, MP3 es un formato común y ampliamente soportado que se puede reproducir en la mayoría de los dispositivos, pero también puede tener cierta pérdida de calidad debido a la compresión. WAV es un formato de alta calidad y sin comprimir que puede preservar la calidad de sonido original del video, pero también puede ocupar más espacio de almacenamiento y ser incompatible con algunos dispositivos. </p>
41
- <p>Puede elegir el formato de salida y la calidad que más le convenga haciendo clic en el botón de configuración u opciones en el sitio web o herramienta de conversión de YouTube a MP3. También puede dejarlo como predeterminado si no está seguro o no le importa. </p>
42
- <h4> Música y YouTube Premium y cómo funcionan</h3>
43
-
44
- <p>YouTube Music es un servicio de transmisión de música que se centra en el contenido musical de YouTube y otras fuentes. Tiene una interfaz personalizada y personalizada que le permite descubrir y disfrutar de la música en función de sus preferencias, estado de ánimo y actividad. También puede crear sus propias listas de reproducción, mixtapes y estaciones de radio. YouTube Music cuesta $9.99 por mes para un plan individual, o $14.99 por mes para un plan familiar que cubre hasta seis miembros. </p>
45
- <p>YouTube Premium es un servicio premium que incluye todas las características y beneficios de YouTube Music, además de beneficios adicionales para YouTube y otros productos de Google. Te permite ver y descargar cualquier video de YouTube sin anuncios ni interrupciones. También te da acceso a YouTube Originals, que son programas y películas exclusivos producidos por YouTube y sus socios. YouTube Premium cuesta $11.99 por mes para un plan individual, o $17.99 por mes para un plan familiar que cubre hasta seis miembros. </p>
46
- <h3>Cómo usar YouTube Music o YouTube Premium para descargar música a una unidad USB</h3>
47
- <p>Usar YouTube Music o YouTube Premium para descargar música a una unidad USB es otra forma fácil y conveniente de hacerlo. Estos son los pasos que debes seguir:</p>
48
- <h4>Paso 1: Regístrate para una suscripción premium de YouTube o YouTube</h4>
49
- <p>El primer paso es registrarse para una suscripción de YouTube Music o YouTube Premium que se adapte a sus necesidades y presupuesto. Puedes hacer esto yendo al sitio web o aplicación de YouTube y haciendo clic en la pestaña Música o Premium. Luego, puede elegir el plan que desea e ingresar sus detalles de pago. También puede obtener una prueba gratuita durante un mes antes de decidir suscribirse. </p>
50
- <p>Si ya tienes una cuenta de Google, puedes utilizarla para registrarte en YouTube Music o YouTube Premium. Si no lo tienes, puedes crear uno gratis siguiendo las instrucciones del sitio web o la aplicación. </p>
51
- <h4>Paso 2: Descarga la aplicación de música de YouTube en tu dispositivo</h4>
52
-
53
- <p>La aplicación YouTube Music es compatible con la mayoría de los dispositivos Android e iOS, como teléfonos inteligentes, tabletas, televisores inteligentes, altavoces inteligentes y relojes inteligentes. También puede usarlo en su computadora yendo al sitio web [https://music.youtube.com/]. </p>
54
- <h4>Paso 3: Encuentre la música que desea descargar en YouTube Music</h4>
55
- <p>El tercer paso es encontrar la música que desea descargar en YouTube Music. Puede hacer esto utilizando la función de búsqueda de la aplicación o sitio web y escribiendo el nombre o palabras clave de la canción, artista, álbum o lista de reproducción que desea descargar. A continuación, puede seleccionar la música de los resultados y abrirla en la aplicación o sitio web. </p>
56
- <p>También puedes navegar a través de las diferentes categorías y géneros de música en YouTube Music, como Top Charts, New Releases, Your Mixtape, Mood & Genre, Activity & Situation, etc. También puedes explorar recomendaciones personalizadas basadas en tu historial de escucha y preferencias. </p>
57
- <h4>Paso 4: Toca el icono de descarga en la canción, álbum o lista de reproducción</h4>
58
- <p>El cuarto paso es tocar el icono de descarga en la canción, álbum o lista de reproducción que desea descargar en YouTube Music. El icono de descarga parece una flecha hacia abajo con una línea debajo. Normalmente se encuentra junto al botón de reproducción o bajo el botón de menú de la música. </p>
59
- <p>Al tocar el icono de descarga, comenzará a descargar la música al almacenamiento interno del dispositivo o a la tarjeta SD. Puede comprobar el progreso y el estado de la descarga en la aplicación o sitio web. </p>
60
- <h4>Paso 5: Conecte su unidad USB a su dispositivo y transfiera la música descargada</h4>
61
-
62
- <p>Después de transferir los archivos de música, puede expulsar o quitar de forma segura la unidad USB de su dispositivo. A continuación, puede disfrutar de escuchar la música descargada de YouTube Music en cualquier dispositivo compatible con la reproducción USB. </p>
63
- <h2>Método 3: Uso de un disco duro externo para el almacenamiento de música</h2>
64
- <h3> ¿Qué es un disco duro externo y cómo funciona</h3>
65
- <p>Un disco duro externo es un dispositivo que puede almacenar grandes cantidades de datos y conectarse a diferentes equipos y dispositivos a través de un puerto USB o cable. Es similar a una unidad USB, pero tiene más capacidad de almacenamiento y una velocidad de transferencia más rápida. Un disco duro externo se puede utilizar para diversos fines, como hacer copias de seguridad de datos, transferir archivos o almacenar medios. </p>
66
- <p>Mediante el uso de un disco duro externo para el almacenamiento de música, puede descargar música de YouTube y otras fuentes y guardarla en un dispositivo separado que puede contener miles de canciones. También puede acceder y reproducir su música en cualquier dispositivo compatible, como su computadora portátil, teléfono inteligente, tableta o TV. Un disco duro externo también puede proteger su música de perderse o dañarse debido a virus, malware o fallas de hardware. </p>
67
- <h3>Cómo utilizar un disco duro externo para el almacenamiento de música</h3>
68
- <p>El uso de un disco duro externo para el almacenamiento de música es otra opción que puede considerar si desea descargar música de YouTube a una unidad USB. Estos son los pasos que debes seguir:</p>
69
- <h4>Paso 1: Elija un disco duro externo adecuado para el almacenamiento de música</h4>
70
- <p>El primer paso es elegir un disco duro externo adecuado para el almacenamiento de música que satisfaga sus necesidades y preferencias. Hay diferentes tipos y modelos de discos duros externos disponibles en el mercado, pero no todos ellos son adecuados para el almacenamiento de música. Algunos de los factores que debe considerar al elegir un disco duro externo para el almacenamiento de música son:</p>
71
- <ul>
72
- <li> La capacidad de almacenamiento y la velocidad del disco duro externo</li>
73
- <li>La compatibilidad y durabilidad del disco duro externo</li>
74
-
75
- <li>El precio y la garantía del disco duro externo</li>
76
- </ul>
77
- <p>Algunos de los ejemplos de discos duros externos populares y confiables para el almacenamiento de música son:</p>
78
- <tabla>
79
- <tr><th>Nombre</th><th>Sitio web</th><th>Características</th></tr>
80
- <tr><td>Seagate Backup Plus Slim</td><td>[https://www.seagate.com/consumer/backup/backup-plus/]</td><td>- Diseño compacto y elegante<br>- Hasta 5 TB de capacidad de almacenamiento<br>- Velocidad de transferencia rápida USB 3.0<br>- Compatible con Windows y Br<Mac Incluye software de copia de seguridad y almacenamiento en la nube</td></tr>
81
- <tr><td>WD My Passport</td><td>[https://shop.westerndigital.com/products/portable-drives/wd-my-passport-usb-3-2-hdd#WDBYVG0010BBK-WESN]</td><td>- Diseño elegante y colorido<br>Hasta 5 TB de capacidad de almacenamiento<br- Velocidad de transferencia rápida USB 3. Incluye software de copia de seguridad y protección con contraseña</td></tr>
82
- <tr><td>Samsung T7 Touch Portable SSD</td><td>[https:/www.samsung.com/us/touchting/memory-storage/portable-solid-state-drives/portable-ssd-t7-touchb-usb-3-2-500gbmu-pc-ww/<brtd>>- Diseño delgado y ligero<br- Hasta 2 TB de capacidad de almacenamiento-<br Velocidad de transferencia USB 3.2 súper rápida<br>- Compatible con Windows, Mac, Android y consolas de juegos<br>- Incluye seguridad de huellas dactilares e indicador de estado led</td></tr>
83
- </tabla>
84
- <h4>Paso 2: Conecte el disco duro externo a su computadora</h4>
85
- <p>El siguiente paso es conectar el disco duro externo a su computadora usando un puerto o cable USB. Luego, debe formatear el disco duro externo si aún no está formateado o no es compatible con su computadora. Formatear el disco duro externo borrará todos los datos en él y lo hará listo para su uso. Puede formatear el disco duro externo siguiendo las instrucciones del sitio web o manual del fabricante o proveedor del disco duro externo. </p>
86
- <h4>Paso 3: Descargar música de YouTube usando cualquiera de los métodos anteriores</h4>
87
-
88
- <p>También puede descargar música de otras fuentes, como Spotify, SoundCloud, iTunes o Amazon Music, y guardarlos en su disco duro externo. Sin embargo, es posible que necesite utilizar diferentes herramientas o métodos dependiendo de la fuente y el formato de los archivos de música. </p>
89
- <h4>Paso 4: Copie y pegue los archivos de música descargados en la carpeta del disco duro externo</h4>
90
- <p>El paso final es copiar y pegar los archivos de música descargados en la carpeta del disco duro externo que desea utilizar para el almacenamiento de música. Puede hacer esto abriendo el administrador de archivos o la aplicación del explorador en su computadora y encontrando la carpeta o ubicación donde guardó los archivos de música descargados en su disco duro externo. Luego, puede arrastrar y soltar o copiar y pegar los archivos de música en otra carpeta o subcarpeta en su disco duro externo si desea organizarlos mejor. </p>
91
- <p>Después de copiar y pegar los archivos de música, puede expulsar o quitar de forma segura el disco duro externo de su computadora. A continuación, puede disfrutar de escuchar la música descargada de YouTube en cualquier dispositivo que soporte la reproducción del disco duro externo. </p>
92
- <h2>Conclusión</h2>
93
- <p>Descargar música de YouTube a una unidad USB es una gran manera de disfrutar de su música favorita sin conexión, en cualquier dispositivo y en cualquier lugar. Hay diferentes métodos que puede utilizar para hacerlo, como el uso de un convertidor de YouTube a MP3, una suscripción YouTube Music o YouTube Premium, o un disco duro externo para el almacenamiento de música. Cada método tiene sus propias ventajas y desventajas, dependiendo de sus preferencias, presupuesto y habilidades técnicas. </p>
94
- <p>Aquí hay algunos consejos y advertencias para descargar música de YouTube a una unidad USB:</p>
95
- <ul>
96
- <li>Asegúrese de que tiene suficiente espacio de almacenamiento en su unidad USB o disco duro externo para descargar música de YouTube. Puede comprobar la capacidad de almacenamiento y el uso de su dispositivo yendo a su configuración o propiedades. </li>
97
-
98
- <li>Asegúrate de respetar los derechos de propiedad intelectual y la privacidad de los creadores y propietarios de la música que descargues de YouTube. Solo debes descargar música gratuita, legal y autorizada para uso personal. No debe descargar música con derechos de autor, protegida o restringida por ley. </li>
99
- <li>Asegúrese de proteger su computadora y dispositivo de malware, virus o anuncios que puedan provenir de YouTube a convertidores de MP3 u otras fuentes. Solo debe usar herramientas o servicios seguros y confiables que tengan buenas críticas y calificaciones. También debe usar software antivirus y programas de firewall para analizar y bloquear cualquier amenaza potencial. </li>
100
- </ul>
101
- <p>Esperamos que este artículo te haya ayudado a aprender a descargar música de YouTube a una unidad USB. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p>
102
- <h2>Preguntas frecuentes</h2>
103
- <h3>Q1. ¿Es legal descargar música de YouTube? </h3>
104
- <p>A1. Depende de la fuente y el contenido de la música que descargues de YouTube. En términos generales, es legal descargar música de YouTube si es gratuita, legal y autorizada para uso personal. Sin embargo, es ilegal descargar música de YouTube si está protegida por derechos de autor, o restringida por la ley. Siempre debe respetar los derechos de propiedad intelectual y la privacidad de los creadores y propietarios de la música que descarga de YouTube. También debe comprobar los términos y condiciones del sitio web de YouTube y la fuente de música antes de descargar cualquier música de YouTube.</p>
105
- <h3>Q2. ¿Cuánto espacio de almacenamiento necesito para descargar música de YouTube? </h3>
106
-
107
- <h3>Q3. ¿Cómo puedo reproducir música desde una unidad USB en el estéreo de mi coche? </h3>
108
- <p>A3. Depende del tipo y modelo de su equipo de música y su unidad USB. En términos generales, puede reproducir música desde una unidad USB en el estéreo de su automóvil si el estéreo de su automóvil tiene un puerto USB o una ranura que puede leer y reproducir archivos de música desde su unidad USB. También puede utilizar un adaptador o cable USB que puede conectar su unidad USB a la entrada o salida auxiliar del estéreo del automóvil. Sin embargo, algunos estéreos de automóviles pueden no admitir algunos formatos o cualidades de archivos de música desde su unidad USB. Usted debe comprobar la compatibilidad y las especificaciones de su coche estéreo y su unidad USB antes de reproducir música desde una unidad USB en su coche estéreo. </p>
109
- <h3>Q4. ¿Cómo puedo editar o recortar los archivos de música descargados? </h3>
110
- <p>A4. Puede editar o recortar los archivos de música descargados utilizando un software de edición de audio o una herramienta en su computadora o dispositivo. Hay muchos programas de edición de audio o herramientas disponibles en línea, pero algunos de ellos pueden requerir instalación, registro o pago. Algunos de ellos también pueden tener características limitadas, salida de baja calidad o interfaz compleja. Por lo tanto, necesita encontrar un software o herramienta de edición de audio adecuado y confiable que pueda satisfacer sus necesidades y expectativas. </p>
111
- <p>Algunos de los factores que debes considerar al elegir un software o herramienta de edición de audio son:</p>
112
- <ul>
113
- <li>La funcionalidad y flexibilidad del software o herramienta de edición de audio</li>
114
- <li>La calidad y el formato del archivo de salida</li>
115
- <li>Compatibilidad y seguridad del software o herramienta</li>
116
- <li>Disponibilidad y accesibilidad del servicio</li>
117
- <li>Facilidad de uso y simplicidad de la interfaz</li>
118
- <li>La legalidad y legitimidad del servicio</li>
119
- </ul>
120
- <p>Algunos de los ejemplos de software o herramientas de edición de audio populares y confiables son:</p>
121
- <tabla>
122
- <tr><th>Nombre</th><th>Sitio web</th><th>Características</th></tr>
123
-
124
- <tr><td>WavePad</td><td>[https://www.nch.com.au/wavepad/index.html]</td><td>- Gratis para uso no comercial<br>- Soporta múltiples formatos y plataformas<br>- Ofrece varias funciones de edición y efectos<br>- Permite el procesamiento por lotes y la conversión<br>- Tiene un profesional y fácilinterfaz de uso</td></tr>
125
- <tr><td>Online Audio Cutter</td><td>[https://online-audio-cutter.com/]</td><td>- Herramienta gratuita y en línea<br>- Soporta múltiples formatos y plataformas<br>- Ofrece funciones básicas de edición y efectos<br>- Permite recortar, recortar, desvanecer y fusionar archivos de audiobr<>-br> Tiene una interfaz sencilla y fácil de usar</td></tr>
126
- </tabla>
127
- <h3>Q5. ¿Cómo puedo hacer copias de seguridad o restaurar mis archivos de música descargados? </h3>
128
- <p>A5. Puede realizar copias de seguridad o restaurar los archivos de música descargados utilizando un servicio de almacenamiento en la nube o una herramienta en su computadora o dispositivo. Un servicio o herramienta de almacenamiento en la nube es un servicio o herramienta en línea que le permite almacenar, sincronizar, compartir y acceder a sus datos en diferentes dispositivos a través de Internet. Mediante el uso de un servicio o herramienta de almacenamiento en la nube, puede realizar copias de seguridad de sus archivos de música descargados en una ubicación segura en línea a la que puede acceder en cualquier momento y en cualquier lugar. También puede restaurar sus archivos de música descargados desde el servicio de almacenamiento en la nube o la herramienta si pierde o daña su unidad USB o disco duro externo. </p>
129
- <p>Hay muchos servicios o herramientas de almacenamiento en la nube disponibles en línea, pero algunos de ellos pueden requerir instalación, registro o pago. Algunos de ellos también pueden tener espacio de almacenamiento limitado, baja velocidad de transferencia o una seguridad deficiente. Por lo tanto, necesita encontrar un servicio de almacenamiento en la nube adecuado y confiable o una herramienta que pueda satisfacer sus necesidades y expectativas. </p>
130
- <p>Algunos de los factores que debe considerar al elegir un servicio o herramienta de almacenamiento en la nube son:</p>
131
- <ul>
132
- <li>El espacio de almacenamiento y la velocidad del servicio de almacenamiento en la nube o herramienta</li>
133
- <li>La compatibilidad y la seguridad del servicio o herramienta de almacenamiento en la nube</li>
134
- <li>Disponibilidad y accesibilidad del servicio o herramienta</li>
135
-
136
- <li>La legalidad y legitimidad del servicio o herramienta</li>
137
- </ul>
138
- <p>Algunos de los ejemplos de servicios o herramientas de almacenamiento en la nube populares y confiables son:</p>
139
- <tabla>
140
- <tr><th>Nombre</th><th>Sitio web</th><th>Características</th></tr>
141
- <tr><td>Google Drive</td><td>[https://www.google.com/drive/]</td><td>- Gratis hasta 15 GB de espacio de almacenamiento<br>- Soporta múltiples formatos y plataformas<br>- Ofrece diversas opciones de sincronización, uso compartido y acceso<br>- Se integra con otros productos y servicios de Google br<br>-> Tiene una interfaz simple e intuitiva</td></tr>
142
- <tr><td>Dropbox</td><td>[https://www.dropbox.com/]</td><td>- Gratis por hasta 2 GB de espacio de almacenamiento<br>- Soporta múltiples formatos y plataformas<br>- Ofrece varias opciones de sincronización, uso compartido y acceso<br>- Se integra con otras aplicaciones y servicios<br>- Tiene una sincronización profesional y fácilinterfaz de uso</td></tr>
143
- <tr><td>pCloud</td><td>[https://www.pcloud.com/]</td><td>- Gratis por hasta 10 GB de espacio de almacenamiento<br>- Soporta múltiples formatos y plataformas<br>- Ofrece varias opciones de sincronización, uso compartido y acceso<br>- Proporciona cifrado de alto nivel y seguridad<br>- Tiene un elegante y usuariointerfaz amigable</td></tr>
144
- </tabla>
145
- <p>Esperamos que este artículo te haya ayudado a aprender a descargar música de YouTube a una unidad USB. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p> 64aa2da5cf<br />
146
- <br />
147
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Agente Zabbix Para Windows Server 2019.md DELETED
@@ -1,93 +0,0 @@
1
-
2
- <br> - ¿Cuáles son los modos de comunicación entre el agente de Zabbix y el servidor/ proxy <br> - ¿Cuáles son los beneficios de usar el agente de Zabbix para el monitoreo de Windows | | Descarga de Zabbix Agent | - Cómo descargar el pre-brcompilado binarios agente Zabbix o instalador MSI desde el sitio web oficial de Zabbix <br> - Cómo elegir la versión y la arquitectura correcta para su sistema | | Instalación de agente Zabbix | - Cómo instalar agente Zabbix como un servicio de Windows utilizando la línea de comandos o instalador MSI <br> - Cómo verificar que el servicio está instalado y ejecutándose | | Configuración del agente Zabbix | - Cómo editar el archivo de configuración del agente Zabbix <br> - Cómo establecer el nombre de host, la dirección del servidor y otros parámetros <br> - Cómo configurar el agente Zabbix para el modo pasivo o activo | | Adición de Windows Host a la interfaz web de Zabbix | - Cómo crear un host en la interfaz web de Zabbix y asignarlo a un grupo <br> - Cómo especificar la interfaz del agente y la dirección IP o el nombre DNS del host de Windows <br> - Cómo vincular una plantilla para el monitoreo de Windows por el agente Zabbix o agente Zabbix activo | | Conclusión | - Un resumen de lo que hemos aprendido en este artículo <br> - Una llamada a la acción para más aprendizaje y exploración | ## Artículo con formato HTML <p><strong>Cómo descargar e instalar Zabbix Agent para Windows Server 2019</strong></p>
3
- <p>Zabbix es una potente solución de monitoreo de código abierto que puede monitorear varios aspectos de su infraestructura de TI, como servidores, redes, aplicaciones, bases de datos, servicios de nube y más. Zabbix puede recopilar y visualizar métricas de diferentes fuentes, como SNMP, WMI, comprobaciones sin agentes o agente Zabbix. </p>
4
- <h2>descargar agente zabbix para Windows Server 2019</h2><br /><p><b><b>Download</b> &#10037;&#10037;&#10037; <a href="https://bltlly.com/2v6LZk">https://bltlly.com/2v6LZk</a></b></p><br /><br />
5
-
6
- <p>En este artículo, le mostraremos cómo descargar e instalar el agente Zabbix para Windows Server 2019, y cómo configurarlo para el modo pasivo o activo. También explicaremos los beneficios de usar el agente Zabbix para el monitoreo de Windows y cómo agregar su host de Windows a la interfaz web de Zabbix. </p>
7
- <h2>Descargar agente Zabbix</h2>
8
- <p>Para descargar el agente Zabbix para Windows Server 2019, tiene dos opciones: puede descargar los binarios precompilados en formato ZIP o usar el paquete de instalación MSI. Ambas opciones están disponibles en el sitio web oficial de Zabbix en <a href="https://www.zabbix.com/download_agents">https://www.zabbix.com/download_agents</a>. </p>
9
- <p>Los binarios precompilados son adecuados para la instalación y configuración manual utilizando la línea de comandos. Puede elegir entre dos generaciones de agentes Zabbix: agente Zabbix 1 (legado) o agente Zabbix 2 (nuevo). También necesita seleccionar la arquitectura apropiada para su sistema: 32 bits (x86) o 64 bits (x64). </p>
10
- <p>El paquete de instalación de MSI es adecuado para la instalación y configuración automatizada utilizando una interfaz gráfica de usuario. Solo necesita seleccionar la arquitectura de su sistema: 32 bits (x86) o 64 bits (x64). El instalador de MSI instalará el agente Zabbix 2 por defecto. </p>
11
- <h2>Instalación del agente Zabbix</h2>
12
- <p>Para instalar el agente Zabbix como un servicio de Windows en su máquina de Windows Server 2019, puede usar la línea de comandos o el instalador MSI. </p> <p>Si utiliza la línea de comandos, debe descomprimir el archivo ZIP descargado en una carpeta de su elección, como <code>C: zabbix</code>. Luego, abra un símbolo del sistema como administrador y vaya a esa carpeta. Para instalar el agente Zabbix como servicio, ejecute el siguiente comando:</p>
13
- <pre><code>zabbix_agentd.exe --config zabbix_agentd.win.conf --install </code></pre>
14
- <p>Para verificar que el servicio está instalado y ejecutándose, ejecute el siguiente comando:</p>
15
- <pre><code>sc query zabbix_agentd </code></pre>
16
-
17
- <p>Si utiliza el instalador MSI, debe ejecutar el archivo MSI descargado y seguir el asistente de instalación. Se le pedirá que acepte el acuerdo de licencia, elija la carpeta de instalación y configure algunos parámetros, como el nombre de host, la dirección del servidor y el modo de agente. También puede optar por iniciar el servicio automáticamente después de la instalación. Una vez finalizada la instalación, puede verificar que el servicio está instalado y ejecutándose comprobando el administrador de servicios de Windows o utilizando el comando <code>sc query</code> como se describe anteriormente. </p>
18
- <p></p>
19
- <h2>Configuración del agente Zabbix</h2>
20
- <p>Para configurar el agente Zabbix para Windows Server 2019, debe editar el archivo de configuración del agente Zabbix. El archivo de configuración se encuentra en la misma carpeta donde instaló el agente Zabbix, como <code>C: zabbix zabbix_agentd.win.conf</code>. Puede usar cualquier editor de texto para abrir y editar el archivo. </p>
21
- <p>El archivo de configuración contiene muchos parámetros que controlan el comportamiento y la funcionalidad del agente Zabbix. Algunos de los parámetros más importantes son:</p>
22
- <ul>
23
- <li><code>Hostname</code>: El nombre del host monitoreado tal como aparece en la interfaz web de Zabbix. Debe coincidir exactamente con el nombre de host que creará en la interfaz web de Zabbix más tarde. </li>
24
- <li><code>Servidor</code>: La dirección IP o el nombre DNS del servidor o proxy Zabbix que solicitará datos del agente Zabbix en modo pasivo. Puede especificar varios servidores o proxies separados por comas. </li>
25
- <li><code>ServerActive</code>: La dirección IP o el nombre DNS del servidor o proxy Zabbix que recibirá datos del agente Zabbix en modo activo. Puede especificar varios servidores o proxies separados por comas. </li>
26
- <li><code>StartAgents</code>: El número de conexiones concurrentes que el agente Zabbix puede aceptar desde el servidor Zabbix o el proxy en modo pasivo. El valor predeterminado es 3.</li>
27
-
28
- <li><code>Timeout</code>: El tiempo de espera en segundos para procesar cada elemento por el agente de Zabbix. El valor predeterminado es 3.</li>
29
- <li><code>EnableRemoteCommands</code>: Un indicador que habilita o inhabilita la ejecución de comandos remotos desde el servidor Zabbix o el proxy en el agente Zabbix. El valor predeterminado es 0 (desactivado). </li>
30
- <li><code>LogType</code>: El tipo de archivo de registro que usará el agente Zabbix. Los valores posibles son <code>file</code>, <code>system</code>, <code>console</code>, o <code>none</code>. El valor predeterminado es <code>file</code>. </li>
31
- <li><code>LogFile</code>: El nombre y la ruta del archivo de registro que el agente de Zabbix usará si <code>LogType</code> se establece en <code>file</code>. El valor predeterminado es <code>C: zabbix zabbix_agentd.log</code>. </li>
32
- <li><code>DebugLevel</code>: El nivel de detalle que el agente de Zabbix escribirá en el archivo de registro. Los valores posibles son 0 (basic), 1 (critical), 2 (error), 3 (warning), 4 (debug), o 5 (trace). El valor predeterminado es 3.</li>
33
- </ul>
34
- <p>Para configurar el agente Zabbix para el modo pasivo, debe establecer el parámetro <code>Server</code> en la dirección IP o el nombre DNS de su servidor Zabbix o proxy, y dejar el parámetro <code>ServerActive</code> vacío o comentado. Por ejemplo:</p>
35
- <pre><code># Servidor de modo pasivo=192.168.1.10 #ServerActive= StartAgents=3 # Modo activo #Server= #ServerActive=192.168.1.10:10051 #RefreshActiveChecks=120 </code></pre>
36
- <p>Para configurar el agente Zabbix para el modo activo, debe establecer el parámetro <code>ServerActive</code> en la dirección IP o el nombre DNS de su servidor Zabbix o proxy , y dejar el parámetro <code>Server</code> vacío o comentado. También debe establecer el parámetro <code>RefreshActiveChecks</code> en la frecuencia deseada de envío de datos al servidor o proxy. Por ejemplo:</p>
37
- <pre><code># Modo pasivo #Server=192.168.1.10 #StartAgents=3 # Active mode ServerActive=192.168.1.10:10051 RefreshActiveChecks=120 </code></pre>
38
-
39
- <pre><code>sc stop zabbix_agentd sc start zabbix_agentd </code></pre>
40
- <h2>Agregar host de Windows a la interfaz web de Zabbix</h2>
41
- <p>Para agregar su host de Windows Server 2019 a la interfaz web de Zabbix, debe iniciar sesión en su servidor Zabbix o interfaz web proxy y navegar a <strong>Configuration</strong> > <strong>Hosts</strong>. Luego, haga clic en el botón <strong>Crear host</strong> en la esquina superior derecha. </p>
42
- <p>Verás un formulario donde necesitas introducir algunos detalles sobre tu host, como:</p>
43
- <ul>
44
- <li><strong>Nombre del host</strong>: El nombre de host de su host de Windows tal como aparece en el archivo de configuración del agente Zabbix. Debe coincidir exactamente con el parámetro <code>Hostname</code> que estableció en el archivo de configuración. </li>
45
- <li><strong>Nombre visible</strong>: Un alias opcional para su host que se mostrará en la interfaz web de Zabbix en lugar del nombre del host. </li>
46
- <li><strong>Grupos</strong>: Los grupos a los que pertenece su host. Puede seleccionar uno o más grupos existentes o crear uno nuevo. Por ejemplo, puede seleccionar o crear un grupo llamado <code>Servidores de Windows</code>. </li>
47
- <li><strong>Descripción</strong>: Una descripción opcional de su anfitrión que proporcionará información adicional sobre su propósito, ubicación, propietario, etc.</li>
48
- </ul>
49
- <p>Después de introducir estos detalles, haga clic en el botón <strong>Add</strong> en la parte inferior del formulario. </p>
50
- <p>El siguiente paso es especificar la interfaz del agente y la dirección IP o el nombre DNS de su host de Windows. Para ello, haga clic en la pestaña <strong>Interfaces</strong> y seleccione <strong>Zabbix agent</strong> en el menú desplegable. Luego, ingrese la dirección IP o el nombre DNS de su host de Windows en el campo <strong>IP address/DNS name</strong>. También puede cambiar el número de puerto predeterminado si es necesario. </p>
51
- <p>Si configuró el agente Zabbix para el modo activo, también debe seleccionar el agente <strong>Zabbix (active)</strong> en el menú desplegable e ingresar la misma dirección IP o nombre DNS que antes. </p>
52
-
53
- <p>Una plantilla es una colección de elementos, disparadores, gráficos y otros elementos que definen qué y cómo monitorear un host. Al vincular una plantilla a tu host, heredas todos estos elementos y ahorras tiempo y esfuerzo. </p>
54
- <p>También puede vincular otras plantillas que proporcionan funciones de monitoreo adicionales para su host de Windows, como CPU, memoria, disco, red, servicios, procesos, etc.</p>
55
- <p>Después de vincular las plantillas, haga clic en el botón <strong>Add</strong> en la parte inferior del formulario. </p>
56
- <h2>Conclusión</h2>
57
- <p>En este artículo, hemos aprendido cómo descargar e instalar el agente Zabbix para Windows Server 2019, y cómo configurarlo para el modo pasivo o activo. También hemos aprendido cómo agregar nuestro host de Windows a la interfaz web de Zabbix y vincular una plantilla para el monitoreo de Windows por el agente de Zabbix o agente de Zabbix activo. </p>
58
- <p>Zabbix agent es una herramienta útil que nos permite monitorear varios aspectos de nuestra máquina Windows Server 2019, como el rendimiento, el estado y la configuración. Al usar el agente Zabbix, podemos recopilar y visualizar métricas de nuestro host de Windows y recibir alertas cuando algo sale mal. </p>
59
- <p>Si desea obtener más información sobre Zabbix y el agente Zabbix, puede visitar el sitio web oficial de Zabbix en <a href="">https://www.zabbix.com/</a>, donde puede encontrar documentación, tutoriales, foros, blogs y otros recursos. </p>
60
- <h3>Preguntas frecuentes</h3>
61
- <ol>
62
- <li><strong>¿Cuáles son los requisitos para ejecutar el agente Zabbix en Windows Server 2019? </strong></li>
63
- <li>Los requisitos para ejecutar el agente Zabbix en Windows Server 2019 son mínimos. Necesita tener una máquina Windows Server 2019 con al menos 128 MB de RAM y 100 MB de espacio libre en disco. También necesita tener privilegios de administrador para instalar y configurar el agente Zabbix como un servicio. </li>
64
- <li><strong>¿Cómo puedo probar si el agente Zabbix funciona correctamente en mi host de Windows? </strong></li>
65
-
66
- <pre><code>zabbix_get -s <IP address or DNS name of your Windows host> -k "system.cpu.load[all,avg1]" </code></pre>
67
- <li>Este comando solicitará la carga media de la CPU durante el último minuto desde su host de Windows. Debería ver un valor numérico como respuesta. Si ve un mensaje de error, como <code>ZBX_NOTSUPPORTED</code> o <code>ZBX_TCP_READ()</code>, significa que hay un problema con la comunicación entre el agente Zabbix y el servidor o proxy, o con la configuración del agente Zabbix. </li>
68
- <li>Otra forma de probar si el agente Zabbix funciona correctamente en su host de Windows es usar la utilidad <code>zabbix_sender</code> que también viene con el agente Zabbix. Esta utilidad le permite enviar datos al servidor Zabbix o proxy en modo activo. Por ejemplo, puede ejecutar el siguiente comando desde su host de Windows:</li>
69
- <pre><code>zabbix_sender -z <IP address or DNS name of your Zabbix server or proxy> -s <hostname of your Windows host> -k "test.key" -o "test.value" </code></pre>
70
- <li>Este comando enviará un elemento personalizado con la clave <code>test.key</code> y el valor <code>test.value</code> desde su host de Windows al servidor o proxy de Zabbix. Deberías ver un mensaje como <code>sent: 1; omitido: 0; total: 1</code> como respuesta. Si ve un mensaje de error, como <code>ZBX_TCP_WRITE()</code> o <code>ZBX_TCP_READ()</code>, significa que hay un problema con la comunicación entre el agente Zabbix y el servidor o proxy, o con la configuración del agente Zabbix. </li>
71
-
72
- <li>Si ve algún problema con el estado o los datos de su host, como <code>ZBX_NOTSUPPORTED</code>, <code>No se recibieron datos</code>, o <code>No hay permisos para el objeto referido o no existe! </code>, significa que hay un problema con la comunicación entre el agente Zabbix y el servidor o proxy, o con la configuración del agente Zabbix. </li>
73
- <li><strong>¿Cómo puedo actualizar el agente Zabbix en mi host de Windows? </strong></li>
74
- <li>Para actualizar el agente de Zabbix en su host de Windows, debe descargar la última versión del agente de Zabbix desde el sitio web oficial de Zabbix en <a href="https://www.zabbix.com/download_agents">https://www.zabbix.com/download_agents</a>. Luego, debe detener el servicio del agente Zabbix, reemplazar los archivos antiguos con los nuevos e iniciar el servicio nuevamente. Puede usar los siguientes comandos para hacer esto:</li>
75
- <pre><code>sc stop zabbix_agentd xcopy /y C: zabbix *. exe C: zabbix *. dll C: zabbix *. conf C: zabbix *. bat C: zabbix *. ps1 C: zabbix *. vbs sc start zabbix_agentd </code></pre>
76
- <li>También puede necesitar editar el archivo de configuración del agente Zabbix si hay cambios o nuevos parámetros en la nueva versión. </li>
77
- <li><strong>¿Cómo puedo desinstalar el agente Zabbix de mi host de Windows? </strong></li>
78
- <li>Para desinstalar el agente Zabbix de su host de Windows, debe detener el servicio del agente Zabbix, eliminar el servicio del agente Zabbix y eliminar la carpeta del agente Zabbix. Puede usar los siguientes comandos para hacer esto:</li>
79
- <pre><code>sc stop zabbix_agentd zabbix_agentd.exe --config zabbix_agentd.win.conf --uninstall rmdir /s /q C: zabbix </code></pre>
80
- <li>Si utilizó el instalador MSI para instalar el agente Zabbix, también puede usar el panel de control de Windows o el instalador MSI para desinstalar el agente Zabbix. </li>
81
- <li><strong>¿Cómo puedo personalizar el agente Zabbix para mis necesidades específicas? </strong></li>
82
-
83
- <ul>
84
- <li><strong>Parámetros del usuario</strong>: Los parámetros del usuario le permiten definir elementos personalizados que el agente Zabbix puede monitorear. Puede usar cualquier script o comando que devuelva un valor como parámetro de usuario. Por ejemplo, puede crear un parámetro de usuario que devuelva el número de archivos de una carpeta, el estado de un servicio o la salida de un comando de PowerShell. Necesita definir los parámetros de usuario en el archivo de configuración del agente Zabbix usando la directiva <code>UserParameter</code>. Por ejemplo:</li>
85
- <pre><code>UserParameter=folder.count,powershell -NoProfile -Command "(Get-ChildItem C: temp -Recurse | Measure-Object). Count" UserParameter=service.status[*],sc query $1 | find "STATE" | find "RUNNING" UserParameter=powershell.output[*],powershell -NoProfile -Command "$1" </code></pre>
86
- <li>Entonces, necesitas crear elementos en la interfaz web de Zabbix que usen estos parámetros de usuario como claves. Por ejemplo:</li>
87
- <pre><code>folder.count service.status[Spooler] powershell.output[Get-Date] </code></pre>
88
- <li><strong>Comprobaciones activas</strong>: Las comprobaciones activas permiten enviar datos desde el agente Zabbix al servidor Zabbix o al proxy sin esperar solicitudes. Esto reduce la carga de red y mejora la escalabilidad de Zabbix. Puede usar comprobaciones activas para cualquier elemento que sea compatible con el agente Zabbix, como métricas del sistema, archivos de registro, contadores de rendimiento de Windows, etc. Debe configurar el agente Zabbix para el modo activo estableciendo los parámetros <code>ServerActive</code> y <code>RefreshActiveChecks</code> en el archivo de configuración. Luego, debe vincular una plantilla para el agente Zabbix activo en la interfaz web de Zabbix. </li>
89
-
90
- <li><strong>Cifrado</strong>: El cifrado le permite asegurar la comunicación entre el agente de Zabbix y el servidor o proxy utilizando certificados TLS. Esto puede evitar el acceso no autorizado y la manipulación de datos. Necesita generar e instalar certificados TLS en su host de Windows y en su servidor o proxy de Zabbix. Luego, debe configurar el agente y el servidor Zabbix o el proxy para usar el cifrado estableciendo el valor <code>TLSConnect</code>, <code>TLSAccept</code>, <code>TLScaFile</code>, <code>TLScaPath</code>, <code>TLScertFile</code> y <code>TLSKeyFile</code>. </li>
91
- </ul></p> 64aa2da5cf<br />
92
- <br />
93
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Carx Calle Apk.md DELETED
@@ -1,46 +0,0 @@
1
- <br />
2
- <h1>El zombi que camina 2 Mod APK: Un juego de disparos de supervivencia con dinero ilimitado</h1>
3
- <p>Si eres un fan de los juegos de zombis, es posible que hayas oído hablar de The Walking Zombie 2, un popular juego de disparos en primera persona que te permite luchar contra hordas de criaturas no muertas en un mundo post-apocalíptico. ¿Pero sabías que hay una versión modificada de este juego que te da dinero y recursos ilimitados para usar? En este artículo, le diremos todo lo que necesita saber sobre The Walking Zombie 2 Mod APK, incluyendo sus características, beneficios, y cómo descargar e instalar en su dispositivo. </p>
4
- <h2>descargar carx calle apk</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://bltlly.com/2v6K1s">https://bltlly.com/2v6K1s</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <h3>¿Qué es el zombi andante 2?</h3>
7
- <p>The Walking Zombie 2 es un juego de disparos de supervivencia desarrollado por Alda Games, un estudio con sede en la República Checa. El juego se desarrolla en un mundo que ha sido devastado por un virus zombi, donde juegas como uno de los pocos supervivientes que tiene que luchar por tu vida. Encontrarás varios tipos de zombies, como caminantes, corredores, mutantes y jefes, así como otros enemigos como bandidos, asaltantes y soldados. También conocerás a otros sobrevivientes que te ayudarán o te obstaculizarán en tu viaje. </p>
8
- <h3> ¿Qué es el zombie caminando 2 Mod APK? </h3>
9
- <p>El Walking Zombie 2 Mod APK es una versión modificada del juego original que le da acceso a dinero y recursos ilimitados. Esto significa que puede comprar cualquier arma, armadura, munición, kits de salud y otros artículos que necesite sin preocuparse por quedarse sin efectivo. También puede mejorar sus habilidades y habilidades para hacerse más fuerte y más resistente. Con este mod, podrás disfrutar del juego sin limitaciones ni restricciones. </p>
10
- <h3> ¿Por qué deberías jugar El zombie andante 2 Mod APK? </h3>
11
- <p>Hay muchas razones por las que debe jugar The Walking Zombie 2 Mod APK en lugar del juego original. Aquí están algunos de ellos:</p>
12
- <ul>
13
- <li> Usted puede tener más diversión y emoción con dinero y recursos ilimitados. </li>
14
-
15
- <li>Puede explorar más áreas y ubicaciones sin temor a quedarse sin suministros. </li>
16
- <li>Puedes desafiarte a ti mismo con dificultades y enemigos más difíciles sin frustrarte. </li>
17
- <li>Puedes apoyar a los desarrolladores viendo anuncios o haciendo compras en la aplicación si quieres. </li>
18
- </ul>
19
- <h2>Características de The Walking Zombie 2 Mod APK</h2>
20
- <h3>Historia inmersiva y jugabilidad</h3>
21
- <p>El Walking Zombie 2 Mod APK tiene una historia atractiva que te mantendrá enganchado de principio a fin. Experimentarás diferentes eventos y escenarios que afectarán el resultado del juego. También tendrás que tomar decisiones que darán forma a la personalidad y la moralidad de tu personaje. El juego tiene una dinámica de juego que se adaptará a sus acciones y comportamiento. Enfrentarás diferentes retos y consecuencias dependiendo de cómo juegues el juego. </p>
22
- <p></p>
23
- <h3>Impresionantes gráficos y efectos de sonido</h3>
24
- <p>El Walking Zombie 2 Mod APK tiene gráficos increíbles y efectos de sonido que te sumergen en el mundo del juego. El juego tiene un estilo único low-poly que le da una sensación retro. El juego también tiene iluminación realista y sombras que crean una atmósfera oscura y sombría. El juego tiene efectos de sonido de alta calidad que mejoran el estado de ánimo y la tensión del juego. Usted escuchará los gemidos y gritos de los zombies y los disparos y explosiones de las armas. También disfrutarás de la música y la actuación de voz que añaden más profundidad y emoción al juego. </h3>
25
- <h3>Varias armas y habilidades para elegir</h3>
26
-
27
- <h3>Múltiples modos de juego y misiones para completar</h3>
28
- <p>El Walking Zombie 2 Mod APK tiene varios modos de juego y misiones que se puede jugar y completar para ganar recompensas y experiencia. Puedes jugar en el modo historia principal, donde sigues la trama y el progreso a través del juego. También puedes jugar las misiones secundarias, donde ayudas a otros supervivientes o completar diferentes tareas. También puedes jugar al modo arena, donde luchas contra oleadas de zombies en un área cerrada. También puedes jugar en el modo online, donde compites con otros jugadores en PvP o batallas cooperativas. </h3>
29
- <h3>Dinero y recursos ilimitados para usar</h3>
30
- <p>El Walking Zombie 2 Mod APK tiene dinero y recursos ilimitados que se pueden utilizar para comprar y actualizar cualquier cosa que desee en el juego. Usted puede comprar cualquier arma, armadura, munición, kits de salud, y otros artículos que usted necesita de las tiendas o comerciantes. También puede mejorar sus habilidades y habilidades para hacerse más fuerte y más resistente. También puede crear y construir sus propios artículos y equipos a partir de los materiales que recoja o saquee. También puedes intercambiar con otros supervivientes o jugadores para obtener más dinero y recursos. </h3>
31
- <h2>Cómo descargar e instalar El Walking Zombie 2 Mod APK en su dispositivo</h2>
32
- <h3>Paso 1: Descargar el archivo APK de una fuente de confianza</h3>
33
- <p>El primer paso para descargar e instalar el Walking Zombie 2 Mod APK en su dispositivo es encontrar una fuente confiable que proporciona el archivo APK. Puede buscar en línea para sitios web o blogs que ofrecen el archivo APK de forma gratuita. Asegúrese de que la fuente es segura y segura, y que el archivo APK se actualiza y es compatible con su dispositivo. También puede escanear el archivo APK con un programa antivirus antes de descargarlo. </p>
34
- <h3>Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo</h3>
35
-
36
- <h3>Paso 3: Instalar el archivo APK y lanzar el juego</h3>
37
- <p>El tercer paso para descargar e instalar The Walking Zombie 2 Mod APK en su dispositivo es instalar el archivo APK y lanzar el juego. Para ello, busque el archivo APK que descargó en la carpeta de almacenamiento o descargas de su dispositivo. Toque en el archivo APK para iniciar el proceso de instalación. Siga las instrucciones de la pantalla para completar la instalación. Una vez hecho esto, abra el icono del juego en la pantalla de inicio o en el cajón de la aplicación. Disfruta jugando The Walking Zombie 2 Mod APK con dinero y recursos ilimitados. </p>
38
- <h2>Conclusión</h2>
39
- <h3>Resumen de los puntos principales</h3>
40
- <p>El Walking Zombie 2 Mod APK es un juego de disparos de supervivencia que le permite luchar contra zombies y otros enemigos en un mundo post-apocalíptico. El juego tiene una historia inmersiva y jugabilidad, impresionantes gráficos y efectos de sonido, varias armas y habilidades para elegir, múltiples modos de juego y misiones para completar, y dinero y recursos ilimitados para usar. El juego es divertido y emocionante, desafiante y gratificante, personalizable y flexible. </p>
41
- <h3>Llamada a la acción y recomendación</h3>
42
- <p>Si usted está buscando un juego de zombies que le mantendrá entretenido durante horas, entonces usted debe descargar The Walking Zombie 2 Mod APK en su dispositivo. Usted no se arrepentirá, ya que tendrá una explosión jugando a este juego con dinero y recursos ilimitados. También apoyarás a los desarrolladores viendo anuncios o haciendo compras en la aplicación si quieres. ¿Qué estás esperando? Descargar El Walking Zombie 2 Mod APK ahora y disfrutar de disparar zombies en la cabeza. </p>
43
-
44
- el mundo del juego tanto como sea posible. Encontrará más botín, materiales, secretos y huevos de Pascua que mejorarán su experiencia de juego. - Elige tus armas y habilidades de acuerdo a tu estilo de juego y situación. Diferentes armas y habilidades tienen diferentes ventajas y desventajas. Experimenta con diferentes combinaciones y ve qué funciona mejor para ti. - Ten cuidado con tus elecciones y acciones. Afectarán el resultado del juego y la personalidad y moralidad de tu personaje. Piensa antes de actuar y prepárate para las consecuencias. - Diviértete y disfruta del juego. No te lo tomes demasiado en serio ni te estreses por ello. Después de todo, es solo un juego. </p> 64aa2da5cf<br />
45
- <br />
46
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Coche Escuela De Conduccin 2017 Mod Apk.md DELETED
@@ -1,97 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar coche Driving School 2017 Mod Apk y disfrutar de dinero y vehículos ilimitados</h1>
3
- <p>Si usted está buscando un juego de simulador de conducción realista y divertido que le enseñará cómo conducir diferentes coches en varios entornos, entonces usted debe probar <strong>Car Driving School 2017</strong>. Este juego desafiará sus habilidades de conducción, el conocimiento de las reglas de la carretera, y la delicadeza al volante. Pero si quieres hacer el juego aún más agradable, usted debe descargar <strong>Car Driving School 2017 mod apk</strong>, que le dará dinero ilimitado y vehículos para desbloquear. En este artículo, le diremos lo que es Car Driving School 2017, por qué debe jugar, lo que es Car Driving School 2017 mod apk, cómo descargarlo, y algunos consejos y trucos para dominar el juego. </p>
4
- <h2>¿Qué es la escuela de conducción de coches 2017 y por qué usted debe jugar</h2>
5
- <p>Car Driving School 2017 es un juego de simulación de conducción para dispositivos Android e iOS que fue desarrollado por Ovilex Software. Es la nueva entrega de la popular serie Driving School que tiene más de 100 millones de descargas en todo el mundo. En este juego, usted aprenderá cómo conducir varios coches, autobuses y camiones en diferentes escenarios. También tendrás que pasar diferentes licencias, completar más de 80 niveles y explorar más de 15 mapas detallados. También puedes jugar con tus amigos en nuevos modos multijugador como carreras, paseo gratis y coger la bandera. </p>
6
- <h2>descargar coche escuela de conducción 2017 mod apk</h2><br /><p><b><b>Download</b> &#8230;&#8230;&#8230; <a href="https://bltlly.com/2v6M3h">https://bltlly.com/2v6M3h</a></b></p><br /><br />
7
- <h3>Características de la escuela de conducción de coches 2017</h3>
8
- <p>Algunas de las características sorprendentes de Car Driving School 2017 son:</p>
9
- <ul>
10
- <li>Casi 100 vehículos para desbloquear, que van desde coches deportivos, SUV, sedanes, autobuses, camiones y más. </li>
11
- <li>Más de 15 mapas detallados que incluyen ciudades, caminos rurales, carreteras, desiertos, montañas, etc.</li>
12
- <li> Manejo del automóvil suave y realista que le permite sentir cada golpe, giro y freno. </li>
13
- <li>Diferentes licencias para tomar, como licencias de automóvil, autobús y camión. </li>
14
-
15
- <li>Modo de viaje gratuito que te permite explorar los mapas a tu propio ritmo. </li>
16
- <li>Nuevos modos multijugador que te permiten competir contra otros jugadores, deambular libremente con ellos o capturar sus banderas. </li>
17
- <li>Interiores detallados de vehículos que muestran el salpicadero, el volante, los pedales, etc.</li>
18
- <li>Sistema de daños realista que muestra los efectos de colisiones y accidentes. </li>
19
- <li>Sistema de gas que requiere que llenes tu tanque en las gasolineras. </li>
20
- <li>Transmisión manual con embrague que le permite controlar sus engranajes manualmente. </li>
21
- <li>Dirección basculante, botones y volante táctil que te permiten elegir tu opción de control preferida. </li>
22
- <li>Tablas de clasificación en línea y logros que le permiten comparar su rendimiento con otros jugadores. </li>
23
- <li>Sonidos de motor reales que te hacen sentir como si estuvieras conduciendo un coche real. </li>
24
- <li>Condiciones meteorológicas de próxima generación que añaden realismo y variedad al juego. </li>
25
- </ul>
26
- <h3>Beneficios de jugar Car Driving School 2017</h3>
27
- <p>Jugar Car Driving School 2017 no solo es divertido sino también beneficioso por varias razones:</p>
28
- <ul>
29
- <li> Puede aprender a conducir una transmisión manual con embrague y palanca de cambios o mantener la caja de cambios automática clásica. </li>
30
- <li> Con este simulador de conducción intuitivo puedes conocer mejor las reglas de circulación. </li>
31
- <li> Puede mejorar sus habilidades de conducción en diferentes situaciones y entornos. </li>
32
- <li> Puede disfrutar de una experiencia de conducción realista e inmersiva con impresionantes gráficos y efectos de sonido. </li>
33
- <li>Puedes divertirte con tus amigos en modos multijugador o competir con otros jugadores en línea. </li>
34
- <li>Puede personalizar sus vehículos con diferentes colores, llantas, alerones, etc.</li>
35
- </ul>
36
- <h2>¿Qué es Car Driving School 2017 Mod Apk y cómo descargarlo</h2>
37
-
38
- <h3>Ventajas de la escuela de conducción de coches 2017 Mod Apk</h3>
39
- <p>Algunas de las ventajas de Car Driving School 2017 mod apk son:</p>
40
- <ul>
41
- <li>Puedes desbloquear todos los vehículos del juego, incluidos los premium que cuestan dinero real. </li>
42
- <li> Usted puede comprar cualquier actualización y personalizaciones para sus vehículos sin preocuparse por el costo. </li>
43
- <li>Puedes explorar todos los mapas y modos del juego sin tener que desbloquearlos primero. </li>
44
- <li>Puedes jugar el juego sin anuncios ni interrupciones. </li>
45
- <li> Puedes disfrutar del juego con mejor rendimiento y estabilidad. </li>
46
- </ul>
47
- <h3>Pasos para descargar e instalar coche Driving School 2017 Mod Apk</h3>
48
- <p>Si desea descargar e instalar Car Driving School 2017 mod apk, es necesario seguir estos sencillos pasos:</p>
49
- <ol>
50
- <li>En primer lugar, es necesario desinstalar la versión original de Car Driving School 2017 desde su dispositivo si lo tiene instalado. </li>
51
- <li>En segundo lugar, es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en el dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
52
- <li>En tercer lugar, es necesario descargar el coche Driving School 2017 mod apk archivo de una fuente confiable. Puede utilizar este enlace para descargarlo de forma segura y rápida. </li>
53
- <li>Cuarto, es necesario localizar el archivo descargado en el dispositivo y toque en él para iniciar el proceso de instalación. </li>
54
- <li>Quinto, debe seguir las instrucciones en la pantalla y esperar a que termine la instalación. </li>
55
- <li>Sexto, es necesario lanzar el juego y disfrutar de dinero y vehículos ilimitados. </li>
56
- </ol>
57
- <h2>Consejos y trucos para dominar la escuela de conducción de automóviles 2017</h2>
58
- <p>Car Driving School 2017 es un juego divertido y desafiante que requiere habilidad y estrategia. Aquí hay algunos consejos y trucos que te ayudarán a dominar el juego:</p>
59
- <h3>Sigue la ley y el límite de velocidad</h3>
60
-
61
- <h3>Desactivar el modo deportivo y aprender el mapa</h3>
62
- <p>Si desea mejorar sus habilidades de conducción y pasar los niveles más fácilmente, debe desactivar el modo deportivo en la configuración. El modo deportivo hace que tu coche sea más rápido y sensible, pero también más difícil de controlar. También consume más gasolina y causa más daños. Por lo tanto, es mejor apagarlo y conducir más suave y cuidadosamente. También debe aprender el mapa de cada nivel antes de iniciarlo. De esta manera, sabrás a dónde ir, qué esperar y cómo evitar obstáculos. </p>
63
- <h3>Diviértete en modo libre y modos multijugador</h3>
64
- <p>Si quieres tomarte un descanso de los niveles y licencias, puedes divertirte en modo libre o en modo multijugador. En modo libre, puede conducir alrededor de cualquier mapa sin ningún objetivo o restricciones. También puede cambiar entre diferentes vehículos y personalizarlos a su gusto. En los modos multijugador, puedes jugar con tus amigos u otros jugadores en línea en varios modos como carreras, paseos gratis o coger la bandera. También puedes chatear con ellos y hacer nuevos amigos. </p>
65
- <p></p>
66
- <h2>Conclusión</h2>
67
- <p>Car Driving School 2017 es un gran juego de simulación de conducción que le enseñará cómo conducir diferentes vehículos en escenarios realistas. También se divertirá con varias características, modos, mapas y vehículos. Pero si quieres hacer el juego aún más agradable, usted debe descargar Car Driving School 2017 mod apk que le dará dinero ilimitado y vehículos. También puedes utilizar algunos consejos y trucos que te ayudarán a dominar el juego. ¿Qué estás esperando? Descargar Car Driving School 2017 mod apk y disfrutar de dinero ilimitado y vehículos. </p>
68
- <h2>Preguntas frecuentes</h2>
69
- <p>Aquí hay algunas preguntas frecuentes sobre Car Driving School 2017 y Car Driving School 2017 mod apk:</p>
70
- <tabla>
71
- <tr>
72
- <th>Pregunta</th>
73
- <th>Respuesta</th>
74
- </tr>
75
- <tr>
76
- <td>Es Car Driving School 2017 libre para jugar? </td>
77
-
78
- </tr>
79
- <tr>
80
- <td>Es coche Driving School 2017 mod apk seguro de usar? </td>
81
- <td>Sí, Car Driving School 2017 mod apk es seguro de usar siempre y cuando se descarga de una fuente confiable. Sin embargo, siempre debes tener cuidado al instalar aplicaciones de fuentes desconocidas y escanearlas en busca de virus o malware. </td>
82
- </tr>
83
- <tr>
84
- <td>¿Puedo jugar Car Driving School 2017 sin conexión? </td>
85
- <td>Sí, puedes jugar Car Driving School 2017 sin conexión en modo para un jugador. Sin embargo, necesitará una conexión a Internet para jugar modos multijugador o acceder a funciones en línea. </td>
86
- </tr>
87
- <tr>
88
- <td>¿Puedo jugar Car Driving School 2017 en PC? </td>
89
- <td>Sí, puedes jugar Car Driving School 2017 en PC usando un emulador de Android como Bluestacks o Nox Player. También puedes usar un teclado y un ratón para controlar el juego. </td>
90
- </tr>
91
- <tr>
92
- <td>¿Cómo puedo contactar a los desarrolladores de Car Driving School 2017? </td>
93
- <td>Puede ponerse en contacto con los desarrolladores de Car Driving School 2017 enviándoles un correo electrónico a [email protected] o visitando su sitio web en https://www.ovilex.com/.</td>
94
- </tr>
95
- </tabla></p> 64aa2da5cf<br />
96
- <br />
97
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py DELETED
@@ -1,94 +0,0 @@
1
- # Copyright 2016 Étienne Bersac
2
- # Copyright 2016 Julien Danjou
3
- # Copyright 2016 Joshua Harlow
4
- # Copyright 2013-2014 Ray Holder
5
- #
6
- # Licensed under the Apache License, Version 2.0 (the "License");
7
- # you may not use this file except in compliance with the License.
8
- # You may obtain a copy of the License at
9
- #
10
- # http://www.apache.org/licenses/LICENSE-2.0
11
- #
12
- # Unless required by applicable law or agreed to in writing, software
13
- # distributed under the License is distributed on an "AS IS" BASIS,
14
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
- # See the License for the specific language governing permissions and
16
- # limitations under the License.
17
-
18
- import functools
19
- import sys
20
- import typing as t
21
- from asyncio import sleep
22
-
23
- from pip._vendor.tenacity import AttemptManager
24
- from pip._vendor.tenacity import BaseRetrying
25
- from pip._vendor.tenacity import DoAttempt
26
- from pip._vendor.tenacity import DoSleep
27
- from pip._vendor.tenacity import RetryCallState
28
-
29
- WrappedFnReturnT = t.TypeVar("WrappedFnReturnT")
30
- WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Awaitable[t.Any]])
31
-
32
-
33
- class AsyncRetrying(BaseRetrying):
34
- sleep: t.Callable[[float], t.Awaitable[t.Any]]
35
-
36
- def __init__(self, sleep: t.Callable[[float], t.Awaitable[t.Any]] = sleep, **kwargs: t.Any) -> None:
37
- super().__init__(**kwargs)
38
- self.sleep = sleep
39
-
40
- async def __call__( # type: ignore[override]
41
- self, fn: WrappedFn, *args: t.Any, **kwargs: t.Any
42
- ) -> WrappedFnReturnT:
43
- self.begin()
44
-
45
- retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
46
- while True:
47
- do = self.iter(retry_state=retry_state)
48
- if isinstance(do, DoAttempt):
49
- try:
50
- result = await fn(*args, **kwargs)
51
- except BaseException: # noqa: B902
52
- retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
53
- else:
54
- retry_state.set_result(result)
55
- elif isinstance(do, DoSleep):
56
- retry_state.prepare_for_next_attempt()
57
- await self.sleep(do)
58
- else:
59
- return do # type: ignore[no-any-return]
60
-
61
- def __iter__(self) -> t.Generator[AttemptManager, None, None]:
62
- raise TypeError("AsyncRetrying object is not iterable")
63
-
64
- def __aiter__(self) -> "AsyncRetrying":
65
- self.begin()
66
- self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
67
- return self
68
-
69
- async def __anext__(self) -> AttemptManager:
70
- while True:
71
- do = self.iter(retry_state=self._retry_state)
72
- if do is None:
73
- raise StopAsyncIteration
74
- elif isinstance(do, DoAttempt):
75
- return AttemptManager(retry_state=self._retry_state)
76
- elif isinstance(do, DoSleep):
77
- self._retry_state.prepare_for_next_attempt()
78
- await self.sleep(do)
79
- else:
80
- raise StopAsyncIteration
81
-
82
- def wraps(self, fn: WrappedFn) -> WrappedFn:
83
- fn = super().wraps(fn)
84
- # Ensure wrapper is recognized as a coroutine function.
85
-
86
- @functools.wraps(fn)
87
- async def async_wrapped(*args: t.Any, **kwargs: t.Any) -> t.Any:
88
- return await fn(*args, **kwargs)
89
-
90
- # Preserve attributes
91
- async_wrapped.retry = fn.retry # type: ignore[attr-defined]
92
- async_wrapped.retry_with = fn.retry_with # type: ignore[attr-defined]
93
-
94
- return async_wrapped # type: ignore[return-value]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/windows_support.py DELETED
@@ -1,29 +0,0 @@
1
- import platform
2
-
3
-
4
- def windows_only(func):
5
- if platform.system() != 'Windows':
6
- return lambda *args, **kwargs: None
7
- return func
8
-
9
-
10
- @windows_only
11
- def hide_file(path):
12
- """
13
- Set the hidden attribute on a file or directory.
14
-
15
- From http://stackoverflow.com/questions/19622133/
16
-
17
- `path` must be text.
18
- """
19
- import ctypes
20
- __import__('ctypes.wintypes')
21
- SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
22
- SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
23
- SetFileAttributes.restype = ctypes.wintypes.BOOL
24
-
25
- FILE_ATTRIBUTE_HIDDEN = 0x02
26
-
27
- ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
28
- if not ret:
29
- raise ctypes.WinError()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Billyosoro/ESRGAN/scripts/generate_multiscale_DF2K.py DELETED
@@ -1,48 +0,0 @@
1
- import argparse
2
- import glob
3
- import os
4
- from PIL import Image
5
-
6
-
7
- def main(args):
8
- # For DF2K, we consider the following three scales,
9
- # and the smallest image whose shortest edge is 400
10
- scale_list = [0.75, 0.5, 1 / 3]
11
- shortest_edge = 400
12
-
13
- path_list = sorted(glob.glob(os.path.join(args.input, '*')))
14
- for path in path_list:
15
- print(path)
16
- basename = os.path.splitext(os.path.basename(path))[0]
17
-
18
- img = Image.open(path)
19
- width, height = img.size
20
- for idx, scale in enumerate(scale_list):
21
- print(f'\t{scale:.2f}')
22
- rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
23
- rlt.save(os.path.join(args.output, f'{basename}T{idx}.png'))
24
-
25
- # save the smallest image which the shortest edge is 400
26
- if width < height:
27
- ratio = height / width
28
- width = shortest_edge
29
- height = int(width * ratio)
30
- else:
31
- ratio = width / height
32
- height = shortest_edge
33
- width = int(height * ratio)
34
- rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS)
35
- rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png'))
36
-
37
-
38
- if __name__ == '__main__':
39
- """Generate multi-scale versions for GT images with LANCZOS resampling.
40
- It is now used for DF2K dataset (DIV2K + Flickr 2K)
41
- """
42
- parser = argparse.ArgumentParser()
43
- parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
44
- parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder')
45
- args = parser.parse_args()
46
-
47
- os.makedirs(args.output, exist_ok=True)
48
- main(args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/deletable_api_resource.py DELETED
@@ -1,24 +0,0 @@
1
- from urllib.parse import quote_plus
2
-
3
- from openai import error
4
- from openai.api_resources.abstract.api_resource import APIResource
5
- from openai.util import ApiType
6
-
7
- class DeletableAPIResource(APIResource):
8
- @classmethod
9
- def delete(cls, sid, api_type=None, api_version=None, **params):
10
- if isinstance(cls, APIResource):
11
- raise ValueError(".delete may only be called as a class method now.")
12
-
13
- base = cls.class_url()
14
- extn = quote_plus(sid)
15
-
16
- typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
17
- if typed_api_type == ApiType.AZURE:
18
- url = "/%s%s/%s?api-version=%s" % (cls.azure_api_prefix, base, extn, api_version)
19
- elif typed_api_type == ApiType.OPEN_AI:
20
- url = "%s/%s" % (base, extn)
21
- else:
22
- raise error.InvalidAPIType('Unsupported API type %s' % api_type)
23
-
24
- return cls._static_request("delete", url, api_type=api_type, api_version=api_version, **params)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/CONTRIBUTING.md DELETED
@@ -1,52 +0,0 @@
1
- # Contributing to detectron2
2
- We want to make contributing to this project as easy and transparent as
3
- possible.
4
-
5
- ## Issues
6
- We use GitHub issues to track public bugs and questions.
7
- Please make sure to follow one of the
8
- [issue templates](https://github.com/facebookresearch/detectron2/issues/new/choose)
9
- when reporting any issues.
10
-
11
- Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
12
- disclosure of security bugs. In those cases, please go through the process
13
- outlined on that page and do not file a public issue.
14
-
15
- ## Pull Requests
16
- We actively welcome your pull requests.
17
-
18
- However, if you're adding any significant features, please
19
- make sure to have a corresponding issue to discuss your motivation and proposals,
20
- before sending a PR. We do not always accept new features, and we take the following
21
- factors into consideration:
22
-
23
- 1. Whether the same feature can be achieved without modifying detectron2.
24
- Detectron2 is designed so that you can implement many extensions from the outside, e.g.
25
- those in [projects](https://github.com/facebookresearch/detectron2/tree/master/projects).
26
- If some part is not as extensible, you can also bring up the issue to make it more extensible.
27
- 2. Whether the feature is potentially useful to a large audience, or only to a small portion of users.
28
- 3. Whether the proposed solution has a good design / interface.
29
- 4. Whether the proposed solution adds extra mental/practical overhead to users who don't
30
- need such feature.
31
- 5. Whether the proposed solution breaks existing APIs.
32
-
33
- When sending a PR, please do:
34
-
35
- 1. If a PR contains multiple orthogonal changes, split it to several PRs.
36
- 2. If you've added code that should be tested, add tests.
37
- 3. For PRs that need experiments (e.g. adding a new model), you don't need to update model zoo,
38
- but do provide experiment results in the description of the PR.
39
- 4. If APIs are changed, update the documentation.
40
- 5. Ensure the test suite passes.
41
- 6. Make sure your code lints with `./dev/linter.sh`.
42
-
43
-
44
- ## Contributor License Agreement ("CLA")
45
- In order to accept your pull request, we need you to submit a CLA. You only need
46
- to do this once to work on any of Facebook's open source projects.
47
-
48
- Complete your CLA here: <https://code.facebook.com/cla>
49
-
50
- ## License
51
- By contributing to detectron2, you agree that your contributions will be licensed
52
- under the LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/shape_spec.py DELETED
@@ -1,20 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
- from collections import namedtuple
4
-
5
-
6
- class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
7
- """
8
- A simple structure that contains basic shape specification about a tensor.
9
- It is often used as the auxiliary inputs/outputs of models,
10
- to obtain the shape inference ability among pytorch modules.
11
-
12
- Attributes:
13
- channels:
14
- height:
15
- width:
16
- stride:
17
- """
18
-
19
- def __new__(cls, *, channels=None, height=None, width=None, stride=None):
20
- return super().__new__(cls, channels, height, width, stride)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model/style.css DELETED
@@ -1,19 +0,0 @@
1
- h1 {
2
- text-align: center;
3
- }
4
- img#overview {
5
- max-width: 1000px;
6
- max-height: 600px;
7
- display: block;
8
- margin: auto;
9
- }
10
- img#style-image {
11
- max-width: 1000px;
12
- max-height: 600px;
13
- display: block;
14
- margin: auto;
15
- }
16
- img#visitor-badge {
17
- display: block;
18
- margin: auto;
19
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/bitwise_operators.h DELETED
@@ -1,338 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/functional/actor.h>
21
- #include <thrust/detail/functional/composite.h>
22
- #include <thrust/detail/functional/operators/operator_adaptors.h>
23
- #include <thrust/functional.h>
24
-
25
- namespace thrust
26
- {
27
- namespace detail
28
- {
29
- namespace functional
30
- {
31
-
32
- template<typename T1, typename T2>
33
- __host__ __device__
34
- actor<
35
- composite<
36
- transparent_binary_operator<bit_and<>>,
37
- actor<T1>,
38
- typename as_actor<T2>::type
39
- >
40
- >
41
- operator&(const actor<T1> &_1, const T2 &_2)
42
- {
43
- return compose(transparent_binary_operator<bit_and<>>(),
44
- make_actor(_1),
45
- make_actor(_2));
46
- } // end operator&()
47
-
48
- template<typename T1, typename T2>
49
- __host__ __device__
50
- actor<
51
- composite<
52
- transparent_binary_operator<bit_and<>>,
53
- typename as_actor<T1>::type,
54
- actor<T2>
55
- >
56
- >
57
- operator&(const T1 &_1, const actor<T2> &_2)
58
- {
59
- return compose(transparent_binary_operator<bit_and<>>(),
60
- make_actor(_1),
61
- make_actor(_2));
62
- } // end operator&()
63
-
64
- template<typename T1, typename T2>
65
- __host__ __device__
66
- actor<
67
- composite<
68
- transparent_binary_operator<bit_and<>>,
69
- actor<T1>,
70
- actor<T2>
71
- >
72
- >
73
- operator&(const actor<T1> &_1, const actor<T2> &_2)
74
- {
75
- return compose(transparent_binary_operator<bit_and<>>(),
76
- make_actor(_1),
77
- make_actor(_2));
78
- } // end operator&()
79
-
80
- template<typename T1, typename T2>
81
- __host__ __device__
82
- actor<
83
- composite<
84
- transparent_binary_operator<bit_or<>>,
85
- actor<T1>,
86
- typename as_actor<T2>::type
87
- >
88
- >
89
- operator|(const actor<T1> &_1, const T2 &_2)
90
- {
91
- return compose(transparent_binary_operator<bit_or<>>(),
92
- make_actor(_1),
93
- make_actor(_2));
94
- } // end operator|()
95
-
96
- template<typename T1, typename T2>
97
- __host__ __device__
98
- actor<
99
- composite<
100
- transparent_binary_operator<bit_or<>>,
101
- typename as_actor<T1>::type,
102
- actor<T2>
103
- >
104
- >
105
- operator|(const T1 &_1, const actor<T2> &_2)
106
- {
107
- return compose(transparent_binary_operator<bit_or<>>(),
108
- make_actor(_1),
109
- make_actor(_2));
110
- } // end operator|()
111
-
112
- template<typename T1, typename T2>
113
- __host__ __device__
114
- actor<
115
- composite<
116
- transparent_binary_operator<bit_or<>>,
117
- actor<T1>,
118
- actor<T2>
119
- >
120
- >
121
- operator|(const actor<T1> &_1, const actor<T2> &_2)
122
- {
123
- return compose(transparent_binary_operator<bit_or<>>(),
124
- make_actor(_1),
125
- make_actor(_2));
126
- } // end operator|()
127
-
128
- template<typename T1, typename T2>
129
- __host__ __device__
130
- actor<
131
- composite<
132
- transparent_binary_operator<bit_xor<>>,
133
- actor<T1>,
134
- typename as_actor<T2>::type
135
- >
136
- >
137
- operator^(const actor<T1> &_1, const T2 &_2)
138
- {
139
- return compose(transparent_binary_operator<bit_xor<>>(),
140
- make_actor(_1),
141
- make_actor(_2));
142
- } // end operator^()
143
-
144
- template<typename T1, typename T2>
145
- __host__ __device__
146
- actor<
147
- composite<
148
- transparent_binary_operator<bit_xor<>>,
149
- typename as_actor<T1>::type,
150
- actor<T2>
151
- >
152
- >
153
- operator^(const T1 &_1, const actor<T2> &_2)
154
- {
155
- return compose(transparent_binary_operator<bit_xor<>>(),
156
- make_actor(_1),
157
- make_actor(_2));
158
- } // end operator^()
159
-
160
- template<typename T1, typename T2>
161
- __host__ __device__
162
- actor<
163
- composite<
164
- transparent_binary_operator<bit_xor<>>,
165
- actor<T1>,
166
- actor<T2>
167
- >
168
- >
169
- operator^(const actor<T1> &_1, const actor<T2> &_2)
170
- {
171
- return compose(transparent_binary_operator<bit_xor<>>(),
172
- make_actor(_1),
173
- make_actor(_2));
174
- } // end operator^()
175
-
176
-
177
- // there's no standard bit_not functional, so roll an ad hoc one here
178
- struct bit_not
179
- {
180
- using is_transparent = void;
181
-
182
- __thrust_exec_check_disable__
183
- template <typename T1>
184
- __host__ __device__
185
- constexpr auto operator()(T1&& t1) const
186
- noexcept(noexcept(~THRUST_FWD(t1))) -> decltype(~THRUST_FWD(t1))
187
- {
188
- return ~THRUST_FWD(t1);
189
- }
190
- }; // end prefix_increment
191
-
192
- template<typename Eval>
193
- __host__ __device__
194
- actor<
195
- composite<
196
- transparent_unary_operator<bit_not>,
197
- actor<Eval>
198
- >
199
- >
200
- __host__ __device__
201
- operator~(const actor<Eval> &_1)
202
- {
203
- return compose(transparent_unary_operator<bit_not>(), _1);
204
- } // end operator~()
205
-
206
- // there's no standard bit_lshift functional, so roll an ad hoc one here
207
- struct bit_lshift
208
- {
209
- using is_transparent = void;
210
-
211
- __thrust_exec_check_disable__
212
- template <typename T1, typename T2>
213
- __host__ __device__
214
- constexpr auto operator()(T1&& t1, T2&& t2) const
215
- noexcept(noexcept(THRUST_FWD(t1) << THRUST_FWD(t2)))
216
- -> decltype(THRUST_FWD(t1) << THRUST_FWD(t2))
217
- {
218
- return THRUST_FWD(t1) << THRUST_FWD(t2);
219
- }
220
- };
221
-
222
- template<typename T1, typename T2>
223
- __host__ __device__
224
- actor<
225
- composite<
226
- transparent_binary_operator<bit_lshift>,
227
- actor<T1>,
228
- typename as_actor<T2>::type
229
- >
230
- >
231
- operator<<(const actor<T1> &_1, const T2 &_2)
232
- {
233
- return compose(transparent_binary_operator<bit_lshift>(),
234
- make_actor(_1),
235
- make_actor(_2));
236
- } // end operator<<()
237
-
238
- template<typename T1, typename T2>
239
- __host__ __device__
240
- actor<
241
- composite<
242
- transparent_binary_operator<bit_lshift>,
243
- typename as_actor<T1>::type,
244
- actor<T2>
245
- >
246
- >
247
- operator<<(const T1 &_1, const actor<T2> &_2)
248
- {
249
- return compose(transparent_binary_operator<bit_lshift>(),
250
- make_actor(_1),
251
- make_actor(_2));
252
- } // end operator<<()
253
-
254
- template<typename T1, typename T2>
255
- __host__ __device__
256
- actor<
257
- composite<
258
- transparent_binary_operator<bit_lshift>,
259
- actor<T1>,
260
- actor<T2>
261
- >
262
- >
263
- operator<<(const actor<T1> &_1, const actor<T2> &_2)
264
- {
265
- return compose(transparent_binary_operator<bit_lshift>(),
266
- make_actor(_1),
267
- make_actor(_2));
268
- } // end operator<<()
269
-
270
- // there's no standard bit_rshift functional, so roll an ad hoc one here
271
- struct bit_rshift
272
- {
273
- using is_transparent = void;
274
-
275
- __thrust_exec_check_disable__
276
- template <typename T1, typename T2>
277
- __host__ __device__
278
- constexpr auto operator()(T1& t1, T2&& t2) const
279
- noexcept(noexcept(THRUST_FWD(t1) >> THRUST_FWD(t2)))
280
- -> decltype(THRUST_FWD(t1) >> THRUST_FWD(t2))
281
- {
282
- return THRUST_FWD(t1) >> THRUST_FWD(t2);
283
- }
284
- };
285
-
286
-
287
- template<typename T1, typename T2>
288
- __host__ __device__
289
- actor<
290
- composite<
291
- transparent_binary_operator<bit_rshift>,
292
- actor<T1>,
293
- typename as_actor<T2>::type
294
- >
295
- >
296
- operator>>(const actor<T1> &_1, const T2 &_2)
297
- {
298
- return compose(transparent_binary_operator<bit_rshift>(),
299
- make_actor(_1),
300
- make_actor(_2));
301
- } // end operator>>()
302
-
303
- template<typename T1, typename T2>
304
- __host__ __device__
305
- actor<
306
- composite<
307
- transparent_binary_operator<bit_rshift>,
308
- typename as_actor<T1>::type,
309
- actor<T2>
310
- >
311
- >
312
- operator>>(const T1 &_1, const actor<T2> &_2)
313
- {
314
- return compose(transparent_binary_operator<bit_rshift>(),
315
- make_actor(_1),
316
- make_actor(_2));
317
- } // end operator>>()
318
-
319
- template<typename T1, typename T2>
320
- __host__ __device__
321
- actor<
322
- composite<
323
- transparent_binary_operator<bit_rshift>,
324
- actor<T1>,
325
- actor<T2>
326
- >
327
- >
328
- operator>>(const actor<T1> &_1, const actor<T2> &_2)
329
- {
330
- return compose(transparent_binary_operator<bit_rshift>(),
331
- make_actor(_1),
332
- make_actor(_2));
333
- } // end operator>>()
334
-
335
- } // end functional
336
- } // end detail
337
- } // end thrust
338
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/replace.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a fill of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the replace.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch replace
24
-
25
- #include <thrust/system/detail/sequential/replace.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/replace.h>
32
- #include <thrust/system/cuda/detail/replace.h>
33
- #include <thrust/system/omp/detail/replace.h>
34
- #include <thrust/system/tbb/detail/replace.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_REPLACE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/replace.h>
38
- #include __THRUST_HOST_SYSTEM_REPLACE_HEADER
39
- #undef __THRUST_HOST_SYSTEM_REPLACE_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_REPLACE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/replace.h>
42
- #include __THRUST_DEVICE_SYSTEM_REPLACE_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_REPLACE_HEADER
44
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/models/backbones/darknet.py DELETED
@@ -1,199 +0,0 @@
1
- # Copyright (c) 2019 Western Digital Corporation or its affiliates.
2
-
3
- import logging
4
-
5
- import torch.nn as nn
6
- from mmcv.cnn import ConvModule, constant_init, kaiming_init
7
- from mmcv.runner import load_checkpoint
8
- from torch.nn.modules.batchnorm import _BatchNorm
9
-
10
- from ..builder import BACKBONES
11
-
12
-
13
- class ResBlock(nn.Module):
14
- """The basic residual block used in Darknet. Each ResBlock consists of two
15
- ConvModules and the input is added to the final output. Each ConvModule is
16
- composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer
17
- has half of the number of the filters as much as the second convLayer. The
18
- first convLayer has filter size of 1x1 and the second one has the filter
19
- size of 3x3.
20
-
21
- Args:
22
- in_channels (int): The input channels. Must be even.
23
- conv_cfg (dict): Config dict for convolution layer. Default: None.
24
- norm_cfg (dict): Dictionary to construct and config norm layer.
25
- Default: dict(type='BN', requires_grad=True)
26
- act_cfg (dict): Config dict for activation layer.
27
- Default: dict(type='LeakyReLU', negative_slope=0.1).
28
- """
29
-
30
- def __init__(self,
31
- in_channels,
32
- conv_cfg=None,
33
- norm_cfg=dict(type='BN', requires_grad=True),
34
- act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
35
- super(ResBlock, self).__init__()
36
- assert in_channels % 2 == 0 # ensure the in_channels is even
37
- half_in_channels = in_channels // 2
38
-
39
- # shortcut
40
- cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
41
-
42
- self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
43
- self.conv2 = ConvModule(
44
- half_in_channels, in_channels, 3, padding=1, **cfg)
45
-
46
- def forward(self, x):
47
- residual = x
48
- out = self.conv1(x)
49
- out = self.conv2(out)
50
- out = out + residual
51
-
52
- return out
53
-
54
-
55
- @BACKBONES.register_module()
56
- class Darknet(nn.Module):
57
- """Darknet backbone.
58
-
59
- Args:
60
- depth (int): Depth of Darknet. Currently only support 53.
61
- out_indices (Sequence[int]): Output from which stages.
62
- frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
63
- -1 means not freezing any parameters. Default: -1.
64
- conv_cfg (dict): Config dict for convolution layer. Default: None.
65
- norm_cfg (dict): Dictionary to construct and config norm layer.
66
- Default: dict(type='BN', requires_grad=True)
67
- act_cfg (dict): Config dict for activation layer.
68
- Default: dict(type='LeakyReLU', negative_slope=0.1).
69
- norm_eval (bool): Whether to set norm layers to eval mode, namely,
70
- freeze running stats (mean and var). Note: Effect on Batch Norm
71
- and its variants only.
72
-
73
- Example:
74
- >>> from mmdet.models import Darknet
75
- >>> import torch
76
- >>> self = Darknet(depth=53)
77
- >>> self.eval()
78
- >>> inputs = torch.rand(1, 3, 416, 416)
79
- >>> level_outputs = self.forward(inputs)
80
- >>> for level_out in level_outputs:
81
- ... print(tuple(level_out.shape))
82
- ...
83
- (1, 256, 52, 52)
84
- (1, 512, 26, 26)
85
- (1, 1024, 13, 13)
86
- """
87
-
88
- # Dict(depth: (layers, channels))
89
- arch_settings = {
90
- 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
91
- (512, 1024)))
92
- }
93
-
94
- def __init__(self,
95
- depth=53,
96
- out_indices=(3, 4, 5),
97
- frozen_stages=-1,
98
- conv_cfg=None,
99
- norm_cfg=dict(type='BN', requires_grad=True),
100
- act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
101
- norm_eval=True):
102
- super(Darknet, self).__init__()
103
- if depth not in self.arch_settings:
104
- raise KeyError(f'invalid depth {depth} for darknet')
105
- self.depth = depth
106
- self.out_indices = out_indices
107
- self.frozen_stages = frozen_stages
108
- self.layers, self.channels = self.arch_settings[depth]
109
-
110
- cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
111
-
112
- self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
113
-
114
- self.cr_blocks = ['conv1']
115
- for i, n_layers in enumerate(self.layers):
116
- layer_name = f'conv_res_block{i + 1}'
117
- in_c, out_c = self.channels[i]
118
- self.add_module(
119
- layer_name,
120
- self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
121
- self.cr_blocks.append(layer_name)
122
-
123
- self.norm_eval = norm_eval
124
-
125
- def forward(self, x):
126
- outs = []
127
- for i, layer_name in enumerate(self.cr_blocks):
128
- cr_block = getattr(self, layer_name)
129
- x = cr_block(x)
130
- if i in self.out_indices:
131
- outs.append(x)
132
-
133
- return tuple(outs)
134
-
135
- def init_weights(self, pretrained=None):
136
- if isinstance(pretrained, str):
137
- logger = logging.getLogger()
138
- load_checkpoint(self, pretrained, strict=False, logger=logger)
139
- elif pretrained is None:
140
- for m in self.modules():
141
- if isinstance(m, nn.Conv2d):
142
- kaiming_init(m)
143
- elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
144
- constant_init(m, 1)
145
-
146
- else:
147
- raise TypeError('pretrained must be a str or None')
148
-
149
- def _freeze_stages(self):
150
- if self.frozen_stages >= 0:
151
- for i in range(self.frozen_stages):
152
- m = getattr(self, self.cr_blocks[i])
153
- m.eval()
154
- for param in m.parameters():
155
- param.requires_grad = False
156
-
157
- def train(self, mode=True):
158
- super(Darknet, self).train(mode)
159
- self._freeze_stages()
160
- if mode and self.norm_eval:
161
- for m in self.modules():
162
- if isinstance(m, _BatchNorm):
163
- m.eval()
164
-
165
- @staticmethod
166
- def make_conv_res_block(in_channels,
167
- out_channels,
168
- res_repeat,
169
- conv_cfg=None,
170
- norm_cfg=dict(type='BN', requires_grad=True),
171
- act_cfg=dict(type='LeakyReLU',
172
- negative_slope=0.1)):
173
- """In Darknet backbone, ConvLayer is usually followed by ResBlock. This
174
- function will make that. The Conv layers always have 3x3 filters with
175
- stride=2. The number of the filters in Conv layer is the same as the
176
- out channels of the ResBlock.
177
-
178
- Args:
179
- in_channels (int): The number of input channels.
180
- out_channels (int): The number of output channels.
181
- res_repeat (int): The number of ResBlocks.
182
- conv_cfg (dict): Config dict for convolution layer. Default: None.
183
- norm_cfg (dict): Dictionary to construct and config norm layer.
184
- Default: dict(type='BN', requires_grad=True)
185
- act_cfg (dict): Config dict for activation layer.
186
- Default: dict(type='LeakyReLU', negative_slope=0.1).
187
- """
188
-
189
- cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
190
-
191
- model = nn.Sequential()
192
- model.add_module(
193
- 'conv',
194
- ConvModule(
195
- in_channels, out_channels, 3, stride=2, padding=1, **cfg))
196
- for idx in range(res_repeat):
197
- model.add_module('res{}'.format(idx),
198
- ResBlock(out_channels, **cfg))
199
- return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/models/losses/cross_entropy_loss.py DELETED
@@ -1,216 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
- from ..builder import LOSSES
6
- from .utils import weight_reduce_loss
7
-
8
-
9
- def cross_entropy(pred,
10
- label,
11
- weight=None,
12
- reduction='mean',
13
- avg_factor=None,
14
- class_weight=None):
15
- """Calculate the CrossEntropy loss.
16
-
17
- Args:
18
- pred (torch.Tensor): The prediction with shape (N, C), C is the number
19
- of classes.
20
- label (torch.Tensor): The learning label of the prediction.
21
- weight (torch.Tensor, optional): Sample-wise loss weight.
22
- reduction (str, optional): The method used to reduce the loss.
23
- avg_factor (int, optional): Average factor that is used to average
24
- the loss. Defaults to None.
25
- class_weight (list[float], optional): The weight for each class.
26
-
27
- Returns:
28
- torch.Tensor: The calculated loss
29
- """
30
- # element-wise losses
31
- loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
32
-
33
- # apply weights and do the reduction
34
- if weight is not None:
35
- weight = weight.float()
36
- loss = weight_reduce_loss(
37
- loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
38
-
39
- return loss
40
-
41
-
42
- def _expand_onehot_labels(labels, label_weights, label_channels):
43
- bin_labels = labels.new_full((labels.size(0), label_channels), 0)
44
- inds = torch.nonzero(
45
- (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
46
- if inds.numel() > 0:
47
- bin_labels[inds, labels[inds]] = 1
48
-
49
- if label_weights is None:
50
- bin_label_weights = None
51
- else:
52
- bin_label_weights = label_weights.view(-1, 1).expand(
53
- label_weights.size(0), label_channels)
54
-
55
- return bin_labels, bin_label_weights
56
-
57
-
58
- def binary_cross_entropy(pred,
59
- label,
60
- weight=None,
61
- reduction='mean',
62
- avg_factor=None,
63
- class_weight=None):
64
- """Calculate the binary CrossEntropy loss.
65
-
66
- Args:
67
- pred (torch.Tensor): The prediction with shape (N, 1).
68
- label (torch.Tensor): The learning label of the prediction.
69
- weight (torch.Tensor, optional): Sample-wise loss weight.
70
- reduction (str, optional): The method used to reduce the loss.
71
- Options are "none", "mean" and "sum".
72
- avg_factor (int, optional): Average factor that is used to average
73
- the loss. Defaults to None.
74
- class_weight (list[float], optional): The weight for each class.
75
-
76
- Returns:
77
- torch.Tensor: The calculated loss
78
- """
79
- if pred.dim() != label.dim():
80
- label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
81
-
82
- # weighted element-wise losses
83
- if weight is not None:
84
- weight = weight.float()
85
- loss = F.binary_cross_entropy_with_logits(
86
- pred, label.float(), pos_weight=class_weight, reduction='none')
87
- # do the reduction for the weighted loss
88
- loss = weight_reduce_loss(
89
- loss, weight, reduction=reduction, avg_factor=avg_factor)
90
-
91
- return loss
92
-
93
-
94
- def mask_cross_entropy(pred,
95
- target,
96
- label,
97
- reduction='mean',
98
- avg_factor=None,
99
- class_weight=None):
100
- """Calculate the CrossEntropy loss for masks.
101
-
102
- Args:
103
- pred (torch.Tensor): The prediction with shape (N, C, *), C is the
104
- number of classes. The trailing * indicates arbitrary shape.
105
- target (torch.Tensor): The learning label of the prediction.
106
- label (torch.Tensor): ``label`` indicates the class label of the mask
107
- corresponding object. This will be used to select the mask in the
108
- of the class which the object belongs to when the mask prediction
109
- if not class-agnostic.
110
- reduction (str, optional): The method used to reduce the loss.
111
- Options are "none", "mean" and "sum".
112
- avg_factor (int, optional): Average factor that is used to average
113
- the loss. Defaults to None.
114
- class_weight (list[float], optional): The weight for each class.
115
-
116
- Returns:
117
- torch.Tensor: The calculated loss
118
-
119
- Example:
120
- >>> N, C = 3, 11
121
- >>> H, W = 2, 2
122
- >>> pred = torch.randn(N, C, H, W) * 1000
123
- >>> target = torch.rand(N, H, W)
124
- >>> label = torch.randint(0, C, size=(N,))
125
- >>> reduction = 'mean'
126
- >>> avg_factor = None
127
- >>> class_weights = None
128
- >>> loss = mask_cross_entropy(pred, target, label, reduction,
129
- >>> avg_factor, class_weights)
130
- >>> assert loss.shape == (1,)
131
- """
132
- # TODO: handle these two reserved arguments
133
- assert reduction == 'mean' and avg_factor is None
134
- num_rois = pred.size()[0]
135
- inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
136
- pred_slice = pred[inds, label].squeeze(1)
137
- return F.binary_cross_entropy_with_logits(
138
- pred_slice, target, weight=class_weight, reduction='mean')[None]
139
-
140
-
141
- @LOSSES.register_module()
142
- class CrossEntropyLoss(nn.Module):
143
-
144
- def __init__(self,
145
- use_sigmoid=False,
146
- use_mask=False,
147
- reduction='mean',
148
- class_weight=None,
149
- loss_weight=1.0):
150
- """CrossEntropyLoss.
151
-
152
- Args:
153
- use_sigmoid (bool, optional): Whether the prediction uses sigmoid
154
- of softmax. Defaults to False.
155
- use_mask (bool, optional): Whether to use mask cross entropy loss.
156
- Defaults to False.
157
- reduction (str, optional): . Defaults to 'mean'.
158
- Options are "none", "mean" and "sum".
159
- class_weight (list[float], optional): Weight of each class.
160
- Defaults to None.
161
- loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
162
- """
163
- super(CrossEntropyLoss, self).__init__()
164
- assert (use_sigmoid is False) or (use_mask is False)
165
- self.use_sigmoid = use_sigmoid
166
- self.use_mask = use_mask
167
- self.reduction = reduction
168
- self.loss_weight = loss_weight
169
- self.class_weight = class_weight
170
-
171
- if self.use_sigmoid:
172
- self.cls_criterion = binary_cross_entropy
173
- elif self.use_mask:
174
- self.cls_criterion = mask_cross_entropy
175
- else:
176
- self.cls_criterion = cross_entropy
177
-
178
- def forward(self,
179
- cls_score,
180
- label,
181
- weight=None,
182
- avg_factor=None,
183
- reduction_override=None,
184
- **kwargs):
185
- """Forward function.
186
-
187
- Args:
188
- cls_score (torch.Tensor): The prediction.
189
- label (torch.Tensor): The learning label of the prediction.
190
- weight (torch.Tensor, optional): Sample-wise loss weight.
191
- avg_factor (int, optional): Average factor that is used to average
192
- the loss. Defaults to None.
193
- reduction (str, optional): The method used to reduce the loss.
194
- Options are "none", "mean" and "sum".
195
- Returns:
196
- torch.Tensor: The calculated loss
197
- """
198
- assert reduction_override in (None, 'none', 'mean', 'sum')
199
- reduction = (
200
- reduction_override if reduction_override else self.reduction)
201
- if self.class_weight is not None:
202
- class_weight = cls_score.new_tensor(
203
- self.class_weight, device=cls_score.device)
204
- else:
205
- class_weight = None
206
- loss_cls = self.loss_weight * self.cls_criterion(
207
- cls_score,
208
- label,
209
- weight,
210
- class_weight=class_weight,
211
- reduction=reduction,
212
- avg_factor=avg_factor,
213
- **kwargs)
214
- return loss_cls
215
-
216
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/meme_generator/memes/hold_tight/__init__.py DELETED
@@ -1,18 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from pil_utils import BuildImage
5
-
6
- from meme_generator import add_meme
7
-
8
- img_dir = Path(__file__).parent / "images"
9
-
10
-
11
- def hold_tight(images: List[BuildImage], texts, args):
12
- img = images[0].convert("RGBA").resize((159, 171), keep_ratio=True)
13
- frame = BuildImage.open(img_dir / "0.png")
14
- frame.paste(img, (113, 205), below=True)
15
- return frame.save_jpg()
16
-
17
-
18
- add_meme("hold_tight", hold_tight, min_images=1, max_images=1, keywords=["抱紧"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/__init__.py DELETED
File without changes
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/README.md DELETED
@@ -1,90 +0,0 @@
1
- # Setting Up Datasets
2
- This file describes how to perform training on other datasets.
3
-
4
- Only Pascal VOC dataset can be loaded from its original format and be outputted to Pascal style results currently.
5
-
6
- We expect the annotations from other datasets be converted to COCO json format, and
7
- the output will be in COCO-style. (i.e. AP, AP50, AP75, APs, APm, APl for bbox and segm)
8
-
9
- ## Creating Symlinks for PASCAL VOC
10
-
11
- We assume that your symlinked `datasets/voc/VOC<year>` directory has the following structure:
12
-
13
- ```
14
- VOC<year>
15
- |_ JPEGImages
16
- | |_ <im-1-name>.jpg
17
- | |_ ...
18
- | |_ <im-N-name>.jpg
19
- |_ Annotations
20
- | |_ pascal_train<year>.json (optional)
21
- | |_ pascal_val<year>.json (optional)
22
- | |_ pascal_test<year>.json (optional)
23
- | |_ <im-1-name>.xml
24
- | |_ ...
25
- | |_ <im-N-name>.xml
26
- |_ VOCdevkit<year>
27
- ```
28
-
29
- Create symlinks for `voc/VOC<year>`:
30
-
31
- ```
32
- cd ~/github/maskrcnn-benchmark
33
- mkdir -p datasets/voc/VOC<year>
34
- ln -s /path/to/VOC<year> /datasets/voc/VOC<year>
35
- ```
36
- Example configuration files for PASCAL VOC could be found [here](https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/configs/pascal_voc/).
37
-
38
- ### PASCAL VOC Annotations in COCO Format
39
- To output COCO-style evaluation result, PASCAL VOC annotations in COCO json format is required and could be downloaded from [here](https://storage.googleapis.com/coco-dataset/external/PASCAL_VOC.zip)
40
- via http://cocodataset.org/#external.
41
-
42
- ## Creating Symlinks for Cityscapes:
43
-
44
- We assume that your symlinked `datasets/cityscapes` directory has the following structure:
45
-
46
- ```
47
- cityscapes
48
- |_ images
49
- | |_ <im-1-name>.jpg
50
- | |_ ...
51
- | |_ <im-N-name>.jpg
52
- |_ annotations
53
- | |_ instanceonly_gtFile_train.json
54
- | |_ ...
55
- |_ raw
56
- |_ gtFine
57
- |_ ...
58
- |_ README.md
59
- ```
60
-
61
- Create symlinks for `cityscapes`:
62
-
63
- ```
64
- cd ~/github/maskrcnn-benchmark
65
- mkdir -p datasets/cityscapes
66
- ln -s /path/to/cityscapes datasets/data/cityscapes
67
- ```
68
-
69
- ### Steps to convert Cityscapes Annotations to COCO Format
70
- 1. Download gtFine_trainvaltest.zip from https://www.cityscapes-dataset.com/downloads/ (login required)
71
- 2. Extract it to /path/to/gtFine_trainvaltest
72
- ```
73
- cityscapes
74
- |_ gtFine_trainvaltest.zip
75
- |_ gtFine_trainvaltest
76
- |_ gtFine
77
- ```
78
- 3. Run the below commands to convert the annotations
79
-
80
- ```
81
- cd ~/github
82
- git clone https://github.com/mcordts/cityscapesScripts.git
83
- cd cityscapesScripts
84
- cp ~/github/maskrcnn-benchmark/tools/cityscapes/instances2dict_with_polygons.py cityscapesscripts/evaluation
85
- python setup.py install
86
- cd ~/github/maskrcnn-benchmark
87
- python tools/cityscapes/convert_cityscapes_to_coco.py --datadir /path/to/cityscapes --outdir /path/to/cityscapes/annotations
88
- ```
89
-
90
- Example configuration files for Cityscapes could be found [here](https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/configs/cityscapes/).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/ttGlyphPen.py DELETED
@@ -1,335 +0,0 @@
1
- from array import array
2
- from typing import Any, Callable, Dict, Optional, Tuple
3
- from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat
4
- from fontTools.misc.loggingTools import LogMixin
5
- from fontTools.pens.pointPen import AbstractPointPen
6
- from fontTools.misc.roundTools import otRound
7
- from fontTools.pens.basePen import LoggingPen, PenError
8
- from fontTools.pens.transformPen import TransformPen, TransformPointPen
9
- from fontTools.ttLib.tables import ttProgram
10
- from fontTools.ttLib.tables._g_l_y_f import flagOnCurve, flagCubic
11
- from fontTools.ttLib.tables._g_l_y_f import Glyph
12
- from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
13
- from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
14
- from fontTools.ttLib.tables._g_l_y_f import dropImpliedOnCurvePoints
15
- import math
16
-
17
-
18
- __all__ = ["TTGlyphPen", "TTGlyphPointPen"]
19
-
20
-
21
- class _TTGlyphBasePen:
22
- def __init__(
23
- self,
24
- glyphSet: Optional[Dict[str, Any]],
25
- handleOverflowingTransforms: bool = True,
26
- ) -> None:
27
- """
28
- Construct a new pen.
29
-
30
- Args:
31
- glyphSet (Dict[str, Any]): A glyphset object, used to resolve components.
32
- handleOverflowingTransforms (bool): See below.
33
-
34
- If ``handleOverflowingTransforms`` is True, the components' transform values
35
- are checked that they don't overflow the limits of a F2Dot14 number:
36
- -2.0 <= v < +2.0. If any transform value exceeds these, the composite
37
- glyph is decomposed.
38
-
39
- An exception to this rule is done for values that are very close to +2.0
40
- (both for consistency with the -2.0 case, and for the relative frequency
41
- these occur in real fonts). When almost +2.0 values occur (and all other
42
- values are within the range -2.0 <= x <= +2.0), they are clamped to the
43
- maximum positive value that can still be encoded as an F2Dot14: i.e.
44
- 1.99993896484375.
45
-
46
- If False, no check is done and all components are translated unmodified
47
- into the glyf table, followed by an inevitable ``struct.error`` once an
48
- attempt is made to compile them.
49
-
50
- If both contours and components are present in a glyph, the components
51
- are decomposed.
52
- """
53
- self.glyphSet = glyphSet
54
- self.handleOverflowingTransforms = handleOverflowingTransforms
55
- self.init()
56
-
57
- def _decompose(
58
- self,
59
- glyphName: str,
60
- transformation: Tuple[float, float, float, float, float, float],
61
- ):
62
- tpen = self.transformPen(self, transformation)
63
- getattr(self.glyphSet[glyphName], self.drawMethod)(tpen)
64
-
65
- def _isClosed(self):
66
- """
67
- Check if the current path is closed.
68
- """
69
- raise NotImplementedError
70
-
71
- def init(self) -> None:
72
- self.points = []
73
- self.endPts = []
74
- self.types = []
75
- self.components = []
76
-
77
- def addComponent(
78
- self,
79
- baseGlyphName: str,
80
- transformation: Tuple[float, float, float, float, float, float],
81
- identifier: Optional[str] = None,
82
- **kwargs: Any,
83
- ) -> None:
84
- """
85
- Add a sub glyph.
86
- """
87
- self.components.append((baseGlyphName, transformation))
88
-
89
- def _buildComponents(self, componentFlags):
90
- if self.handleOverflowingTransforms:
91
- # we can't encode transform values > 2 or < -2 in F2Dot14,
92
- # so we must decompose the glyph if any transform exceeds these
93
- overflowing = any(
94
- s > 2 or s < -2
95
- for (glyphName, transformation) in self.components
96
- for s in transformation[:4]
97
- )
98
- components = []
99
- for glyphName, transformation in self.components:
100
- if glyphName not in self.glyphSet:
101
- self.log.warning(f"skipped non-existing component '{glyphName}'")
102
- continue
103
- if self.points or (self.handleOverflowingTransforms and overflowing):
104
- # can't have both coordinates and components, so decompose
105
- self._decompose(glyphName, transformation)
106
- continue
107
-
108
- component = GlyphComponent()
109
- component.glyphName = glyphName
110
- component.x, component.y = (otRound(v) for v in transformation[4:])
111
- # quantize floats to F2Dot14 so we get same values as when decompiled
112
- # from a binary glyf table
113
- transformation = tuple(
114
- floatToFixedToFloat(v, 14) for v in transformation[:4]
115
- )
116
- if transformation != (1, 0, 0, 1):
117
- if self.handleOverflowingTransforms and any(
118
- MAX_F2DOT14 < s <= 2 for s in transformation
119
- ):
120
- # clamp values ~= +2.0 so we can keep the component
121
- transformation = tuple(
122
- MAX_F2DOT14 if MAX_F2DOT14 < s <= 2 else s
123
- for s in transformation
124
- )
125
- component.transform = (transformation[:2], transformation[2:])
126
- component.flags = componentFlags
127
- components.append(component)
128
- return components
129
-
130
- def glyph(
131
- self,
132
- componentFlags: int = 0x04,
133
- dropImpliedOnCurves: bool = False,
134
- *,
135
- round: Callable[[float], int] = otRound,
136
- ) -> Glyph:
137
- """
138
- Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
139
-
140
- Args:
141
- componentFlags: Flags to use for component glyphs. (default: 0x04)
142
-
143
- dropImpliedOnCurves: Whether to remove implied-oncurve points. (default: False)
144
- """
145
- if not self._isClosed():
146
- raise PenError("Didn't close last contour.")
147
- components = self._buildComponents(componentFlags)
148
-
149
- glyph = Glyph()
150
- glyph.coordinates = GlyphCoordinates(self.points)
151
- glyph.endPtsOfContours = self.endPts
152
- glyph.flags = array("B", self.types)
153
- self.init()
154
-
155
- if components:
156
- # If both components and contours were present, they have by now
157
- # been decomposed by _buildComponents.
158
- glyph.components = components
159
- glyph.numberOfContours = -1
160
- else:
161
- glyph.numberOfContours = len(glyph.endPtsOfContours)
162
- glyph.program = ttProgram.Program()
163
- glyph.program.fromBytecode(b"")
164
- if dropImpliedOnCurves:
165
- dropImpliedOnCurvePoints(glyph)
166
- glyph.coordinates.toInt(round=round)
167
-
168
- return glyph
169
-
170
-
171
- class TTGlyphPen(_TTGlyphBasePen, LoggingPen):
172
- """
173
- Pen used for drawing to a TrueType glyph.
174
-
175
- This pen can be used to construct or modify glyphs in a TrueType format
176
- font. After using the pen to draw, use the ``.glyph()`` method to retrieve
177
- a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
178
- """
179
-
180
- drawMethod = "draw"
181
- transformPen = TransformPen
182
-
183
- def __init__(
184
- self,
185
- glyphSet: Optional[Dict[str, Any]] = None,
186
- handleOverflowingTransforms: bool = True,
187
- outputImpliedClosingLine: bool = False,
188
- ) -> None:
189
- super().__init__(glyphSet, handleOverflowingTransforms)
190
- self.outputImpliedClosingLine = outputImpliedClosingLine
191
-
192
- def _addPoint(self, pt: Tuple[float, float], tp: int) -> None:
193
- self.points.append(pt)
194
- self.types.append(tp)
195
-
196
- def _popPoint(self) -> None:
197
- self.points.pop()
198
- self.types.pop()
199
-
200
- def _isClosed(self) -> bool:
201
- return (not self.points) or (
202
- self.endPts and self.endPts[-1] == len(self.points) - 1
203
- )
204
-
205
- def lineTo(self, pt: Tuple[float, float]) -> None:
206
- self._addPoint(pt, flagOnCurve)
207
-
208
- def moveTo(self, pt: Tuple[float, float]) -> None:
209
- if not self._isClosed():
210
- raise PenError('"move"-type point must begin a new contour.')
211
- self._addPoint(pt, flagOnCurve)
212
-
213
- def curveTo(self, *points) -> None:
214
- assert len(points) % 2 == 1
215
- for pt in points[:-1]:
216
- self._addPoint(pt, flagCubic)
217
-
218
- # last point is None if there are no on-curve points
219
- if points[-1] is not None:
220
- self._addPoint(points[-1], 1)
221
-
222
- def qCurveTo(self, *points) -> None:
223
- assert len(points) >= 1
224
- for pt in points[:-1]:
225
- self._addPoint(pt, 0)
226
-
227
- # last point is None if there are no on-curve points
228
- if points[-1] is not None:
229
- self._addPoint(points[-1], 1)
230
-
231
- def closePath(self) -> None:
232
- endPt = len(self.points) - 1
233
-
234
- # ignore anchors (one-point paths)
235
- if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
236
- self._popPoint()
237
- return
238
-
239
- if not self.outputImpliedClosingLine:
240
- # if first and last point on this path are the same, remove last
241
- startPt = 0
242
- if self.endPts:
243
- startPt = self.endPts[-1] + 1
244
- if self.points[startPt] == self.points[endPt]:
245
- self._popPoint()
246
- endPt -= 1
247
-
248
- self.endPts.append(endPt)
249
-
250
- def endPath(self) -> None:
251
- # TrueType contours are always "closed"
252
- self.closePath()
253
-
254
-
255
- class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen):
256
- """
257
- Point pen used for drawing to a TrueType glyph.
258
-
259
- This pen can be used to construct or modify glyphs in a TrueType format
260
- font. After using the pen to draw, use the ``.glyph()`` method to retrieve
261
- a :py:class:`~._g_l_y_f.Glyph` object representing the glyph.
262
- """
263
-
264
- drawMethod = "drawPoints"
265
- transformPen = TransformPointPen
266
-
267
- def init(self) -> None:
268
- super().init()
269
- self._currentContourStartIndex = None
270
-
271
- def _isClosed(self) -> bool:
272
- return self._currentContourStartIndex is None
273
-
274
- def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
275
- """
276
- Start a new sub path.
277
- """
278
- if not self._isClosed():
279
- raise PenError("Didn't close previous contour.")
280
- self._currentContourStartIndex = len(self.points)
281
-
282
- def endPath(self) -> None:
283
- """
284
- End the current sub path.
285
- """
286
- # TrueType contours are always "closed"
287
- if self._isClosed():
288
- raise PenError("Contour is already closed.")
289
- if self._currentContourStartIndex == len(self.points):
290
- # ignore empty contours
291
- self._currentContourStartIndex = None
292
- return
293
-
294
- contourStart = self.endPts[-1] + 1 if self.endPts else 0
295
- self.endPts.append(len(self.points) - 1)
296
- self._currentContourStartIndex = None
297
-
298
- # Resolve types for any cubic segments
299
- flags = self.types
300
- for i in range(contourStart, len(flags)):
301
- if flags[i] == "curve":
302
- j = i - 1
303
- if j < contourStart:
304
- j = len(flags) - 1
305
- while flags[j] == 0:
306
- flags[j] = flagCubic
307
- j -= 1
308
- flags[i] = flagOnCurve
309
-
310
- def addPoint(
311
- self,
312
- pt: Tuple[float, float],
313
- segmentType: Optional[str] = None,
314
- smooth: bool = False,
315
- name: Optional[str] = None,
316
- identifier: Optional[str] = None,
317
- **kwargs: Any,
318
- ) -> None:
319
- """
320
- Add a point to the current sub path.
321
- """
322
- if self._isClosed():
323
- raise PenError("Can't add a point to a closed contour.")
324
- if segmentType is None:
325
- self.types.append(0)
326
- elif segmentType in ("line", "move"):
327
- self.types.append(flagOnCurve)
328
- elif segmentType == "qcurve":
329
- self.types.append(flagOnCurve)
330
- elif segmentType == "curve":
331
- self.types.append("curve")
332
- else:
333
- raise AssertionError(segmentType)
334
-
335
- self.points.append(pt)