parquet-converter committed 130fb90 · 1 parent: b00b764

Update parquet files (step 50 of 476)

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.

Files changed (50):
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cain and Abel IP Stresser Cracked Learn How to Perform Dictionary Brute-Force and Cryptanalysis Attacks on Encrypted Passwords.md +0 -103
  2. spaces/1gistliPinn/ChatGPT4/Examples/Auslogics BoostSpeed 10.0.19.0 Crack Premium With Serial Key 2019 What You Need to Know About This Powerful Software.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Cisco Asa Vmware Image Download.md +0 -94
  4. spaces/1gistliPinn/ChatGPT4/Examples/Download Ebook Biokimia Harper Bahas.md +0 -10
  5. spaces/1gistliPinn/ChatGPT4/Examples/F1 Challenge 2007 __HOT__ Crack Download.md +0 -152
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ASMR Tapping Scratching and Brushing on Various Objects (No Talking) 3Dio Binaural Sounds.md +0 -124
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Doodle Alchemy How to Combine Air Water Fire and Earth in Fun Ways.md +0 -135
  8. spaces/1phancelerku/anime-remove-background/3dzip.org The Ultimate Source of 3D Model Free Download.md +0 -98
  9. spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer Mod APK 2022 Drive Park and Customize Your Dream Cars.md +0 -77
  10. spaces/1phancelerku/anime-remove-background/Download Z-Cron Scheduler The Ultimate Windows Task Automation Tool.md +0 -214
  11. spaces/A00001/bingothoo/src/components/chat-header.tsx +0 -12
  12. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/utils/__init__.py +0 -1
  13. spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/ckpt_utils.py +0 -68
  14. spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/plot.py +0 -56
  15. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/lr_scheduler.py +0 -98
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py +0 -15
  17. spaces/Aaajdhdhdhahdbbaabs/Hshdhdhd/README.md +0 -10
  18. spaces/Aditya9790/yolo7-object-tracking/export.py +0 -205
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/Factory.js +0 -13
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode0.js +0 -58
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectanglecanvas/Factory.js +0 -13
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenHeight.js +0 -24
  23. spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/__init__.py +0 -0
  24. spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/body.py +0 -243
  25. spaces/Amrrs/DragGan-Inversion/stylegan_human/run_pti.py +0 -54
  26. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/utils.py +0 -68
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/README.md +0 -72
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_unidiffuser_to_diffusers.py +0 -776
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddpm/__init__.py +0 -1
  30. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_superresolution.py +0 -83
  31. spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py +0 -13
  32. spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py +0 -71
  33. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/dii_head.py +0 -415
  34. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/__init__.py +0 -8
  35. spaces/AnthonyTruchetPoC/persistent-docker/scripts/run-local-docker.sh +0 -15
  36. spaces/Arun1217/mygenaiapp/app.py +0 -34
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py +0 -94
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/retry.py +0 -620
  39. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/collect_env.py +0 -242
  40. spaces/Bart92/RVC_HF/demucs/wav.py +0 -174
  41. spaces/Bart92/RVC_HF/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
  42. spaces/Benson/text-generation/Examples/Archery Battle.md +0 -200
  43. spaces/Benson/text-generation/Examples/Battlefield 3 Descargar.md +0 -95
  44. spaces/Benson/text-generation/Examples/Descargar Bump Pop Mod.md +0 -76
  45. spaces/Benson/text-generation/Examples/Descargar Fts 2020 Apk.md +0 -61
  46. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/datetime.py +0 -11
  47. spaces/Boadiwaa/Recipes/openai/api_resources/engine.py +0 -42
  48. spaces/BraydenMoore/a-random-unsecured-camera/main.py +0 -163
  49. spaces/CMU-80100/80-100-Pre-Writing-Chatbot-Section-H/README.md +0 -7
  50. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/rrpn_outputs.py +0 -244
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cain and Abel IP Stresser Cracked Learn How to Perform Dictionary Brute-Force and Cryptanalysis Attacks on Encrypted Passwords.md DELETED
@@ -1,103 +0,0 @@
1
-
2
- <br>- A network sniffer and analyzer<br>- A tool for performing various attacks | | H2: What is an IP stresser? | - A service that tests the resilience of a network or a server<br>- A tool for launching distributed denial-of-service (DDoS) attacks<br>- A way to measure the bandwidth and latency of a network | | H2: How to use Cain and Abel as an IP stresser? | - How to install and configure Cain and Abel<br>- How to scan and spoof ARP packets<br>- How to launch a DDoS attack using Cain and Abel | | H2: What are the risks and benefits of using Cain and Abel as an IP stresser? | - The legal and ethical implications of DDoS attacks<br>- The possible countermeasures and defenses against DDoS attacks<br>- The advantages and disadvantages of using Cain and Abel compared to other IP stressers | | H2: Conclusion | - A summary of the main points of the article<br>- A call to action for the readers<br>- A disclaimer and a warning | **Table 2: Article with HTML formatting** <h1>What is Cain and Abel?</h1>
3
- <p>Cain and Abel is a password recovery tool for Microsoft Windows operating systems. It allows easy recovery of various kinds of passwords by sniffing the network, cracking encrypted passwords using dictionary, brute-force and cryptanalysis attacks, recording VoIP conversations, decoding scrambled passwords, revealing password boxes, uncovering cached passwords and analyzing routing protocols.</p>
4
- <h2>cain and abel ip stresser cracked</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733; <a href="https://byltly.com/2uKvF2">https://byltly.com/2uKvF2</a></b></p><br /><br />
5
- <p>But Cain and Abel is not just a password recovery tool. It is also a powerful network sniffer and analyzer that can capture and manipulate network traffic. It can perform various attacks such as ARP poisoning, DNS spoofing, man-in-the-middle, session hijacking, SSL stripping and more.</p>
6
- <p>One of the most notorious uses of Cain and Abel is to use it as an IP stresser. An IP stresser is a service that tests the resilience of a network or a server by sending a large amount of traffic to it. It can also be used to launch distributed denial-of-service (DDoS) attacks, which aim to disrupt or disable the target by overwhelming it with requests.</p>
7
- <h2>What is an IP stresser?</h2>
8
- <p>An IP stresser is a service that tests the resilience of a network or a server by sending a large amount of traffic to it. It can also be used to launch distributed denial-of-service (DDoS) attacks, which aim to disrupt or disable the target by overwhelming it with requests.</p>
9
- <p>An IP stresser can measure the bandwidth and latency of a network by sending packets of different sizes and frequencies. It can also simulate different types of traffic such as TCP, UDP, ICMP, HTTP, HTTPS, DNS, FTP, SMTP and more. An IP stresser can help network administrators to identify bottlenecks, vulnerabilities and performance issues in their networks.</p>
10
- <p>However, an IP stresser can also be used for malicious purposes. Some hackers use IP stressers to launch DDoS attacks against their enemies or competitors. They can target websites, servers, online games, applications or even individual devices. They can cause slowdowns, outages, data loss or damage to the target.</p>
11
- <h2>How to use Cain and Abel as an IP stresser?</h2>
12
- <p>Cain and Abel can be used as an IP stresser by exploiting its ability to spoof ARP packets. ARP stands for Address Resolution Protocol, which is used to map IP addresses to MAC addresses on a local area network (LAN). By spoofing ARP packets, Cain and Abel can trick other devices on the same LAN into thinking that it is the gateway or router. This way, it can intercept all the traffic that passes through the LAN.</p>
13
- <p>How to use Cain and Abel sniffer on Windows 10<br />
14
- Cain and Abel password recovery tool download<br />
15
- Cain and Abel APR (Arp Poison Routing) tutorial<br />
16
- Cain and Abel network security software review<br />
17
- Cain and Abel crack encrypted passwords with brute-force<br />
18
- How to fix Cain and Abel 0.0.0.0 IP address problem<br />
19
- Cain and Abel record VoIP conversations on LAN<br />
20
- Cain and Abel decode scrambled passwords in browsers<br />
21
- Cain and Abel reveal password boxes on Windows<br />
22
- Cain and Abel uncover cached passwords in applications<br />
23
- Cain and Abel analyze routing protocols with sniffer<br />
24
- Cain and Abel inject custom certificates into HTTPS<br />
25
- Cain and Abel root certificate generator configuration<br />
26
- Cain and Abel spoof DNS replies with APR-DNS<br />
27
- Cain and Abel perform man-in-the-middle attacks with APR<br />
28
- How to install Cain and Abel on Windows 8<br />
29
- Cain and Abel dictionary and cryptanalysis attacks tutorial<br />
30
- Cain and Abel password decoders for various protocols<br />
31
- Cain and Abel password/hash calculators for common algorithms<br />
32
- Cain and Abel non standard utilities for Windows users<br />
33
- How to use Cain and Abel with external WiFi card<br />
34
- Cain and Abel LSA secret dumper for Windows 8<br />
35
- Cain and Abel credential manager password decoder for Windows 8<br />
36
- Cain and Abel editbox revealer for Windows 8<br />
37
- Cain and Abel RDP client sniffer filter for Windows 8<br />
38
- How to use aircrack-ng with Cain and Abel on Windows<br />
39
- How to use image steganography tool with Cain and Abel<br />
40
- How to grab IP addresses from Xbox LIVE with Cain and Abel<br />
41
- How to use Winpcap library with Cain and Abel on Windows 8<br />
42
- How to use LANC vs OctoSniff network sniffer with Cain and Abel<br />
43
- How to use VPN to protect against DDOS attacks with Cain and Abel<br />
44
- How to use VNC with Cain and Abel on Windows 10<br />
45
- How to use SSH-1 protocol analyzer with Cain and Abel<br />
46
- How to use HTTPS protocol analyzer with Cain and Abel<br />
47
- How to use POP3 protocol analyzer with Cain and Abel<br />
48
- How to use IMAP protocol analyzer with Cain and Abel<br />
49
- How to use SMTP protocol analyzer with Cain and Abel<br />
50
- How to use FTP protocol analyzer with Cain and Abel<br />
51
- How to use Telnet protocol analyzer with Cain and Abel<br />
52
- How to use HTTP protocol analyzer with Cain and Abel<br />
53
- How to use NNTP protocol analyzer with Cain and Abel<br />
54
- How to use ICQ protocol analyzer with Cain and Abel<br />
55
- How to use IRC protocol analyzer with Cain and Abel<br />
56
- How to use Rlogin protocol analyzer with Cain and Abel<br />
57
- How to use SNMP protocol analyzer with Cain and Abel<br />
58
- How to use LDAP protocol analyzer with Cain and Abel<br />
59
- How to use SOCKS 4/5 protocol analyzer with Cain and Abel<br />
60
- How to use MySQL protocol analyzer with Cain and Abel <br />
61
- How to use MS SQL Server protocol analyzer with Cain and Abel <br />
62
- How to use Oracle Database Server protocol analyzer with Cain and Abel</p>
63
- <p>To use Cain and Abel as an IP stresser, you need to follow these steps:</p>
64
- <ol>
65
- <li>Download and install Cain and Abel from <a href="https://www.techspot.com/downloads/2416-cain-abel.html">here</a>. Make sure you have WinPcap installed as well.</li>
66
- <li>Run Cain and Abel as an administrator. Click on the Sniffer tab and then click on the Start/Stop Sniffer button.</li>
67
- <li>Click on the Configure button and select your network adapter from the list. Make sure you select the one that is connected to your LAN.</li>
68
- <li>Click on the Sniffer tab again and then click on the + button. Select All Hosts in my subnet from the list.</li>
69
- <li>Wait for Cain and Abel to scan your LAN for active hosts. You should see a list of IP addresses and MAC addresses in the table.</li>
70
- <li>Select one or more hosts that you want to target for your IP stress test or DDoS attack. Right-click on them and select Resolve Host Name to get their domain names.</li>
71
- <li>Click on the APR tab at the bottom. Click on the + button again and select Use Spoofed IP & MAC Addresses from the list.</li>
72
- <li>In the dialog box that appears, enter your own IP address in the first field and your own MAC address in the second field. You can find them by typing ipconfig /all in a command prompt window.</li>
73
- <li>In the third field, enter the IP address of your gateway or router. You can find it by typing ipconfig /all in a command prompt window as well.</li>
74
- <li>In the fourth field, enter 00-00-00-00-00-00 as the MAC address of your gateway or router.</li>
75
- <li>Click OK to close the dialog box.</li>
76
- <li>You should see two entries in the APR table: one for your own device (with your own IP address) spoofing as your gateway or router (with 00-00-00-00-00-00 as its MAC address), and one for your gateway or router (with its real IP address) spoofing as your own device (with your own MAC address).</li>
77
- <li>Select both entries in the APR table. Right-click on them and select Start ARP.</li>
78
- <li>You have now successfully spoofed ARP packets on your LAN. All traffic from your target hosts will now go through your device instead of your gateway or router.</li>
79
- <li>To launch an IP stress test or DDoS attack against your target hosts, click on the Attack tab at the bottom.</li>
80
- <li>Select one or more attack methods from the list. You can choose from TCP/UDP/ICMP Floods, HTTP Floods, DNS Floods, FTP Floods, SMTP Floods and more.</li>
81
- <li>Enter the parameters for each attack method such as port number, packet size, packet rate etc.</li>
82
- <li>Click on Start Attack button to begin sending packets to your target hosts.</li>
83
- <li>To stop an attack method, select it from the list again and click on Stop Attack button.</li>
84
- <li>To stop all attack methods at once, click on Stop All Attacks button.</li>
85
- <li>To stop spoofing ARP packets on your LAN, go back to APR tab at bottom.Select both entries in APR table.Right-click on them.Select Stop ARP.</li></ol>
86
- <h2>What are risks benefits using Cain Abel as an IP stresser?</h2>
87
- <p>Using Cain & Abel as an IP stresser has some risks & benefits depending on your purpose & perspective.Here are some of them:</p>
88
- <ul><li><b>Risks:</b></li><ul><li>You may violate laws & ethics by launching DDoS attacks against unauthorized targets.You may face legal consequences such as fines,jail time,lawsuits etc.You may also damage reputation & trustworthiness among peers & clients.</li><li>You may expose yourself & your device to security threats by intercepting & manipulating network traffic.You may compromise your privacy & confidentiality by revealing sensitive information such as passwords,usernames,email addresses etc.You may also infect your device with malware,viruses,trojans etc.</li><li>You may cause harm & inconvenience to other users & devices on same LAN by spoofing ARP packets.You may disrupt normal functioning & performance of network services & applications.You may also create conflicts & errors among devices due to duplicate IP addresses & MAC addresses.</li></ul><li><b>Benefits:</b></li><ul><li>You may test resilience & security of your own network or server by launching controlled DDoS attacks against authorized targets.You may identify bottlenecks,vulnerabilities & performance issues in your network.You may also improve & optimize your network configuration & security measures.</li><li>You may learn & practice various skills & techniques related to network sniffing & analysis by intercepting & manipulating network traffic.You may gain knowledge & experience about different protocols,packets,filters etc.You may also learn about different protocols, packets, filters etc. You may also have fun and challenge yourself by trying different attack scenarios and methods.</li><li>You may protect yourself & your device from DDoS attacks by using Cain & Abel as a defensive tool. You may monitor & analyze your network traffic for any suspicious activity or anomalies. You may also detect & block any malicious packets or requests that may be part of a DDoS attack.</li></ul></ul>
89
- <h2>Conclusion</h2>
90
- <p>Cain and Abel is a versatile tool that can be used for various purposes, including password recovery, network sniffing and analysis, and IP stressing. It can be a useful tool for network administrators, security professionals, students, and hobbyists who want to test, learn, or practice different skills and techniques related to network security.</p>
91
- <p>However, Cain and Abel can also be a dangerous tool that can be used for malicious purposes, such as launching DDoS attacks against unauthorized targets. Such attacks can cause serious harm and inconvenience to the victims and their users. They can also violate laws and ethics and result in legal consequences for the attackers.</p>
92
- <p>Therefore, it is important to use Cain and Abel responsibly and ethically. It is also important to protect yourself and your device from DDoS attacks by using appropriate security measures and countermeasures. Remember that with great power comes great responsibility.</p>
93
- <h2>FAQs</h2>
94
- <ul>
95
- <li><b>Q: Is Cain and Abel illegal?</b><br>A: Cain and Abel itself is not illegal, but how you use it may be. Using Cain and Abel to launch DDoS attacks against unauthorized targets is illegal and unethical. You should only use Cain and Abel for legitimate purposes such as testing your own network or server or learning about network security.</li>
96
- <li><b>Q: How can I prevent Cain and Abel from sniffing my network traffic?</b><br>A: You can prevent Cain and Abel from sniffing your network traffic by using encryption protocols such as HTTPS, SSL, or VPN. These protocols encrypt your data before sending it over the network, making it unreadable for anyone who intercepts it.</li>
97
- <li><b>Q: How can I detect if someone is using Cain and Abel to spoof ARP packets on my LAN?</b><br>A: You can detect if someone is using Cain and Abel to spoof ARP packets on your LAN by using tools such as Wireshark or Arpwatch. These tools can monitor your network traffic for any changes or anomalies in the ARP table or cache. If you notice any duplicate or mismatched IP addresses or MAC addresses, it may indicate that someone is spoofing ARP packets on your LAN.</li>
98
- <li><b>Q: How can I stop someone from using Cain and Abel to launch a DDoS attack against me?</b><br>A: You can stop someone from using Cain and Abel to launch a DDoS attack against you by using various countermeasures such as firewalls, routers, load balancers, proxies, filters, rate limiting, blacklisting etc. These countermeasures can help you block or filter out malicious traffic or requests that may be part of a DDoS attack.</li>
99
- <li><b>Q: Where can I download Cain and Abel?</b><br>A: You can download Cain and Abel from <a href="https://www.techspot.com/downloads/2416-cain-abel.html">here</a>. Make sure you have WinPcap installed as well.</li>
100
- </ul>
101
- </p> 0a6ba089eb<br />
102
- <br />
103
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Auslogics BoostSpeed 10.0.19.0 Crack Premium With Serial Key 2019 What You Need to Know About This Powerful Software.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Auslogics BoostSpeed 10.0.19.0 Crack Premium With Serial Key 2019</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://imgfil.com/2uy1ni">https://imgfil.com/2uy1ni</a></b></p><br /><br />
2
-
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Cisco Asa Vmware Image Download.md DELETED
@@ -1,94 +0,0 @@
1
-
2
- <h1>Cisco ASA VMware Image Download: A Guide for Network Security Professionals</h1>
3
-
4
- <p>Cisco ASA (Adaptive Security Appliance) is a family of network security devices that provide firewall, VPN, intrusion prevention, and other security features for enterprise and service provider networks. Cisco ASA devices are widely used and trusted by network administrators and security experts around the world.</p>
5
- <h2>cisco asa vmware image download</h2><br /><p><b><b>Download File</b> &#10001; <a href="https://imgfil.com/2uy0LL">https://imgfil.com/2uy0LL</a></b></p><br /><br />
6
-
7
- <p>However, deploying and managing physical Cisco ASA devices can be costly and complex, especially for small and medium-sized businesses or remote offices. That is why Cisco offers a virtual version of the ASA device, called Cisco ASA Virtual (ASAv), that can run on any server class x86 CPU device that is capable of running VMware ESXi.</p>
8
-
9
- <p>Cisco ASAv is a software-only solution that provides the same features and functionality as the physical ASA device, but with more flexibility and scalability. Cisco ASAv can be deployed on any VMware ESXi host, either on-premises or in the cloud, and can be integrated with other VMware products and services. Cisco ASAv can also be easily migrated, cloned, backed up, restored, or updated using VMware tools and processes.</p>
10
-
11
- <h2>What are the benefits of Cisco ASA VMware Image Download?</h2>
12
-
13
- <p>Cisco ASA VMware Image Download has many benefits for network security professionals who want to use Cisco ASAv for their network security needs. Some of these benefits are:</p>
14
-
15
- <ul>
16
- <li>Cisco ASA VMware Image Download allows you to access the latest version of Cisco ASAv software from the official Cisco website. You can download the Cisco ASAv software as an OVA (Open Virtual Appliance) file, which contains the ASAv virtual machine image and configuration files.</li>
17
- <li>Cisco ASA VMware Image Download allows you to deploy Cisco ASAv on any VMware ESXi host in minutes. You can use the VMware vSphere Web Client, the VMware vSphere Standalone Client, or the OVF Tool to import the Cisco ASAv OVA file and create a new virtual machine based on it.</li>
18
- <li>Cisco ASA VMware Image Download allows you to configure Cisco ASAv using a Day 0 configuration file. You can use a text editor to create a Day 0 configuration file that contains the initial settings for your Cisco ASAv virtual machine, such as hostname, IP address, password, license, and interface mappings. You can then attach the Day 0 configuration file to your Cisco ASAv virtual machine during deployment.</li>
19
- <li>Cisco ASA VMware Image Download allows you to access Cisco ASAv console using the VMware console or a serial port connection. You can use the console to perform basic tasks such as verifying the status, changing the password, or applying a license for your Cisco ASAv virtual machine.</li>
20
- <li>Cisco ASA VMware Image Download allows you to upgrade your Cisco ASAv license to increase its performance and capacity. You can use the Smart Licensing feature to register your Cisco ASAv virtual machine with your Smart Account and apply a vCPU or throughput license that matches your network requirements.</li>
21
- </ul>
22
-
23
- <h2>How to use Cisco ASA VMware Image Download?</h2>
24
-
25
- <p>Cisco ASA VMware Image Download can be used for various purposes, such as:</p>
26
-
27
- <ul>
28
- <li>Testing and evaluating Cisco ASAv features and functionality before buying or deploying physical ASA devices. You can use Cisco ASAv as a sandbox or a lab environment to experiment with different network scenarios and security policies.</li>
29
- <li>Providing network security for small and medium-sized businesses or remote offices that do not have enough budget or space for physical ASA devices. You can use Cisco ASAv as a cost-effective and easy-to-manage solution that provides firewall, VPN, intrusion prevention, and other security features for your network.</li>
30
- <li>Extending network security to cloud environments that run on VMware ESXi hosts. You can use Cisco ASAv as a cloud-native solution that integrates with VMware products and services such as vSphere, NSX-T, vCloud Director, vRealize Automation, and vRealize Orchestrator.</li>
31
- <li>Enhancing network security with high availability and scalability features that are supported by VMware ESXi hosts. You can use Cisco ASAv as a resilient and elastic solution that leverages VMware features such as vMotion, HA (High Availability), DRS (Distributed Resource Scheduler), FT (Fault Tolerance), SRM (Site Recovery Manager), and vSAN (Virtual SAN).</li>
32
- </ul>
33
-
34
- <h2>Conclusion</h2>
35
-
36
- <p>Cisco ASA VMware Image Download is a valuable resource for network security professionals who want to use Cisco ASAv for their network security needs. Cisco ASAv is a software-only solution that provides the same features and functionality as the physical ASA device, but with more flexibility and scalability. Cisco ASAv can be deployed on any VMware ESXi host, either on-premises or in the cloud, and can be integrated with other VMware products and services. Cisco ASAv can also be easily migrated, cloned, backed up, restored, or updated using VMware tools and processes.</p>
37
- <p></p>
38
-
39
- <p>If you are looking for a quality solution for network security, you should definitely consider using Cisco ASA VMware Image Download. It is a valuable resource that can help you deploy and manage Cisco ASAv in an effective and convenient way.</p>
40
- <h2>What are the alternatives to Cisco ASA VMware Image Download?</h2>
41
-
42
- <p>Cisco ASA VMware Image Download is a great solution for network security, but it is not the only option available. There are some alternatives to Cisco ASAv that you may want to consider, depending on your network needs and preferences. Some of these alternatives are:</p>
43
-
44
- <ul>
45
- <li>Cisco Firepower Threat Defense Virtual (FTDv) - This is another virtual version of Cisco's network security device, but it provides more advanced features and functionality than Cisco ASAv. Cisco FTDv combines the firewall capabilities of Cisco ASAv with the threat detection and prevention capabilities of Cisco Firepower. Cisco FTDv can also be deployed on any VMware ESXi host, either on-premises or in the cloud, and can be integrated with other VMware products and services.</li>
46
- <li>Cisco Meraki MX - This is a cloud-managed network security device that provides firewall, VPN, SD-WAN, and other security features for distributed networks. Cisco Meraki MX is a physical device that can be installed at any location, and can be managed and monitored from a web-based dashboard. Cisco Meraki MX can also be integrated with other Cisco Meraki products and services.</li>
47
- <li>Cisco Secure Firewall Cloud Native (CNF) - This is a containerized network security solution that provides firewall, VPN, intrusion prevention, and other security features for cloud-native environments. Cisco CNF is a software-only solution that can run on any Kubernetes platform, either on-premises or in the cloud, and can be integrated with other cloud-native products and services.</li>
48
- </ul>
49
-
50
- <h2>Conclusion</h2>
51
-
52
- <p>Cisco ASA VMware Image Download is a valuable resource for network security professionals who want to use Cisco ASAv for their network security needs. Cisco ASAv is a software-only solution that provides the same features and functionality as the physical ASA device, but with more flexibility and scalability. Cisco ASAv can be deployed on any VMware ESXi host, either on-premises or in the cloud, and can be integrated with other VMware products and services. However, there are also some alternatives to Cisco ASAv that you may want to consider, depending on your network needs and preferences. You can use Cisco FTDv, Cisco Meraki MX, or Cisco CNF as other solutions for network security.</p>
53
-
54
- <p>If you are looking for a quality solution for network security, you should definitely consider using Cisco ASA VMware Image Download. It is a valuable resource that can help you deploy and manage Cisco ASAv in an effective and convenient way.</p>
55
- <h2>What are the resources and references for Cisco ASA VMware Image Download?</h2>
56
-
57
- <p>Cisco ASA VMware Image Download can help you learn and use Cisco ASAv for your network security needs, but you also need some resources and references to guide you along the way. Some of these resources and references are:</p>
58
-
59
- <ul>
60
- <li>Cisco ASAv Documentation - This is the official documentation for Cisco ASAv that provides detailed information and instructions on how to download, deploy, configure, manage, and troubleshoot Cisco ASAv. You can access the Cisco ASAv documentation from the Cisco website or from the Cisco ASAv console.</li>
61
- <li>Cisco ASAv Support - This is the official support portal for Cisco ASAv that provides technical assistance and solutions for any issues or problems related to Cisco ASAv. You can access the Cisco ASAv support portal from the Cisco website or from the Cisco ASAv console.</li>
62
- <li>Cisco ASAv Community - This is the official community forum for Cisco ASAv that provides a platform for users and experts to share their knowledge and experience with Cisco ASAv. You can access the Cisco ASAv community forum from the Cisco website or from the Cisco ASAv console.</li>
63
- <li>Cisco ASAv Training - This is the official training program for Cisco ASAv that provides courses and certifications for network security professionals who want to learn and master Cisco ASAv. You can access the Cisco ASAv training program from the Cisco website or from the Cisco Learning Network.</li>
64
- <li>Cisco ASAv Blogs - These are some blogs and articles that provide useful tips and insights on how to use and optimize Cisco ASAv for network security. You can access these blogs and articles from various online sources, such as:</li>
65
- <ul>
66
- <li><a href="https://blogs.cisco.com/security/cisco-asav-the-adaptive-security-virtual-appliance">Cisco ASAv: The Adaptive Security Virtual Appliance</a></li>
67
- <li><a href="https://www.packet6.com/how-to-install-cisco-asav-on-vmware-fusion/">How to Install Cisco ASAv on VMware Fusion</a></li>
68
- <li><a href="https://www.networkstraining.com/how-to-run-cisco-asav-virtual-appliance-on-vmware-workstation/">How to Run Cisco ASAV Virtual Appliance on VMware Workstation</a></li>
69
- <li><a href="https://www.firewall.cx/cisco-technical-knowledgebase/cisco-firewalls/1198-cisco-asa-virtual-appliance-asav.html">Cisco ASA Virtual Appliance (ASAv)</a></li>
70
- </ul>
71
- </ul>
72
-
73
- <h2>Conclusion</h2>
74
-
75
- <p>Cisco ASA VMware Image Download is a valuable resource for network security professionals who want to use Cisco ASAv for their network security needs. Cisco ASAv is a software-only solution that provides the same features and functionality as the physical ASA device, but with more flexibility and scalability. Cisco ASAv can be deployed on any VMware ESXi host, either on-premises or in the cloud, and can be integrated with other VMware products and services. However, you also need some resources and references to help you learn and use Cisco ASAv effectively and efficiently. You can use Cisco ASAv documentation, support, community, training, and blogs as other resources and references for network security.</p>
76
-
77
- <p>If you are looking for a quality solution for network security, you should definitely consider using Cisco ASA VMware Image Download. It is a valuable resource that can help you deploy and manage Cisco ASAv in an effective and convenient way.</p>
78
- <h2>Final Thoughts</h2>
79
-
80
- <p>Network security is one of the most important and challenging aspects of any network, as it protects the network from various threats and attacks that can compromise its performance and integrity. Network security requires a reliable and robust solution that can provide firewall, VPN, intrusion prevention, and other security features for the network.</p>
81
-
82
- <p>One such solution is Cisco ASA (Adaptive Security Appliance), a family of network security devices that are widely used and trusted by network administrators and security experts around the world. However, Cisco ASA devices can also be costly and complex to deploy and manage, especially for small and medium-sized businesses or remote offices.</p>
83
-
84
- <p>That is why Cisco offers a virtual version of the ASA device, called Cisco ASAv (Adaptive Security Virtual Appliance), that can run on any server class x86 CPU device that is capable of running VMware ESXi. Cisco ASAv is a software-only solution that provides the same features and functionality as the physical ASA device, but with more flexibility and scalability. Cisco ASAv can be deployed on any VMware ESXi host, either on-premises or in the cloud, and can be integrated with other VMware products and services.</p>
85
-
86
- <p>Cisco ASA VMware Image Download is a valuable resource for network security professionals who want to use Cisco ASAv for their network security needs. Cisco ASA VMware Image Download allows you to access the latest version of Cisco ASAv software from the official Cisco website, and deploy it on any VMware ESXi host in minutes. Cisco ASA VMware Image Download also allows you to configure, manage, and troubleshoot Cisco ASAv using various tools and methods.</p>
87
-
88
- <p>However, Cisco ASA VMware Image Download is not the only option available for network security. There are some alternatives to Cisco ASAv that you may want to consider, depending on your network needs and preferences. You can use Cisco FTDv (Firepower Threat Defense Virtual), Cisco Meraki MX, or Cisco CNF (Secure Firewall Cloud Native) as other solutions for network security.</p>
89
-
90
- <p>You also need some resources and references to help you learn and use Cisco ASAv effectively and efficiently. You can use Cisco ASAv documentation, support, community, training, and blogs as other resources and references for network security.</p>
91
-
92
- <p>If you are looking for a quality solution for network security, you should definitely consider using Cisco ASA VMware Image Download. It is a valuable resource that can help you deploy and manage Cisco ASAv in an effective and convenient way.</p> 3cee63e6c2<br />
93
- <br />
94
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Download Ebook Biokimia Harper Bahas.md DELETED
@@ -1,10 +0,0 @@
1
- <br />
2
- <p>DOWNLOAD: https://urloso.com/2xjsk3. DOWNLOAD: http://mp3-mp4-converter.blogspot.com.ec/.. Free download. Book english of the free download is now - 2cmxex.. 2cmxex Crack Keygen.. Main menu. Direct download of Biokimia Harper Bahasa Indonesia... ctd biokimia harper bahas pertama kali.. beta biokimia harper bahasa.. </p>
3
- <h2>download ebook biokimia harper bahas</h2><br /><p><b><b>Download File</b> &#128504; <a href="https://imgfil.com/2uy12w">https://imgfil.com/2uy12w</a></b></p><br /><br />
4
- <p>DOWNLOAD: https://urloso.com/2f3v61. Download Wubi 11.20 Crack Kanishka Pdf Online Keygen Free. Kanishka Kanishka.ro Kanishka Download Kanishka Crack Kanishka Kanishka Kanishka Patched Crack Kanishka Download Kanishka Online Patch. </p>
5
- <p>DOWNLOAD: https://urloso.com/2g39ov. Download Parttime Informasi Masalah Sepak Bola Asli.info. Direct download of Biokimia Harper Bahasa Indonesia.. Biokimia Harper Bahasa Indonesia.pdf.. Biokimia Harper Bahasa Indonesia.pdf online now, exclusively on AccessPharmacy.. </p>
6
- <p>Smith, Harper'S Illustrated Biochemistry Edi. 27, ISBN 9780744808663. [Bahasa Indonesia]. Google Scholar.. hanya bisa di download akhirnya ketika pulang ke rumah kelas karena angka tersebut terlalu besar. </p>
7
- <p>Download Harper's Illustrated Biochemistry. Harper's Illustrated Biochemistry. Harper's Illustrated Biochemistry. Harper's Illustrated Biochemistry.. 27. Kindle edition. $6.50.. Smith, Harper'S Illustrated Biochemistry Edi. 27, ISBN 9780744808663. [Bahasa Indonesia]. Google Scholar.. apakah biokimia harper incisi tiada?. May 23, 2017. Berry, Biochemistry (Harper s Biochemistry), Revised Edition, Harper s Biochemistry,. . Berry, Harper'S Illustrated Biochemistry, Harper'S Illustrated Biochemistry,.. Berry, Biochemistry (Harper s Biochemistry), Revised Edition, Harper s Biochemistry,.. Berry, Harper'S Illustrated Biochemistry, Harper'S Illustrated Biochemistry, 1990, ISBN 9780674783367. [Bahasa Indonesia]. Brassey'S. </p>
8
- <p></p> 899543212b<br />
9
- <br />
10
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/F1 Challenge 2007 __HOT__ Crack Download.md DELETED
@@ -1,152 +0,0 @@
1
- <br />
2
- <h1>F1 Challenge 2007 Crack Download: How to Enjoy the Best Racing Game of the Year</h1>
3
- <p>If you are a fan of Formula One racing, you might have heard of F1 Challenge 2007, a video game that simulates the 2007 season of the sport. F1 Challenge 2007 is a mod for F1 Challenge '99-'02, a game developed by EA Sports and released in 2003. The mod features updated cars, tracks, teams, drivers, and graphics to match the 2007 season.</p>
4
- <h2>F1 Challenge 2007 Crack Download</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash;>>> <a href="https://imgfil.com/2uxXRQ">https://imgfil.com/2uxXRQ</a></b></p><br /><br />
5
- <p>However, F1 Challenge 2007 is not an official game and it is not available for purchase. You can only download it from the internet for free. But how do you download and install F1 Challenge 2007 on your PC? And how do you get the crack that allows you to play it without any errors or limitations? In this article, we will show you how to do that in a few simple steps.</p>
6
- <h2>Step 1: Download F1 Challenge 2007 Mod</h2>
7
- <p>The first thing you need to do is to download the F1 Challenge 2007 mod from a reliable source. There are many websites that offer this mod, but some of them may contain viruses or malware that can harm your computer. We recommend you to use one of these links:</p>
8
- <ul>
9
- <li><a href="https://enrt.eu/bzS5aWy">FIMEDIA FIRE DOWNLOAD 518.2MB.rar</a></li>
10
- <li><a href="https://enrt.eu/bqXOq5u">MEGA DOWNLOAD 518.2MB.rar</a></li>
11
- </ul>
12
- <p>These links are from a YouTube video by B&J F1, who reissued the mod with some improvements and fixes. You can watch the video here: <a href="https://www.youtube.com/watch?v=DoxYxcuI9wA">DOWNLOAD F1 Challenge 2007</a>.</p>
13
- <p>After you download the mod, you will get a compressed file in .rar format. You will need to extract it using a software like WinRAR or 7-Zip.</p>
14
- <p></p>
15
- <h2>Step 2: Install F1 Challenge '99-'02</h2>
16
- <p>Before you can install the F1 Challenge 2007 mod, you need to have the original game F1 Challenge '99-'02 installed on your PC. If you already have it, you can skip this step. If you don't have it, you can download it from here: <a href="https://f1-challenge-2007-full-version.software.informer.com/1.0/">F1 Challenge 2007 Full version 1.0 Download</a>.</p>
17
- <p>This link is from Software Informer, a website that provides information and downloads for various software programs. The download is free and safe, but you may need to register an account to access it.</p>
18
- <p>After you download the game, you will get an executable file named F1Challenge2007.exe. Run it and follow the instructions to install the game on your PC.</p>
19
- <h2>Step 3: Install F1 Challenge 2007 Mod</h2>
20
- <p>Now that you have both the mod and the original game on your PC, you can install the mod by following these steps:</p>
21
- <ol>
22
- <li>Open the folder where you extracted the mod file and copy the folder named "F1C GGSF12007".</li>
23
- <li>Paste it into the folder where you installed the original game (usually C:\Program Files\EA SPORTS\F1 Challenge '99-'02).</li>
24
- <li>Replace any existing files if prompted.</li>
25
- <li>Open the folder "F1C GGSF12007" and run the file named "F12007.exe".</li>
26
- <li>Enjoy playing F1 Challenge 2007!</li>
27
- </ol>
28
- <h2>Step 4: Download and Apply F1 Challenge 2007 Crack</h2>
29
- <p>If you want to play F1 Challenge 2007 without any errors or limitations, you will need to download and apply a crack for it. A crack is a file that modifies or bypasses some features of a software program, such as copy protection or activation.</p>
30
- <p>You can download a crack for F1 Challenge 2007 from here: <a href="https://www.arealgamer.org/f1-2007/">Download F1 (2007) Free Full PC Game</a>.</p>
31
- <p>This link is from A Real Gamer, a website that offers free downloads of PC games. The crack is included in the game file that you can download from this link.</p>
32
- <p>After you download the game file, you will get another compressed file in .rar format. You will need to extract it using a software like WinRAR or 7-Zip.</p>
33
- <p>Inside the extracted folder, you will find a folder named "Crack". Open it and copy the file named "F12007.exe".</p>
34
- <p>Paste it into the folder where you installed the mod (usually C:\Program Files\EA SPORTS\F1 Challenge '99-'02\F1C GGSF12007).</li>
35
- <li>Replace any existing files if prompted.</li>
36
- <li>Run the file named "F12007.exe" from this folder.</li>
37
- <li>Enjoy playing F1 Challenge 2007 without any errors or limitations!</li>
38
-
39
- <h3>Conclusion</h3>
40
-
41
- <p>F1 Challenge 2007 is a great racing game that lets you experience the thrill of Formula One racing in your PC. With realistic graphics, sound effects, physics, and gameplay, it will make you feel like a real driver on the track.</p>
42
-
43
- <p>To play this game for free, you need to download and install both the mod and the original game, as well as apply a crack for it. This may seem complicated at first, but if you follow our guide step by step, you will be able to do it easily and quickly.</p>
44
-
45
- <p>We hope this article helped you learn how to download and install F1 Challenge 2007 on your PC. If you have any questions or comments, feel free to leave them below. Happy racing!</p>
46
- <h3>F1 Challenge 2007 Tips and Tricks</h3>
47
- <p>F1 Challenge 2007 is a mod that offers a realistic and immersive racing experience. However, it can also be quite challenging and difficult for some players, especially beginners. If you want to improve your skills and performance in F1 Challenge 2007, you can try some of these tips and tricks:</p>
48
- <ul>
49
- <li>Practice before you race. F1 Challenge 2007 has a time trial mode where you can practice on any track and car without any opponents or distractions. You can use this mode to learn the layout of the track, the best racing lines, the braking points, the cornering speeds, and the overtaking opportunities. You can also adjust your car settings and see how they affect your lap times.</li>
50
- <li>Use the driving aids wisely. F1 Challenge 2007 has several driving aids that can help you control your car better, such as traction control, anti-lock brakes, stability control, and automatic gearbox. However, these aids also reduce your speed and performance. You can choose to turn them on or off depending on your preference and skill level. You can also use the custom option to set the level of each aid individually.</li>
51
- <li>Watch the replays and telemetry. F1 Challenge 2007 has a replay feature that lets you watch your races from different angles and perspectives. You can use this feature to analyze your mistakes and improve your driving style. You can also use the telemetry feature that shows you various data about your car, such as speed, gear, throttle, brake, steering angle, tire temperature, and fuel level. You can use this feature to optimize your car settings and strategy.</li>
52
- <li>Learn from other drivers. F1 Challenge 2007 has a multiplayer mode where you can race against other players online or on a local network. You can use this mode to challenge yourself and learn from other drivers who have more experience and skill than you. You can also watch their replays and telemetry to see how they drive and what they do differently from you.</li>
53
- </ul>
54
-
55
- <h3>F1 Challenge 2007 Alternatives and Similar Games</h3>
56
- <p>F1 Challenge 2007 is a mod that is based on F1 Challenge '99-'02, a game that was released in 2003. Since then, many other games have been released that are similar or related to F1 Challenge 2007. Some of these games are:</p>
57
- <ul>
58
- <li>F1 2014: This is an official Formula One game that was released in 2014 by Codemasters. It features the cars, drivers, teams, tracks, rules, and graphics of the 2014 season. It also has various modes, such as career, championship, scenario, time trial, online multiplayer, and co-op.</li>
59
- <li>F-1Mania 2007: This is another mod for F1 Challenge '99-'02 that simulates the 2007 season of Formula One racing. It features updated cars, tracks, teams, drivers, graphics, sound effects, physics, AI, and gameplay. It also has some extra features, such as real V8 engine sound, new intro video, new race winner scenes, new drivers champion scene, new constructors champion scene, custom menu music.</li>
60
- <li>Formula 1 2007: This is a video game that was released in 2007 by Studio Liverpool and Sony Computer Entertainment. It is based on the official Formula One game for PlayStation Portable (PSP). It features the cars, drivers, teams, tracks, rules, and graphics of the 2007 season. It also has various modes, such as quick race, championship mode (with career option), time trial mode (with ghost car option), ad hoc wireless mode (for up to eight players), infrastructure mode (for online multiplayer).</li>
61
- </ul>
62
-
63
- <h3>Conclusion</h3>
64
-
65
- <p>F1 Challenge 2007 Crack Download is a query that will help you find and download F1 Challenge 2007 on your PC for free. F1 Challenge 2007 is a mod for F1 Challenge '99-'02 that simulates the 2007 season of Formula One racing. It features updated cars, tracks, teams, drivers, graphics, sound effects,
66
- physics,
67
- AI,
68
- and gameplay.
69
-
70
- To download
71
- and install
72
- F1 Challenge
73
- 2007 on
74
- your PC
75
- for free,
76
- you need
77
- to follow
78
- these steps:
79
-
80
- <ol>
81
- <li>Download F1 Challenge 2007 Mod from one of these links: <a href="https://enrt.eu/bzS5aWy">FIMEDIA FIRE DOWNLOAD 518.2MB.rar</a> or <a href="https://enrt.eu/bqXOq5u">MEGA DOWNLOAD 518.2MB.rar</a>.</li>
82
- <li>Extract the mod file using a software like WinRAR or 7-Zip.</li>
83
- <li>Download F1 Challenge '99-'02 from this link: <a href="https://f1-challenge-2007-full-version.software.informer.com/1.0/">F1 Challenge 2007 Full version 1.0 Download</a>.</li>
84
- <li>Install F1 Challenge '99-'02 on your PC.</li>
85
- <li>Copy the folder named "F1C GGSF12007" from the mod file and paste it into the folder where you installed F1 Challenge '99-'02 (usually C:\Program Files\EA SPORTS\F1 Challenge '99-'02).</li>
86
- <li>Replace any existing files if prompted.</li>
87
- <li>Download F1 (2007) Free Full PC Game from this link: <a href="https://www.arealgamer.org/f1-2007/">Download F1 (2007) Free Full PC Game</a>.</li>
88
- <li>Extract the game file using a software like WinRAR or 7-Zip.</li>
89
- <li>Copy the file named "F12007.exe" from the folder named "Crack" in the game file and paste it into the folder where you installed F1 Challenge '99-'02\F1C GGSF12007).</li>
90
- <li>Replace any existing files if prompted.</li>
91
- <li>Run F12007.exe from this folder.</li>
92
- <li>Enjoy playing F1 Challenge 2007 without any errors or limitations!</li>
93
-
94
- <p>We hope this article helped you learn how to download
95
- and install
96
- F1 Challenge
97
- 2007 on
98
- your PC
99
- for free.
100
- If you have
101
- any questions
102
- or comments,
103
- feel free
104
- to leave
105
- them below.
106
- Happy racing!</p>
107
- <h3>Conclusion</h3>
108
-
109
- <p>F1 Challenge 2007 Crack Download is a query that will help you find and download F1 Challenge 2007 on your PC for free. F1 Challenge 2007 is a mod for F1 Challenge '99-'02 that simulates the 2007 season of Formula One racing. It features updated cars, tracks, teams, drivers, graphics, sound effects,
110
- physics,
111
- AI,
112
- and gameplay.
113
-
114
- To download
115
- and install
116
- F1 Challenge
117
- 2007 on
118
- your PC
119
- for free,
120
- you need
121
- to follow
122
- these steps:
123
-
124
- <ol>
125
- <li>Download F1 Challenge 2007 Mod from one of these links: <a href="https://enrt.eu/bzS5aWy">FIMEDIA FIRE DOWNLOAD 518.2MB.rar</a> or <a href="https://enrt.eu/bqXOq5u">MEGA DOWNLOAD 518.2MB.rar</a>.</li>
126
- <li>Extract the mod file using a software like WinRAR or 7-Zip.</li>
127
- <li>Download F1 Challenge '99-'02 from this link: <a href="https://f1-challenge-2007-full-version.software.informer.com/1.0/">F1 Challenge 2007 Full version 1.0 Download</a>.</li>
128
- <li>Install F1 Challenge '99-'02 on your PC.</li>
129
- <li>Copy the folder named "F1C GGSF12007" from the mod file and paste it into the folder where you installed F1 Challenge '99-'02 (usually C:\Program Files\EA SPORTS\F1 Challenge '99-'02).</li>
130
- <li>Replace any existing files if prompted.</li>
131
- <li>Download F1 (2007) Free Full PC Game from this link: <a href="https://www.arealgamer.org/f1-2007/">Download F1 (2007) Free Full PC Game</a>.</li>
132
- <li>Extract the game file using a software like WinRAR or 7-Zip.</li>
133
- <li>Copy the file named "F12007.exe" from the folder named "Crack" in the game file and paste it into the folder where you installed F1 Challenge '99-'02\F1C GGSF12007).</li>
134
- <li>Replace any existing files if prompted.</li>
135
- <li>Run F12007.exe from this folder.</li>
136
- <li>Enjoy playing F1 Challenge 2007 without any errors or limitations!</li>
137
-
138
- <p>We hope this article helped you learn how to download
139
- and install
140
- F1 Challenge
141
- 2007 on
142
- your PC
143
- for free.
144
- If you have
145
- any questions
146
- or comments,
147
- feel free
148
- to leave
149
- them below.
150
- Happy racing!</p> 3cee63e6c2<br />
151
- <br />
152
- <br />

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ASMR Tapping Scratching and Brushing on Various Objects (No Talking) 3Dio Binaural Sounds.md DELETED
@@ -1,124 +0,0 @@
1
- <br />
2
- <br> - Common triggers and effects of ASMR <br> - Scientific research and explanations of ASMR | | H2: How to experience ASMR? | - Tips for finding your personal triggers <br> - Examples of popular ASMR videos and channels <br> - How to create your own ASMR content | | H3: What are the benefits of ASMR? | - Physical and mental health benefits <br> - Relaxation and stress relief <br> - Creativity and productivity enhancement | | H4: What are the challenges and risks of ASMR? | - Misconceptions and stigma around ASMR <br> - Potential side effects and drawbacks of ASMR <br> - How to avoid overstimulation and addiction | Here is the second table with the article with HTML formatting: | Article | | --- | <h1>What is ASMR?</h1> <p>Have you ever felt a tingling sensation on your scalp or spine when someone whispers in your ear or brushes your hair? Have you ever felt relaxed or sleepy when listening to soft sounds or watching someone perform a mundane task? If so, you may have experienced ASMR.</p>
3
- <h2>asmr</h2><br /><p><b><b>Download File</b> &#187; <a href="https://urlin.us/2uSYWM">https://urlin.us/2uSYWM</a></b></p><br /><br /> <p>ASMR stands for Autonomous Sensory Meridian Response. It is a term coined in 2010 by Jennifer Allen, who created a Facebook group to connect with others who shared her experience. She defined it as "a physical sensation characterized by a pleasurable tingling that typically begins in the head and scalp, and often moves down the spine and through the limbs."</p> <p>ASMR is usually triggered by specific auditory or visual stimuli, such as whispering, tapping, scratching, crinkling, brushing, or personal attention. Some people also experience ASMR from cognitive stimuli, such as reading, writing, or meditating. The effects of ASMR vary from person to person, but they often include relaxation, calmness, happiness, euphoria, sleepiness, or even goosebumps.</p> <p>ASMR is not a new phenomenon, but it has gained popularity in recent years thanks to the internet. There are thousands of videos on YouTube dedicated to creating ASMR content for viewers who seek to experience it. Some of these videos have millions of views and subscribers. There are also podcasts, apps, websites, forums, and communities devoted to ASMR.</p>
4
- <p>asmr sleep sounds<br />
5
- asmr eating honeycomb<br />
6
- asmr role play doctor<br />
7
- asmr tapping and scratching<br />
8
- asmr whispering ear to ear<br />
9
- asmr slime videos<br />
10
- asmr haircut and scalp massage<br />
11
- asmr mouth sounds and kisses<br />
12
- asmr triggers for tingles<br />
13
- asmr no talking 10 hours<br />
14
- asmr gentle hand movements<br />
15
- asmr drawing and coloring<br />
16
- asmr keyboard typing sounds<br />
17
- asmr personal attention and affirmations<br />
18
- asmr mic brushing and blowing<br />
19
- asmr cooking and eating<br />
20
- asmr haircut and beard trim<br />
21
- asmr water sounds and bubbles<br />
22
- asmr relaxing music and nature sounds<br />
23
- asmr crinkles and plastic sounds<br />
24
- asmr unboxing and review<br />
25
- asmr makeup tutorial and application<br />
26
- asmr massage and spa treatment<br />
27
- asmr page turning and book sounds<br />
28
- asmr candle lighting and match sounds<br />
29
- asmr haircut and shampoo<br />
30
- asmr eating crunchy foods<br />
31
- asmr role play teacher<br />
32
- asmr scissors and cutting sounds<br />
33
- asmr whispering your name<br />
34
- asmr leather sounds and gloves<br />
35
- asmr painting and brush sounds<br />
36
- asmr cleaning and organizing<br />
37
- asmr knitting and crochet sounds<br />
38
- asmr wood carving and sanding sounds<br />
39
- asmr haircut and blow dry<br />
40
- asmr eating ice cream and popsicles<br />
41
- asmr role play dentist<br />
42
- asmr zipper sounds and clothing sounds<br />
43
- asmr countdown to sleep</p> <p>ASMR is also a subject of scientific interest. Although there is not much research on it yet, some studies have suggested that ASMR may have physiological and psychological benefits for some people. For example, one study found that ASMR reduced heart rate and increased skin conductance in participants who watched ASMR videos. Another study found that ASMR increased positive emotions and reduced stress levels in participants who experienced it.</p>
44
- <h2>How to experience ASMR?</h2>
45
- <p>If you are curious about ASMR or want to enhance your experience of it, here are some tips for finding your personal triggers:</p>
46
- <ul>
47
- ASMR content online, such as roleplays, soundscapes, tutorials, or personal stories. You can also search by specific triggers, such as whispering, tapping, or brushing. You may find some triggers that work for you and some that don't.</li>
48
- <li>Use headphones or earbuds when watching or listening to ASMR content. This will enhance the quality and intensity of the sounds and create a more immersive experience.</li>
49
- <li>Find a comfortable and quiet place to enjoy ASMR. You may want to dim the lights, close your eyes, or use a sleep mask to block out any distractions. You may also want to adjust the volume and speed of the ASMR content to suit your preferences.</li>
50
- <li>Be open-minded and patient. ASMR is not something that everyone can experience or enjoy. It may take some time and experimentation to find what works for you. Don't force yourself to watch or listen to something that makes you uncomfortable or bored. ASMR is supposed to be a pleasant and relaxing experience, not a stressful or annoying one.</li>
51
- </ul>
52
- <p>Here are some examples of popular ASMR videos and channels that you can check out:</p>
53
- <table>
54
- <tr>
55
- <th>Video</th>
56
- <th>Channel</th>
57
- <th>Description</th>
58
- </tr>
59
- <tr>
60
- <td><a href="">ASMR 20 Triggers To Help You Sleep ♥</a></td>
61
- <td>Gibi ASMR</td>
62
- <td>A compilation of various ASMR triggers, such as tapping, scratching, brushing, and whispering.</td>
63
- </tr>
64
- <tr>
65
- <td><a href="">[ASMR] Cranial Nerve Exam - Doctor Roleplay</a></td>
66
- <td>FrivolousFox ASMR</td>
67
- <td>A medical roleplay where the ASMRtist performs a cranial nerve exam on the viewer.</td>
68
- </tr>
69
- <tr>
70
- <td><a href="">ASMR | Relaxing Spa Facial Roleplay (Layered Sounds)</a></td>
71
- <td>ASMR Glow</td>
72
- <td>A spa roleplay where the ASMRtist gives the viewer a facial treatment with layered sounds.</td>
73
- </tr>
74
- <tr>
75
- <td><a href="">ASMR - The Ultimate Sleep Clinic (Intense Relaxation)</a></td>
76
- <td>The ASMR Ryan</td>
77
- <td>A sleep clinic roleplay where the ASMRtist helps the viewer fall asleep with various techniques.</td>
78
- </tr>
79
- <tr>
80
- <td><a href="">ASMR Baking Chocolate Chip Cookies ✨ Soft Spoken</a></td>
81
- <td>Rapunzel ASMR</td>
82
- <td>A baking tutorial where the ASMRtist makes chocolate chip cookies with soft spoken narration.</td>
83
- </tr>
84
- </table>
85
- <p>If you want to create your own ASMR content, here are some tips for getting started:</p>
86
- <ul>
87
- <li>Choose a theme or niche for your content. You can either focus on one type of trigger or genre, or mix and match different ones. You can also create original content or inspired by other sources, such as movies, books, games, or cultures.</li>
88
- <li>Invest in a good microphone and camera. The quality of your sound and video will make a big difference in your ASMR content. You don't need to spend a fortune on equipment, but you should look for ones that have high sensitivity, low noise, and clear resolution.</li>
89
- <li>Plan and practice your content. You may want to write a script or an outline for your content, especially if you are doing a roleplay or a tutorial. You may also want to rehearse your content before recording it, to make sure it sounds natural and smooth.</li>
90
- <li>Edit and upload your content. You may want to use some editing software to enhance your sound and video quality, such as adjusting the volume, adding effects, or cutting out unwanted noises. You may also want to add some tags, descriptions, and thumbnails to your content, to make it more searchable and appealing.</li>
91
- <li>Engage with your audience. You may want to interact with your viewers by responding to their comments, requests, or feedback. You may also want to join some ASMR communities online, such as Reddit, Facebook, or Discord, to share your content and connect with other creators and fans.</li>
92
- </ul>
93
- <h3>What are the benefits of ASMR?</h3>
94
- <p>ASMR can have many benefits for some people who experience it. Here are some of them:</p>
95
- <ul>
96
- <li>Mood and mental health support. ASMR can lift your mood, and it can also help with some mental health conditions, such as depression, anxiety, insomnia, or PTSD.</li>
97
- <li>Relaxation and stress relief. ASMR can induce a state of deep relaxation and calmness in the mind and body. It can help people cope with stress, anxiety, or negative emotions. It can also help people fall asleep faster and sleep better.</li>
98
- <li>Creativity and productivity enhancement. ASMR can stimulate the brain and enhance its creativity and productivity. It can help people focus, learn, solve problems, or generate new ideas. It can also help people express themselves and explore their passions.</li>
99
- </ul>
100
- <h4>What are the challenges and risks of ASMR?</h4>
101
- <p>ASMR is not without its challenges and risks. Here are some of them:</p>
102
- <ul>
103
- <li>Misconceptions and stigma around ASMR. ASMR is often misunderstood or misrepresented by some people who are not familiar with it or who have negative opinions about it. Some people may think that ASMR is sexual, weird, or creepy. Some people may also mock, judge, or harass ASMR creators or consumers.</li>
104
- <li>Potential side effects and drawbacks of ASMR. ASMR may not work for everyone or may have different effects on different people. Some people may not experience ASMR at all or may lose their ability to experience it over time. Some people may also experience adverse reactions to ASMR, such as headaches, nausea, or irritation.</li>
105
- <li>How to avoid overstimulation and addiction. ASMR may be addictive for some people who rely on it too much or use it too often. This may lead to overstimulation, tolerance, or withdrawal symptoms. It may also interfere with their daily life, relationships, or responsibilities. To avoid this, it is important to use ASMR in moderation and balance it with other healthy activities and habits.</li>
106
- </ul>
107
- <h5>Conclusion</h5>
108
- <p>ASMR is a fascinating and complex phenomenon that can have many benefits for some people who experience it. It can also be a fun and creative way to enjoy various types of content online or offline. However, ASMR is not a magic cure for everything and it may have some challenges and risks as well. Therefore, it is important to be informed, respectful, and responsible when engaging with ASMR.</p>
109
- <h6>FAQs</h6>
110
- <p>Here are some frequently asked questions about ASMR:</p>
111
- <ol>
112
- <li><b>What does ASMR stand for?</b><br>
113
- ASMR stands for Autonomous Sensory Meridian Response. It is a term coined in 2010 by Jennifer Allen, who created a Facebook group to connect with others who shared her experience.</li>
114
- <li><b>What causes ASMR?</b><br>
115
- ASMR is usually triggered by specific auditory or visual stimuli, such as whispering, tapping, scratching, crinkling, brushing, or personal attention. Some people also experience ASMR from cognitive stimuli, such as reading, writing, or meditating.</li>
116
- <li><b>Who can experience ASMR?</b><br>
117
- ASMR is not something that everyone can experience or enjoy. It may depend on various factors, such as genetics, personality, mood, environment, or exposure. Some people may experience ASMR more easily or intensely than others.</li>
118
- <li><b>Is ASMR sexual?</b><br>
119
- ASMR is not sexual in nature or intention. It is a sensory phenomenon that induces relaxation and pleasure in the mind and body. However, some people may find some ASMR triggers or content erotic or arousing, depending on their personal preferences and associations.</li>
120
- <li><b>Is ASMR safe?</b><br>
121
- ASMR is generally safe for most people who experience it. However, some people may have some side effects or drawbacks from ASMR, such as headaches, nausea, irritation, overstimulation, or addiction. To avoid this, it is important to use ASMR in moderation and balance it with other healthy activities and habits.</li>
122
- </ol></p>
123
- <br />
124
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Doodle Alchemy How to Combine Air Water Fire and Earth in Fun Ways.md DELETED
@@ -1,135 +0,0 @@
1
- <br />
2
- <h1>Doodle Alchemy: A Fun and Creative Puzzle Game</h1>
3
- <p>Do you like to experiment with different elements and create new substances? Do you enjoy solving puzzles and discovering new combinations? If you answered yes, then you might want to try Doodle Alchemy, a casual simulation game that will challenge your creativity and logic. In this article, we will tell you everything you need to know about Doodle Alchemy, including what it is, how to play it, and where to download it.</p>
4
- <h2>doodle alchemy</h2><br /><p><b><b>Download Zip</b> &#9675;&#9675;&#9675; <a href="https://urlin.us/2uSY7o">https://urlin.us/2uSY7o</a></b></p><br /><br />
5
- <h2>What is Doodle Alchemy?</h2>
6
- <p>Doodle Alchemy is a game with amazing graphics and effects. Off-beat music and sounds create an unforgettable atmosphere! At the start, you have only 4 elements: air, water, earth, and fire. Combine these elements and create new ones. A fascinating journey into the world of knowledge awaits! Enjoy your discoveries!</p>
7
- <h3>The basic gameplay</h3>
8
- <p>The gameplay of Doodle Alchemy is simple and intuitive. You just need to drag and drop one element onto another to see if they can combine. If they do, you will get a new element that you can use for further combinations. You can also tap on an element to see its description and properties. Your goal is to discover all the possible elements in the game, which are divided into different categories such as animals, plants, countries, food, inventions, etc.</p>
9
- <h3>The graphics and sound effects</h3>
10
- <p>The graphics of Doodle Alchemy are colorful and charming. The elements are drawn in a doodle style that gives them a unique personality. The animations are smooth and realistic, showing how the elements react with each other. The sound effects are also well-designed, matching the mood and theme of the game. The music is off-beat and catchy, creating a relaxing and enjoyable atmosphere.</p>
11
- <h3>The benefits of playing Doodle Alchemy</h3>
12
- <p>Doodle Alchemy is not only a fun game, but also an educational one. By playing it, you can learn about different elements and their properties, as well as how they interact with each other. You can also expand your vocabulary and knowledge by reading the descriptions of the elements. Moreover, you can stimulate your creativity and logic by finding new combinations and solutions. Doodle Alchemy is a game that will keep you entertained and curious for hours.</p>
13
- <p>doodle alchemy mod apk download<br />
14
- doodle alchemy cheats and combinations<br />
15
- doodle alchemy game online free<br />
16
- doodle alchemy animals list<br />
17
- doodle alchemy how to make life<br />
18
- doodle alchemy walkthrough by category<br />
19
- doodle alchemy best elements<br />
20
- doodle alchemy tips and tricks<br />
21
- doodle alchemy vs little alchemy<br />
22
- doodle alchemy play store<br />
23
- doodle alchemy for pc windows 10<br />
24
- doodle alchemy all 288 elements<br />
25
- doodle alchemy unlimited hints<br />
26
- doodle alchemy similar games<br />
27
- doodle alchemy review and rating<br />
28
- doodle alchemy wiki and guide<br />
29
- doodle alchemy update and new features<br />
30
- doodle alchemy fun and addictive<br />
31
- doodle alchemy create your own world<br />
32
- doodle alchemy challenge and puzzle<br />
33
- doodle alchemy hack and mod menu<br />
34
- doodle alchemy solutions and answers<br />
35
- doodle alchemy levels and stages<br />
36
- doodle alchemy hints and clues<br />
37
- doodle alchemy codes and rewards<br />
38
- doodle alchemy genres and themes<br />
39
- doodle alchemy graphics and sound<br />
40
- doodle alchemy controls and interface<br />
41
- doodle alchemy bugs and issues<br />
42
- doodle alchemy support and feedback<br />
43
- doodle alchemy community and forum<br />
44
- doodle alchemy news and updates<br />
45
- doodle alchemy videos and tutorials<br />
46
- doodle alchemy screenshots and images<br />
47
- doodle alchemy facts and trivia<br />
48
- doodle alchemy history and origin<br />
49
- doodle alchemy developer and publisher<br />
50
- doodle alchemy release date and version<br />
51
- doodle alchemy system requirements and compatibility<br />
52
- doodle alchemy privacy policy and terms of service</p>
53
- <h2>How to play Doodle Alchemy?</h2>
54
- <p>If you are interested in playing Doodle Alchemy, here are some tips and tricks that will help you get started.</p>
55
- <h3>The four elements</h3>
56
- <p>The four elements that you start with are air, water, earth, and fire. These are the basic building blocks of everything in the game. You can combine them in different ways to create new elements. For example, air + fire = energy; water + earth = swamp; earth + fire = lava; water + air = steam; etc. Try to experiment with different combinations and see what happens.</p>
57
- <h3>The combinations and categories</h3>
58
- <p>As you discover new elements, they will be added to your collection. You can access your collection by tapping on the book icon at the bottom of the screen. You can also see how many elements you have discovered out of the total number in the game. The elements are grouped into different categories such as animals, plants, countries, food, inventions, etc. You can tap on a category to see all the elements that belong to it. You can also tap on an element to see its description and properties.</p>
59
- <p>New elements are made by combining the ones you have already discovered. For example, human + metal = tool; tool + wood = wheel; wheel + wheel = car; etc. You can use the hint button at the top of the screen to get a clue about a possible combination. However, you have a limited number of hints, so use them wisely.</p>
60
- <h3>The tips and tricks</h3>
61
- <p>Here are some tips and tricks that will help you play Doodle Alchemy more effectively and enjoyably.</p>
62
- <ul>
63
- <li>Pay attention to the names and descriptions of the elements. They might give you some clues or hints about how to combine them.</li>
64
- <li>Think outside the box and try different combinations. Sometimes, the most unexpected combinations can lead to new discoveries.</li>
65
- <li>Use the search function to find an element quickly. You can type in the name of the element or a part of it, and the game will show you all the matching elements.</li>
66
- <li>Use the filter function to narrow down your collection. You can filter by category, alphabet, or date of discovery.</li>
67
- <li>Use the undo button to undo your last combination. This can be useful if you make a mistake or want to try a different combination.</li>
68
- </ul>
69
- <h2>Where to download and play Doodle Alchemy?</h2>
70
- <p>If you are interested in downloading and playing Doodle Alchemy, here is some information that you might want to know.</p>
71
- <h3>The platforms and devices</h3>
72
- <p>Doodle Alchemy is available for various platforms and devices. You can play it on your Android or iOS smartphone or tablet, as well as on your Windows or Mac computer. You can also play it online on your browser without downloading anything. The game is compatible with most devices and browsers, so you don't have to worry about technical issues.</p>
73
- <h3>The price and in-app purchases</h3>
74
- <p>Doodle Alchemy is free to download and play. However, it does contain some in-app purchases that can enhance your gaming experience. For example, you can buy more hints, remove ads, unlock all categories, or get a premium version of the game. The prices range from $0.99 to $4.99 depending on the item. You can also watch ads or complete offers to get free hints or coins.</p>
75
- <h3>The ratings and reviews</h3>
76
- <p>Doodle Alchemy has received positive ratings and reviews from players and critics alike. It has a rating of 4.5 out of 5 stars on Google Play Store and 4.6 out of 5 stars on the App Store. It has also been featured on several websites and blogs as one of the best puzzle games for Android and iOS. Some of the common praises for Doodle Alchemy are its addictive gameplay, beautiful graphics, relaxing music, educational value, and originality.</p>
77
- <h2>Conclusion</h2>
78
- <p>Doodle Alchemy is a fun and creative puzzle game that will challenge your creativity and logic. You can experiment with different elements and create new substances, while learning about their properties and interactions. You can also enjoy the colorful graphics, realistic animations, off-beat music, and sound effects that create an unforgettable atmosphere. Doodle Alchemy is a game that will keep you entertained and curious for hours.</p>
79
- <h3>Summary of the main points</h3>
80
- <p>In this article, we have covered the following points about Doodle Alchemy:</p>
81
- <ul>
82
- <li>What is Doodle Alchemy? It is a casual simulation game that lets you combine elements and create new ones.</li>
83
- <li>How to play Doodle Alchemy? You just need to drag and drop one element onto another to see if they can combine. You can also tap on an element to see its description and properties.</li>
84
- <li>Where to download and play Doodle Alchemy? You can download it for free on your Android or iOS device, or play it online on your browser. You can also buy some in-app purchases to enhance your gaming experience.</li>
85
- </ul>
86
- <h3>Call to action</h3>
87
- <p>If you are looking for a game that will stimulate your creativity and logic, while providing you with hours of fun and learning, then you should definitely try Doodle Alchemy. Download it now and start your journey into the world of knowledge!</p>
88
- <h2>Frequently Asked Questions</h2>
89
- <p>Here are some frequently asked questions about Doodle Alchemy that you might find helpful.</p>
90
- <h4>Q: How many elements are there in Doodle Alchemy?</h4>
91
- <p>A: There are over 500 elements in Doodle Alchemy that you can discover by combining different elements.</p>
92
- <h4>Q: How do I reset my progress in Doodle Alchemy?</h4>
93
- <p>A: If you want to start over and erase all your discoveries, you can reset your progress in Doodle Alchemy by following these steps:</p>
94
- <ol>
95
- <li>Open the game and tap on the settings icon at the top right corner of the screen.</li>
96
- <li>Tap on the reset button and confirm your choice.</li>
97
- <li>Enjoy the game from scratch!</li>
98
- </ol>
99
- <h4>Q: How do I get more hints in Doodle Alchemy?</h4>
100
- <p>A: Hints are useful when you are stuck and need some guidance. You can get more hints in Doodle Alchemy by doing one of the following:</p>
101
- <ul>
102
- <li>Buy more hints with real money. You can choose from different packages depending on your needs.</li>
103
- <li>Watch ads or complete offers to get free hints or coins. You can use coins to buy hints as well.</li>
104
- <li>Wait for the hint timer to refill. You get one free hint every 10 minutes.</li>
105
- </ul>
106
- <h4>Q: What are the achievements in Doodle Alchemy?</h4>
107
- <p>A: Achievements are goals that you can complete by playing Doodle Alchemy. They are a way to track your progress and challenge yourself. You can access the achievements by tapping on the trophy icon at the bottom of the screen. You can see how many achievements you have unlocked out of the total number in the game. Some examples of achievements are:</p>
108
- <ul>
109
- <li>Create 10 elements</li>
110
- <li>Create 50 elements</li>
111
- <li>Create 100 elements</li>
112
- <li>Create all animals</li>
113
- <li>Create all countries</li>
114
- <li>Create all inventions</li>
115
- <li>Etc.</li>
116
- </ul>
117
- <h4>Q: What are the secrets in Doodle Alchemy?</h4>
118
- <p>A: Secrets are hidden elements that you can discover by combining certain elements in a specific order. They are not part of any category and they have a special icon. They are usually related to pop culture, mythology, or humor. Some examples of secrets are:</p>
119
- <ul>
120
- <li>Zombie = corpse + life</li>
121
- <li>Vampire = blood + human</li>
122
- <li>Lightsaber = sword + energy</li>
123
- <li>Ninja = human + shuriken</li>
124
- <li>Etc.</li>
125
- </ul>
126
- <h4>Q: How do I contact the developers of Doodle Alchemy?</h4>
127
- <p>A: If you have any questions, feedback, suggestions, or issues regarding Doodle Alchemy, you can contact the developers by using one of these methods:</p>
128
- <ul>
129
- <li>Email: [email protected]</li>
130
- <li>Facebook: https://www.facebook.com/byrilgames/</li>
131
- <li>Twitter: https://twitter.com/byrilgames</li>
132
- <li>Website: http://byril.com/</li>
133
- </ul></p>
134
- <br />
135
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/3dzip.org The Ultimate Source of 3D Model Free Download.md DELETED
@@ -1,98 +0,0 @@
1
- <br />
2
- <h1>What is 3dzip.org and why you should use it</h1>
3
- <p>If you are an architect, designer, or hobbyist who loves to create realistic 3D scenes, you know how important it is to have a good collection of 3D models. But finding high-quality, free, and easy-to-use 3D models can be challenging. That's why you should check out <a href="https://3dzip.org/">3dzip.org</a>, a website that offers free download of 3D models for architecture and design.</p>
4
- <h2>3dzip.org</h2><br /><p><b><b>Download Zip</b> &#10026; <a href="https://jinyurl.com/2uNNI7">https://jinyurl.com/2uNNI7</a></b></p><br /><br />
5
- <h2>The benefits of using 3dzip.org</h2>
6
- <p>There are many reasons why you should use 3dzip.org for your 3D projects. Here are some of them:</p>
7
- <h3>High-quality 3D models for various categories</h3>
8
- <p>At 3dzip.org, you can find thousands of 3D models for different categories, such as furniture, lighting, decoration, kitchen, bathroom, plant, technology, and more. You can also find full scenes of interiors and exteriors, as well as textures, materials, scripts, and HDRI panoramas. All the models are realistic, detailed, and optimized for rendering.</p>
9
- <h3>Free and easy to download and use</h3>
10
- <p>All the resources on 3dzip.org are uploaded freely by users. They are only used for scientific research and teaching purposes. Therefore, you can download and use them for free without any commercial restrictions. You can also upload your own models to share with the community. The download process is simple and fast. You just need to click on the download button and enter your email address to get the link.</p>
11
- <h3>Updated regularly with new resources</h3>
12
- <p>One of the best things about 3dzip.org is that it is updated regularly with new resources. You can always find something new and fresh to inspire your creativity. You can also follow their social media accounts to get notified of the latest posts.</p>
13
- <p>3dzip.org free download 3d models<br />
14
- 3dzip.org dressing table<br />
15
- 3dzip.org shoe storage cabinet<br />
16
- 3dzip.org wardrobe and display cabinets<br />
17
- 3dzip.org sideboard and chest of drawer<br />
18
- 3dzip.org wine cabinet<br />
19
- 3dzip.org bed<br />
20
- 3dzip.org bar stool<br />
21
- 3dzip.org bookcase<br />
22
- 3dzip.org sofa<br />
23
- 3dzip.org stool<br />
24
- 3dzip.org tv cabinets<br />
25
- 3dzip.org tv wall<br />
26
- 3dzip.org table console table<br />
27
- 3dzip.org chair<br />
28
- 3dzip.org bench<br />
29
- 3dzip.org arm chair<br />
30
- 3dzip.org table and chair<br />
31
- 3dzip.org office furniture<br />
32
- 3dzip.org other soft seating<br />
33
- 3dzip.org kitchen tableware<br />
34
- 3dzip.org kitchen island<br />
35
- 3dzip.org kitchen appliance<br />
36
- 3dzip.org other kitchen accessories<br />
37
- 3dzip.org food and drinks<br />
38
- 3dzip.org sink faucet<br />
39
- 3dzip.org childroom full furniture set<br />
40
- 3dzip.org toy miscellaneous<br />
41
- 3dzip.org bathroom wash basin<br />
42
- 3dzip.org toilet and bidet<br />
43
- 3dzip.org bathtub shower<br />
44
- 3dzip.org bathroom furniture<br />
45
- 3dzip.org towel rail bathroom accessories<br />
46
- 3dzip.org decoration decorative plaster<br />
47
- 3dzip.org curtain mirror frame vase books pillows carpets decorative set wall decor sculpture other decorative objects clothes and shoes plant tree flower grass indoor plants outdoor plants lighting ceiling light wall light floor lamp table lamp spot light street lighting technical lighting technology pcs and other electrics household appliance tv phones audio tech miscellaneous other models windows doors gate and fence fireplace radiator shop transport sports people staircase musical instrument beauty salon weaponry restaurant creature billiards miscellaneous other models scenes exteriors interiors living room kitchen and dining room bedroom children room bathroom study room working room apartment suites hotel reception hall restaurant shop corridors and aisles showroom office other architectural elements textures wood floor coverings wall covering metal stone fabric natural materials miscellaneous hdri panorama tile leather brick roof rug materials wood metal leather fabric plastic stone glass liquid tile miscellaneous scripts scripts sketchup new posts uploaded download free bedroom interior model by gia the binh free stool model download free sofa model download free armchair model download free table model download free bar stool model download free plant model download free decorative shelves model download free living room kitchen interior model download by kien nguyen</p>
48
- <h2>How to use 3dzip.org</h2>
49
- <p>Using 3dzip.org is easy and fun. Here are some steps to help you get started:</p>
50
- <h3>Browse by tags, categories, or search keywords</h3>
51
- <p>You can browse the website by tags, categories, or search keywords to find the models you need. You can also filter the results by date, popularity, or rating. You can see the preview images, titles, descriptions, and file formats of each model.</p>
52
- <h3>Download the files in different formats</h3>
53
- <p>Once you find a model you like, you can download it in different formats, such as .max, .obj, .fbx, .skp, .rfa, .rvt, .dwg, .stl, .dae, .c4d, .blend, etc. Depending on the model, you may also get the textures, materials, and maps that come with the model. You can also see the file size and the number of downloads for each model.</p>
54
- <h3>Import the models into your software of choice</h3>
55
- <p>After downloading the files, you can import them into your software of choice, such as 3ds Max, SketchUp, Blender, Cinema 4D, Revit, AutoCAD, etc. You can then edit, modify, or combine the models as you wish. You can also apply different renderers, such as V-Ray, Corona, Lumion, etc. to create stunning images and animations.</p>
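<p>If Blender is the software you choose, the import step can also be scripted from Blender's Python console. The snippet below is only a minimal sketch: the file path is a placeholder for wherever you extracted the download, and on Blender 4.x the operator is called bpy.ops.wm.obj_import instead of bpy.ops.import_scene.obj.</p>
<pre><code>
import bpy

# Placeholder path - point it at the .obj file you extracted from the download.
model_path = "C:/Downloads/sofa_set/sofa.obj"

# Import the OBJ into the current scene (operator name for Blender 2.8x-3.x;
# on Blender 4.x use bpy.ops.wm.obj_import instead).
bpy.ops.import_scene.obj(filepath=model_path)

# The importer leaves the new objects selected, so you can adjust them here,
# for example scaling them down if the model was exported in centimeters.
for obj in bpy.context.selected_objects:
    obj.scale = (0.01, 0.01, 0.01)
</code></pre>
<p>The same idea works for the other formats listed above; only the import operator changes.</p>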
56
- <h2>Some examples of 3D models from 3dzip.org</h2>
57
- <p>To give you an idea of what kind of models you can find on 3dzip.org, here are some examples from different categories:</p>
58
- <h3>Furniture and interior design</h3>
59
- <p>If you are looking for furniture and interior design models, you can find a variety of styles and types on 3dzip.org. You can find sofas, chairs, tables, cabinets, shelves, beds, desks, and more. You can also find models of different rooms, such as living room, bedroom, dining room, office, etc. Here are some examples:</p>
60
- <h4>Table and chair set by Pham Bao Toan</h4>
61
- <p>This is a modern and elegant table and chair set that can fit any dining room. The table has a wooden top and metal legs. The chairs have leather seats and backs. The model is in .max format and comes with V-Ray materials and textures.</p>
62
- <h4>Display cabinet by Nguyen Quang Hai</h4>
63
- <p>This is a stylish and functional display cabinet that can store and showcase your items. The cabinet has glass doors and shelves. The model is in .max format and comes with V-Ray materials and textures.</p>
64
- <h4>Sofa and armchair by 3dzip.org</h4>
65
- <p>This is a cozy and comfortable sofa and armchair set that can enhance any living room. The sofa and armchair have soft cushions and fabric covers. The model is in .max format and comes with V-Ray materials and textures.</p> <h3>Lighting and decoration</h3>
66
- <p>If you are looking for lighting and decoration models, you can find a variety of shapes and sizes on 3dzip.org. You can find lamps, chandeliers, sconces, candles, vases, books, paintings, sculptures, and more. You can also find models of different themes, such as modern, classic, rustic, etc. Here are some examples:</p>
67
- <h4>Ceiling light by 3dzip.org</h4>
68
- <p>This is a simple and elegant ceiling light that can illuminate any space. The light has a metal frame and a glass shade. The model is in .max format and comes with V-Ray materials and textures.</p>
69
- <h4>Vase and books by 3dzip.org</h4>
70
- <p>This is a lovely and realistic vase and books set that can add some charm to your shelf or table. The vase has a ceramic texture and a floral pattern. The books have different colors and titles. The model is in .max format and comes with V-Ray materials and textures.</p>
71
- <h4>Wall decor by 3dzip.org</h4>
72
- <p>This is a creative and stylish wall decor that can spice up your wall. The decor consists of metal letters that spell out the word "LOVE". The model is in .max format and comes with V-Ray materials and textures.</p>
73
- <h3>Kitchen and bathroom</h3>
74
- <p>If you are looking for kitchen and bathroom models, you can find a variety of appliances and fixtures on 3dzip.org. You can find stoves, refrigerators, microwaves, sinks, faucets, cabinets, countertops, bathtubs, showers, toilets, mirrors, and more. You can also find models of different designs, such as modern, traditional, minimalist, etc. Here are some examples:</p>
75
- <h4>Kitchen island by 3dzip.org</h4>
76
- <p>This is a spacious and functional kitchen island that can make your kitchen more convenient and attractive. The island has a wooden top and a white base. It also has drawers, shelves, and a sink. The model is in .max format and comes with V-Ray materials and textures.</p>
77
- <h4>Wash basin and faucet by 3dzip.org</h4>
78
- <p>This is a sleek and modern wash basin and faucet that can enhance your bathroom. The basin has a rectangular shape and a glossy finish. The faucet has a chrome finish and a curved spout. The model is in .max format and comes with V-Ray materials and textures.</p>
79
- <h4>Bathtub and shower by 3dzip.org</h4>
80
- <p>This is a luxurious and relaxing bathtub and shower that can make your bathroom more comfortable and enjoyable. The bathtub has a oval shape and a smooth surface. The shower has a glass enclosure and a rain shower head. The model is in .max format and comes with V-Ray materials and textures.</p>
81
- <h2>Conclusion and FAQs</h2>
82
- <p>As you can see, 3dzip.org is a great website for finding free 3D models for architecture and design. You can browse, download, and use thousands of high-quality models for various categories. You can also upload your own models to share with the community. Whether you are a professional or a hobbyist, you can benefit from using 3dzip.org for your 3D projects.</p>
83
- <p>Here are some frequently asked questions about 3dzip.org:</p>
84
- <ul>
85
- <li><b>Q: What are the file formats of the models on 3dzip.org?</b></li>
86
- <li>A: The models on 3dzip.org are available in different formats, such as .max, .obj, .fbx, .skp, .rfa, .rvt, .dwg, .stl, .dae, .c4d, .blend, etc.</li>
87
- <li><b>Q: How can I download the models on 3dzip.org?</b></li>
88
- <li>A: To download the models on 3dzip.org, you just need to click on the download button and enter your email address to get the link.</li>
89
- <li><b>Q: How can I use the models on 3dzip.org?</b></li>
90
- <li>A: You can use the models on 3dzip.org for scientific research and teaching purposes only. You cannot use them for commercial purposes without permission from the authors.</li>
91
- <li><b>Q: How can I upload my own models to 3dzip.org?</b></li>
92
- <li>A: To upload your own models to 3dzip.org, you need to register an account on the website. Then you can click on the upload button and fill in the required information.</li>
93
- <li><b>Q: How can I contact the authors of the models on 3dzip.org?</b></li>
94
- <li>A: You can contact the authors of the models on 3dzip.org by clicking on their names or profiles on the website. You can also leave a comment or a rating on their models.</li>
95
- </ul>
96
- <p>I hope you enjoyed this article and learned something new about 3dzip.org. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy 3D modeling!</p>
97
- <br />
98
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer Mod APK 2022 Drive Park and Customize Your Dream Cars.md DELETED
@@ -1,77 +0,0 @@
1
- <br />
2
- <h1>Download Car Parking Multiplayer Mod APK 2022: A Guide for Car Lovers</h1>
3
- <p>If you are a fan of realistic car simulation games, you might have heard of Car Parking Multiplayer. It is one of the most popular and realistic car parking games on Android, with over 100 million downloads on Google Play. In this game, you can experience more than just parking: you can explore an open-world multiplayer mode, tune and customize your car, and even walk around and interact with other players. But what if you want to enjoy the game without any limitations or restrictions? That's where Car Parking Multiplayer mod apk 2022 comes in. In this article, we will tell you everything you need to know about this modded version of the game, including its features, benefits, and how to download and install it on your device.</p>
4
- <h2>What is Car Parking Multiplayer?</h2>
5
- <p>Car Parking Multiplayer is a realistic car parking simulator game developed by olzhass. It is available for free on Google Play and has been downloaded by more than 100 million users worldwide. The game offers a variety of features and modes that make it more than just a parking game. Here are some of the features of Car Parking Multiplayer:</p>
6
- <h2>download car parking multiplayer mod apk 2022</h2><br /><p><b><b>Download File</b> &#11088; <a href="https://jinyurl.com/2uNTCc">https://jinyurl.com/2uNTCc</a></b></p><br /><br />
7
- <h3>Features of Car Parking Multiplayer</h3>
8
- <h4>Open-world multiplayer mode</h4>
9
- <p>In this mode, you can join thousands of players online and explore different locations, such as cities, airports, deserts, and more. You can also chat with other players, join races, or create your own rules and challenges.</p>
10
- <h4>Car tuning and customization</h4>
11
- <p>The game allows you to tune and customize your car according to your preferences. You can choose from over 100 cars, ranging from sedans to supercars. You can also change the color, wheels, suspension, engine, transmission, and more. You can even add stickers, neon lights, spoilers, and other accessories to make your car stand out.</p>
12
- <h4>Free walking and interaction</h4>
13
- <p>Unlike other car parking games, Car Parking Multiplayer lets you get out of your car and walk around freely. You can also interact with other objects and players in the game world. For example, you can use gas stations, car washes, repair shops, police stations, etc. You can also exchange cars with other players or invite them to your house.</p>
14
- <h3>Why download Car Parking Multiplayer mod apk 2022?</h3>
15
- <p>As much as Car Parking Multiplayer is fun and realistic, it also has some drawbacks that might affect your gaming experience. For instance, the game requires a lot of money and coins to unlock new cars and accessories. It also has ads that might interrupt your gameplay. Moreover, some features are only available for premium users who have to pay real money to access them. That's why many players opt for Car Parking Multiplayer mod apk 2022. This is a modified version of the game that gives you unlimited resources and features for free. Here are some of the benefits of downloading Car Parking Multiplayer mod apk 2022:</p>
16
- <h4>Unlimited money and coins</h4>
17
- <p>With this mod apk, you don't have to worry about running out of money or coins in the game. You can use them to buy any car or accessory you want without any limitations. You can also upgrade your car to the maximum level without spending a dime.</p>
18
- <h4>Unlock all cars and accessories</h4>
19
- <p>This mod apk also unlocks all the cars and accessories in the game, including the premium ones. You can access over 100 cars, from classic to modern, and customize them with various options. You can also use any sticker, neon light, spoiler, or other accessory you like without any restrictions.</p>
20
- <p>car parking multiplayer mod apk 2022 unlimited money<br />
21
- car parking multiplayer mod apk 2022 latest version<br />
22
- car parking multiplayer mod apk 2022 free download<br />
23
- car parking multiplayer mod apk 2022 android 1<br />
24
- car parking multiplayer mod apk 2022 all cars unlocked<br />
25
- car parking multiplayer mod apk 2022 ios<br />
26
- car parking multiplayer mod apk 2022 online<br />
27
- car parking multiplayer mod apk 2022 hack<br />
28
- car parking multiplayer mod apk 2022 no root<br />
29
- car parking multiplayer mod apk 2022 rexdl<br />
30
- car parking multiplayer mod apk 2022 revdl<br />
31
- car parking multiplayer mod apk 2022 an1<br />
32
- car parking multiplayer mod apk 2022 happymod<br />
33
- car parking multiplayer mod apk 2022 unlimited everything<br />
34
- car parking multiplayer mod apk 2022 obb<br />
35
- car parking multiplayer mod apk 2022 update<br />
36
- car parking multiplayer mod apk 2022 new cars<br />
37
- car parking multiplayer mod apk 2022 offline<br />
38
- car parking multiplayer mod apk 2022 mega<br />
39
- car parking multiplayer mod apk 2022 mediafıre<br />
40
- car parking multiplayer mod apk 2022 original<br />
41
- car parking multiplayer mod apk 2022 premium<br />
42
- car parking multiplayer mod apk 2022 pro<br />
43
- car parking multiplayer mod apk 2022 unlocked all features<br />
44
- car parking multiplayer mod apk 2022 vip<br />
45
- car parking multiplayer mod apk 2022 with cheats<br />
46
- car parking multiplayer mod apk 2022 youtube<br />
47
- how to download car parking multiplayer mod apk 2022<br />
48
- where to download car parking multiplayer mod apk 2022<br />
49
- best site to download car parking multiplayer mod apk 2022<br />
50
- download link for car parking multiplayer mod apk 2022<br />
51
- download and install car parking multiplayer mod apk 2022<br />
52
- download and play car parking multiplayer mod apk 2022<br />
53
- download and enjoy car parking multiplayer mod apk 2022<br />
54
- download and review car parking multiplayer mod apk 2022<br />
55
- download and share car parking multiplayer mod apk 2022<br />
56
- download and rate car parking multiplayer mod apk 2022<br />
57
- download and comment on car parking multiplayer mod apk 2022<br />
58
- download and subscribe to car parking multiplayer mod apk 2022<br />
59
- download and support car parking multiplayer mod apk 2022</p>
60
- <h4>No ads and no root required</h4>
61
- <p>Another advantage of this mod apk is that it removes all the ads from the game, so you can enjoy a smooth and uninterrupted gameplay. You also don't need to root your device to install this mod apk, as it works on any Android device without any issues.</p>
62
- <h2>How to download and install Car Parking Multiplayer mod apk 2022?</h2>
63
- <p>Now that you know the benefits of Car Parking Multiplayer mod apk 2022, you might be wondering how to download and install it on your device. Don't worry, it's very easy and simple. Just follow these steps:</p>
64
- <h3>Step 1: Download the mod apk file from a trusted source</h3>
65
- <p>The first thing you need to do is to download the mod apk file from a reliable and safe source. There are many websites that offer this mod apk, but not all of them are trustworthy. Some of them might contain viruses or malware that can harm your device or steal your data. Therefore, we recommend you to download the mod apk file from [this link], which is verified and tested by us.</p>
66
- <h3>Step 2: Enable unknown sources on your device</h3>
67
- <p>The next thing you need to do is to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than Google Play. To do this, go to your device settings, then security, then unknown sources, and turn it on. You might see a warning message, but don't worry, it's safe to proceed.</p>
68
- <h3>Step 3: Install the mod apk file and launch the game</h3>
69
- <p>The final thing you need to do is to install the mod apk file and launch the game. To do this, go to your file manager, then locate the downloaded mod apk file, and tap on it. You might see a pop-up asking for permissions, just allow them and wait for the installation to finish. Once it's done, you can open the game and enjoy it with unlimited resources and features.</p>
70
- <h3>Step 4: Enjoy the game with unlimited resources and features</h3>
71
- <p>Congratulations! You have successfully downloaded and installed Car Parking Multiplayer mod apk 2022 on your device. Now you can enjoy the game with unlimited money and coins, unlock all cars and accessories, remove all ads, and access all premium features for free. You can also join the online multiplayer mode and chat with other players, race with them, or create your own rules and challenges. Have fun!</p>
72
- <h2>Conclusion</h2>
73
- <p>Car Parking Multiplayer is one of the best car parking simulator games on Android, with realistic graphics, physics, and gameplay. It offers a variety of features and modes that make it more than just a parking game. However, if you want to enjoy the game without any limitations or restrictions, you should download Car Parking Multiplayer mod apk 2022. This is a modified version of the game that gives you unlimited resources and features for free. You can use them to buy any car or accessory you want, upgrade your car to the maximum level, remove all ads, and access all premium features. You can also join the online multiplayer mode and explore different locations, chat with other players, join races, or create your own rules and challenges.</p>
74
- <p>We hope this article was helpful for you. If you have any questions or feedback, feel free to leave a comment below. We would love to hear from you. Thank you for reading!</p>
75
- <h2>FAQs</h2>
- <p><b>Q: Is Car Parking Multiplayer mod apk 2022 safe to use?</b><br> A: Yes, Car Parking Multiplayer mod apk 2022 is safe to use as long as you download it from a trusted source like [this link]. It does not contain any viruses or malware that can harm your device or steal your data.</p>
- <p><b>Q: Do I need an internet connection to play Car Parking Multiplayer mod apk 2022?</b><br> A: No, you don't need an internet connection to play Car Parking Multiplayer mod apk 2022. You can play it offline without any problems. However, if you want to join the online multiplayer mode or update the game, you will need an internet connection.</p>
- <p><b>Q: How can I update Car Parking Multiplayer mod apk 2022?</b><br> A: To update Car Parking Multiplayer mod apk 2022, you will need to download the latest version of the mod apk file from [this link] and install it on your device. You will need to uninstall the previous version of the mod apk before installing the new one. You might also need to back up your game data before updating, as some updates might erase your progress.</p>
- <p><b>Q: Can I play Car Parking Multiplayer mod apk 2022 with my friends?</b><br> A: Yes, you can play Car Parking Multiplayer mod apk 2022 with your friends. You can join the online multiplayer mode and invite your friends to join you. You can also chat with them, race with them, or create your own rules and challenges.</p>
- <p><b>Q: What are the minimum requirements to play Car Parking Multiplayer mod apk 2022?</b><br> A: The minimum requirements are:</p>
- <ul>
- <li>Android version: 4.4 or higher</li>
- <li>RAM: 1 GB or more</li>
- <li>Storage: 300 MB or more</li>
- <li>Internet connection: optional</li>
- </ul>
- <p><b>Q: Where can I find more information about Car Parking Multiplayer mod apk 2022?</b><br> A: You can find more information about Car Parking Multiplayer mod apk 2022 on [this website], which is the official website of the mod apk. You can also check out [this YouTube channel], which is the official channel of the mod apk. You can also follow [this Facebook page], which is the official page of the mod apk.</p>
76
- <br />
77
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Z-Cron Scheduler The Ultimate Windows Task Automation Tool.md DELETED
@@ -1,214 +0,0 @@
1
- <br />
2
- <h1>How to Download and Use Z-Cron Scheduler for Windows</h1>
3
- <p>If you are looking for a powerful and easy-to-use task scheduler for Windows, you might want to check out Z-Cron Scheduler. This program allows you to automate various tasks on your computer, such as starting and stopping applications, copying and deleting files, switching devices on or off, and more. You can schedule tasks to run daily, weekly, monthly, once, or at regular intervals. You can also use Z-Cron Scheduler as a system service, which means it can run tasks even if no user is logged in.</p>
4
- <h2>download z-cron scheduler</h2><br /><p><b><b>Download File</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://jinyurl.com/2uNP7v">https://jinyurl.com/2uNP7v</a></b></p><br /><br />
5
- <p>In this article, we will show you how to download, install, and use Z-Cron Scheduler for Windows. We will also share some tips and tricks to help you get the most out of this program.</p>
6
- <h2>What is Z-Cron Scheduler?</h2>
7
- <p>Z-Cron Scheduler is a task scheduling program for Windows that is inspired by the Cron system from the GNU/Linux world. It was developed by Andreas Baumann and is available as a freeware version or a professional version with more features. You can use Z-Cron Scheduler to plan the execution of commands, programs, or scripts at specific times or intervals, so that recurring tasks on your PC are run automatically on schedule.</p>
8
- <h3>Features and Benefits of Z-Cron Scheduler</h3>
9
- <p>Some of the features and benefits of Z-Cron Scheduler are:</p>
10
- <ul>
11
- <li>It has an intuitive user interface with plenty of options.</li>
12
- <li>It supports more than 100 built-in tools that can perform various functions, such as backup, cleanup, FTP transfer, defragmentation, virus scan, etc.</li>
13
- <li>It can start and stop system services (professional version only).</li>
14
- <li>It can switch computers or devices in a network on or off (with compatible hardware).</li>
15
- <li>It can show popup windows with reminder messages or shut down the system.</li>
16
- <li>It can run tasks as a system service or as a normal application.</li>
17
- <li>It can exchange data between the service and the desktop.</li>
18
- <li>It has a web app that allows you to start tasks from your smartphone or tablet.</li>
19
- </ul>
20
- <h3>Supported Systems and Requirements</h3>
21
- <p>Z-Cron Scheduler supports the following systems:</p>
22
- <ul>
23
- <li>Windows 11, 10, 8, 7</li>
24
- <li>Windows Server 2022, 2019, 2016, 2012 (R2), 2008 (R2)</li>
25
- </ul>
26
- <p>The minimum requirements for Z-Cron Scheduler are:</p>
27
- <ul>
28
- <li>A Pentium processor or higher</li>
29
- <li>At least 256 MB of RAM</li>
30
- <li>At least 50 MB of free disk space</li>
31
- <li>A network connection (optional)</li>
32
- </ul>
33
- <h2>How to Download Z-Cron Scheduler</h2>
34
- <h3>Download from the Official Website</h3>
35
- <p>The easiest way to download Z-Cron Scheduler is from its official website. Here are the steps:</p>
36
- <ol>
37
- <li>Go to <a href="https://z-dbackup.de/en/z-cron-scheduler/">https://z-dbackup.de/en/z-cron-scheduler/</a>.</li>
38
- <li>Click on the "Freeware Download" button to download the freeware version or click on the "Buy Now" button to purchase the professional version.</li>
39
- <li>Save the setup file (ZCRON.EXE) to your computer (a scripted alternative is sketched just after these steps).</li>
40
- </ol>
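<p>If you prefer to script this download, a few lines of Python will fetch and save the installer. The direct file URL below is only a placeholder assumption - copy the real link from the "Freeware Download" button on the official page before running it.</p>
<pre><code>
import urllib.request

# Placeholder URL - replace it with the actual installer link taken from the
# "Freeware Download" button on https://z-dbackup.de/en/z-cron-scheduler/.
installer_url = "https://example.com/ZCRON.EXE"

# Download the setup file into the current folder.
urllib.request.urlretrieve(installer_url, "ZCRON.EXE")
print("Saved ZCRON.EXE to the current folder")
</code></pre>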
41
- <h3>Download from Alternative Sources</h3>
42
- <p>If you cannot access the official website or you want to download an older version of Z-Cron Scheduler, you can try some alternative sources. Here are some of them:</p>
43
- <p>How to download z-cron scheduler for Windows<br />
44
- Z-cron scheduler free download and installation guide<br />
45
- Download z-cron scheduler to automate your windows tasks<br />
46
- Z-cron scheduler download link and review<br />
47
- Z-cron scheduler features and benefits<br />
48
- Download z-cron scheduler for windows server 2022<br />
49
- Z-cron scheduler system service and data exchange<br />
50
- Z-cron scheduler tools and functions overview<br />
51
- Download z-cron scheduler for windows 11<br />
52
- Z-cron scheduler web app and remote control<br />
53
- Z-cron scheduler alternatives and comparisons<br />
54
- Download z-cron scheduler for windows 10<br />
55
- Z-cron scheduler backup and restore tasks<br />
56
- Z-cron scheduler license and pricing<br />
57
- Z-cron scheduler support and help<br />
58
- Download z-cron scheduler for windows 8<br />
59
- Z-cron scheduler task planning and scheduling<br />
60
- Z-cron scheduler FTP data transfer and synchronization<br />
61
- Z-cron scheduler update and upgrade<br />
62
- Z-cron scheduler tutorial and tips<br />
63
- Download z-cron scheduler for windows 7<br />
64
- Z-cron scheduler cron system and commands<br />
65
- Z-cron scheduler defragmentation and scan disk tasks<br />
66
- Z-cron scheduler virus scan and security tasks<br />
67
- Z-cron scheduler user interface and customization<br />
68
- Download z-cron scheduler for windows server 2019<br />
69
- Z-cron scheduler electrical device switching tasks<br />
70
- Z-cron scheduler internet/VPN connection tasks<br />
71
- Z-cron scheduler log file and error handling<br />
72
- Z-cron scheduler feedback and testimonials<br />
73
- Download z-cron scheduler for windows server 2016<br />
74
- Z-cron scheduler popup window and reminder tasks<br />
75
- Z-cron scheduler system shutdown and restart tasks<br />
76
- Z-cron scheduler file copy and delete tasks<br />
77
- Z-cron scheduler directory cleanup and zip tasks<br />
78
- Download z-cron scheduler for windows server 2012 (R2)<br />
79
- Z-cron scheduler network computer on/off tasks<br />
80
- Z-cron scheduler document and website loading tasks<br />
81
- Z-cron scheduler application start and stop tasks<br />
82
- Z-cron scheduler system service monitoring tasks (pro version)<br />
83
- Download z-cron scheduler for windows server 2008 (R2)<br />
84
- Z-cron scheduler daily, weekly, monthly, once, interval tasks<br />
85
- Z-Cron - Automate your windows tasks (official website)<br />
86
- How to uninstall z-cron scheduler from windows <br />
87
- Best practices for using z-cron scheduler <br />
88
- How to troubleshoot z-cron scheduler issues <br />
89
- How to import and export z-cron scheduler tasks</p>
90
- <ul>
91
- <li><a href="">https://www.softpedia.com/get/System/Launchers-Shutdown-Tools/Z-Cron.shtml</a></li>
92
- <li><a href="">https://www.majorgeeks.com/files/details/z_cron.html</a></li>
93
- <li><a href="">https://www.filehorse.com/download-z-cron/</a></li>
94
- </ul>
95
- <p>However, be careful when downloading from third-party websites, as they may contain malware or unwanted software. Always scan the downloaded files with a reliable antivirus program before installing them.</p>
96
- <h2>How to Install Z-Cron Scheduler</h2>
97
- <h3>Run the Setup File</h3>
98
- <p>After you have downloaded the setup file (ZCRON.EXE), you need to run it to start the installation process. Here are the steps:</p>
99
- <ol>
100
- <li>Double-click on the setup file or right-click on it and choose "Run as administrator".</li>
101
- <li>Click on "Yes" if a User Account Control prompt appears.</li>
102
- <li>Select your preferred language and click on "OK".</li>
103
- <li>Click on "Next" to continue.</li>
104
- </ol>
105
- <h3>Choose the Installation Options</h3>
106
- <p>The next step is to choose the installation options for Z-Cron Scheduler. Here are the steps:</p>
107
- <ol>
108
- <li>Read and accept the license agreement and click on "Next".</li>
109
- <li>Choose the destination folder for Z-Cron Scheduler and click on "Next".</li>
110
- <li>Select the components you want to install and click on "Next". You can choose between: <ul>
111
- <li>Z-Cron Service: This will install Z-Cron Scheduler as a system service that can run tasks even if no user is logged in.</li>
112
- <li>Z-Cron Desktop: This will install Z-Cron Scheduler as a normal application that can run tasks only if a user is logged in.</li>
113
- <li>Z-Cron Web App: This will install a web app that allows you to start tasks from your smartphone or tablet.</li>
114
- </ul></li>
115
- <li>Choose whether you want to create a desktop icon and a quick launch icon for Z-Cron Scheduler and click on "Next".</li>
116
- <li>Click on "Install" to begin the installation.</li>
117
- </ol>
118
- <h3>Start the Program or the Service</h3>
119
- <p>The final step is to start Z-Cron Scheduler either as a program or as a service. Here are the steps:</p>
120
- <ol>
121
- <li>Click on "Finish" to complete the installation.</li>
122
- <li>If you have installed Z-Cron Service, you need to start it manually from the Windows Services Manager or from the command line (see the sketch just after these steps). Alternatively, you can restart your computer to start it automatically.</li>
123
- <li>If you have installed Z-Cron Desktop, you can start it from the Start menu, the desktop icon, or the quick launch icon.</li>
124
- <li>If you have installed Z-Cron Web App, you can access it from your web browser by typing <a href="">http://localhost:8080/</a> or <a href="">http://your-ip-address:8080/</a>.</li>
125
- </ol>
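<p>For the command-line route mentioned in the service step above, the standard Windows service commands can be wrapped in a short Python sketch. It assumes the service is registered under the name "Z-Cron" - check the exact name in the Windows Services Manager (services.msc) after installation - and it must be run from an elevated (administrator) prompt.</p>
<pre><code>
import subprocess

# Assumed service name - verify it in services.msc after installing Z-Cron Service.
SERVICE_NAME = "Z-Cron"

# Show the current state of the service.
subprocess.run(["sc", "query", SERVICE_NAME], check=False)

# Start the service; this requires an elevated (administrator) prompt.
subprocess.run(["net", "start", SERVICE_NAME], check=False)
</code></pre>
<p>Replacing "start" with "stop" in the last call halts the service again.</p>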
126
- <h2>How to Use Z-Cron Scheduler</h2>
127
- <h3>Create a New Task</h3>
128
- <p>To create a new task in Z-Cron Scheduler, you need to follow these steps:</p>
129
- <ol>
130
- <li>Open Z-Cron Scheduler either as a program or as a service.</li>
131
- <li>Click on the "New Task" button in the toolbar or choose "New Task" from the "File" menu.</li>
132
- <li>A dialog box will appear where you can enter the details of your task, such as: <ul>
133
- <li>Name: The name of your task.</li>
134
- <li>Description: A brief description of your task.</li>
135
- <li>Type: The type of your task, such as command, program, script, etc.</li>
136
- <li>Data: The data for your task, such as command line, file name, parameters, etc.</li>
137
- <li>Schedule: The schedule for your task, such as daily, weekly, monthly, once, etc.</li>
138
- <li>Options: The options for your task, such as priority, log file, error handling, etc.</li></ul></li> <li>Click on "OK" to save your task.</li>
139
- </ol>
140
- <p>You can also use the built-in tools to create tasks more easily. To do this, click on the "Tools" button in the toolbar or choose "Tools" from the "File" menu. You will see a list of tools that you can use, such as backup, cleanup, FTP transfer, defragmentation, virus scan, etc. Select the tool you want and follow the instructions to create a task with it.</p>
141
- <h3>Edit or Delete a Task</h3>
- <p>To edit or delete a task in Z-Cron Scheduler, you need to follow these steps:</p>
- <ol>
- <li>Open Z-Cron Scheduler either as a program or as a service.</li>
- <li>Select the task you want to edit or delete from the task list.</li>
- <li>To edit the task, click on the "Edit Task" button in the toolbar or choose "Edit Task" from the "File" menu. A dialog box will appear where you can modify the details of your task. Click on "OK" to save your changes.</li>
- <li>To delete the task, click on the "Delete Task" button in the toolbar or choose "Delete Task" from the "File" menu. A confirmation message will appear. Click on "Yes" to confirm your deletion.</li>
- </ol>
- <h3>Manage and Monitor Tasks</h3>
- <p>To manage and monitor tasks in Z-Cron Scheduler, you need to follow these steps:</p>
- <ol>
- <li>Open Z-Cron Scheduler either as a program or as a service.</li>
- <li>To start or stop a task manually, select the task from the task list and click on the "Start Task" or "Stop Task" button in the toolbar or choose "Start Task" or "Stop Task" from the "Task" menu.</li>
- <li>To enable or disable a task, select the task from the task list and click on the "Enable Task" or "Disable Task" button in the toolbar or choose "Enable Task" or "Disable Task" from the "Task" menu.</li>
- <li>To view the status of a task, select the task from the task list and look at the icons and colors in the columns. You can see if a task is enabled, disabled, running, stopped, successful, failed, etc.</li>
- <li>To view the log file of a task, select the task from the task list and click on the "View Log File" button in the toolbar or choose "View Log File" from the "Task" menu. A window will open where you can see the details of each execution of your task.</li>
- </ol>
- <h2>Tips and Tricks for Z-Cron Scheduler</h2>
- <h3>Use the Built-in Tools</h3>
- <p>As mentioned before, Z-Cron Scheduler has more than 100 built-in tools that can perform various functions on your PC. You can use these tools to create tasks more easily and efficiently. Some of these tools are:</p>
- <table border="1">
- <tr><th>Tool</th><th>Description</th></tr>
- <tr><td>Z-Backup</td><td>This tool allows you to back up files and folders to another location or device.</td></tr>
- <tr><td>Z-Cleaner</td><td>This tool allows you to clean up your disk space by deleting temporary files, cache files, recycle bin files, etc.</td></tr>
- <tr><td>Z-FTP</td><td>This tool allows you to transfer files between your PC and an FTP server.</td></tr>
- <tr><td>Z-Defrag</td><td>This tool allows you to defragment your hard drive to improve its performance.</td></tr>
- <tr><td>Z-VirusScan</td><td>This tool allows you to scan your PC for viruses and malware using an external antivirus program.</td></tr>
- <tr><td>Z-Email</td><td>This tool allows you to send emails with attachments using an SMTP server.</td></tr>
- <tr><td>Z-Print</td><td>This tool allows you to print documents using a printer connected to your PC or network.</td></tr>
- <tr><td>Z-Sound</td><td>This tool allows you to play sound files using your PC's speakers or headphones.</td></tr>
- <tr><td>Z-Message</td><td>This tool allows you to show popup windows with messages on your PC's screen.</td></tr>
- <tr><td>Z-Shutdown</td><td>This tool allows you to shut down, restart, log off, or lock your PC.</td></tr>
- </table>
- <h3>Use the Web App for Remote Control</h3>
- <p>If you have installed Z-Cron Web App, you can use it to start tasks from your smartphone or tablet. This is useful if you want to control your PC remotely without having to access it directly. To use the web app, follow these steps:</p>
- <ol>
- <li>Make sure your PC and your smartphone or tablet are connected to the same network.</li>
- <li>Open the web browser on your smartphone or tablet and go to <a href="http://your-ip-address:8080/">http://your-ip-address:8080/</a>, replacing "your-ip-address" with the IP address of the PC running Z-Cron (note that http://localhost:8080/ only works in a browser on that PC itself).</li>
- <li>You will see a list of tasks that you have created in Z-Cron Scheduler on your PC.</li>
- <li>To start a task, tap on the "Start" button next to the task name.</li>
- <li>To stop a task, tap on the "Stop" button next to the task name.</li>
- <li>To refresh the list of tasks, tap on the "Refresh" button at the top of the screen.</li>
- </ol>
- <h3>Backup and Restore Tasks</h3>
- <p>If you want to back up and restore your tasks in Z-Cron Scheduler, you can use the built-in backup tool. This is useful if you want to transfer your tasks to another PC or if you want to recover your tasks in case of a system failure. To back up and restore your tasks, you need to follow these steps:</p>
- <ol>
- <li>Open Z-Cron Scheduler either as a program or as a service.</li>
- <li>Click on the "Tools" button in the toolbar or choose "Tools" from the "File" menu.</li>
- <li>Select "Backup Tasks" from the list of tools.</li>
- <li>A dialog box will appear where you can choose the destination folder for your backup file and the name of your backup file.</li>
- <li>Click on "OK" to start the backup process.</li>
- <li>To restore your tasks, click on the "Tools" button in the toolbar or choose "Tools" from the "File" menu.</li>
- <li>Select "Restore Tasks" from the list of tools.</li>
- <li>A dialog box will appear where you can choose the source folder for your backup file and the name of your backup file.</li>
- <li>Click on "OK" to start the restore process.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Z-Cron Scheduler is a powerful and easy-to-use task scheduler for Windows that can help you automate various tasks on your PC. You can download it from its official website or from alternative sources, install it as a program or as a service, and use it to create, edit, delete, manage, and monitor tasks. You can also use its built-in tools, web app, and backup tool to enhance its functionality and convenience. Z-Cron Scheduler is a great tool for anyone who wants to save time and effort by automating their PC tasks.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Z-Cron Scheduler:</p>
- <ul>
- <li><b>Q: Is Z-Cron Scheduler free?</b></li>
- <li>A: Z-Cron Scheduler has a freeware version and a professional version. The freeware version has most of the features of the professional version, except for some advanced ones such as starting and stopping system services. The professional version costs $29.95 for a single license or $49.95 for a site license.</li>
- <li><b>Q: How can I contact the developer of Z-Cron Scheduler?</b></li>
- <li>A: You can contact Andreas Baumann, the developer of Z-Cron Scheduler, by email at <a href="mailto:[email protected]">[email protected]</a> or by phone at +49 89 189 47 41 67. You can also visit his website at <a href="https://z-dbackup.de/en/">https://z-dbackup.de/en/</a>.</li>
- <li><b>Q: How can I update Z-Cron Scheduler?</b></li>
- <li>A: You can update Z-Cron Scheduler by downloading and installing the latest version from its official website or from alternative sources. You do not need to uninstall the previous version before installing the new one.</li>
- <li><b>Q: How can I uninstall Z-Cron Scheduler?</b></li>
- <li>A: You can uninstall Z-Cron Scheduler by using the Windows Add/Remove Programs feature or by using its own uninstaller. To use its own uninstaller, go to the Start menu, choose "All Programs", find "Z-Cron", and click on "Uninstall". Follow the instructions to complete the uninstallation process.</li>
- <li><b>Q: How can I get more help with Z-Cron Scheduler?</b></li>
- <li>A: You can get more help with Z-Cron Scheduler by reading its online manual at <a href="https://z-dbackup.de/en/z-cron-scheduler/manual/">https://z-dbackup.de/en/z-cron-scheduler/manual/</a>, by visiting its online forum at <a href="https://z-dbackup.de/en/z-cron-scheduler/forum/">https://z-dbackup.de/en/z-cron-scheduler/forum/</a>, or by contacting the developer by email or phone.</li>
- </ul>
 
spaces/A00001/bingothoo/src/components/chat-header.tsx DELETED
@@ -1,12 +0,0 @@
1
- import LogoIcon from '@/assets/images/logo.svg'
2
- import Image from 'next/image'
3
-
4
- export function ChatHeader() {
5
- return (
6
- <div className="flex flex-col items-center justify-center">
7
- <Image alt="logo" src={LogoIcon} width={60}/>
8
- <div className="mt-8 text-4xl font-bold">欢迎使用新必应</div>
9
- <div className="mt-4 mb-8 text-lg">由 AI 支持的网页版 Copilot</div>
10
- </div>
11
- )
12
- }
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/utils/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .utils import * # NOQA
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/ckpt_utils.py DELETED
@@ -1,68 +0,0 @@
1
- import glob
2
- import logging
3
- import os
4
- import re
5
- import torch
6
-
7
-
8
- def get_last_checkpoint(work_dir, steps=None):
9
- checkpoint = None
10
- last_ckpt_path = None
11
- ckpt_paths = get_all_ckpts(work_dir, steps)
12
- if len(ckpt_paths) > 0:
13
- last_ckpt_path = ckpt_paths[0]
14
- checkpoint = torch.load(last_ckpt_path, map_location='cpu')
15
- logging.info(f'load module from checkpoint: {last_ckpt_path}')
16
- return checkpoint, last_ckpt_path
17
-
18
-
19
- def get_all_ckpts(work_dir, steps=None):
20
- if steps is None:
21
- ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_*.ckpt'
22
- else:
23
- ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_{steps}.ckpt'
24
- return sorted(glob.glob(ckpt_path_pattern),
25
- key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0]))
26
-
27
-
28
- def load_ckpt(cur_model, ckpt_base_dir, model_name='model', force=True, strict=True):
29
- if os.path.isfile(ckpt_base_dir):
30
- base_dir = os.path.dirname(ckpt_base_dir)
31
- ckpt_path = ckpt_base_dir
32
- checkpoint = torch.load(ckpt_base_dir, map_location='cpu')
33
- else:
34
- base_dir = ckpt_base_dir
35
- checkpoint, ckpt_path = get_last_checkpoint(ckpt_base_dir)
36
- if checkpoint is not None:
37
- state_dict = checkpoint["state_dict"]
38
- if len([k for k in state_dict.keys() if '.' in k]) > 0:
39
- state_dict = {k[len(model_name) + 1:]: v for k, v in state_dict.items()
40
- if k.startswith(f'{model_name}.')}
41
- else:
42
- if '.' not in model_name:
43
- state_dict = state_dict[model_name]
44
- else:
45
- base_model_name = model_name.split('.')[0]
46
- rest_model_name = model_name[len(base_model_name) + 1:]
47
- state_dict = {
48
- k[len(rest_model_name) + 1:]: v for k, v in state_dict[base_model_name].items()
49
- if k.startswith(f'{rest_model_name}.')}
50
- if not strict:
51
- cur_model_state_dict = cur_model.state_dict()
52
- unmatched_keys = []
53
- for key, param in state_dict.items():
54
- if key in cur_model_state_dict:
55
- new_param = cur_model_state_dict[key]
56
- if new_param.shape != param.shape:
57
- unmatched_keys.append(key)
58
- print("| Unmatched keys: ", key, new_param.shape, param.shape)
59
- for key in unmatched_keys:
60
- del state_dict[key]
61
- cur_model.load_state_dict(state_dict, strict=strict)
62
- print(f"| load '{model_name}' from '{ckpt_path}'.")
63
- else:
64
- e_msg = f"| ckpt not found in {base_dir}."
65
- if force:
66
- assert False, e_msg
67
- else:
68
- print(e_msg)
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/plot.py DELETED
@@ -1,56 +0,0 @@
1
- import matplotlib.pyplot as plt
2
- import numpy as np
3
- import torch
4
-
5
- LINE_COLORS = ['w', 'r', 'y', 'cyan', 'm', 'b', 'lime']
6
-
7
-
8
- def spec_to_figure(spec, vmin=None, vmax=None):
9
- if isinstance(spec, torch.Tensor):
10
- spec = spec.cpu().numpy()
11
- fig = plt.figure(figsize=(12, 6))
12
- plt.pcolor(spec.T, vmin=vmin, vmax=vmax)
13
- return fig
14
-
15
-
16
- def spec_f0_to_figure(spec, f0s, figsize=None):
17
- max_y = spec.shape[1]
18
- if isinstance(spec, torch.Tensor):
19
- spec = spec.detach().cpu().numpy()
20
- f0s = {k: f0.detach().cpu().numpy() for k, f0 in f0s.items()}
21
- f0s = {k: f0 / 10 for k, f0 in f0s.items()}
22
- fig = plt.figure(figsize=(12, 6) if figsize is None else figsize)
23
- plt.pcolor(spec.T)
24
- for i, (k, f0) in enumerate(f0s.items()):
25
- plt.plot(f0.clip(0, max_y), label=k, c=LINE_COLORS[i], linewidth=1, alpha=0.8)
26
- plt.legend()
27
- return fig
28
-
29
-
30
- def dur_to_figure(dur_gt, dur_pred, txt):
31
- dur_gt = dur_gt.long().cpu().numpy()
32
- dur_pred = dur_pred.long().cpu().numpy()
33
- dur_gt = np.cumsum(dur_gt)
34
- dur_pred = np.cumsum(dur_pred)
35
- fig = plt.figure(figsize=(12, 6))
36
- for i in range(len(dur_gt)):
37
- shift = (i % 8) + 1
38
- plt.text(dur_gt[i], shift, txt[i])
39
- plt.text(dur_pred[i], 10 + shift, txt[i])
40
- plt.vlines(dur_gt[i], 0, 10, colors='b') # blue is gt
41
- plt.vlines(dur_pred[i], 10, 20, colors='r') # red is pred
42
- return fig
43
-
44
-
45
- def f0_to_figure(f0_gt, f0_cwt=None, f0_pred=None):
46
- fig = plt.figure()
47
- f0_gt = f0_gt.cpu().numpy()
48
- plt.plot(f0_gt, color='r', label='gt')
49
- if f0_cwt is not None:
50
- f0_cwt = f0_cwt.cpu().numpy()
51
- plt.plot(f0_cwt, color='b', label='cwt')
52
- if f0_pred is not None:
53
- f0_pred = f0_pred.cpu().numpy()
54
- plt.plot(f0_pred, color='green', label='pred')
55
- plt.legend()
56
- return fig
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/lr_scheduler.py DELETED
@@ -1,98 +0,0 @@
1
- import numpy as np
2
-
3
-
4
- class LambdaWarmUpCosineScheduler:
5
- """
6
- note: use with a base_lr of 1.0
7
- """
8
- def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
9
- self.lr_warm_up_steps = warm_up_steps
10
- self.lr_start = lr_start
11
- self.lr_min = lr_min
12
- self.lr_max = lr_max
13
- self.lr_max_decay_steps = max_decay_steps
14
- self.last_lr = 0.
15
- self.verbosity_interval = verbosity_interval
16
-
17
- def schedule(self, n, **kwargs):
18
- if self.verbosity_interval > 0:
19
- if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
20
- if n < self.lr_warm_up_steps:
21
- lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
22
- self.last_lr = lr
23
- return lr
24
- else:
25
- t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
26
- t = min(t, 1.0)
27
- lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
28
- 1 + np.cos(t * np.pi))
29
- self.last_lr = lr
30
- return lr
31
-
32
- def __call__(self, n, **kwargs):
33
- return self.schedule(n,**kwargs)
34
-
35
-
36
- class LambdaWarmUpCosineScheduler2:
37
- """
38
- supports repeated iterations, configurable via lists
39
- note: use with a base_lr of 1.0.
40
- """
41
- def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
42
- assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
43
- self.lr_warm_up_steps = warm_up_steps
44
- self.f_start = f_start
45
- self.f_min = f_min
46
- self.f_max = f_max
47
- self.cycle_lengths = cycle_lengths
48
- self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
49
- self.last_f = 0.
50
- self.verbosity_interval = verbosity_interval
51
-
52
- def find_in_interval(self, n):
53
- interval = 0
54
- for cl in self.cum_cycles[1:]:
55
- if n <= cl:
56
- return interval
57
- interval += 1
58
-
59
- def schedule(self, n, **kwargs):
60
- cycle = self.find_in_interval(n)
61
- n = n - self.cum_cycles[cycle]
62
- if self.verbosity_interval > 0:
63
- if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
64
- f"current cycle {cycle}")
65
- if n < self.lr_warm_up_steps[cycle]:
66
- f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
67
- self.last_f = f
68
- return f
69
- else:
70
- t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
71
- t = min(t, 1.0)
72
- f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
73
- 1 + np.cos(t * np.pi))
74
- self.last_f = f
75
- return f
76
-
77
- def __call__(self, n, **kwargs):
78
- return self.schedule(n, **kwargs)
79
-
80
-
81
- class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
82
-
83
- def schedule(self, n, **kwargs):
84
- cycle = self.find_in_interval(n)
85
- n = n - self.cum_cycles[cycle]
86
- if self.verbosity_interval > 0:
87
- if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
88
- f"current cycle {cycle}")
89
-
90
- if n < self.lr_warm_up_steps[cycle]:
91
- f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
92
- self.last_f = f
93
- return f
94
- else:
95
- f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
96
- self.last_f = f
97
- return f
98
-
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_x_syncbn_fast_8x16b-300e_coco.py DELETED
@@ -1,15 +0,0 @@
1
- _base_ = './yolov7_l_syncbn_fast_8x16b-300e_coco.py'
2
-
3
- model = dict(
4
- backbone=dict(arch='X'),
5
- neck=dict(
6
- in_channels=[640, 1280, 1280],
7
- out_channels=[160, 320, 640],
8
- block_cfg=dict(
9
- type='ELANBlock',
10
- middle_ratio=0.4,
11
- block_ratio=0.4,
12
- num_blocks=3,
13
- num_convs_in_block=2),
14
- use_repconv_outs=False),
15
- bbox_head=dict(head_module=dict(in_channels=[320, 640, 1280])))
 
spaces/Aaajdhdhdhahdbbaabs/Hshdhdhd/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: Hshdhdhd
3
- emoji: 📊
4
- colorFrom: indigo
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Aditya9790/yolo7-object-tracking/export.py DELETED
@@ -1,205 +0,0 @@
1
- import argparse
2
- import sys
3
- import time
4
- import warnings
5
-
6
- sys.path.append('./') # to run '$ python *.py' files in subdirectories
7
-
8
- import torch
9
- import torch.nn as nn
10
- from torch.utils.mobile_optimizer import optimize_for_mobile
11
-
12
- import models
13
- from models.experimental import attempt_load, End2End
14
- from utils.activations import Hardswish, SiLU
15
- from utils.general import set_logging, check_img_size
16
- from utils.torch_utils import select_device
17
- from utils.add_nms import RegisterNMS
18
-
19
- if __name__ == '__main__':
20
- parser = argparse.ArgumentParser()
21
- parser.add_argument('--weights', type=str, default='./yolor-csp-c.pt', help='weights path')
22
- parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width
23
- parser.add_argument('--batch-size', type=int, default=1, help='batch size')
24
- parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
25
- parser.add_argument('--dynamic-batch', action='store_true', help='dynamic batch onnx for tensorrt and onnx-runtime')
26
- parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
27
- parser.add_argument('--end2end', action='store_true', help='export end2end onnx')
28
- parser.add_argument('--max-wh', type=int, default=None, help='None for tensorrt nms, int value for onnx-runtime nms')
29
- parser.add_argument('--topk-all', type=int, default=100, help='topk objects for every images')
30
- parser.add_argument('--iou-thres', type=float, default=0.45, help='iou threshold for NMS')
31
- parser.add_argument('--conf-thres', type=float, default=0.25, help='conf threshold for NMS')
32
- parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
33
- parser.add_argument('--simplify', action='store_true', help='simplify onnx model')
34
- parser.add_argument('--include-nms', action='store_true', help='export end2end onnx')
35
- parser.add_argument('--fp16', action='store_true', help='CoreML FP16 half-precision export')
36
- parser.add_argument('--int8', action='store_true', help='CoreML INT8 quantization')
37
- opt = parser.parse_args()
38
- opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
39
- opt.dynamic = opt.dynamic and not opt.end2end
40
- opt.dynamic = False if opt.dynamic_batch else opt.dynamic
41
- print(opt)
42
- set_logging()
43
- t = time.time()
44
-
45
- # Load PyTorch model
46
- device = select_device(opt.device)
47
- model = attempt_load(opt.weights, map_location=device) # load FP32 model
48
- labels = model.names
49
-
50
- # Checks
51
- gs = int(max(model.stride)) # grid size (max stride)
52
- opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples
53
-
54
- # Input
55
- img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection
56
-
57
- # Update model
58
- for k, m in model.named_modules():
59
- m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
60
- if isinstance(m, models.common.Conv): # assign export-friendly activations
61
- if isinstance(m.act, nn.Hardswish):
62
- m.act = Hardswish()
63
- elif isinstance(m.act, nn.SiLU):
64
- m.act = SiLU()
65
- # elif isinstance(m, models.yolo.Detect):
66
- # m.forward = m.forward_export # assign forward (optional)
67
- model.model[-1].export = not opt.grid # set Detect() layer grid export
68
- y = model(img) # dry run
69
- if opt.include_nms:
70
- model.model[-1].include_nms = True
71
- y = None
72
-
73
- # TorchScript export
74
- try:
75
- print('\nStarting TorchScript export with torch %s...' % torch.__version__)
76
- f = opt.weights.replace('.pt', '.torchscript.pt') # filename
77
- ts = torch.jit.trace(model, img, strict=False)
78
- ts.save(f)
79
- print('TorchScript export success, saved as %s' % f)
80
- except Exception as e:
81
- print('TorchScript export failure: %s' % e)
82
-
83
- # CoreML export
84
- try:
85
- import coremltools as ct
86
-
87
- print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
88
- # convert model from torchscript and apply pixel scaling as per detect.py
89
- ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
90
- bits, mode = (8, 'kmeans_lut') if opt.int8 else (16, 'linear') if opt.fp16 else (32, None)
91
- if bits < 32:
92
- if sys.platform.lower() == 'darwin': # quantization only supported on macOS
93
- with warnings.catch_warnings():
94
- warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning
95
- ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
96
- else:
97
- print('quantization only supported on macOS, skipping...')
98
-
99
- f = opt.weights.replace('.pt', '.mlmodel') # filename
100
- ct_model.save(f)
101
- print('CoreML export success, saved as %s' % f)
102
- except Exception as e:
103
- print('CoreML export failure: %s' % e)
104
-
105
- # TorchScript-Lite export
106
- try:
107
- print('\nStarting TorchScript-Lite export with torch %s...' % torch.__version__)
108
- f = opt.weights.replace('.pt', '.torchscript.ptl') # filename
109
- tsl = torch.jit.trace(model, img, strict=False)
110
- tsl = optimize_for_mobile(tsl)
111
- tsl._save_for_lite_interpreter(f)
112
- print('TorchScript-Lite export success, saved as %s' % f)
113
- except Exception as e:
114
- print('TorchScript-Lite export failure: %s' % e)
115
-
116
- # ONNX export
117
- try:
118
- import onnx
119
-
120
- print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
121
- f = opt.weights.replace('.pt', '.onnx') # filename
122
- model.eval()
123
- output_names = ['classes', 'boxes'] if y is None else ['output']
124
- dynamic_axes = None
125
- if opt.dynamic:
126
- dynamic_axes = {'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640)
127
- 'output': {0: 'batch', 2: 'y', 3: 'x'}}
128
- if opt.dynamic_batch:
129
- opt.batch_size = 'batch'
130
- dynamic_axes = {
131
- 'images': {
132
- 0: 'batch',
133
- }, }
134
- if opt.end2end and opt.max_wh is None:
135
- output_axes = {
136
- 'num_dets': {0: 'batch'},
137
- 'det_boxes': {0: 'batch'},
138
- 'det_scores': {0: 'batch'},
139
- 'det_classes': {0: 'batch'},
140
- }
141
- else:
142
- output_axes = {
143
- 'output': {0: 'batch'},
144
- }
145
- dynamic_axes.update(output_axes)
146
- if opt.grid:
147
- if opt.end2end:
148
- print('\nStarting export end2end onnx model for %s...' % 'TensorRT' if opt.max_wh is None else 'onnxruntime')
149
- model = End2End(model,opt.topk_all,opt.iou_thres,opt.conf_thres,opt.max_wh,device,len(labels))
150
- if opt.end2end and opt.max_wh is None:
151
- output_names = ['num_dets', 'det_boxes', 'det_scores', 'det_classes']
152
- shapes = [opt.batch_size, 1, opt.batch_size, opt.topk_all, 4,
153
- opt.batch_size, opt.topk_all, opt.batch_size, opt.topk_all]
154
- else:
155
- output_names = ['output']
156
- else:
157
- model.model[-1].concat = True
158
-
159
- torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
160
- output_names=output_names,
161
- dynamic_axes=dynamic_axes)
162
-
163
- # Checks
164
- onnx_model = onnx.load(f) # load onnx model
165
- onnx.checker.check_model(onnx_model) # check onnx model
166
-
167
- if opt.end2end and opt.max_wh is None:
168
- for i in onnx_model.graph.output:
169
- for j in i.type.tensor_type.shape.dim:
170
- j.dim_param = str(shapes.pop(0))
171
-
172
- # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
173
-
174
- # # Metadata
175
- # d = {'stride': int(max(model.stride))}
176
- # for k, v in d.items():
177
- # meta = onnx_model.metadata_props.add()
178
- # meta.key, meta.value = k, str(v)
179
- # onnx.save(onnx_model, f)
180
-
181
- if opt.simplify:
182
- try:
183
- import onnxsim
184
-
185
- print('\nStarting to simplify ONNX...')
186
- onnx_model, check = onnxsim.simplify(onnx_model)
187
- assert check, 'assert check failed'
188
- except Exception as e:
189
- print(f'Simplifier failure: {e}')
190
-
191
- # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
192
- onnx.save(onnx_model,f)
193
- print('ONNX export success, saved as %s' % f)
194
-
195
- if opt.include_nms:
196
- print('Registering NMS plugin for ONNX...')
197
- mo = RegisterNMS(f)
198
- mo.register_nms()
199
- mo.save(f)
200
-
201
- except Exception as e:
202
- print('ONNX export failure: %s' % e)
203
-
204
- # Finish
205
- print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import GridSizer from './GridSizer.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('gridSizer', function (x, y, minWidth, minHeight, columnCount, rowCount, columnProportions, rowProportion, config) {
6
- var gameObject = new GridSizer(this.scene, x, y, minWidth, minHeight, columnCount, rowCount, columnProportions, rowProportion, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.GridSizer', GridSizer);
12
-
13
- export default GridSizer;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/holygrail/methods/LayoutMode0.js DELETED
@@ -1,58 +0,0 @@
1
- /*
2
- Elements:
3
- ```
4
- HHH
5
- LCR
6
- FFF
7
- ```
8
- */
9
-
10
- import {
11
- GetAddHeaderConfig,
12
- GetAddLeftSideConfig, GetAddContentConfig, GetAddRightSideConfig,
13
- GetAddFooterConfig,
14
- GetAddContainerConfig
15
- } from './GetAddChildConfig.js';
16
- import CreatExpandContainer from './CreatExpandContainer.js';
17
-
18
- var LayoutMode0 = function (config) {
19
- var scene = this.scene;
20
-
21
- // Add Header
22
- var header = config.header;
23
- if (header) {
24
- this.add(header, GetAddHeaderConfig(config));
25
- }
26
-
27
- /*
28
- L C R
29
- */
30
- var bodySizer = CreatExpandContainer(scene, 0);
31
- this.add(bodySizer, GetAddContainerConfig(config));
32
-
33
- // Add Left-side
34
- var leftSide = config.leftSide;
35
- if (leftSide) {
36
- bodySizer.add(leftSide, GetAddLeftSideConfig(config));
37
- }
38
-
39
- // Add content
40
- var content = config.content;
41
- if (content) {
42
- bodySizer.add(content, GetAddContentConfig(config));
43
- }
44
-
45
- // Add Right-side
46
- var rightSide = config.rightSide;
47
- if (rightSide) {
48
- bodySizer.add(rightSide, GetAddRightSideConfig(config));
49
- }
50
-
51
- // Add Footer
52
- var footer = config.footer;
53
- if (footer) {
54
- this.add(footer, GetAddFooterConfig(config));
55
- }
56
- }
57
-
58
- export default LayoutMode0;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectanglecanvas/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import RoundRectangleCanvas from './RoundRectangleCanvas.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('roundRectangleCanvas', function (x, y, width, height, radius, fillStyle, strokeStyle, lineWidth, fillColor2, isHorizontalGradient) {
6
- var gameObject = new RoundRectangleCanvas(this.scene, x, y, width, height, radius, fillStyle, strokeStyle, lineWidth, fillColor2, isHorizontalGradient);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.RoundRectangleCanvas', RoundRectangleCanvas);
12
-
13
- export default RoundRectangleCanvas;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/GetChildrenHeight.js DELETED
@@ -1,24 +0,0 @@
1
- import { GetDisplayHeight } from '../../../../plugins/utils/size/GetDisplaySize.js';
2
-
3
- var GetChildrenHeight = function () {
4
- if (this.rexSizer.hidden) {
5
- return 0;
6
- }
7
-
8
- var result;
9
- var child = this.child,
10
- childConfig = child.rexSizer;
11
- if (childConfig.hidden) {
12
- result = 0;
13
- } else if (this.scrollMode === 0) { // scroll y
14
- result = 0;
15
- } else { // scroll x
16
- result = (child.isRexSizer) ?
17
- Math.max(child.minHeight, child.childrenHeight) :
18
- (child.hasOwnProperty('minHeight')) ? child.minHeight : GetDisplayHeight(child);
19
- }
20
-
21
- return result;
22
- }
23
-
24
- export default GetChildrenHeight;
 
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/body.py DELETED
@@ -1,243 +0,0 @@
1
- import cv2
2
- import numpy as np
3
- import math
4
- import time
5
- from scipy.ndimage.filters import gaussian_filter
6
- import matplotlib.pyplot as plt
7
- import matplotlib
8
- import torch
9
- from torchvision import transforms
10
-
11
- from openpose.src import util
12
- from openpose.src.model import bodypose_model
13
-
14
-
15
- class Body(object):
16
- def __init__(self, model_path):
17
- self.model = bodypose_model()
18
- if torch.cuda.is_available():
19
- self.model = self.model.cuda()
20
- model_dict = util.transfer(self.model, torch.load(model_path))
21
- self.model.load_state_dict(model_dict)
22
- self.model.eval()
23
-
24
- def __call__(self, oriImg):
25
- # scale_search = [0.5, 1.0, 1.5, 2.0]
26
- scale_search = [0.5]
27
- boxsize = 368
28
- stride = 8
29
- padValue = 128
30
- thre1 = 0.1
31
- thre2 = 0.05
32
- multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
33
- heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
34
- paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
35
-
36
- for m in range(len(multiplier)):
37
- scale = multiplier[m]
38
- imageToTest = cv2.resize(
39
- oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
40
- imageToTest_padded, pad = util.padRightDownCorner(
41
- imageToTest, stride, padValue)
42
- im = np.transpose(np.float32(
43
- imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
44
- im = np.ascontiguousarray(im)
45
-
46
- data = torch.from_numpy(im).float()
47
- if torch.cuda.is_available():
48
- data = data.cuda()
49
- # data = data.permute([2, 0, 1]).unsqueeze(0).float()
50
- with torch.no_grad():
51
- Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
52
- Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
53
- Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
54
-
55
- # extract outputs, resize, and remove padding
56
- # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
57
- # output 1 is heatmaps
58
- heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))
59
- heatmap = cv2.resize(heatmap, (0, 0), fx=stride,
60
- fy=stride, interpolation=cv2.INTER_CUBIC)
61
- heatmap = heatmap[:imageToTest_padded.shape[0] -
62
- pad[2], :imageToTest_padded.shape[1] - pad[3], :]
63
- heatmap = cv2.resize(
64
- heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
65
-
66
- # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
67
- paf = np.transpose(np.squeeze(Mconv7_stage6_L1),
68
- (1, 2, 0)) # output 0 is PAFs
69
- paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride,
70
- interpolation=cv2.INTER_CUBIC)
71
- paf = paf[:imageToTest_padded.shape[0] - pad[2],
72
- :imageToTest_padded.shape[1] - pad[3], :]
73
- paf = cv2.resize(
74
- paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
75
-
76
- heatmap_avg += heatmap_avg + heatmap / len(multiplier)
77
- paf_avg += + paf / len(multiplier)
78
- all_peaks = []
79
- peak_counter = 0
80
-
81
- for part in range(18):
82
- map_ori = heatmap_avg[:, :, part]
83
- one_heatmap = gaussian_filter(map_ori, sigma=3)
84
-
85
- map_left = np.zeros(one_heatmap.shape)
86
- map_left[1:, :] = one_heatmap[:-1, :]
87
- map_right = np.zeros(one_heatmap.shape)
88
- map_right[:-1, :] = one_heatmap[1:, :]
89
- map_up = np.zeros(one_heatmap.shape)
90
- map_up[:, 1:] = one_heatmap[:, :-1]
91
- map_down = np.zeros(one_heatmap.shape)
92
- map_down[:, :-1] = one_heatmap[:, 1:]
93
-
94
- peaks_binary = np.logical_and.reduce(
95
- (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
96
- peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(
97
- peaks_binary)[0])) # note reverse
98
- peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
99
- peak_id = range(peak_counter, peak_counter + len(peaks))
100
- peaks_with_score_and_id = [
101
- peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
102
-
103
- all_peaks.append(peaks_with_score_and_id)
104
- peak_counter += len(peaks)
105
-
106
- # find connection in the specified sequence, center 29 is in the position 15
107
- limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
108
- [10, 11], [2, 12], [12, 13], [
109
- 13, 14], [2, 1], [1, 15], [15, 17],
110
- [1, 16], [16, 18], [3, 17], [6, 18]]
111
- # the middle joints heatmap correpondence
112
- mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22],
113
- [23, 24], [25, 26], [27, 28], [29, 30], [
114
- 47, 48], [49, 50], [53, 54], [51, 52],
115
- [55, 56], [37, 38], [45, 46]]
116
-
117
- connection_all = []
118
- special_k = []
119
- mid_num = 10
120
-
121
- for k in range(len(mapIdx)):
122
- score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
123
- candA = all_peaks[limbSeq[k][0] - 1]
124
- candB = all_peaks[limbSeq[k][1] - 1]
125
- nA = len(candA)
126
- nB = len(candB)
127
- indexA, indexB = limbSeq[k]
128
- if (nA != 0 and nB != 0):
129
- connection_candidate = []
130
- for i in range(nA):
131
- for j in range(nB):
132
- vec = np.subtract(candB[j][:2], candA[i][:2])
133
- norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
134
- norm = max(0.001, norm)
135
- vec = np.divide(vec, norm)
136
-
137
- startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
138
- np.linspace(candA[i][1], candB[j][1], num=mid_num)))
139
-
140
- vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0]
141
- for I in range(len(startend))])
142
- vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1]
143
- for I in range(len(startend))])
144
-
145
- score_midpts = np.multiply(
146
- vec_x, vec[0]) + np.multiply(vec_y, vec[1])
147
- score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
148
- 0.5 * oriImg.shape[0] / norm - 1, 0)
149
- criterion1 = len(np.nonzero(score_midpts > thre2)[
150
- 0]) > 0.8 * len(score_midpts)
151
- criterion2 = score_with_dist_prior > 0
152
- if criterion1 and criterion2:
153
- connection_candidate.append(
154
- [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
155
-
156
- connection_candidate = sorted(
157
- connection_candidate, key=lambda x: x[2], reverse=True)
158
- connection = np.zeros((0, 5))
159
- for c in range(len(connection_candidate)):
160
- i, j, s = connection_candidate[c][0:3]
161
- if (i not in connection[:, 3] and j not in connection[:, 4]):
162
- connection = np.vstack(
163
- [connection, [candA[i][3], candB[j][3], s, i, j]])
164
- if (len(connection) >= min(nA, nB)):
165
- break
166
-
167
- connection_all.append(connection)
168
- else:
169
- special_k.append(k)
170
- connection_all.append([])
171
-
172
- # last number in each row is the total parts number of that person
173
- # the second last number in each row is the score of the overall configuration
174
- subset = -1 * np.ones((0, 20))
175
- candidate = np.array(
176
- [item for sublist in all_peaks for item in sublist])
177
-
178
- for k in range(len(mapIdx)):
179
- if k not in special_k:
180
- partAs = connection_all[k][:, 0]
181
- partBs = connection_all[k][:, 1]
182
- indexA, indexB = np.array(limbSeq[k]) - 1
183
-
184
- for i in range(len(connection_all[k])): # = 1:size(temp,1)
185
- found = 0
186
- subset_idx = [-1, -1]
187
- for j in range(len(subset)): # 1:size(subset,1):
188
- if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
189
- subset_idx[found] = j
190
- found += 1
191
-
192
- if found == 1:
193
- j = subset_idx[0]
194
- if subset[j][indexB] != partBs[i]:
195
- subset[j][indexB] = partBs[i]
196
- subset[j][-1] += 1
197
- subset[j][-2] += candidate[partBs[i].astype(
198
- int), 2] + connection_all[k][i][2]
199
- elif found == 2: # if found 2 and disjoint, merge them
200
- j1, j2 = subset_idx
201
- membership = ((subset[j1] >= 0).astype(
202
- int) + (subset[j2] >= 0).astype(int))[:-2]
203
- if len(np.nonzero(membership == 2)[0]) == 0: # merge
204
- subset[j1][:-2] += (subset[j2][:-2] + 1)
205
- subset[j1][-2:] += subset[j2][-2:]
206
- subset[j1][-2] += connection_all[k][i][2]
207
- subset = np.delete(subset, j2, 0)
208
- else: # as like found == 1
209
- subset[j1][indexB] = partBs[i]
210
- subset[j1][-1] += 1
211
- subset[j1][-2] += candidate[partBs[i].astype(
212
- int), 2] + connection_all[k][i][2]
213
-
214
- # if find no partA in the subset, create a new subset
215
- elif not found and k < 17:
216
- row = -1 * np.ones(20)
217
- row[indexA] = partAs[i]
218
- row[indexB] = partBs[i]
219
- row[-1] = 2
220
- row[-2] = sum(candidate[connection_all[k][i,
221
- :2].astype(int), 2]) + connection_all[k][i][2]
222
- subset = np.vstack([subset, row])
223
- # delete some rows of subset which has few parts occur
224
- deleteIdx = []
225
- for i in range(len(subset)):
226
- if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
227
- deleteIdx.append(i)
228
- subset = np.delete(subset, deleteIdx, axis=0)
229
-
230
- # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
231
- # candidate: x, y, score, id
232
- return candidate, subset
233
-
234
-
235
- if __name__ == "__main__":
236
- body_estimation = Body('../model/body_pose_model.pth')
237
-
238
- test_image = '../images/ski.jpg'
239
- oriImg = cv2.imread(test_image) # B,G,R order
240
- candidate, subset = body_estimation(oriImg)
241
- canvas = util.draw_bodypose(oriImg, candidate, subset)
242
- plt.imshow(canvas[:, :, [2, 1, 0]])
243
- plt.show()
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/run_pti.py DELETED
@@ -1,54 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- from random import choice
4
- from string import ascii_uppercase
5
- from torch.utils.data import DataLoader
6
- from torchvision.transforms import transforms
7
- import os
8
- from pti.pti_configs import global_config, paths_config
9
- import wandb
10
-
11
- from pti.training.coaches.multi_id_coach import MultiIDCoach
12
- from pti.training.coaches.single_id_coach import SingleIDCoach
13
- from utils.ImagesDataset import ImagesDataset
14
-
15
-
16
- def run_PTI(run_name='', use_wandb=False, use_multi_id_training=False):
17
- os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
18
- os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices
19
-
20
- if run_name == '':
21
- global_config.run_name = ''.join(
22
- choice(ascii_uppercase) for i in range(12))
23
- else:
24
- global_config.run_name = run_name
25
-
26
- if use_wandb:
27
- run = wandb.init(project=paths_config.pti_results_keyword,
28
- reinit=True, name=global_config.run_name)
29
- global_config.pivotal_training_steps = 1
30
- global_config.training_step = 1
31
-
32
- embedding_dir_path = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{paths_config.pti_results_keyword}'
33
- # print('embedding_dir_path: ', embedding_dir_path) #./embeddings/barcelona/PTI
34
- os.makedirs(embedding_dir_path, exist_ok=True)
35
-
36
- dataset = ImagesDataset(paths_config.input_data_path, transforms.Compose([
37
- transforms.Resize((1024, 512)),
38
- transforms.ToTensor(),
39
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]))
40
-
41
- dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
42
-
43
- if use_multi_id_training:
44
- coach = MultiIDCoach(dataloader, use_wandb)
45
- else:
46
- coach = SingleIDCoach(dataloader, use_wandb)
47
-
48
- coach.train()
49
-
50
- return global_config.run_name
51
-
52
-
53
- if __name__ == '__main__':
54
- run_PTI(run_name='', use_wandb=False, use_multi_id_training=False)
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/utils.py DELETED
@@ -1,68 +0,0 @@
1
- import numpy as np
2
- import scipy
3
-
4
- __LPIPS__ = {}
5
-
6
- import torch
7
-
8
-
9
- def init_lpips(net_name, device):
10
- assert net_name in ['alex', 'vgg']
11
- import lpips
12
- print(f'init_lpips: lpips_{net_name}')
13
- return lpips.LPIPS(net=net_name, version='0.1').eval().to(device)
14
-
15
- def rgb_lpips(np_gt, np_im, net_name, device):
16
- if net_name not in __LPIPS__:
17
- __LPIPS__[net_name] = init_lpips(net_name, device)
18
- gt = torch.from_numpy(np_gt).permute([2, 0, 1]).contiguous().to(device)
19
- im = torch.from_numpy(np_im).permute([2, 0, 1]).contiguous().to(device)
20
- return __LPIPS__[net_name](gt, im, normalize=True).item()
21
-
22
- def rgb_ssim(img0, img1, max_val,
23
- filter_size=11,
24
- filter_sigma=1.5,
25
- k1=0.01,
26
- k2=0.03,
27
- return_map=False):
28
- # Modified from https://github.com/google/mipnerf/blob/16e73dfdb52044dcceb47cda5243a686391a6e0f/internal/math.py#L58
29
- assert len(img0.shape) == 3
30
- assert img0.shape[-1] == 3
31
- assert img0.shape == img1.shape
32
-
33
- # Construct a 1D Gaussian blur filter.
34
- hw = filter_size // 2
35
- shift = (2 * hw - filter_size + 1) / 2
36
- f_i = ((np.arange(filter_size) - hw + shift) / filter_sigma)**2
37
- filt = np.exp(-0.5 * f_i)
38
- filt /= np.sum(filt)
39
-
40
- # Blur in x and y (faster than the 2D convolution).
41
- def convolve2d(z, f):
42
- return scipy.signal.convolve2d(z, f, mode='valid')
43
-
44
- filt_fn = lambda z: np.stack([
45
- convolve2d(convolve2d(z[...,i], filt[:, None]), filt[None, :])
46
- for i in range(z.shape[-1])], -1)
47
- mu0 = filt_fn(img0)
48
- mu1 = filt_fn(img1)
49
- mu00 = mu0 * mu0
50
- mu11 = mu1 * mu1
51
- mu01 = mu0 * mu1
52
- sigma00 = filt_fn(img0**2) - mu00
53
- sigma11 = filt_fn(img1**2) - mu11
54
- sigma01 = filt_fn(img0 * img1) - mu01
55
-
56
- # Clip the variances and covariances to valid values.
57
- # Variance must be non-negative:
58
- sigma00 = np.maximum(0., sigma00)
59
- sigma11 = np.maximum(0., sigma11)
60
- sigma01 = np.sign(sigma01) * np.minimum(
61
- np.sqrt(sigma00 * sigma11), np.abs(sigma01))
62
- c1 = (k1 * max_val)**2
63
- c2 = (k2 * max_val)**2
64
- numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
65
- denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
66
- ssim_map = numer / denom
67
- ssim = np.mean(ssim_map)
68
- return ssim_map if return_map else ssim
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/README.md DELETED
@@ -1,72 +0,0 @@
1
- <!---
2
- Copyright 2023 The HuggingFace Team. All rights reserved.
3
- Licensed under the Apache License, Version 2.0 (the "License");
4
- you may not use this file except in compliance with the License.
5
- You may obtain a copy of the License at
6
-
7
- http://www.apache.org/licenses/LICENSE-2.0
8
-
9
- Unless required by applicable law or agreed to in writing, software
10
- distributed under the License is distributed on an "AS IS" BASIS,
11
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- See the License for the specific language governing permissions and
13
- limitations under the License.
14
- -->
15
-
16
- # 🧨 Diffusers Examples
17
-
18
- Diffusers examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library
19
- for a variety of use cases involving training or fine-tuning.
20
-
21
- **Note**: If you are looking for **official** examples on how to use `diffusers` for inference,
22
- please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)
23
-
24
- Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**.
25
- More specifically, this means:
26
-
27
- - **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) and execute the example script.
28
- - **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required.
29
- - **Beginner-friendly**: We do not aim for providing state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners.
30
- - **One-purpose-only**: Examples should show one task and one task only. Even if a task is from a modeling
31
- point of view very similar, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible.
32
-
33
- We provide **official** examples that cover the most popular tasks of diffusion models.
34
- *Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above.
35
- If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you!
36
-
37
- Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. Currently we support:
38
-
39
- | Task | 🤗 Accelerate | 🤗 Datasets | Colab
40
- |---|---|:---:|:---:|
41
- | [**Unconditional Image Generation**](./unconditional_image_generation) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
42
- | [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ |
43
- | [**Textual Inversion**](./textual_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
44
- | [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb)
45
- | [**ControlNet**](./controlnet) | ✅ | ✅ | -
46
- | [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | -
47
- | [**Reinforcement Learning for Control**](https://github.com/huggingface/diffusers/blob/main/examples/reinforcement_learning/run_diffusers_locomotion.py) | - | - | coming soon.
48
-
49
- ## Community
50
-
51
- In addition, we provide **community** examples, which are examples added and maintained by our community.
52
- Community examples can consist of both *training* examples or *inference* pipelines.
53
- For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue.
54
- Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines.
55
- **Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄.
56
-
57
- ## Research Projects
58
-
59
- We also provide **research_projects** examples that are maintained by the community as defined in the respective research project folders. These examples are useful and offer the extended capabilities which are complementary to the official examples. You may refer to [research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) for details.
60
-
61
- ## Important note
62
-
63
- To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
64
- ```bash
65
- git clone https://github.com/huggingface/diffusers
66
- cd diffusers
67
- pip install .
68
- ```
69
- Then cd in the example folder of your choice and run
70
- ```bash
71
- pip install -r requirements.txt
72
- ```
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_unidiffuser_to_diffusers.py DELETED
@@ -1,776 +0,0 @@
1
- # Convert the original UniDiffuser checkpoints into diffusers equivalents.
2
-
3
- import argparse
4
- from argparse import Namespace
5
-
6
- import torch
7
- from transformers import (
8
- CLIPImageProcessor,
9
- CLIPTextConfig,
10
- CLIPTextModel,
11
- CLIPTokenizer,
12
- CLIPVisionConfig,
13
- CLIPVisionModelWithProjection,
14
- GPT2Tokenizer,
15
- )
16
-
17
- from diffusers import (
18
- AutoencoderKL,
19
- DPMSolverMultistepScheduler,
20
- UniDiffuserModel,
21
- UniDiffuserPipeline,
22
- UniDiffuserTextDecoder,
23
- )
24
-
25
-
26
- SCHEDULER_CONFIG = Namespace(
27
- **{
28
- "beta_start": 0.00085,
29
- "beta_end": 0.012,
30
- "beta_schedule": "scaled_linear",
31
- "solver_order": 3,
32
- }
33
- )
34
-
35
-
36
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.shave_segments
37
- def shave_segments(path, n_shave_prefix_segments=1):
38
- """
39
- Removes segments. Positive values shave the first segments, negative shave the last segments.
40
- """
41
- if n_shave_prefix_segments >= 0:
42
- return ".".join(path.split(".")[n_shave_prefix_segments:])
43
- else:
44
- return ".".join(path.split(".")[:n_shave_prefix_segments])
45
-
46
-
47
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_vae_resnet_paths
48
- def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
49
- """
50
- Updates paths inside resnets to the new naming scheme (local renaming)
51
- """
52
- mapping = []
53
- for old_item in old_list:
54
- new_item = old_item
55
-
56
- new_item = new_item.replace("nin_shortcut", "conv_shortcut")
57
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
58
-
59
- mapping.append({"old": old_item, "new": new_item})
60
-
61
- return mapping
62
-
63
-
64
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.renew_vae_attention_paths
65
- def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
66
- """
67
- Updates paths inside attentions to the new naming scheme (local renaming)
68
- """
69
- mapping = []
70
- for old_item in old_list:
71
- new_item = old_item
72
-
73
- new_item = new_item.replace("norm.weight", "group_norm.weight")
74
- new_item = new_item.replace("norm.bias", "group_norm.bias")
75
-
76
- new_item = new_item.replace("q.weight", "query.weight")
77
- new_item = new_item.replace("q.bias", "query.bias")
78
-
79
- new_item = new_item.replace("k.weight", "key.weight")
80
- new_item = new_item.replace("k.bias", "key.bias")
81
-
82
- new_item = new_item.replace("v.weight", "value.weight")
83
- new_item = new_item.replace("v.bias", "value.bias")
84
-
85
- new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
86
- new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
87
-
88
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
89
-
90
- mapping.append({"old": old_item, "new": new_item})
91
-
92
- return mapping
93
-
94
-
95
- # Modified from diffusers.pipelines.stable_diffusion.convert_from_ckpt.assign_to_checkpoint
96
- # config.num_head_channels => num_head_channels
97
- def assign_to_checkpoint(
98
- paths,
99
- checkpoint,
100
- old_checkpoint,
101
- attention_paths_to_split=None,
102
- additional_replacements=None,
103
- num_head_channels=1,
104
- ):
105
- """
106
- This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
107
- attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new
108
- checkpoint.
109
- """
110
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
111
-
112
- # Splits the attention layers into three variables.
113
- if attention_paths_to_split is not None:
114
- for path, path_map in attention_paths_to_split.items():
115
- old_tensor = old_checkpoint[path]
116
- channels = old_tensor.shape[0] // 3
117
-
118
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
119
-
120
- num_heads = old_tensor.shape[0] // num_head_channels // 3
121
-
122
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
123
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
124
-
125
- checkpoint[path_map["query"]] = query.reshape(target_shape)
126
- checkpoint[path_map["key"]] = key.reshape(target_shape)
127
- checkpoint[path_map["value"]] = value.reshape(target_shape)
128
-
129
- for path in paths:
130
- new_path = path["new"]
131
-
132
- # These have already been assigned
133
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
134
- continue
135
-
136
- # Global renaming happens here
137
- new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
138
- new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
139
- new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
140
-
141
- if additional_replacements is not None:
142
- for replacement in additional_replacements:
143
- new_path = new_path.replace(replacement["old"], replacement["new"])
144
-
145
- # proj_attn.weight has to be converted from conv 1D to linear
146
- if "proj_attn.weight" in new_path:
147
- checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
148
- else:
149
- checkpoint[new_path] = old_checkpoint[path["old"]]
150
-
151
-
152
- # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.conv_attn_to_linear
153
- def conv_attn_to_linear(checkpoint):
154
- keys = list(checkpoint.keys())
155
- attn_keys = ["query.weight", "key.weight", "value.weight"]
156
- for key in keys:
157
- if ".".join(key.split(".")[-2:]) in attn_keys:
158
- if checkpoint[key].ndim > 2:
159
- checkpoint[key] = checkpoint[key][:, :, 0, 0]
160
- elif "proj_attn.weight" in key:
161
- if checkpoint[key].ndim > 2:
162
- checkpoint[key] = checkpoint[key][:, :, 0]
163
-
164
-
165
- def create_vae_diffusers_config(config_type):
166
- # Hardcoded for now
167
- if config_type == "test":
168
- vae_config = create_vae_diffusers_config_test()
169
- elif config_type == "big":
170
- vae_config = create_vae_diffusers_config_big()
171
- else:
172
- raise NotImplementedError(
173
- f"Config type {config_type} is not implemented, currently only config types"
174
- " 'test' and 'big' are available."
175
- )
176
- return vae_config
177
-
178
-
179
- def create_unidiffuser_unet_config(config_type, version):
180
- # Hardcoded for now
181
- if config_type == "test":
182
- unet_config = create_unidiffuser_unet_config_test()
183
- elif config_type == "big":
184
- unet_config = create_unidiffuser_unet_config_big()
185
- else:
186
- raise NotImplementedError(
187
- f"Config type {config_type} is not implemented, currently only config types"
188
- " 'test' and 'big' are available."
189
- )
190
- # Unidiffuser-v1 uses data type embeddings
191
- if version == 1:
192
- unet_config["use_data_type_embedding"] = True
193
- return unet_config
194
-
195
-
196
- def create_text_decoder_config(config_type):
197
- # Hardcoded for now
198
- if config_type == "test":
199
- text_decoder_config = create_text_decoder_config_test()
200
- elif config_type == "big":
201
- text_decoder_config = create_text_decoder_config_big()
202
- else:
203
- raise NotImplementedError(
204
- f"Config type {config_type} is not implemented, currently only config types"
205
- " 'test' and 'big' are available."
206
- )
207
- return text_decoder_config
208
-
209
-
210
- # Hardcoded configs for test versions of the UniDiffuser models, corresponding to those in the fast default tests.
211
- def create_vae_diffusers_config_test():
212
- vae_config = {
213
- "sample_size": 32,
214
- "in_channels": 3,
215
- "out_channels": 3,
216
- "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
217
- "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
218
- "block_out_channels": [32, 64],
219
- "latent_channels": 4,
220
- "layers_per_block": 1,
221
- }
222
- return vae_config
223
-
224
-
225
- def create_unidiffuser_unet_config_test():
226
- unet_config = {
227
- "text_dim": 32,
228
- "clip_img_dim": 32,
229
- "num_text_tokens": 77,
230
- "num_attention_heads": 2,
231
- "attention_head_dim": 8,
232
- "in_channels": 4,
233
- "out_channels": 4,
234
- "num_layers": 2,
235
- "dropout": 0.0,
236
- "norm_num_groups": 32,
237
- "attention_bias": False,
238
- "sample_size": 16,
239
- "patch_size": 2,
240
- "activation_fn": "gelu",
241
- "num_embeds_ada_norm": 1000,
242
- "norm_type": "layer_norm",
243
- "block_type": "unidiffuser",
244
- "pre_layer_norm": False,
245
- "use_timestep_embedding": False,
246
- "norm_elementwise_affine": True,
247
- "use_patch_pos_embed": False,
248
- "ff_final_dropout": True,
249
- "use_data_type_embedding": False,
250
- }
251
- return unet_config
252
-
253
-
254
- def create_text_decoder_config_test():
255
- text_decoder_config = {
256
- "prefix_length": 77,
257
- "prefix_inner_dim": 32,
258
- "prefix_hidden_dim": 32,
259
- "vocab_size": 1025, # 1024 + 1 for new EOS token
260
- "n_positions": 1024,
261
- "n_embd": 32,
262
- "n_layer": 5,
263
- "n_head": 4,
264
- "n_inner": 37,
265
- "activation_function": "gelu",
266
- "resid_pdrop": 0.1,
267
- "embd_pdrop": 0.1,
268
- "attn_pdrop": 0.1,
269
- "layer_norm_epsilon": 1e-5,
270
- "initializer_range": 0.02,
271
- }
272
- return text_decoder_config
273
-
274
-
275
- # Hardcoded configs for the UniDiffuser V1 model at https://huggingface.co/thu-ml/unidiffuser-v1
276
- # See also https://github.com/thu-ml/unidiffuser/blob/main/configs/sample_unidiffuser_v1.py
277
- def create_vae_diffusers_config_big():
278
- vae_config = {
279
- "sample_size": 256,
280
- "in_channels": 3,
281
- "out_channels": 3,
282
- "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
283
- "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
284
- "block_out_channels": [128, 256, 512, 512],
285
- "latent_channels": 4,
286
- "layers_per_block": 2,
287
- }
288
- return vae_config
289
-
290
-
291
- def create_unidiffuser_unet_config_big():
292
- unet_config = {
293
- "text_dim": 64,
294
- "clip_img_dim": 512,
295
- "num_text_tokens": 77,
296
- "num_attention_heads": 24,
297
- "attention_head_dim": 64,
298
- "in_channels": 4,
299
- "out_channels": 4,
300
- "num_layers": 30,
301
- "dropout": 0.0,
302
- "norm_num_groups": 32,
303
- "attention_bias": False,
304
- "sample_size": 64,
305
- "patch_size": 2,
306
- "activation_fn": "gelu",
307
- "num_embeds_ada_norm": 1000,
308
- "norm_type": "layer_norm",
309
- "block_type": "unidiffuser",
310
- "pre_layer_norm": False,
311
- "use_timestep_embedding": False,
312
- "norm_elementwise_affine": True,
313
- "use_patch_pos_embed": False,
314
- "ff_final_dropout": True,
315
- "use_data_type_embedding": False,
316
- }
317
- return unet_config
318
-
319
-
320
- # From https://huggingface.co/gpt2/blob/main/config.json, the GPT2 checkpoint used by UniDiffuser
321
- def create_text_decoder_config_big():
322
- text_decoder_config = {
323
- "prefix_length": 77,
324
- "prefix_inner_dim": 768,
325
- "prefix_hidden_dim": 64,
326
- "vocab_size": 50258, # 50257 + 1 for new EOS token
327
- "n_positions": 1024,
328
- "n_embd": 768,
329
- "n_layer": 12,
330
- "n_head": 12,
331
- "n_inner": 3072,
332
- "activation_function": "gelu",
333
- "resid_pdrop": 0.1,
334
- "embd_pdrop": 0.1,
335
- "attn_pdrop": 0.1,
336
- "layer_norm_epsilon": 1e-5,
337
- "initializer_range": 0.02,
338
- }
339
- return text_decoder_config
340
-
341
-
342
- # Based on diffusers.pipelines.stable_diffusion.convert_from_ckpt.convert_ldm_vae_checkpoint
343
- def convert_vae_to_diffusers(ckpt, diffusers_model, num_head_channels=1):
344
- """
345
- Converts a UniDiffuser autoencoder_kl.pth checkpoint to a diffusers AutoencoderKL.
346
- """
347
- # autoencoder_kl.pth ckpt is a torch state dict
348
- vae_state_dict = torch.load(ckpt, map_location="cpu")
349
-
350
- new_checkpoint = {}
351
-
352
- new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
353
- new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
354
- new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
355
- new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
356
- new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
357
- new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
358
-
359
- new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
360
- new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
361
- new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
362
- new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
363
- new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
364
- new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
365
-
366
- new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
367
- new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
368
- new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
369
- new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
370
-
371
- # Retrieves the keys for the encoder down blocks only
372
- num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
373
- down_blocks = {
374
- layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
375
- }
376
-
377
- # Retrieves the keys for the decoder up blocks only
378
- num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
379
- up_blocks = {
380
- layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
381
- }
382
-
383
- for i in range(num_down_blocks):
384
- resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
385
-
386
- if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
387
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
388
- f"encoder.down.{i}.downsample.conv.weight"
389
- )
390
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
391
- f"encoder.down.{i}.downsample.conv.bias"
392
- )
393
-
394
- paths = renew_vae_resnet_paths(resnets)
395
- meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
396
- assign_to_checkpoint(
397
- paths,
398
- new_checkpoint,
399
- vae_state_dict,
400
- additional_replacements=[meta_path],
401
- num_head_channels=num_head_channels, # not used in vae
402
- )
403
-
404
- mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
405
- num_mid_res_blocks = 2
406
- for i in range(1, num_mid_res_blocks + 1):
407
- resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
408
-
409
- paths = renew_vae_resnet_paths(resnets)
410
- meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
411
- assign_to_checkpoint(
412
- paths,
413
- new_checkpoint,
414
- vae_state_dict,
415
- additional_replacements=[meta_path],
416
- num_head_channels=num_head_channels, # not used in vae
417
- )
418
-
419
- mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
420
- paths = renew_vae_attention_paths(mid_attentions)
421
- meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
422
- assign_to_checkpoint(
423
- paths,
424
- new_checkpoint,
425
- vae_state_dict,
426
- additional_replacements=[meta_path],
427
- num_head_channels=num_head_channels, # not used in vae
428
- )
429
- conv_attn_to_linear(new_checkpoint)
430
-
431
- for i in range(num_up_blocks):
432
- block_id = num_up_blocks - 1 - i
433
- resnets = [
434
- key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
435
- ]
436
-
437
- if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
438
- new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
439
- f"decoder.up.{block_id}.upsample.conv.weight"
440
- ]
441
- new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
442
- f"decoder.up.{block_id}.upsample.conv.bias"
443
- ]
444
-
445
- paths = renew_vae_resnet_paths(resnets)
446
- meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
447
- assign_to_checkpoint(
448
- paths,
449
- new_checkpoint,
450
- vae_state_dict,
451
- additional_replacements=[meta_path],
452
- num_head_channels=num_head_channels, # not used in vae
453
- )
454
-
455
- mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
456
- num_mid_res_blocks = 2
457
- for i in range(1, num_mid_res_blocks + 1):
458
- resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
459
-
460
- paths = renew_vae_resnet_paths(resnets)
461
- meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
462
- assign_to_checkpoint(
463
- paths,
464
- new_checkpoint,
465
- vae_state_dict,
466
- additional_replacements=[meta_path],
467
- num_head_channels=num_head_channels, # not used in vae
468
- )
469
-
470
- mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
471
- paths = renew_vae_attention_paths(mid_attentions)
472
- meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
473
- assign_to_checkpoint(
474
- paths,
475
- new_checkpoint,
476
- vae_state_dict,
477
- additional_replacements=[meta_path],
478
- num_head_channels=num_head_channels, # not used in vae
479
- )
480
- conv_attn_to_linear(new_checkpoint)
481
-
482
- missing_keys, unexpected_keys = diffusers_model.load_state_dict(new_checkpoint)
483
- for missing_key in missing_keys:
484
- print(f"Missing key: {missing_key}")
485
- for unexpected_key in unexpected_keys:
486
- print(f"Unexpected key: {unexpected_key}")
487
-
488
- return diffusers_model
489
-
490
-
491
- def convert_uvit_block_to_diffusers_block(
492
- uvit_state_dict,
493
- new_state_dict,
494
- block_prefix,
495
- new_prefix="transformer.transformer_",
496
- skip_connection=False,
497
- ):
498
- """
499
- Maps the keys in a UniDiffuser transformer block (`Block`) to the keys in a diffusers transformer block
500
- (`UTransformerBlock`/`UniDiffuserBlock`).
501
- """
502
- prefix = new_prefix + block_prefix
503
- if skip_connection:
504
- new_state_dict[prefix + ".skip.skip_linear.weight"] = uvit_state_dict[block_prefix + ".skip_linear.weight"]
505
- new_state_dict[prefix + ".skip.skip_linear.bias"] = uvit_state_dict[block_prefix + ".skip_linear.bias"]
506
- new_state_dict[prefix + ".skip.norm.weight"] = uvit_state_dict[block_prefix + ".norm1.weight"]
507
- new_state_dict[prefix + ".skip.norm.bias"] = uvit_state_dict[block_prefix + ".norm1.bias"]
508
-
509
- # Create the prefix string for out_blocks.
510
- prefix += ".block"
511
-
512
- # Split up attention qkv.weight into to_q.weight, to_k.weight, to_v.weight
513
- qkv = uvit_state_dict[block_prefix + ".attn.qkv.weight"]
514
- new_attn_keys = [".attn1.to_q.weight", ".attn1.to_k.weight", ".attn1.to_v.weight"]
515
- new_attn_keys = [prefix + key for key in new_attn_keys]
516
- shape = qkv.shape[0] // len(new_attn_keys)
517
- for i, attn_key in enumerate(new_attn_keys):
518
- new_state_dict[attn_key] = qkv[i * shape : (i + 1) * shape]
519
-
520
- new_state_dict[prefix + ".attn1.to_out.0.weight"] = uvit_state_dict[block_prefix + ".attn.proj.weight"]
521
- new_state_dict[prefix + ".attn1.to_out.0.bias"] = uvit_state_dict[block_prefix + ".attn.proj.bias"]
522
- new_state_dict[prefix + ".norm1.weight"] = uvit_state_dict[block_prefix + ".norm2.weight"]
523
- new_state_dict[prefix + ".norm1.bias"] = uvit_state_dict[block_prefix + ".norm2.bias"]
524
- new_state_dict[prefix + ".ff.net.0.proj.weight"] = uvit_state_dict[block_prefix + ".mlp.fc1.weight"]
525
- new_state_dict[prefix + ".ff.net.0.proj.bias"] = uvit_state_dict[block_prefix + ".mlp.fc1.bias"]
526
- new_state_dict[prefix + ".ff.net.2.weight"] = uvit_state_dict[block_prefix + ".mlp.fc2.weight"]
527
- new_state_dict[prefix + ".ff.net.2.bias"] = uvit_state_dict[block_prefix + ".mlp.fc2.bias"]
528
- new_state_dict[prefix + ".norm3.weight"] = uvit_state_dict[block_prefix + ".norm3.weight"]
529
- new_state_dict[prefix + ".norm3.bias"] = uvit_state_dict[block_prefix + ".norm3.bias"]
530
-
531
- return uvit_state_dict, new_state_dict
532
-
533
-
534
- def convert_uvit_to_diffusers(ckpt, diffusers_model):
535
- """
536
- Converts a UniDiffuser uvit_v*.pth checkpoint to a diffusers UniDiffuserModel.
537
- """
538
- # uvit_v*.pth ckpt is a torch state dict
539
- uvit_state_dict = torch.load(ckpt, map_location="cpu")
540
-
541
- new_state_dict = {}
542
-
543
- # Input layers
544
- new_state_dict["vae_img_in.proj.weight"] = uvit_state_dict["patch_embed.proj.weight"]
545
- new_state_dict["vae_img_in.proj.bias"] = uvit_state_dict["patch_embed.proj.bias"]
546
- new_state_dict["clip_img_in.weight"] = uvit_state_dict["clip_img_embed.weight"]
547
- new_state_dict["clip_img_in.bias"] = uvit_state_dict["clip_img_embed.bias"]
548
- new_state_dict["text_in.weight"] = uvit_state_dict["text_embed.weight"]
549
- new_state_dict["text_in.bias"] = uvit_state_dict["text_embed.bias"]
550
-
551
- new_state_dict["pos_embed"] = uvit_state_dict["pos_embed"]
552
-
553
- # Handle data type token embeddings for UniDiffuser-v1
554
- if "token_embedding.weight" in uvit_state_dict and diffusers_model.use_data_type_embedding:
555
- new_state_dict["data_type_pos_embed_token"] = uvit_state_dict["pos_embed_token"]
556
- new_state_dict["data_type_token_embedding.weight"] = uvit_state_dict["token_embedding.weight"]
557
-
558
- # Also initialize the PatchEmbedding in UTransformer2DModel with the PatchEmbedding from the checkpoint.
559
- # This isn't used in the current implementation, so it could be removed.
560
- new_state_dict["transformer.pos_embed.proj.weight"] = uvit_state_dict["patch_embed.proj.weight"]
561
- new_state_dict["transformer.pos_embed.proj.bias"] = uvit_state_dict["patch_embed.proj.bias"]
562
-
563
- # Output layers
564
- new_state_dict["transformer.norm_out.weight"] = uvit_state_dict["norm.weight"]
565
- new_state_dict["transformer.norm_out.bias"] = uvit_state_dict["norm.bias"]
566
-
567
- new_state_dict["vae_img_out.weight"] = uvit_state_dict["decoder_pred.weight"]
568
- new_state_dict["vae_img_out.bias"] = uvit_state_dict["decoder_pred.bias"]
569
- new_state_dict["clip_img_out.weight"] = uvit_state_dict["clip_img_out.weight"]
570
- new_state_dict["clip_img_out.bias"] = uvit_state_dict["clip_img_out.bias"]
571
- new_state_dict["text_out.weight"] = uvit_state_dict["text_out.weight"]
572
- new_state_dict["text_out.bias"] = uvit_state_dict["text_out.bias"]
573
-
574
- # in_blocks
575
- in_blocks_prefixes = {".".join(layer.split(".")[:2]) for layer in uvit_state_dict if "in_blocks" in layer}
576
- for in_block_prefix in list(in_blocks_prefixes):
577
- convert_uvit_block_to_diffusers_block(uvit_state_dict, new_state_dict, in_block_prefix)
578
-
579
- # mid_block
580
- # Assume there's only one mid block
581
- convert_uvit_block_to_diffusers_block(uvit_state_dict, new_state_dict, "mid_block")
582
-
583
- # out_blocks
584
- out_blocks_prefixes = {".".join(layer.split(".")[:2]) for layer in uvit_state_dict if "out_blocks" in layer}
585
- for out_block_prefix in list(out_blocks_prefixes):
586
- convert_uvit_block_to_diffusers_block(uvit_state_dict, new_state_dict, out_block_prefix, skip_connection=True)
587
-
588
- missing_keys, unexpected_keys = diffusers_model.load_state_dict(new_state_dict)
589
- for missing_key in missing_keys:
590
- print(f"Missing key: {missing_key}")
591
- for unexpected_key in unexpected_keys:
592
- print(f"Unexpected key: {unexpected_key}")
593
-
594
- return diffusers_model
595
-
596
-
597
- def convert_caption_decoder_to_diffusers(ckpt, diffusers_model):
598
- """
599
- Converts a UniDiffuser caption_decoder.pth checkpoint to a diffusers UniDiffuserTextDecoder.
600
- """
601
- # caption_decoder.pth ckpt is a torch state dict
602
- checkpoint_state_dict = torch.load(ckpt, map_location="cpu")
603
- decoder_state_dict = {}
604
- # Remove the "module." prefix, if necessary
605
- caption_decoder_key = "module."
606
- for key in checkpoint_state_dict:
607
- if key.startswith(caption_decoder_key):
608
- decoder_state_dict[key.replace(caption_decoder_key, "")] = checkpoint_state_dict.get(key)
609
- else:
610
- decoder_state_dict[key] = checkpoint_state_dict.get(key)
611
-
612
- new_state_dict = {}
613
-
614
- # Encoder and Decoder
615
- new_state_dict["encode_prefix.weight"] = decoder_state_dict["encode_prefix.weight"]
616
- new_state_dict["encode_prefix.bias"] = decoder_state_dict["encode_prefix.bias"]
617
- new_state_dict["decode_prefix.weight"] = decoder_state_dict["decode_prefix.weight"]
618
- new_state_dict["decode_prefix.bias"] = decoder_state_dict["decode_prefix.bias"]
619
-
620
- # Internal GPT2LMHeadModel transformer model
621
- for key, val in decoder_state_dict.items():
622
- if key.startswith("gpt"):
623
- suffix = key[len("gpt") :]
624
- new_state_dict["transformer" + suffix] = val
625
-
626
- missing_keys, unexpected_keys = diffusers_model.load_state_dict(new_state_dict)
627
- for missing_key in missing_keys:
628
- print(f"Missing key: {missing_key}")
629
- for unexpected_key in unexpected_keys:
630
- print(f"Unexpected key: {unexpected_key}")
631
-
632
- return diffusers_model
633
-
634
-
635
- if __name__ == "__main__":
636
- parser = argparse.ArgumentParser()
637
-
638
- parser.add_argument(
639
- "--caption_decoder_checkpoint_path",
640
- default=None,
641
- type=str,
642
- required=False,
643
- help="Path to caption decoder checkpoint to convert.",
644
- )
645
- parser.add_argument(
646
- "--uvit_checkpoint_path", default=None, type=str, required=False, help="Path to U-ViT checkpoint to convert."
647
- )
648
- parser.add_argument(
649
- "--vae_checkpoint_path",
650
- default=None,
651
- type=str,
652
- required=False,
653
- help="Path to VAE checkpoint to convert.",
654
- )
655
- parser.add_argument(
656
- "--pipeline_output_path",
657
- default=None,
658
- type=str,
659
- required=True,
660
- help="Path to save the output pipeline to.",
661
- )
662
- parser.add_argument(
663
- "--config_type",
664
- default="test",
665
- type=str,
666
- help=(
667
- "Config type to use. Should be 'test' to create small models for testing or 'big' to convert a full"
668
- " checkpoint."
669
- ),
670
- )
671
- parser.add_argument(
672
- "--version",
673
- default=0,
674
- type=int,
675
- help="The UniDiffuser model type to convert to. Should be 0 for UniDiffuser-v0 and 1 for UniDiffuser-v1.",
676
- )
677
-
678
- args = parser.parse_args()
679
-
680
- # Convert the VAE model.
681
- if args.vae_checkpoint_path is not None:
682
- vae_config = create_vae_diffusers_config(args.config_type)
683
- vae = AutoencoderKL(**vae_config)
684
- vae = convert_vae_to_diffusers(args.vae_checkpoint_path, vae)
685
-
686
- # Convert the U-ViT ("unet") model.
687
- if args.uvit_checkpoint_path is not None:
688
- unet_config = create_unidiffuser_unet_config(args.config_type, args.version)
689
- unet = UniDiffuserModel(**unet_config)
690
- unet = convert_uvit_to_diffusers(args.uvit_checkpoint_path, unet)
691
-
692
- # Convert the caption decoder ("text_decoder") model.
693
- if args.caption_decoder_checkpoint_path is not None:
694
- text_decoder_config = create_text_decoder_config(args.config_type)
695
- text_decoder = UniDiffuserTextDecoder(**text_decoder_config)
696
- text_decoder = convert_caption_decoder_to_diffusers(args.caption_decoder_checkpoint_path, text_decoder)
697
-
698
- # Scheduler is the same for both the test and big models.
699
- scheduler_config = SCHEDULER_CONFIG
700
- scheduler = DPMSolverMultistepScheduler(
701
- beta_start=scheduler_config.beta_start,
702
- beta_end=scheduler_config.beta_end,
703
- beta_schedule=scheduler_config.beta_schedule,
704
- solver_order=scheduler_config.solver_order,
705
- )
706
-
707
- if args.config_type == "test":
708
- # Make a small random CLIPTextModel
709
- torch.manual_seed(0)
710
- clip_text_encoder_config = CLIPTextConfig(
711
- bos_token_id=0,
712
- eos_token_id=2,
713
- hidden_size=32,
714
- intermediate_size=37,
715
- layer_norm_eps=1e-05,
716
- num_attention_heads=4,
717
- num_hidden_layers=5,
718
- pad_token_id=1,
719
- vocab_size=1000,
720
- )
721
- text_encoder = CLIPTextModel(clip_text_encoder_config)
722
- clip_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
723
-
724
- # Make a small random CLIPVisionModel and accompanying CLIPImageProcessor
725
- torch.manual_seed(0)
726
- clip_image_encoder_config = CLIPVisionConfig(
727
- image_size=32,
728
- patch_size=2,
729
- num_channels=3,
730
- hidden_size=32,
731
- projection_dim=32,
732
- num_hidden_layers=5,
733
- num_attention_heads=4,
734
- intermediate_size=37,
735
- dropout=0.1,
736
- attention_dropout=0.1,
737
- initializer_range=0.02,
738
- )
739
- image_encoder = CLIPVisionModelWithProjection(clip_image_encoder_config)
740
- image_processor = CLIPImageProcessor(crop_size=32, size=32)
741
-
742
- # Note that the text_decoder should already have its token embeddings resized.
743
- text_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
744
- eos = "<|EOS|>"
745
- special_tokens_dict = {"eos_token": eos}
746
- text_tokenizer.add_special_tokens(special_tokens_dict)
747
- elif args.config_type == "big":
748
- text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
749
- clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
750
-
751
- image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
752
- image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
753
-
754
- # Note that the text_decoder should already have its token embeddings resized.
755
- text_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
756
- eos = "<|EOS|>"
757
- special_tokens_dict = {"eos_token": eos}
758
- text_tokenizer.add_special_tokens(special_tokens_dict)
759
- else:
760
- raise NotImplementedError(
761
- f"Config type {args.config_type} is not implemented, currently only config types"
762
- " 'test' and 'big' are available."
763
- )
764
-
765
- pipeline = UniDiffuserPipeline(
766
- vae=vae,
767
- text_encoder=text_encoder,
768
- image_encoder=image_encoder,
769
- image_processor=image_processor,
770
- clip_tokenizer=clip_tokenizer,
771
- text_decoder=text_decoder,
772
- text_tokenizer=text_tokenizer,
773
- unet=unet,
774
- scheduler=scheduler,
775
- )
776
- pipeline.save_pretrained(args.pipeline_output_path)
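For reference, a conversion of the full UniDiffuser-v1 release with the script above would have been invoked roughly as sketched below. The flags come from the script's own argparse definition; the checkpoint filenames follow the names mentioned in its docstrings (autoencoder_kl.pth, uvit_v*.pth, caption_decoder.pth), and the output directory is an arbitrary placeholder.

    python convert_unidiffuser_to_diffusers.py \
        --caption_decoder_checkpoint_path caption_decoder.pth \
        --uvit_checkpoint_path uvit_v1.pth \
        --vae_checkpoint_path autoencoder_kl.pth \
        --pipeline_output_path ./unidiffuser-v1 \
        --config_type big \
        --version 1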
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddpm/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .pipeline_ddpm import DDPMPipeline
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_superresolution.py DELETED
@@ -1,83 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import random
17
- import unittest
18
-
19
- import torch
20
-
21
- from diffusers import IFSuperResolutionPipeline
22
- from diffusers.utils import floats_tensor
23
- from diffusers.utils.import_utils import is_xformers_available
24
- from diffusers.utils.testing_utils import skip_mps, torch_device
25
-
26
- from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
27
- from ..test_pipelines_common import PipelineTesterMixin
28
- from . import IFPipelineTesterMixin
29
-
30
-
31
- @skip_mps
32
- class IFSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
33
- pipeline_class = IFSuperResolutionPipeline
34
- params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
35
- batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
36
- required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
37
-
38
- def get_dummy_components(self):
39
- return self._get_superresolution_dummy_components()
40
-
41
- def get_dummy_inputs(self, device, seed=0):
42
- if str(device).startswith("mps"):
43
- generator = torch.manual_seed(seed)
44
- else:
45
- generator = torch.Generator(device=device).manual_seed(seed)
46
-
47
- image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
48
-
49
- inputs = {
50
- "prompt": "A painting of a squirrel eating a burger",
51
- "image": image,
52
- "generator": generator,
53
- "num_inference_steps": 2,
54
- "output_type": "numpy",
55
- }
56
-
57
- return inputs
58
-
59
- @unittest.skipIf(
60
- torch_device != "cuda" or not is_xformers_available(),
61
- reason="XFormers attention is only available with CUDA and `xformers` installed",
62
- )
63
- def test_xformers_attention_forwardGenerator_pass(self):
64
- self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
65
-
66
- def test_save_load_optional_components(self):
67
- self._test_save_load_optional_components()
68
-
69
- @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
70
- def test_save_load_float16(self):
71
- # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
72
- super().test_save_load_float16(expected_max_diff=1e-1)
73
-
74
- def test_attention_slicing_forward_pass(self):
75
- self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
76
-
77
- def test_save_load_local(self):
78
- self._test_save_load_local()
79
-
80
- def test_inference_batch_single_identical(self):
81
- self._test_inference_batch_single_identical(
82
- expected_max_diff=1e-2,
83
- )
 
spaces/Andy1621/uniformer_image_detection/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py DELETED
@@ -1,13 +0,0 @@
1
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- backbone=dict(plugins=[
4
- dict(
5
- cfg=dict(
6
- type='GeneralizedAttention',
7
- spatial_range=-1,
8
- num_heads=8,
9
- attention_type='0010',
10
- kv_stride=2),
11
- stages=(False, False, True, True),
12
- position='after_conv2')
13
- ]))
 
spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py DELETED
@@ -1,71 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/retinanet_r50_fpn.py',
3
- '../_base_/datasets/coco_detection.py',
4
- '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
5
- ]
6
- # model settings
7
- norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
8
- model = dict(
9
- pretrained='torchvision://resnet101',
10
- backbone=dict(depth=101),
11
- bbox_head=dict(
12
- _delete_=True,
13
- type='SABLRetinaHead',
14
- num_classes=80,
15
- in_channels=256,
16
- stacked_convs=4,
17
- feat_channels=256,
18
- approx_anchor_generator=dict(
19
- type='AnchorGenerator',
20
- octave_base_scale=4,
21
- scales_per_octave=3,
22
- ratios=[0.5, 1.0, 2.0],
23
- strides=[8, 16, 32, 64, 128]),
24
- square_anchor_generator=dict(
25
- type='AnchorGenerator',
26
- ratios=[1.0],
27
- scales=[4],
28
- strides=[8, 16, 32, 64, 128]),
29
- norm_cfg=norm_cfg,
30
- bbox_coder=dict(
31
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
32
- loss_cls=dict(
33
- type='FocalLoss',
34
- use_sigmoid=True,
35
- gamma=2.0,
36
- alpha=0.25,
37
- loss_weight=1.0),
38
- loss_bbox_cls=dict(
39
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
40
- loss_bbox_reg=dict(
41
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
42
- # training and testing settings
43
- train_cfg=dict(
44
- assigner=dict(
45
- type='ApproxMaxIoUAssigner',
46
- pos_iou_thr=0.5,
47
- neg_iou_thr=0.4,
48
- min_pos_iou=0.0,
49
- ignore_iof_thr=-1),
50
- allowed_border=-1,
51
- pos_weight=-1,
52
- debug=False))
53
- img_norm_cfg = dict(
54
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
55
- train_pipeline = [
56
- dict(type='LoadImageFromFile'),
57
- dict(type='LoadAnnotations', with_bbox=True),
58
- dict(
59
- type='Resize',
60
- img_scale=[(1333, 640), (1333, 800)],
61
- multiscale_mode='range',
62
- keep_ratio=True),
63
- dict(type='RandomFlip', flip_ratio=0.5),
64
- dict(type='Normalize', **img_norm_cfg),
65
- dict(type='Pad', size_divisor=32),
66
- dict(type='DefaultFormatBundle'),
67
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
68
- ]
69
- data = dict(train=dict(pipeline=train_pipeline))
70
- # optimizer
71
- optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/dii_head.py DELETED
@@ -1,415 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from mmcv.cnn import (bias_init_with_prob, build_activation_layer,
4
- build_norm_layer)
5
- from mmcv.runner import auto_fp16, force_fp32
6
-
7
- from mmdet.core import multi_apply
8
- from mmdet.models.builder import HEADS, build_loss
9
- from mmdet.models.dense_heads.atss_head import reduce_mean
10
- from mmdet.models.losses import accuracy
11
- from mmdet.models.utils import FFN, MultiheadAttention, build_transformer
12
- from .bbox_head import BBoxHead
13
-
14
-
15
- @HEADS.register_module()
16
- class DIIHead(BBoxHead):
17
- r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object
18
- Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
19
-
20
- Args:
21
- num_classes (int): Number of class in dataset.
22
- Defaults to 80.
23
- num_ffn_fcs (int): The number of fully-connected
24
- layers in FFNs. Defaults to 2.
25
- num_heads (int): The hidden dimension of FFNs.
26
- Defaults to 8.
27
- num_cls_fcs (int): The number of fully-connected
28
- layers in classification subnet. Defaults to 1.
29
- num_reg_fcs (int): The number of fully-connected
30
- layers in regression subnet. Defaults to 3.
31
- feedforward_channels (int): The hidden dimension
32
- of FFNs. Defaults to 2048
33
- in_channels (int): Hidden_channels of MultiheadAttention.
34
- Defaults to 256.
35
- dropout (float): Probability of drop the channel.
36
- Defaults to 0.0
37
- ffn_act_cfg (dict): The activation config for FFNs.
38
- dynamic_conv_cfg (dict): The convolution config
39
- for DynamicConv.
40
- loss_iou (dict): The config for iou or giou loss.
41
-
42
- """
43
-
44
- def __init__(self,
45
- num_classes=80,
46
- num_ffn_fcs=2,
47
- num_heads=8,
48
- num_cls_fcs=1,
49
- num_reg_fcs=3,
50
- feedforward_channels=2048,
51
- in_channels=256,
52
- dropout=0.0,
53
- ffn_act_cfg=dict(type='ReLU', inplace=True),
54
- dynamic_conv_cfg=dict(
55
- type='DynamicConv',
56
- in_channels=256,
57
- feat_channels=64,
58
- out_channels=256,
59
- input_feat_shape=7,
60
- act_cfg=dict(type='ReLU', inplace=True),
61
- norm_cfg=dict(type='LN')),
62
- loss_iou=dict(type='GIoULoss', loss_weight=2.0),
63
- **kwargs):
64
- super(DIIHead, self).__init__(
65
- num_classes=num_classes,
66
- reg_decoded_bbox=True,
67
- reg_class_agnostic=True,
68
- **kwargs)
69
- self.loss_iou = build_loss(loss_iou)
70
- self.in_channels = in_channels
71
- self.fp16_enabled = False
72
- self.attention = MultiheadAttention(in_channels, num_heads, dropout)
73
- self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
74
-
75
- self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
76
- self.instance_interactive_conv_dropout = nn.Dropout(dropout)
77
- self.instance_interactive_conv_norm = build_norm_layer(
78
- dict(type='LN'), in_channels)[1]
79
-
80
- self.ffn = FFN(
81
- in_channels,
82
- feedforward_channels,
83
- num_ffn_fcs,
84
- act_cfg=ffn_act_cfg,
85
- dropout=dropout)
86
- self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
87
-
88
- self.cls_fcs = nn.ModuleList()
89
- for _ in range(num_cls_fcs):
90
- self.cls_fcs.append(
91
- nn.Linear(in_channels, in_channels, bias=False))
92
- self.cls_fcs.append(
93
- build_norm_layer(dict(type='LN'), in_channels)[1])
94
- self.cls_fcs.append(
95
- build_activation_layer(dict(type='ReLU', inplace=True)))
96
-
97
- # overload the self.fc_cls in BBoxHead
98
- if self.loss_cls.use_sigmoid:
99
- self.fc_cls = nn.Linear(in_channels, self.num_classes)
100
- else:
101
- self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)
102
-
103
- self.reg_fcs = nn.ModuleList()
104
- for _ in range(num_reg_fcs):
105
- self.reg_fcs.append(
106
- nn.Linear(in_channels, in_channels, bias=False))
107
- self.reg_fcs.append(
108
- build_norm_layer(dict(type='LN'), in_channels)[1])
109
- self.reg_fcs.append(
110
- build_activation_layer(dict(type='ReLU', inplace=True)))
111
- # overload the self.fc_reg in BBoxHead
112
- self.fc_reg = nn.Linear(in_channels, 4)
113
-
114
- assert self.reg_class_agnostic, 'DIIHead only ' \
115
- 'support `reg_class_agnostic=True` '
116
- assert self.reg_decoded_bbox, 'DIIHead only ' \
117
- 'support `reg_decoded_bbox=True`'
118
-
119
- def init_weights(self):
120
- """Use xavier initialization for all weight parameter and set
121
- classification head bias as a specific value when use focal loss."""
122
- for p in self.parameters():
123
- if p.dim() > 1:
124
- nn.init.xavier_uniform_(p)
125
- else:
126
- # adopt the default initialization for
127
- # the weight and bias of the layer norm
128
- pass
129
- if self.loss_cls.use_sigmoid:
130
- bias_init = bias_init_with_prob(0.01)
131
- nn.init.constant_(self.fc_cls.bias, bias_init)
132
-
133
- @auto_fp16()
134
- def forward(self, roi_feat, proposal_feat):
135
- """Forward function of Dynamic Instance Interactive Head.
136
-
137
- Args:
138
- roi_feat (Tensor): Roi-pooling features with shape
139
- (batch_size*num_proposals, feature_dimensions,
140
- pooling_h , pooling_w).
141
- proposal_feat (Tensor): Intermediate feature get from
142
- diihead in last stage, has shape
143
- (batch_size, num_proposals, feature_dimensions)
144
-
145
- Returns:
146
- tuple[Tensor]: Usually a tuple of classification scores
147
- and bbox prediction and an intermediate feature.
148
-
149
- - cls_scores (Tensor): Classification scores for
150
- all proposals, has shape
151
- (batch_size, num_proposals, num_classes).
152
- - bbox_preds (Tensor): Box energies / deltas for
153
- all proposals, has shape
154
- (batch_size, num_proposals, 4).
155
- - obj_feat (Tensor): Object feature before classification
156
- and regression subnet, has shape
157
- (batch_size, num_proposal, feature_dimensions).
158
- """
159
- N, num_proposals = proposal_feat.shape[:2]
160
-
161
- # Self attention
162
- proposal_feat = proposal_feat.permute(1, 0, 2)
163
- proposal_feat = self.attention_norm(self.attention(proposal_feat))
164
-
165
- # instance interactive
166
- proposal_feat = proposal_feat.permute(1, 0,
167
- 2).reshape(-1, self.in_channels)
168
- proposal_feat_iic = self.instance_interactive_conv(
169
- proposal_feat, roi_feat)
170
- proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
171
- proposal_feat_iic)
172
- obj_feat = self.instance_interactive_conv_norm(proposal_feat)
173
-
174
- # FFN
175
- obj_feat = self.ffn_norm(self.ffn(obj_feat))
176
-
177
- cls_feat = obj_feat
178
- reg_feat = obj_feat
179
-
180
- for cls_layer in self.cls_fcs:
181
- cls_feat = cls_layer(cls_feat)
182
- for reg_layer in self.reg_fcs:
183
- reg_feat = reg_layer(reg_feat)
184
-
185
- cls_score = self.fc_cls(cls_feat).view(N, num_proposals, -1)
186
- bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, -1)
187
-
188
- return cls_score, bbox_delta, obj_feat.view(N, num_proposals, -1)
189
-
190
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
191
- def loss(self,
192
- cls_score,
193
- bbox_pred,
194
- labels,
195
- label_weights,
196
- bbox_targets,
197
- bbox_weights,
198
- imgs_whwh=None,
199
- reduction_override=None,
200
- **kwargs):
201
- """Loss function of DIIHead, computing the loss over all images.
202
-
203
- Args:
204
- cls_score (Tensor): Classification prediction
205
- results of all class, has shape
206
- (batch_size * num_proposals_single_image, num_classes)
207
- bbox_pred (Tensor): Regression prediction results,
208
- has shape
209
- (batch_size * num_proposals_single_image, 4), the last
210
- dimension 4 represents [tl_x, tl_y, br_x, br_y].
211
- labels (Tensor): Label of each proposal, has shape
212
- (batch_size * num_proposals_single_image).
213
- label_weights (Tensor): Classification loss
214
- weight of each proposal, has shape
215
- (batch_size * num_proposals_single_image).
216
- bbox_targets (Tensor): Regression targets of each
217
- proposals, has shape
218
- (batch_size * num_proposals_single_image, 4),
219
- the last dimension 4 represents
220
- [tl_x, tl_y, br_x, br_y].
221
- bbox_weights (Tensor): Regression loss weight of each
222
- proposal's coordinate, has shape
223
- (batch_size * num_proposals_single_image, 4),
224
- imgs_whwh (Tensor): Tensor with\
225
- shape (batch_size, num_proposals, 4), the last
226
- dimension means
227
- [img_width,img_height, img_width, img_height].
228
- reduction_override (str, optional): The reduction
229
- method used to override the original reduction
230
- method of the loss. Options are "none",
231
- "mean" and "sum". Defaults to None,
232
-
233
- Returns:
234
- dict[str, Tensor]: Dictionary of loss components
235
- """
236
- losses = dict()
237
- bg_class_ind = self.num_classes
238
- # note in sparse rcnn num_gt == num_pos
239
- pos_inds = (labels >= 0) & (labels < bg_class_ind)
240
- num_pos = pos_inds.sum().float()
241
- avg_factor = reduce_mean(num_pos)
242
- if cls_score is not None:
243
- if cls_score.numel() > 0:
244
- losses['loss_cls'] = self.loss_cls(
245
- cls_score,
246
- labels,
247
- label_weights,
248
- avg_factor=avg_factor,
249
- reduction_override=reduction_override)
250
- losses['pos_acc'] = accuracy(cls_score[pos_inds],
251
- labels[pos_inds])
252
- if bbox_pred is not None:
253
- # 0~self.num_classes-1 are FG, self.num_classes is BG
254
- # do not perform bounding box regression for BG anymore.
255
- if pos_inds.any():
256
- pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
257
- 4)[pos_inds.type(torch.bool)]
258
- imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
259
- 4)[pos_inds.type(torch.bool)]
260
- losses['loss_bbox'] = self.loss_bbox(
261
- pos_bbox_pred / imgs_whwh,
262
- bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
263
- bbox_weights[pos_inds.type(torch.bool)],
264
- avg_factor=avg_factor)
265
- losses['loss_iou'] = self.loss_iou(
266
- pos_bbox_pred,
267
- bbox_targets[pos_inds.type(torch.bool)],
268
- bbox_weights[pos_inds.type(torch.bool)],
269
- avg_factor=avg_factor)
270
- else:
271
- losses['loss_bbox'] = bbox_pred.sum() * 0
272
- losses['loss_iou'] = bbox_pred.sum() * 0
273
- return losses
274
-
275
- def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
276
- pos_gt_bboxes, pos_gt_labels, cfg):
277
- """Calculate the ground truth for proposals in the single image
278
- according to the sampling results.
279
-
280
- Almost the same as the implementation in `bbox_head`,
281
- we add pos_inds and neg_inds to select positive and
282
- negative samples instead of selecting the first num_pos
283
- as positive samples.
284
-
285
- Args:
286
- pos_inds (Tensor): The length is equal to the
287
- positive sample numbers contain all index
288
- of the positive sample in the origin proposal set.
289
- neg_inds (Tensor): The length is equal to the
290
- negative sample numbers contain all index
291
- of the negative sample in the origin proposal set.
292
- pos_bboxes (Tensor): Contains all the positive boxes,
293
- has shape (num_pos, 4), the last dimension 4
294
- represents [tl_x, tl_y, br_x, br_y].
295
- neg_bboxes (Tensor): Contains all the negative boxes,
296
- has shape (num_neg, 4), the last dimension 4
297
- represents [tl_x, tl_y, br_x, br_y].
298
- pos_gt_bboxes (Tensor): Contains all the gt_boxes,
299
- has shape (num_gt, 4), the last dimension 4
300
- represents [tl_x, tl_y, br_x, br_y].
301
- pos_gt_labels (Tensor): Contains all the gt_labels,
302
- has shape (num_gt).
303
- cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
304
-
305
- Returns:
306
- Tuple[Tensor]: Ground truth for proposals in a single image.
307
- Containing the following Tensors:
308
-
309
- - labels(Tensor): Gt_labels for all proposals, has
310
- shape (num_proposals,).
311
- - label_weights(Tensor): Labels_weights for all proposals, has
312
- shape (num_proposals,).
313
- - bbox_targets(Tensor):Regression target for all proposals, has
314
- shape (num_proposals, 4), the last dimension 4
315
- represents [tl_x, tl_y, br_x, br_y].
316
- - bbox_weights(Tensor):Regression weights for all proposals,
317
- has shape (num_proposals, 4).
318
- """
319
- num_pos = pos_bboxes.size(0)
320
- num_neg = neg_bboxes.size(0)
321
- num_samples = num_pos + num_neg
322
-
323
- # original implementation uses new_zeros since BG are set to be 0
324
- # now use empty & fill because BG cat_id = num_classes,
325
- # FG cat_id = [0, num_classes-1]
326
- labels = pos_bboxes.new_full((num_samples, ),
327
- self.num_classes,
328
- dtype=torch.long)
329
- label_weights = pos_bboxes.new_zeros(num_samples)
330
- bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
331
- bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
332
- if num_pos > 0:
333
- labels[pos_inds] = pos_gt_labels
334
- pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
335
- label_weights[pos_inds] = pos_weight
336
- if not self.reg_decoded_bbox:
337
- pos_bbox_targets = self.bbox_coder.encode(
338
- pos_bboxes, pos_gt_bboxes)
339
- else:
340
- pos_bbox_targets = pos_gt_bboxes
341
- bbox_targets[pos_inds, :] = pos_bbox_targets
342
- bbox_weights[pos_inds, :] = 1
343
- if num_neg > 0:
344
- label_weights[neg_inds] = 1.0
345
-
346
- return labels, label_weights, bbox_targets, bbox_weights
347
-
348
- def get_targets(self,
349
- sampling_results,
350
- gt_bboxes,
351
- gt_labels,
352
- rcnn_train_cfg,
353
- concat=True):
354
- """Calculate the ground truth for all samples in a batch according to
355
- the sampling_results.
356
-
357
- Almost the same as the implementation in bbox_head, we passed
358
- additional parameters pos_inds_list and neg_inds_list to
359
- `_get_target_single` function.
360
-
361
- Args:
362
- sampling_results (List[obj:SamplingResults]): Assign results of
363
- all images in a batch after sampling.
364
- gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
365
- each tensor has shape (num_gt, 4), the last dimension 4
366
- represents [tl_x, tl_y, br_x, br_y].
367
- gt_labels (list[Tensor]): Gt_labels of all images in a batch,
368
- each tensor has shape (num_gt,).
369
- rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
370
- concat (bool): Whether to concatenate the results of all
371
- the images in a single batch.
372
-
373
- Returns:
374
- Tuple[Tensor]: Ground truth for proposals in a single image.
375
- Containing the following list of Tensors:
376
-
377
- - labels (list[Tensor],Tensor): Gt_labels for all
378
- proposals in a batch, each tensor in list has
379
- shape (num_proposals,) when `concat=False`, otherwise just
380
- a single tensor has shape (num_all_proposals,).
381
- - label_weights (list[Tensor]): Labels_weights for
382
- all proposals in a batch, each tensor in list has shape
383
- (num_proposals,) when `concat=False`, otherwise just a
384
- single tensor has shape (num_all_proposals,).
385
- - bbox_targets (list[Tensor],Tensor): Regression target
386
- for all proposals in a batch, each tensor in list has
387
- shape (num_proposals, 4) when `concat=False`, otherwise
388
- just a single tensor has shape (num_all_proposals, 4),
389
- the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
390
- - bbox_weights (list[tensor],Tensor): Regression weights for
391
- all proposals in a batch, each tensor in list has shape
392
- (num_proposals, 4) when `concat=False`, otherwise just a
393
- single tensor has shape (num_all_proposals, 4).
394
- """
395
- pos_inds_list = [res.pos_inds for res in sampling_results]
396
- neg_inds_list = [res.neg_inds for res in sampling_results]
397
- pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
398
- neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
399
- pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
400
- pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
401
- labels, label_weights, bbox_targets, bbox_weights = multi_apply(
402
- self._get_target_single,
403
- pos_inds_list,
404
- neg_inds_list,
405
- pos_bboxes_list,
406
- neg_bboxes_list,
407
- pos_gt_bboxes_list,
408
- pos_gt_labels_list,
409
- cfg=rcnn_train_cfg)
410
- if concat:
411
- labels = torch.cat(labels, 0)
412
- label_weights = torch.cat(label_weights, 0)
413
- bbox_targets = torch.cat(bbox_targets, 0)
414
- bbox_weights = torch.cat(bbox_weights, 0)
415
- return labels, label_weights, bbox_targets, bbox_weights
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/__init__.py DELETED
@@ -1,8 +0,0 @@
1
- from .class_names import get_classes, get_palette
2
- from .eval_hooks import DistEvalHook, EvalHook
3
- from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou
4
-
5
- __all__ = [
6
- 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
7
- 'eval_metrics', 'get_classes', 'get_palette'
8
- ]
 
spaces/AnthonyTruchetPoC/persistent-docker/scripts/run-local-docker.sh DELETED
@@ -1,15 +0,0 @@
1
- #!/usr/bin/env bash
2
- source $(dirname $0)/common_header.sh
3
-
4
- # During development ONLY use a `bind mount` to enable
5
- # editing the code without having to rebuild the container.
6
- docker run --rm -it \
7
- -p 8501:8501 -p 7860:7860 \
8
- --env-file ${ROOT_DIRECTORY}/.env \
9
- --mount type=volume,src=$VOLUME_NAME,dst=/data \
10
- --mount type=bind,source=${ROOT_DIRECTORY}/src/,target=/app/,readonly \
11
- --mount type=bind,source=${ROOT_DIRECTORY}/.streamlit,target=/user/.streamlit,readonly \
12
- $CONTAINER_NAME:latest \
13
- "$@" # Pass all command-line arguments through, preserving quoting
14
-
15
-
 
spaces/Arun1217/mygenaiapp/app.py DELETED
@@ -1,34 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from langchain.chat_models import ChatOpenAI
4
- from langchain import LLMChain, PromptTemplate
5
- from langchain.memory import ConversationBufferMemory
6
-
7
- OPENAI_API_KEY=os.getenv('OPENAI_API_KEY')
8
-
9
- template = """You are a helpful assistant to answer all user queries.
10
- {chat_history}
11
- User: {user_message}
12
- Chatbot:"""
13
-
14
- prompt = PromptTemplate(
15
- input_variables=["chat_history", "user_message"], template=template
16
- )
17
-
18
- memory = ConversationBufferMemory(memory_key="chat_history")
19
-
20
- llm_chain = LLMChain(
21
- llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
22
- prompt=prompt,
23
- verbose=True,
24
- memory=memory,
25
- )
26
-
27
- def get_text_response(user_message,history):
28
- response = llm_chain.predict(user_message = user_message)
29
- return response
30
-
31
- demo = gr.ChatInterface(get_text_response)
32
-
33
- if __name__ == "__main__":
34
- demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py DELETED
@@ -1,94 +0,0 @@
1
- """
2
- pygments.formatter
3
- ~~~~~~~~~~~~~~~~~~
4
-
5
- Base formatter class.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import codecs
12
-
13
- from pip._vendor.pygments.util import get_bool_opt
14
- from pip._vendor.pygments.styles import get_style_by_name
15
-
16
- __all__ = ['Formatter']
17
-
18
-
19
- def _lookup_style(style):
20
- if isinstance(style, str):
21
- return get_style_by_name(style)
22
- return style
23
-
24
-
25
- class Formatter:
26
- """
27
- Converts a token stream to text.
28
-
29
- Options accepted:
30
-
31
- ``style``
32
- The style to use, can be a string or a Style subclass
33
- (default: "default"). Not used by e.g. the
34
- TerminalFormatter.
35
- ``full``
36
- Tells the formatter to output a "full" document, i.e.
37
- a complete self-contained document. This doesn't have
38
- any effect for some formatters (default: false).
39
- ``title``
40
- If ``full`` is true, the title that should be used to
41
- caption the document (default: '').
42
- ``encoding``
43
- If given, must be an encoding name. This will be used to
44
- convert the Unicode token strings to byte strings in the
45
- output. If it is "" or None, Unicode strings will be written
46
- to the output file, which most file-like objects do not
47
- support (default: None).
48
- ``outencoding``
49
- Overrides ``encoding`` if given.
50
- """
51
-
52
- #: Name of the formatter
53
- name = None
54
-
55
- #: Shortcuts for the formatter
56
- aliases = []
57
-
58
- #: fn match rules
59
- filenames = []
60
-
61
- #: If True, this formatter outputs Unicode strings when no encoding
62
- #: option is given.
63
- unicodeoutput = True
64
-
65
- def __init__(self, **options):
66
- self.style = _lookup_style(options.get('style', 'default'))
67
- self.full = get_bool_opt(options, 'full', False)
68
- self.title = options.get('title', '')
69
- self.encoding = options.get('encoding', None) or None
70
- if self.encoding in ('guess', 'chardet'):
71
- # can happen for e.g. pygmentize -O encoding=guess
72
- self.encoding = 'utf-8'
73
- self.encoding = options.get('outencoding') or self.encoding
74
- self.options = options
75
-
76
- def get_style_defs(self, arg=''):
77
- """
78
- Return the style definitions for the current style as a string.
79
-
80
- ``arg`` is an additional argument whose meaning depends on the
81
- formatter used. Note that ``arg`` can also be a list or tuple
82
- for some formatters like the html formatter.
83
- """
84
- return ''
85
-
86
- def format(self, tokensource, outfile):
87
- """
88
- Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
89
- tuples and write it into ``outfile``.
90
- """
91
- if self.encoding:
92
- # wrap the outfile in a StreamWriter
93
- outfile = codecs.lookup(self.encoding)[3](outfile)
94
- return self.format_unencoded(tokensource, outfile)
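
The vendored Formatter base class above documents the contract: subclasses implement format_unencoded(), while format() only wraps the output stream in a codec writer when an encoding option is set. A toy subclass, sketched against the standalone pygments package rather than pip's vendored copy (an assumption about the environment), shows how little is needed:

```python
# Toy formatter sketch: emits token text only, dropping all styling.
from pygments import highlight
from pygments.formatter import Formatter
from pygments.lexers import PythonLexer


class PlainTextFormatter(Formatter):
    """Writes each token's text verbatim to the output file."""
    name = 'Plain text (toy)'
    aliases = ['plaintoy']

    def format_unencoded(self, tokensource, outfile):
        for tokentype, value in tokensource:
            outfile.write(value)


print(highlight('x = 1 + 2\n', PythonLexer(), PlainTextFormatter()))
```

get_style_defs() is left at the base-class default (an empty string), which is fine for a formatter that emits no styling.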
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/retry.py DELETED
@@ -1,620 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import email
4
- import logging
5
- import re
6
- import time
7
- import warnings
8
- from collections import namedtuple
9
- from itertools import takewhile
10
-
11
- from ..exceptions import (
12
- ConnectTimeoutError,
13
- InvalidHeader,
14
- MaxRetryError,
15
- ProtocolError,
16
- ProxyError,
17
- ReadTimeoutError,
18
- ResponseError,
19
- )
20
- from ..packages import six
21
-
22
- log = logging.getLogger(__name__)
23
-
24
-
25
- # Data structure for representing the metadata of requests that result in a retry.
26
- RequestHistory = namedtuple(
27
- "RequestHistory", ["method", "url", "error", "status", "redirect_location"]
28
- )
29
-
30
-
31
- # TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
32
- _Default = object()
33
-
34
-
35
- class _RetryMeta(type):
36
- @property
37
- def DEFAULT_METHOD_WHITELIST(cls):
38
- warnings.warn(
39
- "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
40
- "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
41
- DeprecationWarning,
42
- )
43
- return cls.DEFAULT_ALLOWED_METHODS
44
-
45
- @DEFAULT_METHOD_WHITELIST.setter
46
- def DEFAULT_METHOD_WHITELIST(cls, value):
47
- warnings.warn(
48
- "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
49
- "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
50
- DeprecationWarning,
51
- )
52
- cls.DEFAULT_ALLOWED_METHODS = value
53
-
54
- @property
55
- def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
56
- warnings.warn(
57
- "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
58
- "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
59
- DeprecationWarning,
60
- )
61
- return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
62
-
63
- @DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
64
- def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
65
- warnings.warn(
66
- "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
67
- "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
68
- DeprecationWarning,
69
- )
70
- cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
71
-
72
- @property
73
- def BACKOFF_MAX(cls):
74
- warnings.warn(
75
- "Using 'Retry.BACKOFF_MAX' is deprecated and "
76
- "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
77
- DeprecationWarning,
78
- )
79
- return cls.DEFAULT_BACKOFF_MAX
80
-
81
- @BACKOFF_MAX.setter
82
- def BACKOFF_MAX(cls, value):
83
- warnings.warn(
84
- "Using 'Retry.BACKOFF_MAX' is deprecated and "
85
- "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
86
- DeprecationWarning,
87
- )
88
- cls.DEFAULT_BACKOFF_MAX = value
89
-
90
-
91
- @six.add_metaclass(_RetryMeta)
92
- class Retry(object):
93
- """Retry configuration.
94
-
95
- Each retry attempt will create a new Retry object with updated values, so
96
- they can be safely reused.
97
-
98
- Retries can be defined as a default for a pool::
99
-
100
- retries = Retry(connect=5, read=2, redirect=5)
101
- http = PoolManager(retries=retries)
102
- response = http.request('GET', 'http://example.com/')
103
-
104
- Or per-request (which overrides the default for the pool)::
105
-
106
- response = http.request('GET', 'http://example.com/', retries=Retry(10))
107
-
108
- Retries can be disabled by passing ``False``::
109
-
110
- response = http.request('GET', 'http://example.com/', retries=False)
111
-
112
- Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
113
- retries are disabled, in which case the causing exception will be raised.
114
-
115
- :param int total:
116
- Total number of retries to allow. Takes precedence over other counts.
117
-
118
- Set to ``None`` to remove this constraint and fall back on other
119
- counts.
120
-
121
- Set to ``0`` to fail on the first retry.
122
-
123
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
124
-
125
- :param int connect:
126
- How many connection-related errors to retry on.
127
-
128
- These are errors raised before the request is sent to the remote server,
129
- which we assume has not triggered the server to process the request.
130
-
131
- Set to ``0`` to fail on the first retry of this type.
132
-
133
- :param int read:
134
- How many times to retry on read errors.
135
-
136
- These errors are raised after the request was sent to the server, so the
137
- request may have side-effects.
138
-
139
- Set to ``0`` to fail on the first retry of this type.
140
-
141
- :param int redirect:
142
- How many redirects to perform. Limit this to avoid infinite redirect
143
- loops.
144
-
145
- A redirect is a HTTP response with a status code 301, 302, 303, 307 or
146
- 308.
147
-
148
- Set to ``0`` to fail on the first retry of this type.
149
-
150
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
151
-
152
- :param int status:
153
- How many times to retry on bad status codes.
154
-
155
- These are retries made on responses, where status code matches
156
- ``status_forcelist``.
157
-
158
- Set to ``0`` to fail on the first retry of this type.
159
-
160
- :param int other:
161
- How many times to retry on other errors.
162
-
163
- Other errors are errors that are not connect, read, redirect or status errors.
164
- These errors might be raised after the request was sent to the server, so the
165
- request might have side-effects.
166
-
167
- Set to ``0`` to fail on the first retry of this type.
168
-
169
- If ``total`` is not set, it's a good idea to set this to 0 to account
170
- for unexpected edge cases and avoid infinite retry loops.
171
-
172
- :param iterable allowed_methods:
173
- Set of uppercased HTTP method verbs that we should retry on.
174
-
175
- By default, we only retry on methods which are considered to be
176
- idempotent (multiple requests with the same parameters end with the
177
- same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
178
-
179
- Set to a ``False`` value to retry on any verb.
180
-
181
- .. warning::
182
-
183
- Previously this parameter was named ``method_whitelist``, that
184
- usage is deprecated in v1.26.0 and will be removed in v2.0.
185
-
186
- :param iterable status_forcelist:
187
- A set of integer HTTP status codes that we should force a retry on.
188
- A retry is initiated if the request method is in ``allowed_methods``
189
- and the response status code is in ``status_forcelist``.
190
-
191
- By default, this is disabled with ``None``.
192
-
193
- :param float backoff_factor:
194
- A backoff factor to apply between attempts after the second try
195
- (most errors are resolved immediately by a second try without a
196
- delay). urllib3 will sleep for::
197
-
198
- {backoff factor} * (2 ** ({number of total retries} - 1))
199
-
200
- seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
201
- for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
202
- than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
203
-
204
- By default, backoff is disabled (set to 0).
205
-
206
- :param bool raise_on_redirect: Whether, if the number of redirects is
207
- exhausted, to raise a MaxRetryError, or to return a response with a
208
- response code in the 3xx range.
209
-
210
- :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
211
- whether we should raise an exception, or return a response,
212
- if status falls in ``status_forcelist`` range and retries have
213
- been exhausted.
214
-
215
- :param tuple history: The history of the request encountered during
216
- each call to :meth:`~Retry.increment`. The list is in the order
217
- the requests occurred. Each list item is of class :class:`RequestHistory`.
218
-
219
- :param bool respect_retry_after_header:
220
- Whether to respect Retry-After header on status codes defined as
221
- :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
222
-
223
- :param iterable remove_headers_on_redirect:
224
- Sequence of headers to remove from the request when a response
225
- indicating a redirect is returned before firing off the redirected
226
- request.
227
- """
228
-
229
- #: Default methods to be used for ``allowed_methods``
230
- DEFAULT_ALLOWED_METHODS = frozenset(
231
- ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
232
- )
233
-
234
- #: Default status codes to be used for ``status_forcelist``
235
- RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
236
-
237
- #: Default headers to be used for ``remove_headers_on_redirect``
238
- DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
239
-
240
- #: Maximum backoff time.
241
- DEFAULT_BACKOFF_MAX = 120
242
-
243
- def __init__(
244
- self,
245
- total=10,
246
- connect=None,
247
- read=None,
248
- redirect=None,
249
- status=None,
250
- other=None,
251
- allowed_methods=_Default,
252
- status_forcelist=None,
253
- backoff_factor=0,
254
- raise_on_redirect=True,
255
- raise_on_status=True,
256
- history=None,
257
- respect_retry_after_header=True,
258
- remove_headers_on_redirect=_Default,
259
- # TODO: Deprecated, remove in v2.0
260
- method_whitelist=_Default,
261
- ):
262
-
263
- if method_whitelist is not _Default:
264
- if allowed_methods is not _Default:
265
- raise ValueError(
266
- "Using both 'allowed_methods' and "
267
- "'method_whitelist' together is not allowed. "
268
- "Instead only use 'allowed_methods'"
269
- )
270
- warnings.warn(
271
- "Using 'method_whitelist' with Retry is deprecated and "
272
- "will be removed in v2.0. Use 'allowed_methods' instead",
273
- DeprecationWarning,
274
- stacklevel=2,
275
- )
276
- allowed_methods = method_whitelist
277
- if allowed_methods is _Default:
278
- allowed_methods = self.DEFAULT_ALLOWED_METHODS
279
- if remove_headers_on_redirect is _Default:
280
- remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
281
-
282
- self.total = total
283
- self.connect = connect
284
- self.read = read
285
- self.status = status
286
- self.other = other
287
-
288
- if redirect is False or total is False:
289
- redirect = 0
290
- raise_on_redirect = False
291
-
292
- self.redirect = redirect
293
- self.status_forcelist = status_forcelist or set()
294
- self.allowed_methods = allowed_methods
295
- self.backoff_factor = backoff_factor
296
- self.raise_on_redirect = raise_on_redirect
297
- self.raise_on_status = raise_on_status
298
- self.history = history or tuple()
299
- self.respect_retry_after_header = respect_retry_after_header
300
- self.remove_headers_on_redirect = frozenset(
301
- [h.lower() for h in remove_headers_on_redirect]
302
- )
303
-
304
- def new(self, **kw):
305
- params = dict(
306
- total=self.total,
307
- connect=self.connect,
308
- read=self.read,
309
- redirect=self.redirect,
310
- status=self.status,
311
- other=self.other,
312
- status_forcelist=self.status_forcelist,
313
- backoff_factor=self.backoff_factor,
314
- raise_on_redirect=self.raise_on_redirect,
315
- raise_on_status=self.raise_on_status,
316
- history=self.history,
317
- remove_headers_on_redirect=self.remove_headers_on_redirect,
318
- respect_retry_after_header=self.respect_retry_after_header,
319
- )
320
-
321
- # TODO: If already given in **kw we use what's given to us
322
- # If not given we need to figure out what to pass. We decide
323
- # based on whether our class has the 'method_whitelist' property
324
- # and if so we pass the deprecated 'method_whitelist' otherwise
325
- # we use 'allowed_methods'. Remove in v2.0
326
- if "method_whitelist" not in kw and "allowed_methods" not in kw:
327
- if "method_whitelist" in self.__dict__:
328
- warnings.warn(
329
- "Using 'method_whitelist' with Retry is deprecated and "
330
- "will be removed in v2.0. Use 'allowed_methods' instead",
331
- DeprecationWarning,
332
- )
333
- params["method_whitelist"] = self.allowed_methods
334
- else:
335
- params["allowed_methods"] = self.allowed_methods
336
-
337
- params.update(kw)
338
- return type(self)(**params)
339
-
340
- @classmethod
341
- def from_int(cls, retries, redirect=True, default=None):
342
- """Backwards-compatibility for the old retries format."""
343
- if retries is None:
344
- retries = default if default is not None else cls.DEFAULT
345
-
346
- if isinstance(retries, Retry):
347
- return retries
348
-
349
- redirect = bool(redirect) and None
350
- new_retries = cls(retries, redirect=redirect)
351
- log.debug("Converted retries value: %r -> %r", retries, new_retries)
352
- return new_retries
353
-
354
- def get_backoff_time(self):
355
- """Formula for computing the current backoff
356
-
357
- :rtype: float
358
- """
359
- # We want to consider only the last consecutive errors sequence (Ignore redirects).
360
- consecutive_errors_len = len(
361
- list(
362
- takewhile(lambda x: x.redirect_location is None, reversed(self.history))
363
- )
364
- )
365
- if consecutive_errors_len <= 1:
366
- return 0
367
-
368
- backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
369
- return min(self.DEFAULT_BACKOFF_MAX, backoff_value)
370
-
371
- def parse_retry_after(self, retry_after):
372
- # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
373
- if re.match(r"^\s*[0-9]+\s*$", retry_after):
374
- seconds = int(retry_after)
375
- else:
376
- retry_date_tuple = email.utils.parsedate_tz(retry_after)
377
- if retry_date_tuple is None:
378
- raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
379
- if retry_date_tuple[9] is None: # Python 2
380
- # Assume UTC if no timezone was specified
381
- # On Python2.7, parsedate_tz returns None for a timezone offset
382
- # instead of 0 if no timezone is given, where mktime_tz treats
383
- # a None timezone offset as local time.
384
- retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
385
-
386
- retry_date = email.utils.mktime_tz(retry_date_tuple)
387
- seconds = retry_date - time.time()
388
-
389
- if seconds < 0:
390
- seconds = 0
391
-
392
- return seconds
393
-
394
- def get_retry_after(self, response):
395
- """Get the value of Retry-After in seconds."""
396
-
397
- retry_after = response.headers.get("Retry-After")
398
-
399
- if retry_after is None:
400
- return None
401
-
402
- return self.parse_retry_after(retry_after)
403
-
404
- def sleep_for_retry(self, response=None):
405
- retry_after = self.get_retry_after(response)
406
- if retry_after:
407
- time.sleep(retry_after)
408
- return True
409
-
410
- return False
411
-
412
- def _sleep_backoff(self):
413
- backoff = self.get_backoff_time()
414
- if backoff <= 0:
415
- return
416
- time.sleep(backoff)
417
-
418
- def sleep(self, response=None):
419
- """Sleep between retry attempts.
420
-
421
- This method will respect a server's ``Retry-After`` response header
422
- and sleep the duration of the time requested. If that is not present, it
423
- will use an exponential backoff. By default, the backoff factor is 0 and
424
- this method will return immediately.
425
- """
426
-
427
- if self.respect_retry_after_header and response:
428
- slept = self.sleep_for_retry(response)
429
- if slept:
430
- return
431
-
432
- self._sleep_backoff()
433
-
434
- def _is_connection_error(self, err):
435
- """Errors when we're fairly sure that the server did not receive the
436
- request, so it should be safe to retry.
437
- """
438
- if isinstance(err, ProxyError):
439
- err = err.original_error
440
- return isinstance(err, ConnectTimeoutError)
441
-
442
- def _is_read_error(self, err):
443
- """Errors that occur after the request has been started, so we should
444
- assume that the server began processing it.
445
- """
446
- return isinstance(err, (ReadTimeoutError, ProtocolError))
447
-
448
- def _is_method_retryable(self, method):
449
- """Checks if a given HTTP method should be retried upon, depending if
450
- it is included in the allowed_methods
451
- """
452
- # TODO: For now favor if the Retry implementation sets its own method_whitelist
453
- # property outside of our constructor to avoid breaking custom implementations.
454
- if "method_whitelist" in self.__dict__:
455
- warnings.warn(
456
- "Using 'method_whitelist' with Retry is deprecated and "
457
- "will be removed in v2.0. Use 'allowed_methods' instead",
458
- DeprecationWarning,
459
- )
460
- allowed_methods = self.method_whitelist
461
- else:
462
- allowed_methods = self.allowed_methods
463
-
464
- if allowed_methods and method.upper() not in allowed_methods:
465
- return False
466
- return True
467
-
468
- def is_retry(self, method, status_code, has_retry_after=False):
469
- """Is this method/status code retryable? (Based on allowlists and control
470
- variables such as the number of total retries to allow, whether to
471
- respect the Retry-After header, whether this header is present, and
472
- whether the returned status code is on the list of status codes to
473
- be retried upon on the presence of the aforementioned header)
474
- """
475
- if not self._is_method_retryable(method):
476
- return False
477
-
478
- if self.status_forcelist and status_code in self.status_forcelist:
479
- return True
480
-
481
- return (
482
- self.total
483
- and self.respect_retry_after_header
484
- and has_retry_after
485
- and (status_code in self.RETRY_AFTER_STATUS_CODES)
486
- )
487
-
488
- def is_exhausted(self):
489
- """Are we out of retries?"""
490
- retry_counts = (
491
- self.total,
492
- self.connect,
493
- self.read,
494
- self.redirect,
495
- self.status,
496
- self.other,
497
- )
498
- retry_counts = list(filter(None, retry_counts))
499
- if not retry_counts:
500
- return False
501
-
502
- return min(retry_counts) < 0
503
-
504
- def increment(
505
- self,
506
- method=None,
507
- url=None,
508
- response=None,
509
- error=None,
510
- _pool=None,
511
- _stacktrace=None,
512
- ):
513
- """Return a new Retry object with incremented retry counters.
514
-
515
- :param response: A response object, or None, if the server did not
516
- return a response.
517
- :type response: :class:`~urllib3.response.HTTPResponse`
518
- :param Exception error: An error encountered during the request, or
519
- None if the response was received successfully.
520
-
521
- :return: A new ``Retry`` object.
522
- """
523
- if self.total is False and error:
524
- # Disabled, indicate to re-raise the error.
525
- raise six.reraise(type(error), error, _stacktrace)
526
-
527
- total = self.total
528
- if total is not None:
529
- total -= 1
530
-
531
- connect = self.connect
532
- read = self.read
533
- redirect = self.redirect
534
- status_count = self.status
535
- other = self.other
536
- cause = "unknown"
537
- status = None
538
- redirect_location = None
539
-
540
- if error and self._is_connection_error(error):
541
- # Connect retry?
542
- if connect is False:
543
- raise six.reraise(type(error), error, _stacktrace)
544
- elif connect is not None:
545
- connect -= 1
546
-
547
- elif error and self._is_read_error(error):
548
- # Read retry?
549
- if read is False or not self._is_method_retryable(method):
550
- raise six.reraise(type(error), error, _stacktrace)
551
- elif read is not None:
552
- read -= 1
553
-
554
- elif error:
555
- # Other retry?
556
- if other is not None:
557
- other -= 1
558
-
559
- elif response and response.get_redirect_location():
560
- # Redirect retry?
561
- if redirect is not None:
562
- redirect -= 1
563
- cause = "too many redirects"
564
- redirect_location = response.get_redirect_location()
565
- status = response.status
566
-
567
- else:
568
- # Incrementing because of a server error like a 500 in
569
- # status_forcelist and the given method is in the allowed_methods
570
- cause = ResponseError.GENERIC_ERROR
571
- if response and response.status:
572
- if status_count is not None:
573
- status_count -= 1
574
- cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
575
- status = response.status
576
-
577
- history = self.history + (
578
- RequestHistory(method, url, error, status, redirect_location),
579
- )
580
-
581
- new_retry = self.new(
582
- total=total,
583
- connect=connect,
584
- read=read,
585
- redirect=redirect,
586
- status=status_count,
587
- other=other,
588
- history=history,
589
- )
590
-
591
- if new_retry.is_exhausted():
592
- raise MaxRetryError(_pool, url, error or ResponseError(cause))
593
-
594
- log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
595
-
596
- return new_retry
597
-
598
- def __repr__(self):
599
- return (
600
- "{cls.__name__}(total={self.total}, connect={self.connect}, "
601
- "read={self.read}, redirect={self.redirect}, status={self.status})"
602
- ).format(cls=type(self), self=self)
603
-
604
- def __getattr__(self, item):
605
- if item == "method_whitelist":
606
- # TODO: Remove this deprecated alias in v2.0
607
- warnings.warn(
608
- "Using 'method_whitelist' with Retry is deprecated and "
609
- "will be removed in v2.0. Use 'allowed_methods' instead",
610
- DeprecationWarning,
611
- )
612
- return self.allowed_methods
613
- try:
614
- return getattr(super(Retry, self), item)
615
- except AttributeError:
616
- return getattr(Retry, item)
617
-
618
-
619
- # For backwards compatibility (equivalent to pre-v1.9):
620
- Retry.DEFAULT = Retry(3)
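
The Retry docstring above gives the backoff rule, sleep = backoff_factor * 2 ** (n - 1) capped at DEFAULT_BACKOFF_MAX, and shows the PoolManager hookup. A short sketch mirroring that example; the counts and status codes here are illustrative, not recommendations:

```python
from urllib3 import PoolManager
from urllib3.util.retry import Retry

# 5 attempts in total; retry on the listed status codes.
# With backoff_factor=0.5 the sleeps between consecutive failures are
# 0s, 1s, 2s, 4s, ... (the first retry sleeps 0 because get_backoff_time()
# returns 0 until there are at least two consecutive errors).
retry = Retry(total=5, backoff_factor=0.5,
              status_forcelist=[429, 500, 502, 503, 504])
http = PoolManager(retries=retry)
response = http.request("GET", "http://example.com/")
```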
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/collect_env.py DELETED
@@ -1,242 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import importlib
3
- import numpy as np
4
- import os
5
- import re
6
- import subprocess
7
- import sys
8
- from collections import defaultdict
9
- import PIL
10
- import torch
11
- import torchvision
12
- from tabulate import tabulate
13
-
14
- __all__ = ["collect_env_info"]
15
-
16
-
17
- def collect_torch_env():
18
- try:
19
- import torch.__config__
20
-
21
- return torch.__config__.show()
22
- except ImportError:
23
- # compatible with older versions of pytorch
24
- from torch.utils.collect_env import get_pretty_env_info
25
-
26
- return get_pretty_env_info()
27
-
28
-
29
- def get_env_module():
30
- var_name = "DETECTRON2_ENV_MODULE"
31
- return var_name, os.environ.get(var_name, "<not set>")
32
-
33
-
34
- def detect_compute_compatibility(CUDA_HOME, so_file):
35
- try:
36
- cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump")
37
- if os.path.isfile(cuobjdump):
38
- output = subprocess.check_output(
39
- "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True
40
- )
41
- output = output.decode("utf-8").strip().split("\n")
42
- arch = []
43
- for line in output:
44
- line = re.findall(r"\.sm_([0-9]*)\.", line)[0]
45
- arch.append(".".join(line))
46
- arch = sorted(set(arch))
47
- return ", ".join(arch)
48
- else:
49
- return so_file + "; cannot find cuobjdump"
50
- except Exception:
51
- # unhandled failure
52
- return so_file
53
-
54
-
55
- def collect_env_info():
56
- has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM
57
- torch_version = torch.__version__
58
-
59
- # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional
60
- from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
61
-
62
- has_rocm = False
63
- if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None):
64
- has_rocm = True
65
- has_cuda = has_gpu and (not has_rocm)
66
-
67
- data = []
68
- data.append(("sys.platform", sys.platform)) # check-template.yml depends on it
69
- data.append(("Python", sys.version.replace("\n", "")))
70
- data.append(("numpy", np.__version__))
71
-
72
- try:
73
- import detectron2 # noqa
74
-
75
- data.append(
76
- ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__))
77
- )
78
- except ImportError:
79
- data.append(("detectron2", "failed to import"))
80
- except AttributeError:
81
- data.append(("detectron2", "imported a wrong installation"))
82
-
83
- try:
84
- import detectron2._C as _C
85
- except ImportError as e:
86
- data.append(("detectron2._C", f"not built correctly: {e}"))
87
-
88
- # print system compilers when extension fails to build
89
- if sys.platform != "win32": # don't know what to do for windows
90
- try:
91
- # this is how torch/utils/cpp_extensions.py choose compiler
92
- cxx = os.environ.get("CXX", "c++")
93
- cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True)
94
- cxx = cxx.decode("utf-8").strip().split("\n")[0]
95
- except subprocess.SubprocessError:
96
- cxx = "Not found"
97
- data.append(("Compiler ($CXX)", cxx))
98
-
99
- if has_cuda and CUDA_HOME is not None:
100
- try:
101
- nvcc = os.path.join(CUDA_HOME, "bin", "nvcc")
102
- nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True)
103
- nvcc = nvcc.decode("utf-8").strip().split("\n")[-1]
104
- except subprocess.SubprocessError:
105
- nvcc = "Not found"
106
- data.append(("CUDA compiler", nvcc))
107
- if has_cuda and sys.platform != "win32":
108
- try:
109
- so_file = importlib.util.find_spec("detectron2._C").origin
110
- except (ImportError, AttributeError):
111
- pass
112
- else:
113
- data.append(
114
- ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file))
115
- )
116
- else:
117
- # print compilers that are used to build extension
118
- data.append(("Compiler", _C.get_compiler_version()))
119
- data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip
120
- if has_cuda and getattr(_C, "has_cuda", lambda: True)():
121
- data.append(
122
- ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__))
123
- )
124
-
125
- data.append(get_env_module())
126
- data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__)))
127
- data.append(("PyTorch debug build", torch.version.debug))
128
-
129
- if not has_gpu:
130
- has_gpu_text = "No: torch.cuda.is_available() == False"
131
- else:
132
- has_gpu_text = "Yes"
133
- data.append(("GPU available", has_gpu_text))
134
- if has_gpu:
135
- devices = defaultdict(list)
136
- for k in range(torch.cuda.device_count()):
137
- cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k)))
138
- name = torch.cuda.get_device_name(k) + f" (arch={cap})"
139
- devices[name].append(str(k))
140
- for name, devids in devices.items():
141
- data.append(("GPU " + ",".join(devids), name))
142
-
143
- if has_rocm:
144
- msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else ""
145
- data.append(("ROCM_HOME", str(ROCM_HOME) + msg))
146
- else:
147
- try:
148
- from torch.utils.collect_env import get_nvidia_driver_version, run as _run
149
-
150
- data.append(("Driver version", get_nvidia_driver_version(_run)))
151
- except Exception:
152
- pass
153
- msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else ""
154
- data.append(("CUDA_HOME", str(CUDA_HOME) + msg))
155
-
156
- cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
157
- if cuda_arch_list:
158
- data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list))
159
- data.append(("Pillow", PIL.__version__))
160
-
161
- try:
162
- data.append(
163
- (
164
- "torchvision",
165
- str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__),
166
- )
167
- )
168
- if has_cuda:
169
- try:
170
- torchvision_C = importlib.util.find_spec("torchvision._C").origin
171
- msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
172
- data.append(("torchvision arch flags", msg))
173
- except (ImportError, AttributeError):
174
- data.append(("torchvision._C", "Not found"))
175
- except AttributeError:
176
- data.append(("torchvision", "unknown"))
177
-
178
- try:
179
- import fvcore
180
-
181
- data.append(("fvcore", fvcore.__version__))
182
- except (ImportError, AttributeError):
183
- pass
184
-
185
- try:
186
- import iopath
187
-
188
- data.append(("iopath", iopath.__version__))
189
- except (ImportError, AttributeError):
190
- pass
191
-
192
- try:
193
- import cv2
194
-
195
- data.append(("cv2", cv2.__version__))
196
- except (ImportError, AttributeError):
197
- data.append(("cv2", "Not found"))
198
- env_str = tabulate(data) + "\n"
199
- env_str += collect_torch_env()
200
- return env_str
201
-
202
-
203
- def test_nccl_ops():
204
- num_gpu = torch.cuda.device_count()
205
- if os.access("/tmp", os.W_OK):
206
- import torch.multiprocessing as mp
207
-
208
- dist_url = "file:///tmp/nccl_tmp_file"
209
- print("Testing NCCL connectivity ... this should not hang.")
210
- mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False)
211
- print("NCCL succeeded.")
212
-
213
-
214
- def _test_nccl_worker(rank, num_gpu, dist_url):
215
- import torch.distributed as dist
216
-
217
- dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu)
218
- dist.barrier(device_ids=[rank])
219
-
220
-
221
- if __name__ == "__main__":
222
- try:
223
- from detectron2.utils.collect_env import collect_env_info as f
224
-
225
- print(f())
226
- except ImportError:
227
- print(collect_env_info())
228
-
229
- if torch.cuda.is_available():
230
- num_gpu = torch.cuda.device_count()
231
- for k in range(num_gpu):
232
- device = f"cuda:{k}"
233
- try:
234
- x = torch.tensor([1, 2.0], dtype=torch.float32)
235
- x = x.to(device)
236
- except Exception as e:
237
- print(
238
- f"Unable to copy tensor to device={device}: {e}. "
239
- "Your CUDA environment is broken."
240
- )
241
- if num_gpu > 1:
242
- test_nccl_ops()
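
collect_env_info() above assembles the environment table (Python, PyTorch, CUDA, torchvision, GPU arch flags) that detectron2 asks for in bug reports. Assuming detectron2 is importable, it can be called directly; the __main__ block above also lets the module be run on its own:

```python
# Prints the same report as `python -m detectron2.utils.collect_env`
# (assumes a working detectron2 install in the current environment).
from detectron2.utils.collect_env import collect_env_info

print(collect_env_info())
```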
 
 
 
 
 
 
spaces/Bart92/RVC_HF/demucs/wav.py DELETED
@@ -1,174 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from collections import OrderedDict
8
- import hashlib
9
- import math
10
- import json
11
- from pathlib import Path
12
-
13
- import julius
14
- import torch as th
15
- from torch import distributed
16
- import torchaudio as ta
17
- from torch.nn import functional as F
18
-
19
- from .audio import convert_audio_channels
20
- from .compressed import get_musdb_tracks
21
-
22
- MIXTURE = "mixture"
23
- EXT = ".wav"
24
-
25
-
26
- def _track_metadata(track, sources):
27
- track_length = None
28
- track_samplerate = None
29
- for source in sources + [MIXTURE]:
30
- file = track / f"{source}{EXT}"
31
- info = ta.info(str(file))
32
- length = info.num_frames
33
- if track_length is None:
34
- track_length = length
35
- track_samplerate = info.sample_rate
36
- elif track_length != length:
37
- raise ValueError(
38
- f"Invalid length for file {file}: "
39
- f"expecting {track_length} but got {length}.")
40
- elif info.sample_rate != track_samplerate:
41
- raise ValueError(
42
- f"Invalid sample rate for file {file}: "
43
- f"expecting {track_samplerate} but got {info.sample_rate}.")
44
- if source == MIXTURE:
45
- wav, _ = ta.load(str(file))
46
- wav = wav.mean(0)
47
- mean = wav.mean().item()
48
- std = wav.std().item()
49
-
50
- return {"length": length, "mean": mean, "std": std, "samplerate": track_samplerate}
51
-
52
-
53
- def _build_metadata(path, sources):
54
- meta = {}
55
- path = Path(path)
56
- for file in path.iterdir():
57
- meta[file.name] = _track_metadata(file, sources)
58
- return meta
59
-
60
-
61
- class Wavset:
62
- def __init__(
63
- self,
64
- root, metadata, sources,
65
- length=None, stride=None, normalize=True,
66
- samplerate=44100, channels=2):
67
- """
68
- Waveset (or mp3 set for that matter). Can be used to train
69
- with arbitrary sources. Each track should be one folder inside of `path`.
70
- The folder should contain files named `{source}.{ext}`.
71
- Files will be grouped according to `sources` (each source is a list of
72
- filenames).
73
-
74
- Sample rate and channels will be converted on the fly.
75
-
76
- `length` is the sample size to extract (in samples, not duration).
77
- `stride` is how many samples to move by between each example.
78
- """
79
- self.root = Path(root)
80
- self.metadata = OrderedDict(metadata)
81
- self.length = length
82
- self.stride = stride or length
83
- self.normalize = normalize
84
- self.sources = sources
85
- self.channels = channels
86
- self.samplerate = samplerate
87
- self.num_examples = []
88
- for name, meta in self.metadata.items():
89
- track_length = int(self.samplerate * meta['length'] / meta['samplerate'])
90
- if length is None or track_length < length:
91
- examples = 1
92
- else:
93
- examples = int(math.ceil((track_length - self.length) / self.stride) + 1)
94
- self.num_examples.append(examples)
95
-
96
- def __len__(self):
97
- return sum(self.num_examples)
98
-
99
- def get_file(self, name, source):
100
- return self.root / name / f"{source}{EXT}"
101
-
102
- def __getitem__(self, index):
103
- for name, examples in zip(self.metadata, self.num_examples):
104
- if index >= examples:
105
- index -= examples
106
- continue
107
- meta = self.metadata[name]
108
- num_frames = -1
109
- offset = 0
110
- if self.length is not None:
111
- offset = int(math.ceil(
112
- meta['samplerate'] * self.stride * index / self.samplerate))
113
- num_frames = int(math.ceil(
114
- meta['samplerate'] * self.length / self.samplerate))
115
- wavs = []
116
- for source in self.sources:
117
- file = self.get_file(name, source)
118
- wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames)
119
- wav = convert_audio_channels(wav, self.channels)
120
- wavs.append(wav)
121
-
122
- example = th.stack(wavs)
123
- example = julius.resample_frac(example, meta['samplerate'], self.samplerate)
124
- if self.normalize:
125
- example = (example - meta['mean']) / meta['std']
126
- if self.length:
127
- example = example[..., :self.length]
128
- example = F.pad(example, (0, self.length - example.shape[-1]))
129
- return example
130
-
131
-
132
- def get_wav_datasets(args, samples, sources):
133
- sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8]
134
- metadata_file = args.metadata / (sig + ".json")
135
- train_path = args.wav / "train"
136
- valid_path = args.wav / "valid"
137
- if not metadata_file.is_file() and args.rank == 0:
138
- train = _build_metadata(train_path, sources)
139
- valid = _build_metadata(valid_path, sources)
140
- json.dump([train, valid], open(metadata_file, "w"))
141
- if args.world_size > 1:
142
- distributed.barrier()
143
- train, valid = json.load(open(metadata_file))
144
- train_set = Wavset(train_path, train, sources,
145
- length=samples, stride=args.data_stride,
146
- samplerate=args.samplerate, channels=args.audio_channels,
147
- normalize=args.norm_wav)
148
- valid_set = Wavset(valid_path, valid, [MIXTURE] + sources,
149
- samplerate=args.samplerate, channels=args.audio_channels,
150
- normalize=args.norm_wav)
151
- return train_set, valid_set
152
-
153
-
154
- def get_musdb_wav_datasets(args, samples, sources):
155
- metadata_file = args.metadata / "musdb_wav.json"
156
- root = args.musdb / "train"
157
- if not metadata_file.is_file() and args.rank == 0:
158
- metadata = _build_metadata(root, sources)
159
- json.dump(metadata, open(metadata_file, "w"))
160
- if args.world_size > 1:
161
- distributed.barrier()
162
- metadata = json.load(open(metadata_file))
163
-
164
- train_tracks = get_musdb_tracks(args.musdb, is_wav=True, subsets=["train"], split="train")
165
- metadata_train = {name: meta for name, meta in metadata.items() if name in train_tracks}
166
- metadata_valid = {name: meta for name, meta in metadata.items() if name not in train_tracks}
167
- train_set = Wavset(root, metadata_train, sources,
168
- length=samples, stride=args.data_stride,
169
- samplerate=args.samplerate, channels=args.audio_channels,
170
- normalize=args.norm_wav)
171
- valid_set = Wavset(root, metadata_valid, [MIXTURE] + sources,
172
- samplerate=args.samplerate, channels=args.audio_channels,
173
- normalize=args.norm_wav)
174
- return train_set, valid_set
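
Wavset's docstring above describes the expected on-disk layout: one folder per track holding `{source}.wav` files, with resampling and normalization done on the fly. Here is a sketch of building it by hand, using hypothetical paths and the usual MUSDB source names; during training, get_wav_datasets()/get_musdb_wav_datasets() above do this from the parsed args instead:

```python
from demucs.wav import Wavset, _build_metadata  # _build_metadata is normally internal

sources = ["drums", "bass", "other", "vocals"]        # hypothetical source names
meta = _build_metadata("dataset/train", sources)      # scans dataset/train/<track>/<source>.wav
train = Wavset(
    "dataset/train", meta, sources,
    length=44100 * 10, stride=44100 * 5,              # 10 s windows, 5 s hop
    samplerate=44100, channels=2,
)
example = train[0]   # tensor of shape (len(sources), channels, length)
```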
 
 
 
 
 
 
 
spaces/Bart92/RVC_HF/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py DELETED
@@ -1,16 +0,0 @@
1
- class F0Predictor(object):
2
- def compute_f0(self, wav, p_len):
3
- """
4
- input: wav:[signal_length]
5
- p_len:int
6
- output: f0:[signal_length//hop_length]
7
- """
8
- pass
9
-
10
- def compute_f0_uv(self, wav, p_len):
11
- """
12
- input: wav:[signal_length]
13
- p_len:int
14
- output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
15
- """
16
- pass
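
F0Predictor above is only the interface that the concrete pitch extractors elsewhere in this repo (PM, Harvest, Crepe and so on) implement. A toy subclass, just to make the expected shapes concrete; a flat 100 Hz contour is obviously not a real extractor:

```python
import numpy as np

# Assumes imports are resolved from the repository root, matching the file path above.
from infer.lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor


class ConstantF0Predictor(F0Predictor):
    """Toy predictor returning a flat 100 Hz contour of length p_len."""

    def compute_f0(self, wav, p_len):
        return np.full(p_len, 100.0)

    def compute_f0_uv(self, wav, p_len):
        f0 = np.full(p_len, 100.0)
        uv = np.ones(p_len)  # every frame flagged as voiced
        return f0, uv
```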
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Archery Battle.md DELETED
@@ -1,200 +0,0 @@
1
- <br />
2
- <h1>Batalla de tiro con arco: una manera divertida y emocionante para disparar flechas</h1>
3
- <p>¿Alguna vez has querido disparar flechas a tus amigos o enemigos sin hacerles daño? ¿Te gusta la emoción de competir contra otros jugadores en un juego de ritmo rápido y realista? Si es así, es posible que desee probar la batalla de tiro con arco, un deporte nuevo y popular que combina tiro con arco con balón prisionero. </p>
4
- <p>Batalla de tiro con arco es un juego donde dos equipos de jugadores disparan flechas con punta de espuma entre sí en una arena cubierta o al aire libre. El objetivo es eliminar a todos los miembros del equipo contrario golpeándolos con flechas o noqueando a sus objetivos. El juego es seguro, divertido y fácil de aprender, pero también requiere habilidad, estrategia y trabajo en equipo. </p>
5
- <h2>archery battle</h2><br /><p><b><b>Download File</b> &#10003; <a href="https://bltlly.com/2v6KUA">https://bltlly.com/2v6KUA</a></b></p><br /><br />
6
- <p>En este artículo, le diremos todo lo que necesita saber sobre la batalla de tiro con arco, incluyendo cómo jugar, qué equipo necesita, cómo mejorar sus técnicas de tiro y qué consejos puede seguir para convertirse en un mejor arquero. También le mostraremos cómo la batalla con arco puede beneficiar su salud y mente, así como responder a algunas preguntas frecuentes sobre este deporte. </p>
7
- <h2>Equipo</h2>
8
- <p>Antes de empezar a jugar a la batalla de tiro con arco, es necesario tener el equipo adecuado. Estos son algunos de los elementos esenciales que necesita para este juego:</p>
9
- <h3>Arcos y flechas</h3>
10
- <p>El equipo más importante para la batalla de tiro con arco son arcos y flechas. Hay diferentes tipos de arcos y flechas que puedes usar para este juego, dependiendo de tu preferencia y nivel de habilidad. </p>
11
- <p>El tipo más común de arco utilizado en la batalla de tiro con arco es el arco recurvo, que es el mismo estilo de arco utilizado en los Juegos Olímpicos. Tiene una forma curva que almacena más energía cuando se dibuja, haciéndolo más potente y preciso. Los arcos recurvos también son fáciles de usar y ajustar, por lo que son adecuados para principiantes y jugadores intermedios. </p>
12
-
13
- <p>Las flechas utilizadas en la batalla de tiro con arco están especialmente diseñadas para la seguridad y durabilidad. Tienen puntas de espuma que amortiguan el impacto cuando golpean a una persona o un objeto, evitando lesiones o daños. También tienen paletas de color brillante que los hacen más fáciles de. ver y rastrear en el aire. También tienen nocks de flecha iluminados que brillan en la oscuridad, lo que los hace ideales para juegos nocturnos o condiciones de poca luz. </p>
14
- <p>Al elegir un arco y una flecha para la batalla de tiro con arco, debe considerar la longitud de su sorteo, el peso del sorteo, la longitud de la flecha y la columna vertebral de la flecha. Estos factores afectan lo cómodo y preciso que es al disparar. Puedes medir la longitud del dibujo extendiendo los brazos y midiendo la distancia desde el pecho hasta la punta de los dedos. Puedes determinar tu peso al tirar de un arco y sentir cuánta fuerza puedes manejar. Puede encontrar la longitud de su flecha colocando una flecha en su arco y marcando donde se encuentra con el resto. Puedes revisar tu columna vertebral doblando una flecha y viendo cuánto se flexiona. </p>
15
- <p>También debes probar diferentes arcos y flechas antes de comprarlos o alquilarlos, para ver cuáles se adaptan a tu estilo y preferencia. Puedes pedir consejo a un profesional o a un jugador experimentado, o leer reseñas y valoraciones en línea de otros clientes. </p>
16
- <h3>Accesorios y engranajes de seguridad</h3>
17
- <p>Además de arcos y flechas, también necesita algunos accesorios y engranajes de seguridad para la batalla de tiro con arco. Estos artículos mejorarán su rendimiento y lo protegerán de lesiones o accidentes. Estos son algunos de los accesorios y engranajes de seguridad que necesitas para este juego:</p>
18
- <p></p>
19
- <ul>
20
- <li>Un carcaj: Un carcaj es un recipiente que sostiene las flechas y se une a su cinturón o espalda. Te permite llevar más flechas y acceder a ellas rápida y fácilmente. </li>
21
- <li>Una lengüeta del dedo o un guante: Una lengüeta del dedo o un guante es un pedazo de cuero o de tela que cubre sus dedos y los protege de la cadena cuando suelta una flecha. También mejora el agarre y evita ampollas o cortes. </li>
22
-
23
- <li>Un protector de pecho: Un protector de pecho es un chaleco o una camisa que cubre el pecho y evita que la cuerda se enganche en la ropa cuando disparas una flecha. También evita rozaduras o enganches. </li>
24
- <li>Una máscara o un casco: Una máscara o un casco es un casco que cubre tu cara y cabeza y los protege de flechas entrantes u otros objetos. También protege sus ojos del sol o del viento. </li>
25
- <li>Un silbato: Un silbato es un dispositivo que hace un sonido fuerte cuando lo soplas. Se utiliza para señalar el inicio y el final del juego, así como para comunicarse con sus compañeros de equipo o el árbitro. </li>
26
- </ul>
27
- <p>Siempre debes usar estos accesorios y engranajes de seguridad cuando juegues a la batalla de tiro con arco, incluso si eres un jugador experimentado. No solo te mantendrán a salvo, sino que también te harán ver genial y profesional. </p>
28
- <h2>Técnicas</h2>
29
- <p>Ahora que tienes el equipo adecuado, necesitas aprender algunas técnicas para mejorar tus habilidades de tiro. Estas son algunas de las técnicas que puedes practicar para convertirte en un mejor arquero:</p>
30
- <h3>Precisión y consistencia de disparo</h3>
31
- <p>La primera técnica que necesitas dominar es la precisión y consistencia del disparo. Esto significa golpear el objetivo donde lo quieras, cada vez que dispares. Para lograrlo, debes seguir estos pasos:</p>
32
- <ol>
33
- <li>Párate con los pies separados a la altura de los hombros, perpendicular al objetivo, con los dedos apuntando ligeramente hacia afuera. </li>
34
- <li>Mantén el arco en tu mano no dominante, con el codo ligeramente doblado y la muñeca relajada. </li>
35
- <li>Anote una flecha en la cadena, con la paleta de índice apuntando lejos del arco. </li>
36
- <li> Coloque tres dedos en la cadena, debajo de la flecha nock, con un dedo sobre ella. </li>
37
- <li>Levante el arco al nivel del ojo, con el brazo extendido pero no bloqueado. </li>
38
- <li>Dibuja la cuerda de vuelta a tu punto de anclaje, que suele ser la esquina de tu boca o mentón. </li>
39
- <li> Apunta a un punto pequeño en el objetivo, usando la punta de la flecha o un punto de vista como punto de referencia. </li>
40
-
41
- <li>Siga a través manteniendo su brazo de arco constante y apuntando hacia el objetivo hasta que la flecha lo golpea. </li>
42
- </ol> <p>Repite estos pasos para cada toma, e intenta mantener un ritmo y una forma constantes. También puede utilizar una aplicación de disparo o un cronógrafo para medir su velocidad y precisión, y comparar sus resultados con otros jugadores. </p>
43
- <h3>Disparar con ambos ojos abiertos</h3>
44
- <p>La siguiente técnica que necesitas aprender es disparar con ambos ojos abiertos. Esto significa mantener su ojo dominante en el objetivo, y su ojo no dominante en la flecha o la vista. Esto le dará una mejor percepción de profundidad, campo de visión y equilibrio, así como reducir la fatiga ocular y la tensión. </p>
45
- <p>Para disparar con ambos ojos abiertos, debes seguir estos pasos:</p>
46
- <ol>
47
- <li>Determina tu ojo dominante haciendo un triángulo con tus pulgares e índices, y mirando un objeto distante a través de él. </li>
48
- <li>Cerrar un ojo a la vez, y ver qué ojo mantiene el objeto en el centro del triángulo. Ese es el ojo dominante. </li>
49
- <li>Alinea tu ojo dominante con la cuerda, la flecha o el pasador de la vista, dependiendo de tu método de puntería. </li>
50
- <li>Mantén tu ojo no dominante abierto, pero enfócate en la vista de tu ojo dominante. </li>
51
- <li>Ignore cualquier visión doble o borrosa que pueda ocurrir, y confíe en el objetivo de su ojo dominante. </li>
52
- </ol>
53
- <p>Practica esta técnica hasta que te sientas cómodo y seguro con ella. También puedes usar un parche o un cegador para bloquear la vista de tu ojo no dominante y eliminarlo gradualmente a medida que te acostumbras a disparar con ambos ojos abiertos. </p>
54
- <h3>Relajando los dedos</h3>
55
- <p>La tercera técnica que necesitas dominar es relajar tus dedos. Esto significa mantener los dedos sueltos y relajados al sostener el arco y soltar la cuerda. Esto evitará que pares el arco, que se está torciendo o girando durante el disparo. Torcer el arco puede hacer que la flecha se desvíe o golpee el arco. </p>
56
- <p>Para relajar tus dedos, necesitas seguir estos pasos:</p>
57
-
58
- <li>Sostenga el arco en su mano no dominante, con un agarre ligero y una muñeca relajada. </li>
59
- <li> Coloque tres dedos en la cadena, debajo de la flecha nock, con un dedo sobre ella. </li>
60
- <li>Enganche la cadena con la primera articulación de los dedos, no las puntas o las almohadillas. </li>
61
- <li>Mantén los dedos relajados y ligeramente curvados, no tensos ni rectos. </li>
62
- <li>Dibuja la cuerda de vuelta a tu punto de anclaje, usando los músculos de la espalda y no los músculos del brazo. </li>
63
- <li>Libera la cuerda relajando tus dedos y dejando que se deslicen de la cuerda. </li>
64
- <li>Mantenga los dedos relajados y abiertos después de la liberación, no apretados o cerrados. </li>
65
- </ol>
66
- <p>Practica esta técnica hasta que te sientas natural y suave con ella. También puedes usar un cabestrillo para los dedos o un cabestrillo para la muñeca para evitar que se te caiga el arco después del lanzamiento y evitar agarrarlo demasiado fuerte. </p> <h3>Disparar paletas de color brillante y flechas iluminadas Nocks</h3>
67
- <p>La cuarta técnica que necesita aprender es disparar paletas de color brillante y nocks de flecha iluminada. Esto significa usar flechas que tienen paletas de color brillante y nocks de flecha iluminada, que son dispositivos que se unen a la parte posterior de la flecha y se iluminan cuando se dispara la flecha. Esto le ayudará a ver y rastrear su flecha en el aire, especialmente en distancias largas o condiciones de poca luz. </p>
68
- <p>Para disparar paletas de color brillante y nocks de flecha iluminada, debe seguir estos pasos:</p>
69
- <ol>
70
- <li>Elija flechas que tienen paletas de color brillante, como rojo, amarillo o verde. Estos colores contrastarán con el fondo y harán su flecha más visible. </li>
71
- <li>Elija las flechas que han iluminado nocks de flecha, como Nockturnal, Lumenok o Firenock. Estos nocks se activarán cuando se dispare la flecha y emitirán una luz brillante que hará que la flecha brille en la oscuridad. </li>
72
- <li>Alinea tu flecha con tu cuerda de arco, vista y objetivo, como de costumbre. </li>
73
- <li>Dispara tu flecha y observa cómo vuela en el aire. Deberías poder ver las paletas de colores brillantes y la flecha iluminada claramente. </li>
74
-
75
- </ol>
76
- <p>Practique esta técnica hasta que se acostumbre a disparar paletas de color brillante y nocks de flecha iluminada. También puede utilizar un telémetro o un visor para medir su distancia y precisión, y ajustar su objetivo en consecuencia. </p>
77
- <h2>Consejos</h2>
78
- <p>Además de aprender algunas técnicas, también necesitas algunos consejos para mejorar tu rendimiento de batalla con arco. Estos son algunos de los consejos que puedes seguir para convertirte en un mejor arquero:</p>
79
- <h3>Práctica de tiro a largas distancias</h3>
80
- <p>El primer consejo que necesitas seguir es practicar el tiro a largas distancias. Esto significa disparar su arco a distancias más allá de su zona de confort, como 50 metros o más. Esto te ayudará a amplificar tus defectos e identificar tus debilidades, así como a mejorar tu confianza y consistencia. </p>
81
- <p>Para practicar tiro a largas distancias, debes seguir estos pasos:</p>
82
- <ol>
83
- <li>Encuentra un lugar seguro y adecuado para disparar tu arco, como un campo de tiro con arco o un campo. </li>
84
- <li>Establezca un objetivo a larga distancia, como 50 metros o más. Puede utilizar un objetivo de tiro con arco estándar o un objetivo animal 3D, dependiendo de su preferencia. </li>
85
- <li>Dispara tu arco al objetivo, usando los mismos pasos y técnicas que antes. </li>
86
- <li>Analice sus disparos y ver dónde golpean o fallan el objetivo. Puede utilizar un sistema de puntuación o una medición de tamaño de grupo para evaluar su rendimiento. </li>
87
- <li>Identificar sus errores y corregirlos. Puedes usar un entrenador o una cámara de vídeo para obtener comentarios y consejos sobre cómo mejorar tu forma, objetivo, lanzamiento o seguimiento. </li>
88
- <li>Repita el proceso hasta lograr los resultados deseados. También puede aumentar la distancia o cambiar el objetivo a medida que avanza. </li>
89
- </ol>
90
- <p>Practica este consejo hasta que te sientas cómodo y seguro disparando a largas distancias. También puede desafiarse a sí mismo disparando en diferentes ángulos, alturas o condiciones de viento, para simular escenarios de la vida real. </p> <h3>Mantener la postura y posición correctas</h3>
91
-
92
- <p>Para mantener la postura y posición correctas, debe seguir estos pasos:</p>
93
- <ol>
94
- <li>Párate con los pies separados a la altura de los hombros, perpendicular al objetivo, con los dedos apuntando ligeramente hacia afuera. </li>
95
- <li>Mantén la espalda recta y los hombros relajados, no encorvados ni tensos. </li>
96
- <li>Mantén la cabeza erguida y la barbilla paralela al suelo, no inclinada ni torcida. </li>
97
- <li>Mantenga el brazo del arco extendido pero no bloqueado, con el codo ligeramente doblado y la muñeca relajada. </li>
98
- <li>Mantenga su brazo de dibujo en línea con su brazo de arco, con el codo ligeramente más alto que su hombro. </li>
99
- <li>Mantenga la mano del arco relajada y abierta, con un agarre ligero en el arco. </li>
100
- <li>Mantén la mano de la cuerda relajada y abierta, con un gancho ligero en la cuerda. </li>
101
- </ol>
102
- <p>Practica este consejo hasta que te sientas natural y cómodo con él. También puedes usar un espejo o un amigo para revisar tu postura y posición, y corregir cualquier error o desviación. </p>
103
- <h3>Consultar a un profesional o unirse a un club de tiro con arco</h3>
104
- <p>El tercer consejo que necesitas seguir es consultar a un profesional o unirte a un club de tiro con arco. Esto significa buscar orientación y retroalimentación de alguien que tiene más experiencia y conocimientos que tú en tiro con arco. Esto te ayudará a aprender nuevas habilidades y técnicas, así como a evitar malos hábitos o errores. </p>
105
- <p>Para consultar a un profesional o unirse a un club de tiro con arco, debe seguir estos pasos:</p>
106
- <ol>
107
- <li>Encuentra un instructor de tiro con arco certificado o entrenador que te puede enseñar los aspectos básicos y avanzados de tiro con arco. Puede buscar en línea o pedir recomendaciones de otros arqueros. </li>
108
- <li>Reserve una lección o una sesión con el instructor o entrenador, y siga sus instrucciones y consejos. Puede hacer preguntas, tomar notas o grabar vídeos para mejorar su experiencia de aprendizaje. </li>
109
- <li>Encuentre un club de tiro con arco o un grupo que organiza eventos de tiro con arco o actividades en su área. Puede buscar en línea o pedir referencias de otros arqueros. </li>
110
-
111
- </ol>
112
- <p>Practica este consejo hasta que te sientas más seguro y competente en tiro con arco. También puede unirse a foros o comunidades en línea donde puede interactuar con otros arqueros, compartir consejos y trucos, o pedir ayuda o consejo. </p>
113
- <h3>Acondicionando físicamente tu cuerpo</h3>
114
- <p>El cuarto consejo que necesitas seguir es acondicionar físicamente tu cuerpo. Esto significa ejercitar y fortalecer los músculos, las articulaciones y los huesos involucrados en el tiro con arco. Esto le ayudará a mejorar su resistencia, resistencia y flexibilidad, así como a prevenir lesiones o fatiga. </p>
115
- <p>Para acondicionar físicamente tu cuerpo, debes seguir estos pasos:</p>
116
- <ol>
117
- <li>Haz algunos ejercicios de calentamiento antes de disparar tu arco, como estirar, trotar o saltar. Esto aumentará su circulación sanguínea y preparará su cuerpo para la actividad. </li>
118
- <li>Haz algunos ejercicios de fuerza después de disparar tu arco, como flexiones, dominadas o tablas. Esto desarrollará tus músculos y mejorará tu potencia y estabilidad. </li>
119
- <li>Haz algunos ejercicios cardiovasculares en tus días de descanso, como correr, andar en bicicleta o nadar. Esto aumentará su ritmo cardíaco y mejorará su ingesta y entrega de oxígeno. </li>
120
- <li>Haz algunos ejercicios de yoga en tus días de descanso, como saludos al sol, perro hacia abajo o pose de guerrero. Esto relajará sus músculos y mejorará su equilibrio y coordinación. </li>
121
- </ol>
122
- <p>Practica este consejo hasta que te sientas en forma y saludable. También puede consultar a un médico o entrenador antes de comenzar cualquier programa de ejercicio, especialmente si tiene alguna afección médica o lesiones. </p> <h3>Tomando los Primeros Auxilios Esenciales con Usted</h3>
123
- <p>El quinto consejo que necesitas seguir es llevar contigo lo esencial de primeros auxilios. Esto significa traer algunos artículos que pueden ayudarle a tratar lesiones menores o accidentes que pueden ocurrir durante la batalla de tiro con arco. Esto le ayudará a evitar complicaciones o infecciones, así como a reducir el dolor o la incomodidad. </p>
124
- <p>Para llevar consigo lo esencial de primeros auxilios, debe seguir estos pasos:</p>
125
- <ol>
126
-
127
- <li>Llene la bolsa con algunos artículos que le pueden ayudar a lidiar con lesiones comunes de tiro con arco, como cortes, moretones, ampollas, esguinces o quemaduras. Algunos de estos artículos son vendajes, gasas, cinta, tijeras, pinzas, toallitas antisépticas, ungüento antibiótico, analgésicos, compresas de hielo o gel de áloe vera. </li>
128
- <li>Mantenga la bolsa en un lugar seguro y accesible, como su automóvil, su mochila o su aljaba. </li>
129
- <li>Utilice los elementos cuando sea necesario, y siga las instrucciones sobre cómo aplicarlos. También puede pedir ayuda a un compañero de equipo o a un árbitro si no está seguro de cómo usarlos. </li>
130
- <li>Reemplace los artículos cuando están agotados o caducados, y revise la bolsa o la bolsa regularmente para detectar cualquier daño o contaminación. </li>
131
- </ol>
132
- <p>Practica este consejo hasta que te sientas preparado y seguro. También puede tomar un curso de primeros auxilios o leer un manual de primeros auxilios para aprender más sobre cómo manejar diferentes tipos de lesiones o emergencias. </p>
133
- <h2>Conclusión</h2>
134
- <p>La batalla con arco es una forma divertida y emocionante de disparar flechas a tus amigos o enemigos sin hacerles daño. Es un juego que combina tiro con arco con balón prisionero, donde dos equipos de jugadores disparan flechas con punta de espuma entre sí en una arena cubierta o al aire libre. El objetivo es eliminar a todos los miembros del equipo contrario golpeándolos con flechas o noqueando a sus objetivos. </p>
135
- <p>Para jugar a la batalla de tiro con arco, necesita tener el equipo adecuado, como arcos, flechas, accesorios y engranajes de seguridad. También necesita aprender algunas técnicas, como precisión y consistencia de disparo, disparar con los dos ojos abiertos, relajar los dedos y disparar paletas de color brillante y nocks de flecha iluminada. También necesitas seguir algunos consejos, como practicar tiro a largas distancias, mantener la postura y posición correctas, consultar a un profesional o unirte a un club de tiro con arco, acondicionar físicamente tu cuerpo y llevar contigo lo esencial de primeros auxilios. </p>
136
-
137
- <p>Si está interesado en probar la batalla de tiro con arco, puede encontrar más información sobre los siguientes recursos:</p>
138
- <ul>
139
- <li><a href="">Batalla de tiro con arco - El sitio web oficial de la batalla de tiro con arco</a></li>
140
- <li><a href="">Tiro con arco 360 - La revista en línea para arqueros recreativos</a></li>
141
- <li><a href="">World Archery - Federación Internacional de Deportes de Tiro con Arco</a></li>
142
- </ul>
143
- <p>Esperamos que hayas disfrutado leyendo este artículo y hayas aprendido algo nuevo sobre la batalla con arco. Te animamos a que lo pruebes y te diviertas con tus amigos o familiares. Recuerda estar siempre seguro y respetuoso cuando juegues a este juego. Y no te olvides de apuntar alto y disparar recto! </p>
144
- <p>Batalla de tiro con arco: el último juego de habilidad y emoción! </p>
145
- <h2>Preguntas frecuentes</h2>
146
- <h3>¿Cuáles son algunas lesiones o riesgos comunes involucrados en la batalla de tiro con arco? </h3>
147
- <p>Algunas de las lesiones o riesgos comunes involucrados en la batalla de tiro con arco son:</p>
148
- <ul>
149
- <li>Cortes o moretones de las flechas o de la cuerda que golpea tu piel. </li>
150
- <li>Ampollas o quemaduras por la fricción de la cuerda en los dedos. </li>
151
- <li>Esguinces o distensiones por estirar o retorcer los músculos o las articulaciones. </li>
152
- <li>Lesiones oculares por las flechas o el sol golpeando tus ojos. </li>
153
- <li>Reacciones alérgicas de las puntas de espuma o la pintura en las flechas. </li>
154
- </ul>
155
- <p>Para prevenir estas lesiones o riesgos, siempre debe usar engranajes de seguridad adecuados y accesorios al jugar batalla de tiro con arco. También debes seguir las reglas e instrucciones del juego y respetar a los demás jugadores. También debe calentar antes de jugar y refrescarse después de jugar. También debe buscar atención médica si experimenta algún dolor o molestia después de jugar. </p>
156
- <h3>¿Cómo puedo encontrar un lugar de batalla con arco o evento cerca de mí? </h3>
157
- <p>Para encontrar un lugar de batalla o evento cerca de mí, puede utilizar los siguientes métodos:</p>
158
- <ul>
159
-
160
- <li>Busque en línea lugares de batalla con arco o eventos en su área, usando palabras clave como "batalla con arco cerca de mí", "batalla con arco en (nombre de la ciudad)", o "batalla con arco (fecha)". También puede utilizar plataformas en línea como Google Maps, Yelp o TripAdvisor para encontrar comentarios y valoraciones de otros clientes. </li>
161
- <li>Pregunte por ahí para obtener recomendaciones de sus amigos, familiares o colegas que han intentado u oído hablar de la batalla de tiro con arco. También puede unirse a grupos o foros en línea donde puede interactuar con otros entusiastas del tiro con arco y obtener consejos y sugerencias. </li>
162
- </ul>
163
- <p>Antes de elegir un lugar de batalla o evento de tiro con arco, debe verificar su disponibilidad, precios, instalaciones, reglas y medidas de seguridad. También debe leer sus términos y condiciones y firmar un formulario de renuncia si es necesario. </p>
164
- <h3> ¿Cuáles son algunos otros tipos de juegos de tiro con arco o disciplinas que puedo probar? </h3>
165
- <p>Algunos de los otros tipos de juegos de tiro con arco o disciplinas que puedes probar son:</p>
166
- <ul>
167
- <li>Tiro con arco: Este es el tipo más común y tradicional de tiro con arco, donde se dispara a un objetivo circular con anillos de diferentes colores. También es el tipo de tiro con arco utilizado en los Juegos Olímpicos y otras competiciones. </li>
168
- <li>Tiro con arco de campo: Este es un tipo de tiro con arco donde se dispara a diferentes objetivos de diferentes tamaños y distancias en un entorno natural, como un bosque o una colina. También es un tipo de tiro con arco que pone a prueba tus habilidades en diferentes terrenos y condiciones climáticas. </li>
169
- <li>Tiro con arco en 3D: Este es un tipo de tiro con arco en el que disparas a objetivos animales realistas hechos de espuma o plástico. También es un tipo de tiro con arco que simula escenarios de caza y desafía su precisión y ética. </li>
170
- <li>Bowhunting: Este es un tipo de tiro con arco donde se cazan animales salvajes con un arco y una flecha. También es un tipo de tiro con arco que requiere una licencia, permiso y regulaciones en la mayoría de los lugares. </li>
171
-
172
- </ul>
173
- <p>Antes de probar cualquiera de estos tipos de juegos de tiro con arco o disciplinas, usted debe aprender los fundamentos y reglas de cada uno. También debe practicar sus habilidades y técnicas con el equipo adecuado y medidas de seguridad. También debes respetar el medio ambiente y los animales cuando juegues estos juegos o disciplinas. </p>
174
- <h3>¿Cuánto cuesta comprar o alquilar equipo de tiro con arco? </h3>
175
- <p>El costo de comprar o alquilar equipo de tiro con arco depende de varios factores, como la calidad, cantidad, marca y ubicación del equipo. Sin embargo, aquí hay algunas estimaciones promedio basadas en fuentes en línea:</p>
176
- <table>
177
- <tr><th>Artículo</th><th>Costo promedio para comprar</th><th>Costo promedio para alquilar</th></tr>
178
- <tr><td>Bow</td><td>$100-$300</td><td>$10-$20 por hora</td></tr>
179
- <tr><td>Arrow</td><td>$5-$10 per piece</td><td>$1-$2 per piece</td></tr>
180
- <tr><td>Quiver</td><td>$10-$20 por pieza</td><td>$1-$2 por pieza</td></tr>
181
- <tr><td>Finger Tab or Glove</td><td>$5-$10 per piece</td><td>$1-$2 per piece</td></tr>
182
- <tr><td>Protector de brazo</td><td>$5-$10 por pieza</td><td>$1-$2 por pieza</td></tr>
183
- <tr><td>Protector de pecho</td><td>$10-$20 por pieza</td><td>$1-$2 por pieza</td></tr>
184
- <tr><td>Máscara o casco</td><td>$20-$40 por pieza</td><td>$2-$4 por pieza</td></tr>
185
- <tr><td>Silbato</td><td>$1-$5 por pieza</td><td>$0.5-$1 por pieza</td></tr>
186
- <tr><td>Costo total</td><td>$156-$436 por set</td><td>$16.5-$33 por set por hora</td></tr>
187
- </table>
188
- <p>Tenga en cuenta que estos son solo costos aproximados y pueden variar dependiendo de la fuente y el tiempo de compra o alquiler. Siempre debe comparar los precios de diferentes vendedores o proveedores antes de comprar o alquilar cualquier equipo. También debe verificar la calidad y el estado del equipo antes de usarlo. También debe cuidar el equipo y devolverlo en las mismas condiciones en que lo recibió. </p>
189
- <h3>¿Cómo puedo unirme o iniciar un equipo de batalla con arco o una liga? </h3>
190
-
191
- <p>Para unirse o iniciar un equipo de batalla con arco o una liga, debe seguir estos pasos:</p>
192
- <ol>
193
- <li>Encuentra algunos jugadores que quieran jugar a la batalla de tiro con arco contigo, como tus amigos, familiares, colegas o compañeros de clase. También puedes reclutar jugadores en línea o fuera de línea, usando redes sociales, folletos o de boca en boca. </li>
194
- <li>Elige un nombre y un logotipo para tu equipo o liga, y regístralo en el sitio web oficial de la batalla de tiro con arco o una asociación de tiro con arco local. También puedes crear un sitio web o una página de redes sociales para tu equipo o liga, donde puedes publicar actualizaciones, fotos, videos o noticias. </li>
195
- <li>Entrena y practica con tu equipo o liga regularmente, y desarrolla tus estrategias y tácticas. También puedes contratar a un entrenador o mentor para ayudarte a mejorar tu rendimiento y trabajo en equipo. </li>
196
- <li>Encuentre y únase a torneos o eventos que se adapten a su nivel y preferencia, como competiciones locales, regionales, nacionales o internacionales. También puedes organizar tus propios torneos o eventos, e invitar a otros equipos o ligas a participar. </li>
197
- </ol>
198
- <p>Practica este consejo hasta que te sientas orgulloso y satisfecho con tu equipo o liga. También puede unirse o iniciar varios equipos o ligas, dependiendo de su disponibilidad e interés. También debes respetar y apoyar a tus compañeros o miembros de la liga, y celebrar tus logros y fracasos juntos. </p> 64aa2da5cf<br />
199
- <br />
200
- <br />
spaces/Benson/text-generation/Examples/Battlefield 3 Descargar.md DELETED
@@ -1,95 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar Battlefield 3 para PC</h1>
3
- <p>Battlefield 3 es uno de los juegos de disparos en primera persona más populares y aclamados de todos los tiempos. Ofrece una experiencia inmersiva y realista de la guerra moderna, con impresionantes gráficos, sonido y jugabilidad. Puedes jugar solo o con tus amigos en varios modos de juego, como campaña, cooperativo y multijugador. También puede explorar 29 mapas masivos y utilizar una variedad de vehículos, armas y gadgets para ayudarle a subir el calor. </p>
4
- <p>Si estás interesado en jugar a Battlefield 3 en tu PC, es posible que te estés preguntando cómo descargarlo. Bueno, has llegado al lugar correcto. En este artículo, te mostraremos cómo descargar Battlefield 3 para PC en cuatro sencillos pasos. También te daremos información y consejos útiles sobre cómo disfrutar del juego al máximo. Así que, empecemos. </p>
5
- <h2>battlefield 3 descargar</h2><br /><p><b><b>DOWNLOAD</b> &raquo; <a href="https://bltlly.com/2v6KNi">https://bltlly.com/2v6KNi</a></b></p><br /><br />
6
- <h2>Paso 1: Únete a EA Play o compra Battlefield 3 Premium Edition</h2>
7
- <p>Lo primero que tienes que hacer es decidir si quieres unirte a EA Play o comprar Battlefield 3 Premium Edition. EA Play es un servicio de suscripción que te da acceso a una colección de juegos de EA, incluyendo Battlefield 3. Puedes unirte a EA Play por $4.99 al mes o $29.99 al año. También obtendrás un 10% de descuento en compras de EA y contenido exclusivo. </p>
8
- <p>Battlefield 3 Premium Edition es un paquete que incluye el juego base y los cinco paquetes de expansión: Back to Karkand, Close Quarters, Armored Kill, Aftermath y End Game. También obtendrá un kit de ventaja multijugador que desbloquea 15 armas avanzadas, gadgets, mejoras de vehículos y más. Puedes comprar Battlefield 3 Premium Edition por $39.99 en Steam u Origin.</p>
9
- <p></p>
10
- <h2>Paso 2: Descargar e instalar Origin</h2>
11
-
12
- <h2>Paso 3: Inicia Origin e inicia sesión con tu cuenta de EA</h2>
13
- <p>Una vez que haya instalado Origin, ejecútelo desde su escritorio o menú de inicio. Tendrás que iniciar sesión con tu cuenta de EA, que es la misma que tu cuenta de Origin. Si aún no tienes una cuenta de EA, puedes crear una gratis haciendo clic en Crear una cuenta. Tendrá que introducir su dirección de correo electrónico, contraseña, fecha de nacimiento, país y pregunta de seguridad. </p>
14
- <h2>Paso 4: Encuentra Battlefield 3 en tu biblioteca de juegos y haz clic en Descargar</h2>
15
- <p>Después de haber iniciado sesión con su cuenta de EA, verá su biblioteca de juegos en el lado izquierdo de la ventana de Origin. Aquí puedes encontrar todos los juegos que tienes o a los que tienes acceso a través de EA Play. Para encontrar Battlefield 3, puedes desplazarte por la lista o usar la barra de búsqueda en la parte superior. <p>Una vez que hayas encontrado Battlefield 3, haz clic en él para abrir su página de juego. Aquí puedes ver más detalles sobre el juego, como su descripción, capturas de pantalla, vídeos, reseñas y requisitos del sistema. También puedes acceder a la configuración del juego, logros y tablas de clasificación. Para comenzar a descargar el juego, haga clic en el botón Descargar en el lado derecho de la página. Puede elegir dónde guardar los archivos del juego y cuánto ancho de banda usar para la descarga. También puede pausar o reanudar la descarga en cualquier momento. </p>
16
- <h1>Lo que necesitas saber antes de jugar Battlefield 3</h1>
17
- <p>Ahora que has descargado Battlefield 3, estás listo para jugarlo. Pero antes de entrar en acción, hay algunas cosas que debes saber para aprovechar al máximo tu experiencia de juego. En esta sección, cubriremos los requisitos del sistema para PC, los modos de juego y características, y algunos consejos y trucos para principiantes. </p>
18
- <h2>Requisitos del sistema para PC</h2>
19
- <p>Battlefield 3 es un juego exigente que requiere un PC potente para funcionar sin problemas. Aquí están los requisitos mínimos y recomendados del sistema para PC:</p>
20
- <table>
21
- <tr><th>Mínimo</th><th>Recomendado</th></tr>
22
-
23
- <tr><td>Procesador: 2 GHz dual-core (Core 2 Duo 2.4 GHz o Athlon X2 2.7 GHz)</td><td>Procesador: Quad-core CPU</td></tr>
24
- <tr><td>Memoria: 2 GB de RAM</td><td>Memoria: 4 GB de RAM</td></tr>
25
- <tr><td>Gráficos: DirectX 10 compatible con 512 MB de RAM (NVIDIA GeForce 8, 9, 200, 300, 400 o 500 series con NVIDIA GeForce 8800 GT o ATI Radeon HD 3870)</td><td>Gráficos: DirectX 11 compatible con 1024 MB de RAM (NVIDIA GeForce GTX 560 o ATI Radeon HD 6950)</td></tr>
26
- <tr><td>Almacenamiento: 20 GB de espacio disponible</td><td>Almacenamiento: 20 GB de espacio disponible</td></tr>
27
- <tr><td>Tarjeta de sonido: DirectX compatible</td><td>Tarjeta de sonido: DirectX compatible</td></tr>
28
- <tr><td>Conexión a Internet: Conexión de banda ancha para la activación en línea y el juego en línea - 512 Kbps o más rápido</td><td>Conexión a Internet: Conexión de banda ancha para la activación en línea y el juego en línea - 512 Kbps o más rápido</td></tr>
29
- </table>
30
- <p>Si tu PC cumple o supera estos requisitos, deberías poder disfrutar de Battlefield 3 sin problemas. Sin embargo, si su PC no cumple con estos requisitos, es posible que experimente retrasos, tartamudeo, baja velocidad de fotogramas o fallos. En ese caso, puede intentar bajar la configuración de gráficos, actualizar sus controladores o actualizar su hardware. </p>
31
- <h2>Modos de juego y características</h2>
32
- <p>Battlefield 3 ofrece una variedad de modos de juego y características que se adaptan a diferentes estilos de juego y preferencias. Estos son algunos de los principales:</p>
33
- <h3>Modo de campaña</h3>
34
- <p>El modo campaña es donde puedes seguir la historia de Battlefield 3, que tiene lugar en el año 2014. Usted jugará como diferentes personajes de la Infantería de Marina de los Estados Unidos, como el sargento Henry Blackburn y el sargento Jonathan Miller. Usted también será testigo de los acontecimientos desde la perspectiva de un agente ruso llamado Dimitri Mayakovsky. Viajará a través de varios lugares, como Irán, Irak, Francia y Nueva York. Se enfrentará a diferentes enemigos, como la Liberación y Resistencia Popular (PLR), una facción rebelde del ejército iraní. </p>
35
-
36
- <h3>Modo cooperativo</h3>
37
- <p>El modo cooperativo es donde puedes formar equipo con otro jugador en línea y completar seis misiones que están separadas del modo de campaña. Estas misiones se basan en eventos y escenarios del mundo real, como rescatar rehenes, infiltrarse en bases enemigas y escoltar VIPs. Tendrás que trabajar junto a tu pareja para lograr tus objetivos y sobrevivir. </p>
38
- <p>El modo cooperativo es una experiencia dinámica y cooperativa que dura entre dos y tres horas. Cuenta con chat de voz, tablas de clasificación y armas desbloqueables. También tiene cuatro niveles de dificultad: fácil, normal, duro y duro. Cuanto más alto sea el nivel de dificultad, más desafiantes serán los enemigos y las situaciones. </p>
39
- <h3>Mut <h3>Modo multijugador</h3>
40
- <p>El modo multijugador es donde puedes competir con o contra otros jugadores en línea en varios modos de juego, como Conquest, Rush, Team Deathmatch y Squad Deathmatch. Puedes elegir entre cuatro clases: Asalto, Ingeniero, Soporte y Reconocimiento. Cada clase tiene sus propias armas, dispositivos y roles. También puede utilizar vehículos, como tanques, helicópteros, jets y barcos. Puedes jugar en 29 mapas basados en las ubicaciones de los modos de campaña y cooperativo. También puede personalizar su loadout, apariencia y etiquetas de perro. </p>
41
- <p>El modo multijugador es una experiencia competitiva y dinámica que puede durar horas. Cuenta con chat de voz, escuadrones, rangos, premios, estadísticas y servidores. También tiene cuatro modos de juego: Normal, Hardcore, Solo Infantería y Personalizado. El modo de juego determina las reglas y ajustes del partido, tales como fuego amistoso, regeneración de la salud, minimapa y HUD.</p>
42
- <h1>Cómo disfrutar de Battlefield 3 con tus amigos</h1>
43
-
44
- <h2>Cómo unirse o crear un servidor multijugador</h2>
45
- <p>Para unirse o crear un servidor multijugador, debe ir al menú multijugador desde el menú principal. Aquí puede ver una lista de servidores disponibles a los que puede unirse. Puede filtrar los servidores por modo de juego, mapa, región, ping, jugadores y más. También puede buscar un servidor específico por nombre o palabra clave. Para unirse a un servidor, simplemente haga clic en él y espere a que el juego se cargue. </p>
46
- <p>Para crear un servidor multijugador, es necesario ir a la opción alquilar un servidor desde el menú multijugador. Aquí, puede alquilar un servidor de EA o un proveedor externo durante un determinado período de tiempo y precio. También puede personalizar la configuración del servidor, como el nombre, la descripción, la contraseña, el modo de juego, la rotación del mapa, el número de tickets, el número de jugadores y más. Para crear un servidor, simplemente haga clic en el botón de alquiler y confirme su pago. </p>
47
- <h2>Cómo comunicarse y cooperar con sus compañeros de equipo</h2>
48
- <p>Para comunicarse y cooperar con sus compañeros de equipo, debe usar las funciones de chat de voz y escuadrón. El chat de voz te permite hablar con tus compañeros de equipo usando el micrófono. Puedes usar el chat de voz para coordinar tus acciones, compartir información o simplemente chatear con tus amigos. Para usar el chat de voz, debe habilitarlo desde la configuración de audio y presionar el botón de pulsar para hablar (predeterminado: Alt izquierdo) cuando desee hablar. </p>
49
-
50
- <h2>Cómo personalizar tu loadout y desbloquear nuevos elementos</h2>
51
- <p>Para personalizar tu cargamento y desbloquear nuevos elementos, debes ir al menú de personalización del menú multijugador. Aquí puedes ver tus cuatro clases y sus respectivas armas, gadgets y especializaciones. Puede cambiar su carga seleccionando una clase y haciendo clic en los elementos que desea equipar. También puede ver las estadísticas y descripciones de cada elemento. </p>
52
- <p>Para desbloquear nuevos elementos, necesitas ganar puntos de experiencia (XP) y posicionarte. Puedes ganar XP jugando el juego, completando objetivos, matando enemigos, ayudando a compañeros de equipo y más. Al subir de rango, desbloquearás nuevas armas, gadgets y especializaciones para cada clase. También desbloquearás nuevas opciones de apariencia, como camuflajes, placas de identificación y emblemas. </p>
53
- <h1>Cómo obtener más de Battlefield 3</h1>
54
- <p>Battlefield 3 es un juego que ofrece mucho contenido y características para que lo disfrutes. Pero si quieres sacar más provecho, hay algunas maneras de hacerlo. En esta sección, te mostraremos cómo acceder a los paquetes de expansión y DLC, cómo usar la aplicación y el sitio web de Battlelog y cómo unirte a la comunidad de Battlefield y obtener actualizaciones. </p>
55
- <h2>Cómo acceder a los paquetes de expansión y DLCs</h2>
56
- <p>Battlefield 3 tiene cinco paquetes de expansión y dos DLC que añaden más mapas, modos, armas, vehículos, asignaciones, logros y trofeos al juego. Los packs de expansión son Back to Karkand, Close Quarters, Armored Kill, Aftermath y End Game. Los DLCs son el Physical Warfare Pack y el Back to Karkand Dog Tag Pack.</p>
57
- <p>Para acceder a los packs de expansión y DLCs, necesitas unirte a EA Play o comprar Battlefield 3 Premium Edition. EA Play te da acceso a todos los packs de expansión y DLCs gratis mientras estés suscrito. Battlefield 3 Premium Edition incluye todos los paquetes de expansión y DLC en un solo paquete. También puedes comprar cada paquete de expansión o DLC por separado en Steam o Origin.</p>
58
-
59
- <h2>Cómo usar la aplicación y el sitio web de Battlelog</h2>
60
- <p>Battlelog es una aplicación gratuita y un sitio web que te permite acceder a tu perfil de Battlefield 3, estadísticas, amigos, servidores, noticias y más desde tu smartphone o navegador. Puedes usar Battlelog para realizar un seguimiento de tu progreso, comparar tu rendimiento con otros jugadores, unirte o crear pelotones (grupos de jugadores), chatear con tus amigos, navegar por los servidores y mucho más. Para usar Battlelog, necesitas tener una cuenta EA e iniciar sesión con ella. Puedes descargar la aplicación Battlelog desde Google Play Store o la App Store. También puede acceder al sitio web de Battlelog desde [battlelog.battlefield.com]. </p>
61
- <h2>Cómo unirte a la comunidad de Battlefield y recibir actualizaciones</h2>
62
- <p>Battlefield 3 tiene una gran y activa comunidad de jugadores y fans que comparten su pasión y entusiasmo por el juego. Puedes unirte a la comunidad de Battlefield y recibir actualizaciones sobre las últimas noticias, eventos, concursos, consejos y más. Estas son algunas formas de hacerlo:</p>
63
- <ul>
64
- <li>Sigue las cuentas de redes sociales oficiales de Battlefield en Facebook, Twitter, Instagram, YouTube y Twitch.</li>
65
- <li>Visite el sitio web oficial de Battlefield en [www.battlefield.com] y suscríbase al boletín de noticias. </li>
66
- <li>Únete a los foros oficiales de Battlefield en [forums.battlefield.com] y participa en discusiones, comentarios y soporte. </li>
67
- <li>Únete al servidor oficial de Battlefield Discord en [discord.gg/battlefield] y chatea con otros jugadores y desarrolladores. </li>
68
- <li>Únete al subreddit oficial de Battlefield en [r/battlefield3] y publica tus memes, clips, capturas de pantalla, preguntas y más. </li>
69
- </ul>
70
- <h1>Conclusión</h1>
71
- <p>Battlefield 3 es un juego que ofrece una experiencia emocionante e inmersiva de la guerra moderna. Puedes reproducirlo en tu PC siguiendo estos cuatro sencillos pasos:</p>
72
- <ol>
73
- <li>Únete a EA Play o compra Battlefield 3 Premium Edition.</li>
74
- <li>Descargar e instalar Origin.</li>
75
- <li>Inicie Origin e inicie sesión con su cuenta de EA. </li>
76
-
77
- </ol>
78
- <p>También puedes disfrutar del juego con tus amigos uniéndote o creando un servidor multijugador, comunicándote y cooperando con tus compañeros de equipo, y personalizando tu carga y desbloqueando nuevos elementos. También puedes sacar más provecho del juego accediendo a los paquetes de expansión y DLC, utilizando la aplicación y el sitio web de Battlelog, uniéndote a la comunidad de Battlefield y recibiendo actualizaciones. </p>
79
- <p>Battlefield 3 es un juego que te mantendrá entretenido durante horas con sus increíbles gráficos, sonido, jugabilidad y contenido. Si estás buscando un juego que te desafíe, te excite y te sumerja en una zona de guerra realista, Battlefield 3 es el juego para ti. ¿Qué estás esperando? ¡Descarga Battlefield 3 hoy y únete a la acción! </p>
80
- <h2>Preguntas frecuentes</h2>
81
- <p>Aquí hay algunas preguntas frecuentes sobre Battlefield 3:</p>
82
- <ul>
83
- <li><b>Q: ¿Cuánto dura Battlefield 3?</b></li>
84
- <li>A: El modo de campaña de Battlefield 3 dura entre seis y ocho horas. El modo cooperativo dura entre dos y tres horas. El modo multijugador puede durar horas dependiendo de tu preferencia. </li>
85
- <li><b>Q: ¿Battlefield 3 es multiplataforma? </b></li>
86
- <li>A: No, Battlefield 3 no es multiplataforma. Solo puedes jugar con otros jugadores que tengan la misma plataforma que tú (PC, Xbox 360 o PlayStation 3). </li>
87
- <li><b>Q: ¿Battlefield 3 sigue activo? </b></li>
88
- <li>A: Sí, Battlefield 3 sigue activo. Todavía hay muchos jugadores que juegan online en varios servidores. También puedes encontrar nuevo contenido y actualizaciones de EA y la comunidad. </li>
89
- <li><b>Q: ¿Battlefield 3 es gratis? </b></li>
90
- <li>A: No, Battlefield 3 no es gratis. Necesitas unirte a EA Play o comprar Battlefield 3 Premium Edition para jugar. Sin embargo, puedes probar el juego gratis por un tiempo limitado descargando la versión de prueba de Origin.</li>
91
- <li><b>Q: ¿Vale la pena Battlefield 3? </b></li>
92
-
93
- </ul></p> 64aa2da5cf<br />
94
- <br />
95
- <br />
spaces/Benson/text-generation/Examples/Descargar Bump Pop Mod.md DELETED
@@ -1,76 +0,0 @@
1
- <br />
2
- <h1>Descargar Bump Pop Mod: Un divertido y único juego casual</h1>
3
- <p>Si estás buscando un juego casual que sea fácil de jugar pero difícil de dominar, deberías probar Bump Pop Mod. Este es un juego que desafiará sus reflejos, estrategia y creatividad a medida que pop globos y chocar con otros objetos. En este artículo, te diremos qué es Bump Pop Mod, cómo descargarlo e instalarlo, y algunos consejos y trucos para jugarlo. </p>
4
- <h2>descargar bump pop mod</h2><br /><p><b><b>Download</b> &bull;&bull;&bull; <a href="https://bltlly.com/2v6JO8">https://bltlly.com/2v6JO8</a></b></p><br /><br />
5
- <h2>¿Qué es Bump Pop Mod? </h2>
6
- <p>Bump Pop Mod es un juego que fue desarrollado por VOODOO, un popular estudio de juegos que crea juegos adictivos y casuales. El juego está disponible para dispositivos Android y se puede descargar de forma gratuita desde varios sitios web. El juego tiene más de 10 millones de descargas y una calificación de 4.4 estrellas en Google Play Store.</p>
7
- <h3>Características de Bump Pop Mod</h3>
8
- <p>Bump Pop Mod tiene muchas características que lo hacen divertido y único. Algunos de ellos son:</p>
9
- <ul>
10
- <li>Puedes personalizar tu personaje con diferentes pieles, sombreros, gafas y accesorios. </li>
11
- <li> Puedes desbloquear y usar varios potenciadores, como imanes, cohetes, bombas y escudos. </li>
12
- <li>Puedes explorar diferentes mundos con diferentes temas, como selva, desierto, ciudad y espacio. </li>
13
- <li>Puedes competir con otros jugadores en tablas de clasificación y torneos online. </li>
14
- <li>Puedes disfrutar de los gráficos coloridos y la música pegadiza. </li>
15
- </ul>
16
- <h3>Cómo jugar Bump Pop Mod</h3>
17
- <p>El modo de juego de Bump Pop Mod es simple pero adictivo. Controlas un personaje que sostiene un globo. Su objetivo es hacer estallar tantos globos como sea posible por chocar con ellos. También puedes toparte con otros objetos, como monedas, gemas, potenciadores, enemigos y obstáculos. Sin embargo, hay que tener cuidado de no hacer estallar su propio globo o chocar con objetos peligrosos, como picos, bombas o láseres. Si lo haces, perderás el juego. </p>
18
- <p></p>
19
-
20
- <h2>¿Cómo descargar e instalar Bump Pop Mod? </h2>
21
- <p>Si desea descargar e instalar Bump Pop Mod en su dispositivo Android, tendrá que seguir estos pasos:</p>
22
- <h3>Requisitos para Bump Pop Mod</h3>
23
- <p>Antes de descargar e instalar Bump Pop Mod, tendrá que asegurarse de que su dispositivo cumple con estos requisitos:</p>
24
- <ul>
25
- <li>Tu dispositivo debe tener Android 4.4 o superior. </li>
26
- <li> Su dispositivo debe tener al menos 100 MB de espacio de almacenamiento libre. </li>
27
- <li> Su dispositivo debe tener una conexión a Internet estable. </li>
28
- </ul>
29
- <h3>Pasos para descargar e instalar Bump Pop Mod</h3>
30
- <p>Una vez que haya comprobado los requisitos, puede proceder con estos pasos:</p>
31
- <ol>
32
- <li>Ir a un sitio web que ofrece Bump Pop Mod para su descarga. Algunos ejemplos son [1](https://modradar.cc/id/bump-pop), [2](https:s:/lygiang.net/bump-pop-mod-apk/), o [3](https://www.apksum.com/app/bump-pop-/modcom.voodoo.bumppop). </li>
33
- <li>Haga clic en el botón de descarga o enlace para comenzar a descargar el archivo mod. El archivo estará en formato ZIP o JAR. </li>
34
- <li>Una vez completada la descarga, localiza el archivo en la carpeta del administrador de archivos o descargas de tu dispositivo. </li>
35
- <li>Extraiga el archivo utilizando una aplicación de extracción de archivos, como [4](https://play.google.com/store/apps/details?id=com.rarlab.rar&hl=en_US&gl=US) o [5](https://play.google.com/store/apps/details?id=com.winzip.android&hl=en_US&gl=US). </li>
36
- <li>Abra la carpeta extraída y busque el archivo APK. Este es el archivo que contiene el juego. </li>
37
- <li>Toque en el archivo APK para comenzar a instalar el juego. Es posible que necesite habilitar fuentes desconocidas en la configuración del dispositivo para permitir la instalación. </li>
38
- <li>Espera a que termine la instalación y luego abre el juego. Ahora puedes disfrutar jugando Bump Pop Mod con monedas ilimitadas, gemas y potenciadores. </li>
39
- </ol>
40
- <h2>Consejos y trucos para jugar Bump Pop Mod</h2>
41
-
42
- <h3>Usar potenciadores sabiamente</h3>
43
- <p>Los potenciadores son elementos que pueden darte una ventaja en el juego. Pueden ayudarte a hacer estallar más globos, evitar obstáculos o derrotar a los enemigos. Sin embargo, no son ilimitados y tienen un tiempo de reutilización. Por lo tanto, debe usarlos sabiamente y solo cuando los necesite. Algunos ejemplos de potenciadores son:</p>
44
- <ul>
45
- <li>imán: Este poder atrae monedas y gemas cercanas a usted. </li>
46
- <li>Cohete: Este encendido te lanza hacia adelante a alta velocidad, haciendo estallar cualquier globo en tu camino. </li>
47
- <li>Bomba: Este encendido explota y explota todos los globos en un gran radio a tu alrededor. </li>
48
- <li>Escudo: Este poder te protege de un golpe de un obstáculo o un enemigo. </li>
49
- </ul>
50
- <h3>Recoge monedas y gemas</h3>
51
- <p>Monedas y gemas son la moneda del juego. Puedes usarlas para comprar nuevas pieles y potenciadores. También puedes usarlos para revivirte si pierdes el juego. Puedes recoger monedas y gemas chocándote con ellas o usando el imán. También puedes obtener monedas y gemas de bonificación completando logros, viendo anuncios o participando en torneos. </p>
52
- <h3>Evitar obstáculos y enemigos</h3>
53
- <p>Los obstáculos y los enemigos son las cosas que pueden hacerte perder el juego. Pueden explotar tu globo, dañar tu salud o ralentizarte. Debes evitar toparte con ellos o usar potenciadores para enfrentarlos. Algunos ejemplos de obstáculos y enemigos son:</p>
54
- <ul>
55
- <li>Picos: Estos son objetos afilados que pueden hacer estallar el globo al instante. </li>
56
- <li>Bombas: Estos son objetos explosivos que pueden explotar su globo y dañar su salud. </li>
57
- <li>Láseres: Estos son rayos de luz que pueden hacer estallar su globo y dañar su salud. </li>
58
- <li>Cactus: Estas son plantas que pueden hacer estallar tu globo y ralentizarte. </li>
59
- <li>Aves: Estas son criaturas voladoras que pueden explotar su globo y dañar su salud. </li>
60
- <li>Coches: Estos son vehículos en movimiento que pueden explotar su globo y dañar su salud. </li>
61
- </ul>
62
- <h2>Conclusión</h2>
63
-
64
- <h2>Preguntas frecuentes</h2>
65
- <p>Aquí están algunas de las preguntas más comunes que la gente hace sobre Bump Pop Mod:</p>
66
- <h4>Q: ¿Es seguro descargar Bump Pop Mod? </h4>
67
- <p>A: Sí, Bump Pop Mod es seguro de descargar siempre y cuando lo descargue desde un sitio web de confianza. Sin embargo, siempre debes tener cuidado al descargar cualquier juego modificado o hackeado, ya que pueden contener virus o malware que pueden dañar tu dispositivo. También debe escanear el archivo con una aplicación antivirus antes de instalarlo. </p>
68
- <h4>Q: ¿Es legal jugar a Bump Pop Mod? </h4>
69
- <p>A: Sí, Bump Pop Mod es legal para jugar siempre y cuando no lo use para fines ilegales, como hacer trampa o hackear. Sin embargo, debe tener en cuenta que jugar juegos modificados o hackeados puede violar los términos de servicio del desarrollador o editor original del juego. Por lo tanto, puedes enfrentarte a algunos riesgos o consecuencias, como ser excluido del juego o perder los datos de tu cuenta. </p>
70
- <h4>Q: ¿Cómo puedo actualizar Bump Pop Mod? </h4>
71
- <p>A: Para actualizar Bump Pop Mod, usted <p>A: Para actualizar Bump Pop Mod, tendrá que descargar la última versión del archivo mod desde el mismo sitio web donde lo descargó antes. Entonces, tendrá que desinstalar la versión anterior del juego e instalar el nuevo. También es posible que tenga que borrar la caché y los datos del juego antes de instalar la nueva versión. </p>
72
- <h4>Q: ¿Cómo puedo desinstalar Bump Pop Mod? </h4>
73
- <p>A: Para desinstalar Bump Pop Mod, tendrá que ir a la configuración de su dispositivo y buscar la sección de aplicaciones o aplicaciones. Entonces, usted tendrá que encontrar y seleccionar Bump Pop Mod de la lista de aplicaciones instaladas. A continuación, deberá pulsar en el botón de desinstalación y confirmar su acción. También es posible que deba eliminar el archivo mod del almacenamiento del dispositivo. </p>
74
- <h4>Q: ¿Dónde puedo encontrar más juegos como Bump Pop Mod? </h4> 64aa2da5cf<br />
75
- <br />
76
- <br />
spaces/Benson/text-generation/Examples/Descargar Fts 2020 Apk.md DELETED
@@ -1,61 +0,0 @@
1
-
2
- <h1>Descargar FTS 2020 APK: Cómo disfrutar del mejor juego de fútbol en su dispositivo Android</h1>
3
- <p>Si usted es un fan de los juegos de fútbol, es posible que haya oído hablar de FTS 2020, uno de los juegos de fútbol más populares y realistas para dispositivos Android. FTS 2020 es un juego que le permite experimentar la emoción y la emoción de jugar al fútbol en su teléfono inteligente o tableta. Puedes crear tu propio equipo, personalizar a tus jugadores, competir en varios torneos y desafiar a tus amigos en línea. En este artículo, le diremos todo lo que necesita saber sobre FTS 2020, incluidas sus características, beneficios y cómo descargarlo en su dispositivo Android. </p>
4
- <h2>¿Qué es FTS 2020? </h2>
5
- <p>FTS 2020 es la abreviatura de First Touch Soccer 2020, un juego de fútbol desarrollado por First Touch Games, una empresa especializada en la creación de juegos deportivos para plataformas móviles. FTS 2020 es la última entrega de la serie FTS, que ha existido desde 2011. FTS 2020 es una versión mejorada de FTS 2019, con nuevas características, gráficos, jugabilidad y contenido. FTS 2020 no está disponible en la Google Play Store, pero se puede descargar desde su sitio web oficial o de otras fuentes como un archivo APK. </p>
6
- <h2>descargar fts 2020 apk</h2><br /><p><b><b>DOWNLOAD</b> &#10002; &#10002; &#10002; <a href="https://bltlly.com/2v6Jne">https://bltlly.com/2v6Jne</a></b></p><br /><br />
7
- <h3>Características de FTS 2020</h3>
8
- <p>FTS 2020 tiene muchas características que lo convierten en uno de los mejores juegos de fútbol para dispositivos Android. Estos son algunos de ellos:</p>
9
- <h4>- Gráficos realistas y animaciones</h4>
10
- <p>FTS 2020 tiene gráficos impresionantes que hacen que el juego parezca un partido de fútbol de la vida real. Los jugadores, estadios, multitudes, kits y bolas están diseñados con detalles y texturas de alta calidad. Las animaciones también son suaves y realistas, mostrando los movimientos y expresiones de los jugadores. También puedes ajustar la configuración de gráficos según el rendimiento de tu dispositivo. </p>
11
- <h4>- Juego suave y sensible</h4>
12
-
13
- <h4>- Equipos y jugadores personalizables</h4>
14
- <p>FTS 2020 le permite crear su propio equipo desde cero o elegir entre más de 500 equipos de diferentes ligas y países. También puedes editar los nombres, apariciones, habilidades, posiciones y números de tus jugadores. También puede transferir jugadores entre equipos o comprar nuevos jugadores del mercado. También puede diseñar sus propios kits, logotipos y estadios para su equipo. </p>
15
- <h4>- Varios modos de juego y torneos</h4>
16
- <p>FTS 2020 tiene diferentes modos de juego que se adaptan a su estado de ánimo y estilo. Puedes jugar un partido rápido contra un oponente al azar o un amigo, o jugar un modo de carrera donde puedes administrar tu equipo y progresar a través de diferentes temporadas y competiciones. También puede participar en varios torneos, como la Copa del Mundo, la Liga de Campeones, la Europa League, la Copa América y más. También puedes crear tus propios torneos personalizados con tus propias reglas y equipos. </p>
17
- <h4>- Opciones multijugador offline y online</h4>
18
- <p>FTS 2020 se puede jugar fuera de línea o en línea, dependiendo de su preferencia. Puedes jugar sin conexión a Internet y disfrutar del juego sin anuncios ni interrupciones. También puedes jugar online con otros jugadores de todo el mundo y mostrar tus habilidades y clasificaciones. También puede unirse o crear sus propios clubes, y chatear con otros jugadores. </p>
19
- <h2>¿Por qué descargar FTS 2020 APK? </h2>
20
- <p>FTS 2020 no está disponible en la Google Play Store, pero todavía se puede descargar como un archivo APK de su sitio web oficial o de otras fuentes. Hay muchos beneficios de descargar FTS 2020 APK, tales como:</p>
21
- <h3>Beneficios de la descarga FTS 2020 APK</h3>
22
- <p>Aquí están algunos de los beneficios de descargar FTS 2020 APK:</p>
23
- <p></p>
24
- <h4>- Gratis y fácil de instalar</h4>
25
-
26
- <h4>- No hay necesidad de acceso root o archivos adicionales</h4>
27
- <p>FTS 2020 no requiere ningún acceso root o archivos adicionales para ejecutarse en su dispositivo. No es necesario modificar la configuración de su dispositivo o descargar cualquier dato adicional o archivos obb. Solo tienes que descargar el archivo APK e instalarlo, y ya está bien para ir. </p>
28
- <h4>- Compatible con la mayoría de los dispositivos Android</h4>
29
- <p>FTS 2020 es compatible con la mayoría de los dispositivos Android que tienen al menos 1 GB de RAM y Android 4.1 o superior. No necesita preocuparse por las especificaciones de su dispositivo o problemas de compatibilidad. FTS 2020 funcionará sin problemas y de manera eficiente en su dispositivo, siempre y cuando tenga suficiente espacio de almacenamiento y duración de la batería. </p>
30
- <h4>- Actualizaciones regulares y correcciones de errores</h4>
31
- <p>FTS 2020 se actualiza regularmente por sus desarrolladores, que siempre están trabajando para mejorar el juego y corregir cualquier error o fallo que pueda ocurrir. Siempre puede obtener la última versión de FTS 2020 descargándola desde su sitio web oficial o desde otras fuentes. También puedes consultar las actualizaciones dentro del juego y descargarlas directamente desde allí. </p>
32
- <h2>Cómo descargar FTS 2020 APK? </h2>
33
- <p>Ahora que sabe lo que es FTS 2020 y por qué debería descargarlo, es posible que se pregunte cómo descargarlo en su dispositivo Android. Bueno, no te preocupes, porque te tenemos cubierto. Aquí hay una guía paso a paso para descargar FTS 2020 APK en su dispositivo Android:</p>
34
- <h3> Guía paso a paso para descargar FTS 2020 APK</h3>
35
- <p>Siga estos pasos para descargar FTS 2020 APK en su dispositivo Android:</p>
36
- <h4>- Visite el sitio web oficial de FTS 2020 o haga clic en el enlace de abajo</h4>
37
- <p>El primer paso es visitar el sitio web oficial de FTS 2020 o hacer clic en el enlace de abajo, que le llevará a la página de descarga de FTS 2020 APK. Allí verás un botón de descarga que te permitirá descargar el archivo APK. </p>
38
- <h4>- Toque en el botón de descarga y esperar a que el archivo APK para ser descargado</h4>
39
-
40
- <h4>- Ir a la configuración del dispositivo y activar la opción "Fuentes desconocidas" </h4>
41
- <p>El tercer paso es ir a la configuración del dispositivo y habilitar la opción "Fuentes desconocidas", que le permitirá instalar aplicaciones de fuentes distintas de la Google Play Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. Esto le permitirá instalar FTS 2020 APK en su dispositivo. </p>
42
- <h4>- Localizar el archivo APK descargado en su administrador de archivos y toque en él para instalarlo</h4>
43
- <p>El cuarto paso es localizar el archivo APK descargado en el administrador de archivos y toque en él para instalarlo. El proceso de instalación tomará unos segundos, y verá un mensaje de confirmación cuando se haga. </p>
44
- <h4>- Lanzar el juego y disfrutar de jugar FTS 2020 en su dispositivo Android</h4>
45
- <p>El paso final es lanzar el juego y disfrutar jugando FTS 2020 en su dispositivo Android. Puedes encontrar el icono del juego en la pantalla de inicio o en el cajón de la aplicación. Toque en él para abrir el juego, y siga las instrucciones para configurar su perfil y preferencias. También puedes conectar tu juego a tu cuenta de Facebook o Google Play Games para guardar tu progreso y logros. ¡Ahora puedes empezar a jugar FTS 2020 y divertirte! </p>
46
- <h2>Conclusión</h2>
47
- <p>FTS 2020 es uno de los mejores juegos de fútbol para dispositivos Android, con gráficos realistas, jugabilidad fluida, equipos personalizables, varios modos de juego y opciones multijugador en línea. Puede descargar FTS 2020 APK desde su sitio web oficial o de otras fuentes, e instalarlo en su dispositivo sin ningún tipo de molestia. FTS 2020 le dará horas de entretenimiento y emoción, ya que juega al fútbol como nunca antes. Descargar FTS 2020 APK hoy y disfrutar del mejor juego de fútbol en su dispositivo Android! </p>
48
- <h2>Preguntas frecuentes</h2>
49
- <p>Aquí hay algunas preguntas frecuentes sobre FTS 2020 APK:</p>
50
- <h4>- ¿Es seguro descargar FTS 2020 APK? </h4>
51
-
52
- <h4>- ¿Es FTS 2020 APK legal para descargar? </h4>
53
- <p>Sí, FTS 2020 APK es legal para descargar, ya que no es una versión pirata o agrietada del juego. Es una versión original del juego que se distribuye por sus desarrolladores de forma gratuita. Usted no tiene que preocuparse por cualquier problema legal o sanciones, como FTS 2020 APK no viola ninguna ley o reglamento. </p>
54
- <h4>- ¿Cuánto espacio de almacenamiento requiere FTS 2020 APK? </h4>
55
- <p>FTS 2020 APK requiere alrededor de 300 MB de espacio de almacenamiento en su dispositivo, que no es mucho en comparación con otros juegos de calidad y contenido similares. También puede mover el juego a su tarjeta SD si desea ahorrar espacio de almacenamiento interno. </p>
56
- <h4>- ¿Cómo puedo actualizar FTS 2020 APK? </h4>
57
- <p>Puede actualizar FTS 2020 APK mediante la descarga de la última versión del juego desde su sitio web oficial o de otras fuentes, y la instalación sobre la versión existente. No necesita desinstalar la versión anterior o perder sus datos, ya que la actualización sobrescribirá los archivos antiguos y mantendrá su progreso y configuración intactos. </p>
58
- <h4>- ¿Cómo puedo contactar a los desarrolladores de FTS 2020 APK? </h4>
59
- <p>Puede ponerse en contacto con los desarrolladores de FTS 2020 APK visitando su sitio web oficial o sus páginas de redes sociales, donde se puede encontrar su información de contacto y formularios de comentarios. También puedes enviarles un correo electrónico o un mensaje, y te responderán lo antes posible. </p> 64aa2da5cf<br />
60
- <br />
61
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/datetime.py DELETED
@@ -1,11 +0,0 @@
1
- """For when pip wants to check the date or time.
2
- """
3
-
4
- import datetime
5
-
6
-
7
- def today_is_later_than(year: int, month: int, day: int) -> bool:
8
- today = datetime.date.today()
9
- given = datetime.date(year, month, day)
10
-
11
- return today > given
spaces/Boadiwaa/Recipes/openai/api_resources/engine.py DELETED
@@ -1,42 +0,0 @@
1
- import time
2
- import warnings
3
-
4
- from openai import util
5
- from openai.api_resources.abstract import ListableAPIResource, UpdateableAPIResource
6
- from openai.error import InvalidAPIType, TryAgain
7
- from openai.util import ApiType
8
-
9
-
10
- class Engine(ListableAPIResource, UpdateableAPIResource):
11
- OBJECT_NAME = "engines"
12
-
13
- def generate(self, timeout=None, **params):
14
- start = time.time()
15
- while True:
16
- try:
17
- return self.request(
18
- "post",
19
- self.instance_url() + "/generate",
20
- params,
21
- stream=params.get("stream"),
22
- plain_old_data=True,
23
- )
24
- except TryAgain as e:
25
- if timeout is not None and time.time() > start + timeout:
26
- raise
27
-
28
- util.log_info("Waiting for model to warm up", error=e)
29
-
30
- def search(self, **params):
31
- if self.typed_api_type == ApiType.AZURE:
32
- return self.request("post", self.instance_url("search"), params)
33
- elif self.typed_api_type == ApiType.OPEN_AI:
34
- return self.request("post", self.instance_url() + "/search", params)
35
- else:
36
- raise InvalidAPIType("Unsupported API type %s" % self.api_type)
37
-
38
- def embeddings(self, **params):
39
- warnings.warn(
40
- "Engine.embeddings is deprecated, use Embedding.create", DeprecationWarning
41
- )
42
- return self.request("post", self.instance_url() + "/embeddings", params)
spaces/BraydenMoore/a-random-unsecured-camera/main.py DELETED
@@ -1,163 +0,0 @@
1
- from flask import Flask, Response, render_template, send_file, stream_with_context, request, session, redirect, url_for
2
- import requests
3
- import random
4
- import pickle as pkl
5
- import pycountry
6
- import datetime as dt
7
- import pytz
8
- from io import BytesIO
9
- import logging
10
- import os
11
- import time
12
-
13
- app = Flask(__name__)
14
- app.secret_key = 'green-flounder'
15
-
16
- with open('video_urls.pkl', 'rb') as f:
17
- live_urls = pkl.load(f)
18
- live_urls = [i for i in live_urls if i!= 'http://2.40.36.158:8084/img/video.mjpeg']
19
- live_urls[4161] = live_urls[1163]
20
-
21
- with open('owner_dict.pkl', 'rb') as f:
22
- owner_dict = pkl.load(f)
23
-
24
- from urllib.parse import urlsplit, urlunsplit, quote, parse_qsl, urlencode
25
-
26
- def encode_url(url):
27
- scheme, netloc, path, query_string, fragment = urlsplit(url)
28
- query_params = parse_qsl(query_string)
29
- encoded_query_params = [(key, quote(value)) for key, value in query_params]
30
- encoded_query_string = urlencode(encoded_query_params)
31
- finished = urlunsplit((scheme, netloc, path, encoded_query_string, fragment))
32
- return finished
33
-
34
- from geolite2 import geolite2
35
- def get_location(ip):
36
- start_time = time.time()
37
- reader = geolite2.reader()
38
- location = reader.get(ip)
39
- geolite2.close()
40
- end_time = time.time()
41
-
42
- elapsed_time = end_time - start_time
43
- print(f"\nTime taken for get_location: {elapsed_time} seconds\n")
44
-
45
- if location:
46
- return {'country': location['country']['names']['en'] if 'country' in location else 'unknown country',
47
- 'city': location['city']['names']['en'] if 'city' in location else 'unknown city',
48
- 'region': location['subdivisions'][0]['names']['en'] if 'subdivisions' in location else 'unknown region',
49
- 'loc': str(location['location']['latitude']) + ',' + str(location['location']['longitude']) if 'location' in location else '0,0',
50
- 'timezone': location['location']['time_zone'] if 'location' in location and 'time_zone' in location['location'] else 'America/New_York'}
51
- else:
52
- return {'country': 'unknown country',
53
- 'city': 'unknown city',
54
- 'region': 'unknown region',
55
- 'loc': str(0) + ',' + str(0),
56
- 'timezone':'America/New_York'}
57
-
58
-
59
- def latlon_to_pixel(loc):
60
- latitude = float(loc.split(',')[0])
61
- longitude = float(loc.split(',')[1])
62
-
63
- y = ((90-latitude)/180)
64
- x = ((longitude+180)/360)
65
- return x*100, y*100
66
-
67
- from urllib.parse import urlparse, parse_qs
68
-
69
- @app.route('/proxy/<path:url>')
70
- def proxy(url):
71
- start_time = time.time()
72
-
73
- full_url = url
74
- query_string = request.query_string.decode("utf-8")
75
- if query_string:
76
- full_url += "?" + query_string
77
-
78
- headers = {
79
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
80
- 'Accept-Encoding': 'gzip, deflate',
81
- 'Accept-Language': 'en-US,en;q=0.9',
82
- 'Cache-Control': 'max-age=0',
83
- 'Connection': 'keep-alive',
84
- 'Dnt': '1',
85
- 'Upgrade-Insecure-Requests': '1',
86
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
87
- }
88
-
89
- clean_url = full_url.replace('proxy/', '')
90
- clean_url = encode_url(clean_url)
91
-
92
- try:
93
- req = requests.get(clean_url, headers=headers, stream=True, timeout=1)
94
-
95
- end_time = time.time()
96
- elapsed_time = end_time - start_time
97
- print(f"\n{clean_url}\nTime taken for proxy: {elapsed_time} seconds\n")
98
-
99
- return Response(req.iter_content(chunk_size=1024), content_type=req.headers['content-type'])
100
-
101
- except Exception as e:
102
- print(e)
103
- return Response("Error", status=500)
104
-
105
-
106
- @app.route('/')
107
- def index():
108
- id = request.args.get('id')
109
- if 'current_feed' in session and request.args.get('new', 'false') == 'false':
110
- feed = session['current_feed']
111
- url = live_urls[int(feed)]
112
- else:
113
- feed = random.randint(0, len(live_urls) - 1)
114
- url = live_urls[int(feed)]
115
- session['current_feed'] = feed
116
-
117
- if id:
118
- url = live_urls[int(id)]
119
- feed = id
120
- session['current_feed'] = id
121
-
122
- url = encode_url(url)
123
- url = url.replace('640x480','1280x960').replace('COUNTER','')
124
-
125
- id = feed
126
- ip = ''.join(url.split('//')[-1]).split(':')[0]
127
- info = get_location(ip)
128
- country = info['country'].lower()
129
- name = (info['city'] + ", " + info['region']).lower()
130
- page_title = (info['city'] + ", " + info['region'] + ", " + country).lower()
131
- timezone = pytz.timezone(info['timezone'])
132
- time = dt.datetime.now(timezone)
133
- time = time.strftime("%I:%M:%S %p")
134
- loc = info['loc']
135
- X, Y = latlon_to_pixel(info['loc'])
136
- proxy_url = 'proxy/' + url
137
- logging.info(f"Generated proxy URL: {proxy_url}")
138
- loc_link = f"https://www.google.com/maps/search/{loc}"
139
- ip_link = url
140
- try:
141
- owner = owner_dict[ip]
142
- except:
143
- owner = 'unknown'
144
- return render_template('index.html',
145
- name=name,
146
- url=encode_url(proxy_url),
147
- info=info,
148
- country=country,
149
- time=time,
150
- timezone=timezone,
151
- ip=ip,
152
- ip_link=ip_link,
153
- loc=loc,
154
- loc_link=loc_link,
155
- owner=owner,
156
- X=X,
157
- Y=Y,
158
- id=id,
159
- page_title=page_title)
160
-
161
-
162
- if __name__ == '__main__':
163
- app.run(host='0.0.0.0', port='7860')
spaces/CMU-80100/80-100-Pre-Writing-Chatbot-Section-H/README.md DELETED
@@ -1,7 +0,0 @@
1
- ---
2
- title: 80-100-Pre-Writing-Chatbot-Section-H
3
- app_file: hf_streaming_chatbot.py
4
- sdk: gradio
5
- sdk_version: 3.40.1
6
- duplicated_from: CMU-80100/80-100-Pre-Writing-Chatbot-Section-C
7
- ---
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/rrpn_outputs.py DELETED
@@ -1,244 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import itertools
- import logging
- import torch
-
- from detectron2.layers import batched_nms_rotated, cat
- from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
-
- from .rpn_outputs import RPNOutputs
-
- logger = logging.getLogger(__name__)
-
- """
- Shape shorthand in this module:
-
-     N: number of images in the minibatch
-     L: number of feature maps per image on which RRPN is run
-     A: number of cell anchors (must be the same for all feature maps)
-     Hi, Wi: height and width of the i-th feature map
-     5: size of the box parameterization
-
- Naming convention:
-
-     objectness: refers to the binary classification of an anchor as object vs. not
-         object.
-
-     deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the rotated box2box
-         transform (see :class:`box_regression.Box2BoxTransformRotated`).
-
-     pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use
-         sigmoid(pred_objectness_logits) to estimate P(object).
-
-     gt_objectness_logits: ground-truth binary classification labels for objectness
-
-     pred_anchor_deltas: predicted rotated box2box transform deltas
-
-     gt_anchor_deltas: ground-truth rotated box2box transform deltas
- """
-
-
- def find_top_rrpn_proposals(
-     proposals,
-     pred_objectness_logits,
-     images,
-     nms_thresh,
-     pre_nms_topk,
-     post_nms_topk,
-     min_box_side_len,
-     training,
- ):
-     """
-     For each feature map, select the `pre_nms_topk` highest scoring proposals,
-     apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
-     highest scoring proposals among all the feature maps if `training` is True,
-     otherwise, returns the highest `post_nms_topk` scoring proposals for each
-     feature map.
-
-     Args:
-         proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
-             All proposal predictions on the feature maps.
-         pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
-         images (ImageList): Input images as an :class:`ImageList`.
-         nms_thresh (float): IoU threshold to use for NMS
-         pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
-             When RRPN is run on multiple feature maps (as in FPN) this number is per
-             feature map.
-         post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
-             When RRPN is run on multiple feature maps (as in FPN) this number is total,
-             over all feature maps.
-         min_box_side_len (float): minimum proposal box side length in pixels (absolute units
-             wrt input images).
-         training (bool): True if proposals are to be used in training, otherwise False.
-             This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
-             comment.
-
-     Returns:
-         proposals (list[Instances]): list of N Instances. The i-th Instances
-             stores post_nms_topk object proposals for image i.
-     """
-     image_sizes = images.image_sizes  # in (h, w) order
-     num_images = len(image_sizes)
-     device = proposals[0].device
-
-     # 1. Select top-k anchor for every level and every image
-     topk_scores = []  # #lvl Tensor, each of shape N x topk
-     topk_proposals = []
-     level_ids = []  # #lvl Tensor, each of shape (topk,)
-     batch_idx = torch.arange(num_images, device=device)
-     for level_id, proposals_i, logits_i in zip(
-         itertools.count(), proposals, pred_objectness_logits
-     ):
-         Hi_Wi_A = logits_i.shape[1]
-         num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
-
-         # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
-         # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
-         logits_i, idx = logits_i.sort(descending=True, dim=1)
-         topk_scores_i = logits_i[batch_idx, :num_proposals_i]
-         topk_idx = idx[batch_idx, :num_proposals_i]
-
-         # each is N x topk
-         topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 5
-
-         topk_proposals.append(topk_proposals_i)
-         topk_scores.append(topk_scores_i)
-         level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
-
-     # 2. Concat all levels together
-     topk_scores = cat(topk_scores, dim=1)
-     topk_proposals = cat(topk_proposals, dim=1)
-     level_ids = cat(level_ids, dim=0)
-
-     # 3. For each image, run a per-level NMS, and choose topk results.
-     results = []
-     for n, image_size in enumerate(image_sizes):
-         boxes = RotatedBoxes(topk_proposals[n])
-         scores_per_img = topk_scores[n]
-         valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
-         if not valid_mask.all():
-             boxes = boxes[valid_mask]
-             scores_per_img = scores_per_img[valid_mask]
-         boxes.clip(image_size)
-
-         # filter empty boxes
-         keep = boxes.nonempty(threshold=min_box_side_len)
-         lvl = level_ids
-         if keep.sum().item() != len(boxes):
-             boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep])
-
-         keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
-         # In Detectron1, there was different behavior during training vs. testing.
-         # (https://github.com/facebookresearch/Detectron/issues/459)
-         # During training, topk is over the proposals from *all* images in the training batch.
-         # During testing, it is over the proposals for each image separately.
-         # As a result, the training behavior becomes batch-dependent,
-         # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
-         # This bug is addressed in Detectron2 to make the behavior independent of batch size.
-         keep = keep[:post_nms_topk]
-
-         res = Instances(image_size)
-         res.proposal_boxes = boxes[keep]
-         res.objectness_logits = scores_per_img[keep]
-         results.append(res)
-     return results
-
-
- class RRPNOutputs(RPNOutputs):
-     def __init__(
-         self,
-         box2box_transform,
-         anchor_matcher,
-         batch_size_per_image,
-         positive_fraction,
-         images,
-         pred_objectness_logits,
-         pred_anchor_deltas,
-         anchors,
-         boundary_threshold=0,
-         gt_boxes=None,
-         smooth_l1_beta=0.0,
-     ):
-         """
-         Args:
-             box2box_transform (Box2BoxTransformRotated): :class:`Box2BoxTransformRotated`
-                 instance for anchor-proposal transformations.
-             anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to
-                 ground-truth boxes; used to determine training labels.
-             batch_size_per_image (int): number of proposals to sample when training
-             positive_fraction (float): target fraction of sampled proposals that should be positive
-             images (ImageList): :class:`ImageList` instance representing N input images
-             pred_objectness_logits (list[Tensor]): A list of L elements.
-                 Element i is a tensor of shape (N, A, Hi, Wi) representing
-                 the predicted objectness logits for anchors.
-             pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
-                 (N, A*5, Hi, Wi) representing the predicted "deltas" used to transform anchors
-                 to proposals.
-             anchors (list[list[RotatedBoxes]]): A list of N elements. Each element is a list of L
-                 RotatedBoxes. The RotatedBoxes at (n, l) stores the entire anchor array for
-                 feature map l in image n (i.e. the cell anchors repeated over all locations in
-                 feature map (n, l)).
-             boundary_threshold (int): if >= 0, then anchors that extend beyond the image
-                 boundary by more than boundary_thresh are not used in training. Set to a very large
-                 number or < 0 to disable this behavior. Only needed in training.
-             gt_boxes (list[RotatedBoxes], optional): A list of N elements. Element i a RotatedBoxes
-                 storing the ground-truth ("gt") rotated boxes for image i.
-             smooth_l1_beta (float): The transition point between L1 and L2 loss in
-                 the smooth L1 loss function. When set to 0, the loss becomes L1. When
-                 set to +inf, the loss becomes constant 0.
-         """
-         super(RRPNOutputs, self).__init__(
-             box2box_transform,
-             anchor_matcher,
-             batch_size_per_image,
-             positive_fraction,
-             images,
-             pred_objectness_logits,
-             pred_anchor_deltas,
-             anchors,
-             boundary_threshold,
-             gt_boxes,
-             smooth_l1_beta,
-         )
-
-     def _get_ground_truth(self):
-         """
-         Returns:
-             gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
-                 total number of anchors in image i (i.e., len(anchors[i])). Label values are
-                 in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
-             gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), 5).
-         """
-         gt_objectness_logits = []
-         gt_anchor_deltas = []
-         # Concatenate anchors from all feature maps into a single RotatedBoxes per image
-         anchors = [RotatedBoxes.cat(anchors_i) for anchors_i in self.anchors]
-         for image_size_i, anchors_i, gt_boxes_i in zip(self.image_sizes, anchors, self.gt_boxes):
-             """
-             image_size_i: (h, w) for the i-th image
-             anchors_i: anchors for i-th image
-             gt_boxes_i: ground-truth boxes for i-th image
-             """
-             match_quality_matrix = pairwise_iou_rotated(gt_boxes_i, anchors_i)
-             matched_idxs, gt_objectness_logits_i = self.anchor_matcher(match_quality_matrix)
-
-             if self.boundary_threshold >= 0:
-                 # Discard anchors that go out of the boundaries of the image
-                 # NOTE: This is legacy functionality that is turned off by default in Detectron2
-                 anchors_inside_image = anchors_i.inside_box(image_size_i, self.boundary_threshold)
-                 gt_objectness_logits_i[~anchors_inside_image] = -1
-
-             if len(gt_boxes_i) == 0:
-                 # These values won't be used anyway since the anchor is labeled as background
-                 gt_anchor_deltas_i = torch.zeros_like(anchors_i.tensor)
-             else:
-                 # TODO wasted computation for ignored boxes
-                 matched_gt_boxes = gt_boxes_i[matched_idxs]
-                 gt_anchor_deltas_i = self.box2box_transform.get_deltas(
-                     anchors_i.tensor, matched_gt_boxes.tensor
-                 )
-
-             gt_objectness_logits.append(gt_objectness_logits_i)
-             gt_anchor_deltas.append(gt_anchor_deltas_i)
-
-         return gt_objectness_logits, gt_anchor_deltas
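
To make the shapes documented in find_top_rrpn_proposals concrete, here is a minimal, hypothetical driver for the deleted function above: one image, one feature level, and random rotated proposals in (cx, cy, w, h, angle) form. It assumes the vendored detectron2 package in this repo is importable and that its compiled rotated-NMS op is available; the sizes and thresholds are illustrative, not values used anywhere in the repo.

import torch
from detectron2.structures import ImageList
# The module shown above, as laid out in this repo's vendored detectron2 copy.
from detectron2.modeling.proposal_generator.rrpn_outputs import find_top_rrpn_proposals

N, A, H, W = 1, 3, 16, 16                      # 1 image, 3 cell anchors, 16x16 feature map
images = ImageList.from_tensors([torch.zeros(3, 256, 256)])

# One feature level: proposals are (cx, cy, w, h, angle), logits are objectness scores.
proposals = [torch.rand(N, H * W * A, 5) * 100.0 + 10.0]
proposals[0][..., 4] = 0.0                     # keep angles at 0 so clipping stays simple
logits = [torch.randn(N, H * W * A)]

instances = find_top_rrpn_proposals(
    proposals, logits, images,
    nms_thresh=0.7, pre_nms_topk=200, post_nms_topk=50,
    min_box_side_len=0, training=False,
)
print(len(instances[0]))                       # at most post_nms_topk proposals survive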