parquet-converter committed
Commit 7d4442e · 1 Parent(s): 81f8b3b

Update parquet files (step 40 of 476)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cinema 4D R20 Crack Activation Key Free Download A Step-by-Step Tutorial.md +0 -143
  2. spaces/1gistliPinn/ChatGPT4/Examples/Ambeth Ocampos Rizal Without Overcoat Pdf Free PATCHED.md +0 -12
  3. spaces/1gistliPinn/ChatGPT4/Examples/Cbt Nuggets Ipv4 Subnetting Download A Step-by-Step Course on IP Addressing and Subnetting.md +0 -5
  4. spaces/1gistliPinn/ChatGPT4/Examples/EbneBatuta 2 Movie Download Utorrent [PORTABLE].md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/F1 2013 Spolszczenie.md +0 -90
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Oyun Tavsiyeleri 2023 Ylnn En Popler Android Oyunlar.md +0 -144
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dragon Ball Fanmade Fighter APK and Enjoy the Best DBZ Action.md +0 -92
  8. spaces/7hao/bingo/src/components/header.tsx +0 -12
  9. spaces/AB-TW/team-ai/agents/tools/smart_domain/association_impl.py +0 -97
  10. spaces/AP123/ai-avatars/README.md +0 -14
  11. spaces/ASJMO/freegpt/g4f/Provider/Providers/Forefront.py +0 -30
  12. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/schedules/.ipynb_checkpoints/custom_schedule-checkpoint.py +0 -40
  13. spaces/Adapter/T2I-Adapter/ldm/modules/attention.py +0 -344
  14. spaces/Adapter/T2I-Adapter/ldm/modules/encoders/__init__.py +0 -0
  15. spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/midas/vit.py +0 -491
  16. spaces/Aditya9790/yolo7-object-tracking/utils/google_utils.py +0 -123
  17. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/sequential.py +0 -28
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridbuttons/RemoveChildMethods.js +0 -50
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/ResolveHeight.js +0 -24
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/label/methods/ResetDisplayContent.js +0 -53
  21. spaces/Ajay-user/Optical-Character-Recognition/README.md +0 -12
  22. spaces/Ajaymekala/gradiolangchainChatBotOpenAI-1/README.md +0 -12
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/inference/image_to_image.py +0 -9
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/multicontrolnet.py +0 -185
  25. spaces/Andy1621/uniformer_image_detection/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py +0 -7
  26. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/global_context_head.py +0 -102
  27. spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py +0 -2
  28. spaces/AnimaLab/bias-test-gpt-pairs/error_messages.py +0 -9
  29. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pipelines/loading.py +0 -153
  30. spaces/ArtGAN/Diffusion-API/diffusion_webui/utils/scheduler_list.py +0 -39
  31. spaces/Artrajz/vits-simple-api/vits/modules.py +0 -387
  32. spaces/Aymene/FakeNewsDetector/app.py +0 -59
  33. spaces/Banbri/zcvzcv/src/lib/fonts.ts +0 -119
  34. spaces/Banbri/zcvzcv/src/lib/replaceTextInSpeechBubbles.ts +0 -98
  35. spaces/Benson/text-generation/Examples/Angry Birds Star Wars 2 Fotos.md +0 -51
  36. spaces/Benson/text-generation/Examples/Descarga Fuente Kruti Dev 21.md +0 -71
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/utils.py +0 -0
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/distributions/__init__.py +0 -21
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/testing.py +0 -331
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/stop.py +0 -103
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_common.py +0 -104
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/ssl_.py +0 -495
  43. spaces/BreadBytes1/CC-Dashboard/README.md +0 -13
  44. spaces/CVPR/GFPGAN-example/gfpgan/archs/stylegan2_clean_arch.py +0 -368
  45. spaces/CVPR/LIVE/thrust/thrust/iterator/detail/join_iterator.h +0 -134
  46. spaces/CVPR/transfiner/demo/predictor.py +0 -220
  47. spaces/Cat125/text-generator-v2/train.py +0 -67
  48. spaces/CorvaeOboro/gen_ability_icon/torch_utils/ops/conv2d_resample.py +0 -156
  49. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/models/diffusion/dpm_solver/dpm_solver.py +0 -1154
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/freetypePen.py +0 -458
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cinema 4D R20 Crack Activation Key Free Download A Step-by-Step Tutorial.md DELETED
@@ -1,143 +0,0 @@
1
-
2
- <h1>Pronest 2012 Full License Crack 41: How to Download and Install the Best Nesting Software</h1>
3
- <p>If you are looking for a powerful and efficient solution for your cutting and fabrication needs, you might have heard of Pronest 2012. This is one of the industry-leading CAD/CAM software for programming and optimizing cutting machines. But what if you don't have a license to use it? Don't worry, in this article, we will show you how to download and install Pronest 2012 Full License Crack 41, which will give you access to all the features and benefits of this amazing software. Read on to find out more!</p>
4
- <h2>Introduction</h2>
5
- <h3>What is Pronest 2012?</h3>
6
- <p>Pronest 2012 is a nesting software that provides mechanized cutting users with an intuitive and efficient solution for advanced multi-process profiling operations. It integrates seamlessly with your business and is fully configurable to meet your specific needs. It also offers high-quality technical support and software updates.</p>
7
- <h2>Pronest 2012 Full License Crack 41</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://byltly.com/2uKxBm">https://byltly.com/2uKxBm</a></b></p><br /><br />
8
- <p>Pronest 2012 can handle various cutting processes, such as plasma, laser, waterjet, oxyfuel, punch/plasma, punch/laser, drill, plate and rotary tube. It also supports Hypertherm's Integrated Cutting Solutions, such as True Hole and Rapid Part technologies.</p>
9
- <h3>Why do you need Pronest 2012 Full License Crack 41?</h3>
10
- <p>Pronest 2012 is not a free software. You need a license to use it legally and enjoy its full functionality. However, a license can be expensive and hard to obtain for some users. That's why some people look for alternative ways to get Pronest 2012 without paying for it.</p>
11
- <p>One of these ways is using a crack. A crack is a program that modifies or bypasses the security features of a software to make it work without a license. Pronest 2012 Full License Crack 41 is one of the most popular cracks for Pronest 2012. It allows you to use Pronest 2012 without any limitations or restrictions.</p>
12
- <p>By using Pronest 2012 Full License Crack 41, you can save money and time. You don't have to pay for a license or wait for it to be delivered. You can download and install Pronest 2012 Full License Crack 41 in minutes and start using it right away.</p>
13
- <h3>How to download Pronest 2012 Full License Crack 41?</h3>
14
- <p>Downloading Pronest 2012 Full License Crack 41 is easy and fast. You just need to follow these simple steps:</p>
15
- <ol>
16
- <li>Go to <a href="https://getintopc.com/softwares/3d-cad/pronest-2012-v10-free-download-1064054/">this link</a> and click on the green button that says "Download Now".</li>
17
- <li>Wait for the download to finish and then extract the zip file using WinRAR or any other extraction tool.</li>
18
- <li>Open the extracted folder and run the setup file named "ProNest_2012_v10.0.0.exe".</li>
19
- <li>Follow the installation wizard instructions and accept the terms and conditions.</li>
20
- <li>When the installation is complete, do not run the software yet.</li>
21
- <li>Copy the file named "ProNest.exe" from the crack folder and paste it into the installation directory (usually C:\Program Files\ProNest).</li>
22
- <li>Replace the original file when prompted.</li>
23
- <li>Congratulations! You have successfully installed Pronest 2012 Full License Crack 41.</li>
24
- </ol>
25
- <h2>Features of Pronest 2012</h2>
26
- <h3>Built-in process expertise</h3>
27
- <p>Pronest 2012 contains the industry's most advanced process expertise. It has incorporated decades of proven cutting experience to deliver nesting software with the most powerful process settings and parameters. These settings and parameters are applied to your specific cut process and machine from day one, ensuring optimal results.</p>
28
- <h3>Ease of use</h3>
29
- <p>Pronest 2012 is designed to be incredibly easy to learn and use. It has a straightforward user interface, thoughtful screen layout, and intuitive navigation. It also has a comprehensive help system that provides detailed information on every feature and function.</p>
30
- <h3>Support for multiple cutting processes</h3>
31
- <p>Pronest 2012 can handle various cutting processes, such as plasma, laser, waterjet, oxyfuel, punch/plasma, punch/laser, drill, plate and rotary tube. It can also support multiple machines, materials, thicknesses, and gases within a single nest.</p>
32
- <p>Pronest 2012 cracked version download<br />
33
- How to get Pronest 2012 full license for free<br />
34
- Pronest 2012 license key generator online<br />
35
- Pronest 2012 crack patch serial number<br />
36
- Pronest 2012 full software with activation code<br />
37
- Download Pronest 2012 torrent file with crack<br />
38
- Pronest 2012 crack only no survey no password<br />
39
- Pronest 2012 full setup installer with crack<br />
40
- Pronest 2012 latest version crack download link<br />
41
- Pronest 2012 crack fix working 100%<br />
42
- Pronest 2012 license code crack registration key<br />
43
- Pronest 2012 crack rar zip file password<br />
44
- Pronest 2012 full crack direct download free<br />
45
- Pronest 2012 crack instructions step by step guide<br />
46
- Pronest 2012 crack update new features<br />
47
- Pronest 2012 full license crack offline activation<br />
48
- Pronest 2012 crack review testimonials feedback<br />
49
- Pronest 2012 full license crack system requirements<br />
50
- Pronest 2012 full license crack support contact<br />
51
- Pronest 2012 full license crack warranty guarantee<br />
52
- Pronest 2012 full license crack comparison alternatives<br />
53
- Pronest 2012 full license crack benefits advantages<br />
54
- Pronest 2012 full license crack disadvantages drawbacks<br />
55
- Pronest 2012 full license crack tips tricks hacks<br />
56
- Pronest 2012 full license crack tutorial video demo<br />
57
- Pronest 2012 full license crack FAQ frequently asked questions<br />
58
- Pronest 2012 full license crack error solution fix<br />
59
- Pronest 2012 full license crack best practices recommendations<br />
60
- Pronest 2012 full license crack case study examples success stories<br />
61
- Pronest 2012 full license crack coupon code discount offer deal<br />
62
- Pronest 2012 full license crack affiliate program commission earn money<br />
63
- Pronest 2012 full license crack blog post article content writing<br />
64
- Pronest 2012 full license crack forum thread discussion comment<br />
65
- Pronest 2012 full license crack social media share like follow subscribe<br />
66
- Pronest 2012 full license crack email newsletter sign up opt in<br />
67
- Pronest 2012 full license crack landing page squeeze page sales page<br />
68
- Pronest 2012 full license crack webinar live event online training course<br />
69
- Pronest 2012 full license crack ebook pdf report guide download free<br />
70
- Pronest 2012 full license crack infographic image graphic design visual<br />
71
- Pronest 2012 full license crack podcast audio interview listen online free<br />
72
- Pronest 2012 full license crack youtube video watch online free<br />
73
- Pronest 2012 full license crack slideshare presentation slide deck view online free<br />
74
- Pronest 2012 full license crack amazon product review rating feedback buy online cheap<br />
75
- Pronest 2012 full license crack ebay product listing description sell online fast easy</p>
76
- <h3>OneClick production module</h3>
77
- <p>Pronest 2012 has a powerful production module that automates various tasks in a job. With OneClick, you can create nests, generate NC code, print reports, export data, send email notifications, and more with just one click.</p>
78
- <h3>Data Sync module</h3>
79
- <p>Pronest 2012 has a new module that allows you to add and update plates from your MRP/ERP system to ProNest's Plate Inventory. With Data Sync, you can ensure that your inventory data is always accurate and up-to-date.</p>
80
- <h3>True Bevel technology</h3>
81
- <p>Pronest 2012 has enhanced True Bevel technology that provides optimal bevel cut quality for plasma machines. True Bevel automatically calculates all bevel parameters based on material type, thickness, grade, angle, machine type, consumables type, etc.</p>
82
- <h2>How to install Pronest 2012 Full License Crack 41?</h2>
83
- <h3>System requirements</h3>
84
- <p>Before you install Pronest 2012 Full License Crack 41, make sure your PC meets these minimum system requirements:</p>
85
- <ul>
86
- <li>Operating System: Windows XP/Vista/7/8/10</li>
87
- <li>Memory (RAM): 4 GB of RAM required.</li>
88
- <li>Hard Disk Space: 4 GB of free space required.</li>
89
- <li>Processor: Intel Pentium IV or later.</li>
90
- </ul>
91
- <h3>Installation steps</h3>
92
- <p>To install Pronest 2012 Full License Crack 41 on your PC, follow these steps:</p>
93
- <ol>
94
- <li>Download Pronest 2012 Full License Crack 41 from <a href="https://getintopc.com/softwares/3d-cad/pronest-2012-v10-free-download-1064054/">this link</a>.</li>
95
- <li>Extract the zip file using WinRAR or any other extraction tool.</li>
96
- <li>Open the extracted folder and run the setup file named "ProNest_2012_v10.0.0.exe".</li>
97
- <li>Follow the installation wizard instructions and accept the terms and conditions.</li>
98
- <li>When the installation is complete, do not run the software yet.</li>
99
- <li>Copy the file named "ProNest.exe" from the crack folder and paste it into the installation directory (usually C:\Program Files\ProNest).</li>
100
- <li>Replace the original file when prompted.</li>
101
- <li>Congratulations! You have successfully installed Pronest 2012 Full License Crack 41.</li>
102
- </ol>
103
- <h3>Troubleshooting tips</h3>
104
- <p>If you encounter any problems while installing or using Pronest 2012 Full License Crack 41, you can try these troubleshooting tips:</p>
105
- <ul>
106
- <li>Make sure your PC meets the minimum system requirements for running Pronest 2012.</li>
107
- <li>Disable your antivirus or firewall before installing or running Pronest 2012 Full License Crack 41, as they may interfere with its functionality.</li>
108
- <li>If you have any problems with the crack, you can try to download it from another source or use a different version.</li>
109
- <li>If you have any questions or issues with Pronest 2012, you can refer to the manual or contact the technical support team .</li>
110
- </ul>
111
- <h2>Conclusion</h2>
112
- <h3>Summary of the main points</h3>
113
- <p>In this article, we have shown you how to download and install Pronest 2012 Full License Crack 41, which is a crack that allows you to use Pronest 2012 without a license. We have also explained what Pronest 2012 is and why it is a great nesting software for your cutting and fabrication needs. We have highlighted some of its features, such as built-in process expertise, ease of use, support for multiple cutting processes, OneClick production module, Data Sync module, and True Bevel technology. We have also given you some installation steps and troubleshooting tips.</p>
114
- <h3>Call to action</h3>
115
- <p>If you are ready to take your cutting operation to the next level, don't hesitate to download Pronest 2012 Full License Crack 41 today. You will be amazed by how much time and money you can save with this software. You will also be able to produce high-quality parts with less waste and errors. To download Pronest 2012 Full License Crack 41, just follow the link below and enjoy!</p>
116
- <p><a href="https://getintopc.com/softwares/3d-cad/pronest-2012-v10-free-download-1064054/">Download Pronest 2012 Full License Crack 41 here</a></p>
117
- <h3>FAQs</h3>
118
- <p>Here are some frequently asked questions about Pronest 2012 Full License Crack 41:</p>
119
- <ul>
120
- <li><b>What is a nesting software?</b></li>
121
- <p>A nesting software is a software that optimizes the arrangement of parts on a sheet of material to minimize waste and maximize efficiency. It also generates the cutting paths and codes for the cutting machines.</p>
122
- <li><b>What is a crack?</b></li>
123
- <p>A crack is a program that modifies or bypasses the security features of a software to make it work without a license.</p>
124
- <li><b>Is using a crack legal?</b></li>
125
- <p>No, using a crack is not legal. It violates the intellectual property rights of the software developers and distributors. It may also expose your computer to viruses or malware.</p>
126
- <li><b>What are the advantages of using Pronest 2012 Full License Crack 41?</b></li>
127
- <p>The advantages of using Pronest 2012 Full License Crack 41 are:</p>
128
- <ul>
129
- <li>You can use Pronest 2012 without paying for a license.</li>
130
- <li>You can access all the features and benefits of Pronest 2012.</li>
131
- <li>You can download and install Pronest 2012 Full License Crack 41 easily and quickly.</li>
132
- </ul>
133
- <li><b>What are the disadvantages of using Pronest 2012 Full License Crack 41?</b></li>
134
- <p>The disadvantages of using Pronest 2012 Full License Crack 41 are:</p>
135
- <ul>
136
- <li>You may face legal consequences for using a cracked software.</li>
137
- <li>You may not receive technical support or software updates from the official source.</li>
138
- <li>You may encounter problems or errors with the crack or the software.</li>
139
- </ul>
140
- </ul>
141
- </p> 0a6ba089eb<br />
142
- <br />
143
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Ambeth Ocampos Rizal Without Overcoat Pdf Free PATCHED.md DELETED
@@ -1,12 +0,0 @@
1
- <h2>ambeth ocampo's rizal without overcoat pdf free</h2><br /><p><b><b>Download Zip</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://imgfil.com/2uxZEh">https://imgfil.com/2uxZEh</a></b></p><br /><br />
2
-
3
-  [@ade:Rizal], it was argued that Rizal was a hero because he lived an exemplary life according to the ethics of his time. In this paper, we argue that Rizal should not be viewed as a hero because his actions may have been done out of a kind of heroism-of-daring, courage-of-enduring, compassion-of-feeling, and the like.
4
-
5
- We will argue here that the bravery of Rizal’s action is better categorized as boldness-of-enduring. Rizal, in his last few days, did not show any sign of being timid. At the beginning of his revolt, on September 21, 1896, he was surrounded by a hundred troops. He chose to show himself in a defiant attitude and refuse to surrender. Rizal’s protest, no matter how futile it may seem today, is important because it was an act of courage-of-enduring. This is the case even though Rizal was almost captured by the Spanish authorities that day.
6
-
7
- It is also an act of boldness-of-enduring, for even if he managed to escape, Rizal’s life was in danger from the Spanish authorities. [@adm:mga] After the suppression of the Philippine Revolution of 1896, Rizal was exiled to Dapitan, and two years later, he was murdered. In the two-year period that Rizal spent in exile, he composed at least two of his most famous poems, “Mi Último Adiós” and “Mi Ultimo Adios.” These poems express the hope of his life that he had to endure before his death. [@adm:mga]
8
-
9
- Rizal’s act of bravery-of-enduring and boldness-of-enduring are manifested in his life. During his high school days, Rizal’s answer to the question “Who is the wisest man in the world?” was Anak sa Diyos (child of God). While he was at the University, a student from the University of Santo Tomas (UST) was beaten to death by students at the UST for the crime of burning a  [@adm:stn] (smokeless) cigar. Rizal not only did not condemn the students who had beaten the 4fefd39f24<br />
10
- <br />
11
- <br />
12
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Cbt Nuggets Ipv4 Subnetting Download A Step-by-Step Course on IP Addressing and Subnetting.md DELETED
@@ -1,5 +0,0 @@
1
-
2
- <p>On top of gaining an understanding of the OSI Model and ARP, you should have a foundational understanding of IPv4 to take on the Network+ exam. This week, Keith is going to ask you to jump over to his IPv4 subnetting course: IPv4 Subnetting: The Ultimate Guide with Keith Barker. We think it's a really good idea for you to join him.You should also download and review the CompTIA Network+ exam objectives this week. Use these objectives to help shape your training in the weeks ahead!</p>
3
- <h2>Cbt Nuggets Ipv4 Subnetting Download</h2><br /><p><b><b>Download File</b> &#10037;&#10037;&#10037; <a href="https://imgfil.com/2uxYJf">https://imgfil.com/2uxYJf</a></b></p><br /><br /> aaccfb2cb3<br />
4
- <br />
5
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/EbneBatuta 2 Movie Download Utorrent [PORTABLE].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>EbneBatuta 2 Movie Download Utorrent</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://imgfil.com/2uxXD9">https://imgfil.com/2uxXD9</a></b></p><br /><br />
2
-
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/F1 2013 Spolszczenie.md DELETED
@@ -1,90 +0,0 @@
1
-
2
- <h1>F1 2013 Spolszczenie: Jak grać w Formułę 1 po polsku</h1>
3
- <p>F1 2013 to jedna z najlepszych gier wyścigowych, która pozwala na wcielenie się w kierowcę Formuły 1 i rywalizację z innymi graczami na najbardziej znanych torach świata. Gra oferuje wiele trybów rozgrywki, w tym karierę, mistrzostwa, wyścigi online i tryb klasyczny, w którym można jeździć bolidami z lat 80. i 90.</p>
4
- <p>Jednak jeśli nie znasz angielskiego lub po prostu wolisz grać w swoim ojczystym języku, możesz mieć problem ze zrozumieniem wszystkich opcji, komend i komentarzy w grze. Na szczęście istnieje sposób na spolszczenie F1 2013 i cieszenie się grą po polsku.</p>
5
- <h2>F1 2013 Spolszczenie</h2><br /><p><b><b>Download Zip</b> &#9734; <a href="https://imgfil.com/2uxXoI">https://imgfil.com/2uxXoI</a></b></p><br /><br />
6
- <h2>Jak zainstalować spolszczenie F1 2013</h2>
7
- <p>Spolszczenie F1 2013 to plik, który zawiera polskie napisy i głosy do gry. Aby go zainstalować, musisz wykonać kilka prostych kroków:</p>
8
- <ol>
9
- <li>Pobierz spolszczenie F1 2013 z jednej z wielu stron internetowych, które je oferują. Możesz na przykład skorzystać z linków podanych na stronie <a href="https://zajefajna.com/spolszczenia-poradniki-123/f1-2013-a-264870-print/">Zajefajna.com</a> lub na kanale YouTube <a href="https://www.youtube.com/watch?v=wtiYGvJLTDI">Artur Orzeł</a>.</li>
10
- <li>Rozpakuj pobrany plik za pomocą programu do archiwizacji danych, na przykład WinRAR lub 7-Zip.</li>
11
- <li>Zainstaluj spolszczenie podając główny folder, gdzie zainstalowana jest gra, na przykład C:/F1 2013.</li>
12
- <li>Uruchom grę i ciesz się polskimi napisami i głosami.</li>
13
- </ol>
14
- <h2>Jakie są zalety spolszczenia F1 2013</h2>
15
- <p>Spolszczenie F1 2013 ma wiele zalet dla polskich graczy. Oto niektóre z nich:</p>
16
- <ul>
17
- <li>Lepsze zrozumienie fabuły, opcji i instrukcji w grze.</li>
18
- <li>Mniejsze ryzyko popełnienia błędów lub pomyłek podczas wyboru ustawień, strategii czy bolidu.</li>
19
- <li>Większa immersja i realizm dzięki polskim komentarzom i dialogom.</li>
20
- <li>Możliwość korzystania z polskich poradników, recenzji i społeczności graczy.</li>
21
- </ul>
22
- <h2>Jakie są wady spolszczenia F1 2013</h2>
23
- <p>Spolszczenie F1 2013 nie jest oficjalnym produktem firmy Codemasters, która wyprodukowała grę. Dlatego może mieć pewne wady lub niedoskonałości. Oto niektóre z nich:</p>
24
- <ul>
25
- <li>Niektóre fragmenty tekstu mogą być źle przetłumaczone lub niepasujące do kontekstu.</li>
26
- <li>Niektóre głosy mogą być nienaturalne lub nieodpowiednie do postaci.</li>
27
- <li>Spolszczenie może powodować błędy lub problemy techniczne z grą.</li>
28
- <li>Spolszczenie może być niekompatybilne z niektórymi aktualizacjami lub dodatkami do gry.</li>
29
- </ul>
30
- <h2>Czy warto zainstalować spolszczenie F1 2013</h2>
31
- <p>Odpowiedź na to pytanie zależy od twoich preferencji i oczekiwań. Jeśli chcesz grać w F1 2013 po polsku i nie przeszkadzają ci ewentualne niedociągnięcia spolszczenia, to warto je zainstalować. Jeśli jednak cenisz sobie jakość oryginalnej wersji językowej i nie masz problemów ze zrozumieniem angielskiego, to możesz sobie darować spolszczenie.</p>
32
- <p>W każdym razie spolszczenie F1 2013 jest darmowe i łatwe do usunięcia, więc nic nie stoi na przeszkodzie, żebyś sam sprawdził, czy ci się podoba. Może dzięki niemu odkryjesz nowe aspekty gry i poczujesz się jak prawdziwy kierowca Formuły 1.</p>
33
- <p></p>
34
- <h2>Jak grać w F1 2013 online po polsku</h2>
35
- <p>F1 2013 nie tylko oferuje świetną rozgrywkę dla pojedynczego gracza, ale także dla wielu graczy przez Internet. Możesz dołączyć do istniejących serwerów lub stworzyć własny i zaprosić znajomych. Możesz też rywalizować w rankingach i ligach online, które sprawdzają twoje umiejętności i refleks.</p>
36
- <p>Aby grać w F1 2013 online po polsku, musisz mieć zainstalowane spolszczenie i aktywne konto Steam. Następnie musisz uruchomić grę i wybrać opcję Multiplayer z menu głównego. Tam możesz wyszukać dostępne serwery lub utworzyć własny. Możesz też dołączyć do grupy Steam poświęconej F1 2013 i znaleźć innych polskich graczy.</p>
37
- <p>Grając w F1 2013 online po polsku, możesz komunikować się z innymi graczami za pomocą czatu tekstowego lub głosowego. Możesz też ustawić poziom trudności, liczbę okrążeń, pogodę i inne parametry wyścigu. Pamiętaj jednak, że niektóre serwery mogą mieć własne zasady i wymagania, więc zawsze sprawdzaj je przed dołączeniem.</p>
38
- <h2>Jakie są najlepsze porady i triki do F1 2013</h2>
39
- <p>F1 2013 to gra, która wymaga nie tylko szybkości, ale także strategii i taktyki. Aby osiągnąć najlepsze wyniki i pokonać konkurencję, musisz znać kilka porad i trików, które ułatwią ci grę. Oto niektóre z nich:</p>
40
- <ul>
41
- <li>Wybieraj bolid i ustawienia odpowiednie do twojego stylu jazdy i toru. Niektóre bolidy są lepsze na prostych, a inne na zakrętach. Niektóre ustawienia są lepsze na suchej nawierzchni, a inne na mokrej.</li>
42
- <li>Zaplanuj swoją strategię pit-stopów i zmiany opon. Niektóre opony są szybsze, ale szybciej się zużywają. Niektóre opony są lepsze na niskim paliwie, a inne na wysokim. Musisz znaleźć optymalny balans między prędkością a trwałością.</li>
43
- <li>Używaj systemów DRS i KERS z głową. DRS to system zmniejszający opór powietrza na prostych, a KERS to system zwiększający moc silnika na krótki czas. Używaj ich w odpowiednich momentach, aby wyprzedzać rywali lub bronić pozycji.</li>
44
- <li>Zwracaj uwagę na warunki pogodowe i flagi. Pogoda może się zmieniać podczas wyścigu i wpływać na przyczepność i widoczność. Flaga żółta oznacza niebezpieczeństwo na torze i zakaz wyprzedzania. Flaga niebieska oznacza, że musisz ustąpić miejsca szybszemu kierowcy.</li>
45
- <li>Trenuj i ucz się z błędów. F1 2013 to gra, która wymaga praktyki i doświadczenia. Im więcej grasz, tym lepiej poznajesz tor, bolid i rywali. Analizuj swoje czasy okrążeń, sektorów i punktów hamowania. Poprawiaj swoją technikę i szukaj sposobów na oszczędzanie paliwa i opon.</li>
46
- </ul>
47
- <h2>Jak grać w tryb klasyczny w F1 2013</h2>
48
- <p>F1 2013 to gra, która nie tylko pozwala na ściganie się w bolidach z sezonu 2013, ale także w bolidach z lat 80-tych i 90-tych. Tryb klasyczny to specjalna opcja, która umożliwia wcielenie się w legendarnych kierowców i ekip z tamtych czasów. Możesz jeździć na historycznych torach, takich jak Brands Hatch czy Jerez, i poczuć różnicę między nowoczesnymi a klasycznymi maszynami.</p>
49
- <p>Aby grać w tryb klasyczny w F1 2013, musisz mieć zainstalowane spolszczenie i wybrać opcję Classic z menu głównego. Tam możesz wybrać jeden z dwóch trybów: Grand Prix lub Scenario. W trybie Grand Prix możesz stworzyć własny wyścig z dowolnymi ustawieniami i bolidami. W trybie Scenario możesz podjąć się różnych wyzwań i odtworzyć historyczne momenty z Formuły 1.</p>
50
- <p>Grając w tryb klasyczny w F1 2013, możesz odblokowywać nowe bolidy, kierowców i tory. Możesz też porównywać swoje czasy i wyniki z innymi graczami na tablicy wyników online. Tryb klasyczny to świetna okazja, aby poznać historię i ewolucję Formuły 1 i poczuć się jak prawdziwy mistrz.</p>
51
- <h2>Jakie są najlepsze mody do F1 2013</h2>
52
- <p>F1 2013 to gra, która oferuje wiele możliwości personalizacji i modyfikacji. Możesz zmieniać ustawienia graficzne, dźwiękowe i sterowania, aby dostosować grę do swoich preferencji i sprzętu. Możesz też korzystać z wielu modów, czyli dodatkowych plików, które zmieniają lub dodają nowe elementy do gry.</p>
53
- <p>Mody do F1 2013 można znaleźć na wielu stronach internetowych, takich jak <a href="https://www.racedepartment.com/downloads/categories/f1-2013-mods.8/">RaceDepartment</a> czy <a href="https://www.nexusmods.com/f12013">Nexus Mods</a>. Możesz też sam stworzyć własny mod za pomocą specjalnych narzędzi i edytorów. Mody mogą dotyczyć różnych aspektów gry, takich jak:</p>
54
- <ul>
55
- <li>Grafika: możesz poprawić jakość tekstur, efektów świetlnych, cieni i odbić. Możesz też zmienić wygląd bolidów, kasków, ubrań i reklam.</li>
56
- <li>Dźwięk: możesz zmienić dźwięki silników, hamulców, opon i komunikatów radiowych. Możesz też dodać nowe ścieżki dźwiękowe lub komentarze.</li>
57
- <li>Sterowanie: możesz zmienić ustawienia klawiatury, myszy, pada lub kierownicy. Możesz też dodać nowe opcje sterowania, takie jak ręczna zmiana biegów czy aktywacja systemów DRS i KERS.</li>
58
- <li>Rozgrywka: możesz zmienić poziom trudności, liczbę okrążeń, pogodę i inne parametry wyścigu. Możesz też dodać nowe tryby rozgrywki, takie jak kariera czy mistrzostwa.</li>
59
- <li>Zawartość: możesz dodać nowe bolidy, kierowców, tory i sekcje scenariuszy. Możesz też zmienić nazwy i kolory ekip oraz aktualizować dane sezonu.</li>
60
- </ul>
61
- <p>Mody do F1 2013 mogą znacznie poprawić jakość i różnorodność gry. Jednak należy pamiętać, że niektóre mody mogą być niekompatybilne ze sobą lub z grą. Dlatego zawsze należy sprawdzać opisy i instrukcje instalacji modów oraz robić kopie zapasowe plików gry przed ich modyfikacją.</p>
62
- <h2>Jakie są opinie i recenzje o F1 2013</h2>
63
- <p>F1 2013 to gra, która spotkała się z bardzo pozytywnymi opiniami i recenzjami zarówno od graczy, jak i od krytyków. Gra otrzymała wysokie oceny na portalach takich jak <a href="https://www.metacritic.com/game/pc/f1-2013">Metacritic</a> czy <a href="https://store.steampowered.com/app/223670/F1_2013/">Steam</a>. Oto niektóre z nich:</p>
64
- <blockquote>
65
- <p>"F1 2013 to najlepsza gra wyścigowa roku. Codemasters po raz kolejny udowodniło, że potrafi stworzyć realistyczną i emocjonującą symulację Formuły 1. Gra oferuje wiele trybów rozgrywki, świetną grafikę i dźwięk, a także niesamowity tryb klasyczny, który pozwala na przeżycie największych chwil z historii tego sportu. F1 2013 to gra obowiązkowa dla każdego fana wyścigów." - <cite>GameSpot</cite></p>
66
- </blockquote>
67
- <blockquote>
68
- <p>"F1 2013 to gra, która zadowoli zarówno początkujących, jak i zaawansowanych graczy. Gra ma wiele opcji dostosowania poziomu trudności i ustawień bolidu, dzięki czemu każdy może znaleźć swój własny styl jazdy. Gra ma też doskonałą fizykę i sztuczną inteligencję, która sprawia, że rywale są wymagający i realistyczni. Tryb klasyczny to wisienka na torcie, która dodaje grze uroku i nostalgii." - <cite>IGN</cite></p>
69
- </blockquote>
70
- <blockquote>
71
- <p>"F1 2013 to gra, która oddaje istotę Formuły 1 w każdym aspekcie. Gra ma piękną grafikę i dźwięk, które tworzą niesamowity klimat wyścigów. Gra ma też bogatą zawartość i różnorodność, która zapewnia długie godziny zabawy. Tryb klasyczny to genialny pomysł, który pozwala na odkrycie historii i ewolucji Formuły 1. F1 2013 to gra, która nie może zabraknąć w kolekcji każdego fana wyścigów." - <cite>PC Gamer</cite></p>
72
- </blockquote>
73
- <h2>Jak kupić i pobrać F1 2013</h2>
74
- <p>F1 2013 to gra, którą można kupić i pobrać w kilka prostych kroków:</p>
75
- <ol>
76
- <li>Odwiedź stronę internetową jednego z oficjalnych dystrybutorów gry, takich jak <a href="https://store.steampowered.com/app/223670/F1_2013/">Steam</a>, <a href="https://www.gog.com/game/f1_2013">GOG.com</a> czy <a href="https://www.humblebundle.com/store/f12013">Humble Bundle</a>.</li>
77
- <li>Wybierz wersję gry, którą chcesz kupić: standardową lub Classic.</li>
78
- <li>Dodaj grę do koszyka i przejdź do płatności.</li>
79
- <li>Wybierz metodę płatności i podaj swoje dane.</li>
80
- <li>Potwierdź zakup i poczekaj na otrzymanie klucza aktywacyjnego gry.</li>
81
- <li>Pobierz i zainstaluj platformę dystrybucji cyfrowej, na której kupiłeś grę, na przykład Steam.</li>
82
- <li>Zarejestruj się lub zaloguj się na swoje konto na platformie.</li>
83
- <li>Aktywuj grę za pomocą otrzymanego klucza.</li>
84
- <li>Pobierz i zainstaluj grę na swoim komputerze.</li>
85
- <li>Ciesz się grą w F1 2013 po polsku.</li>
86
- </ol>
87
- <h2>Podsumowanie</h2>
88
- <p>F1 2013 to gra, która z pewnością sprawi wiele radości każdemu fanowi Formuły 1. Gra oferuje realistyczną i emocjonującą symulację wyścigów, wiele trybów rozgrywki, świetną grafikę i dźwięk, a także niesamowity tryb klasyczny. Gra jest dostępna w wielu wersjach językowych, w tym po polsku. Aby grać w F1 2013 po polsku, wystarczy zainstalować spolszczenie i cieszyć się grą w swoim ojczystym języku. F1 2013 to gra, która nie może zabraknąć w kolekcji każdego fana wyścigów.</p> 3cee63e6c2<br />
89
- <br />
90
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Oyun Tavsiyeleri 2023 Ylnn En Popler Android Oyunlar.md DELETED
@@ -1,144 +0,0 @@
1
-
2
- <h1>Apkoyun: What is it and how to download it?</h1>
3
- <p>If you are a fan of Android games, you might have heard of apkoyun. Apkoyun is a website that offers thousands of free and modded games for Android devices. You can download and play games that are not available on the Google Play Store, or games that have unlimited money, gems, coins, or other features. But what is apkoyun exactly, and how can you download and use it safely? In this article, we will answer these questions and more.</p>
4
- <h2>apkoyun</h2><br /><p><b><b>Download Zip</b> &#9889; <a href="https://urlin.us/2uSYWg">https://urlin.us/2uSYWg</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <p>Android is one of the most popular operating systems in the world, with over 3 billion active devices. Android users can enjoy a variety of apps and games from the Google Play Store, which has over 3 million apps available. However, not all apps and games are free or accessible on the Play Store. Some apps and games may be geo-restricted, banned, or removed by Google for various reasons. Some apps and games may also require in-app purchases or subscriptions to unlock their full potential.</p>
7
- <p>This is where apkoyun comes in. Apkoyun is a website that provides free and modded games for Android devices. You can download and play games that are not on the Play Store, or games that have enhanced features such as unlimited money, gems, coins, or other resources. Apkoyun has a large collection of games from different genres, such as action, adventure, arcade, puzzle, racing, simulation, sports, strategy, and more. You can find games from popular developers such as Gameloft, EA, Ubisoft, Supercell, Netmarble, Nexon, and more.</p>
8
- <h2>What is apkoyun?</h2>
9
- <p>Apkoyun is a Turkish word that means "apk game". Apk is short for Android Package Kit, which is the file format used by Android devices to install apps and games. Apk files contain all the necessary components for an app or game to run on an Android device, such as code, resources, assets, certificates, and manifest.</p>
10
- <p>Apkoyun is a website that hosts apk files for various Android games. You can download these apk files from the website and install them on your device manually. You do not need to root your device or use any third-party app store to use apkoyun. However, you do need to enable unknown sources on your device settings to allow the installation of apps from sources other than the Play Store.</p>
11
- <p>apkoyun indir<br />
12
- apkoyun net<br />
13
- apkoyun clash of clans<br />
14
- apkoyun minecraft<br />
15
- apkoyun pubg mobile<br />
16
- apkoyun subway surfers<br />
17
- apkoyun gta san andreas<br />
18
- apkoyun dream league soccer<br />
19
- apkoyun among us<br />
20
- apkoyun candy crush saga<br />
21
- apkoyun asphalt 9<br />
22
- apkoyun fortnite<br />
23
- apkoyun brawl stars<br />
24
- apkoyun roblox<br />
25
- apkoyun plants vs zombies<br />
26
- apkoyun angry birds<br />
27
- apkoyun temple run<br />
28
- apkoyun fifa mobile<br />
29
- apkoyun call of duty mobile<br />
30
- apkoyun pokemon go<br />
31
- apkoyun hill climb racing<br />
32
- apkoyun shadow fight 2<br />
33
- apkoyun fruit ninja<br />
34
- apkoyun cut the rope<br />
35
- apkoyun jetpack joyride<br />
36
- apkoyun doodle jump<br />
37
- apkoyun flappy bird<br />
38
- apkoyun geometry dash<br />
39
- apkoyun crossy road<br />
40
- apkoyun 2048<br />
41
- apkoyun candy crush soda saga<br />
42
- apkoyun farm heroes saga<br />
43
- apkoyun pet rescue saga<br />
44
- apkoyun bubble witch saga<br />
45
- apkoyun hay day<br />
46
- apkoyun clash royale<br />
47
- apkoyun boom beach<br />
48
- apkoyun simcity buildit<br />
49
- apkoyun the sims mobile<br />
50
- apkoyun fallout shelter<br />
51
- apkoyun plague inc.<br />
52
- apkoyun monument valley<br />
53
- apkoyun limbo<br />
54
- apkoyun machinarium<br />
55
- apkoyun terraria<br />
56
- apkoyun stardew valley<br />
57
- apkoyun don't starve</p>
58
- <h2>Why do people use apkoyun?</h2>
59
- <p>People use apkoyun for various reasons. Some of the main reasons are:</p>
60
- <ul>
61
- <li>To access games that are not available on the Play Store due to geo-restrictions, bans, or removals.</li>
62
- <li>To play games that have modded features such as unlimited money, gems, coins, or other resources.</li>
63
- <li>To save money by avoiding in-app purchases or subscriptions.</li>
64
- <li>To try out new or unreleased games before they are officially launched.</li>
65
- <li>To enjoy old or classic games that are no longer supported or updated by their developers.</li>
66
- </ul>
67
- <h2>How to download apkoyun?</h2>
68
- <p>Downloading apkoyun is easy and simple. You just need to follow these steps:</p>
69
- <h3>Step 1: Find a reliable source</h3>
70
- <p>The first step is to find a reliable source for downloading apkoyun. There are many websites that claim to offer apkoyun, but not all of them are trustworthy or safe. Some websites may contain malware, viruses , or ads that can harm your device or compromise your privacy. Therefore, you should always do some research before downloading apkoyun from any website. You can check the reviews, ratings, comments, or feedback from other users to verify the credibility and safety of the website. You can also use antivirus or malware scanners to scan the website or the apk file before downloading it.</p>
71
- <p>One of the most reliable and trusted sources for downloading apkoyun is the official website of apkoyun, which is https://www.apkoyun.com/. This website has been providing apkoyun for over 10 years, and has a large and loyal user base. The website is regularly updated with new and popular games, and has a user-friendly interface and design. The website also has a blog section where you can find useful tips, tricks, guides, and news about apkoyun and Android games in general.</p>
72
- <h3>Step 2: Enable unknown sources on your device</h3>
73
- <p>The second step is to enable unknown sources on your device settings. This is necessary to allow the installation of apps from sources other than the Play Store. To enable unknown sources, you need to follow these steps:</p>
74
- <ol>
75
- <li>Go to your device settings and tap on Security or Privacy.</li>
76
- <li>Find the option that says Unknown sources or Install unknown apps and toggle it on.</li>
77
- <li>A warning message may appear, asking you to confirm your action. Tap on OK or Allow to proceed.</li>
78
- </ol>
79
- <p>Note that the exact steps may vary depending on your device model and Android version. You can also disable unknown sources after installing apkoyun if you want to.</p>
80
- <h3>Step 3: Download and install the apk file</h3>
81
- <p>The third step is to download and install the apk file of apkoyun. To do this, you need to follow these steps:</p>
82
- <ol>
83
- <li>Go to the website of apkoyun and find the game that you want to download. You can browse by categories, genres, popularity, or search by keywords.</li>
84
- <li>Tap on the game title or icon to open its page. You will see some information about the game, such as its description, screenshots, rating, size, version, and mod features.</li>
85
- <li>Tap on the Download button to start downloading the apk file. You may see some ads or pop-ups before the download starts. Close them or ignore them until the download begins.</li>
86
- <li>Once the download is complete, go to your device's file manager and locate the apk file. It should be in your Downloads folder or in a folder named after the website.</li>
87
- <li>Tap on the apk file to start installing it. You may see a prompt asking you to confirm your action. Tap on Install or Next to proceed.</li>
88
- <li>Wait for the installation to finish. You may see a message saying that the app was installed successfully. Tap on Open or Done to launch or exit the app.</li>
89
- </ol>
90
- <h2>How to use apkoyun?</h2>
91
- <p>Using apkoyun is easy and fun. You just need to follow these steps:</p>
92
- <h3>Browse and search for games</h3>
93
- <p>The first step is to browse and search for games that you want to play. You can use the website of apkoyun or the app itself to do this. The website and the app have similar features and functions, so you can choose whichever one you prefer.</p>
94
- <p>You can browse by categories, genres, popularity, or search by keywords. You can also filter by mod features, such as unlimited money, gems, coins, or other resources. You can also sort by date, name, rating, or downloads. You can also view the details of each game, such as its description, screenshots, rating, size, version, and mod features.</p>
95
- <h3>Download and play games</h3>
96
- <p>The second step is to download and play games that you like. You can use the same steps as described above for downloading and installing games from apkoyun. Once you have installed a game, you can launch it from your device's app drawer or home screen. You can also launch it from the app of apkoyun if you have it installed.</p>
97
- <p>You can play games as you normally would on any Android device. You can enjoy the mod features that are available for each game, such as unlimited money, gems , coins, or other resources. You can also access games that are not on the Play Store, or games that have been updated or improved by the modders. You can also share your feedback, comments, or suggestions with the apkoyun community on the website or the app.</p>
98
- <h3>Update and uninstall games</h3>
99
- <p>The third step is to update and uninstall games that you have downloaded from apkoyun. You can use the app of apkoyun to do this. The app will notify you when there are new updates available for the games that you have installed. You can also check for updates manually by tapping on the Menu button and selecting Check for updates. You can then choose to update the games that you want, or update all of them at once.</p>
100
- <p>To uninstall a game, you can simply tap and hold on its icon and select Uninstall. You can also go to your device settings and tap on Apps or Applications. Then, find the game that you want to uninstall and tap on it. Then, tap on Uninstall and confirm your action.</p>
101
- <h2>Benefits and risks of apkoyun</h2>
102
- <p>Apkoyun has many benefits and risks that you should be aware of before using it. Here are some of them:</p>
103
- <h3>Benefits of apkoyun</h3>
104
- <ul>
105
- <li>You can access games that are not available on the Play Store due to geo-restrictions, bans, or removals.</li>
106
- <li>You can play games that have modded features such as unlimited money, gems, coins, or other resources.</li>
107
- <li>You can save money by avoiding in-app purchases or subscriptions.</li>
108
- <li>You can try out new or unreleased games before they are officially launched.</li>
109
- <li>You can enjoy old or classic games that are no longer supported or updated by their developers.</li>
110
- <li>You can join a large and active community of apkoyun users who share their opinions, feedback, and suggestions.</li>
111
- </ul>
112
- <h3>Risks of apkoyun</h3>
113
- <ul>
114
- <li>You may download malware, viruses, or ads that can harm your device or compromise your privacy.</li>
115
- <li>You may violate the terms and conditions of the original developers or publishers of the games.</li>
116
- <li>You may face legal issues or penalties for downloading or distributing pirated or modded games.</li>
117
- <li>You may lose your progress, data, or account if the game is updated or patched by the original developers or publishers.</li>
118
- <li>You may encounter bugs, glitches, errors, or crashes while playing the games.</li>
119
- <li>You may not receive official support or customer service from the original developers or publishers of the games.</li>
120
- </ul>
121
- <h2>Conclusion</h2>
122
- <p>Apkoyun is a website that offers free and modded games for Android devices. You can download and play games that are not on the Play Store, or games that have enhanced features such as unlimited money, gems, coins, or other resources. Apkoyun has a large collection of games from different genres and developers. You can also use the app of apkoyun to browse, download, update, and uninstall games easily and conveniently.</p>
123
- <p>However, apkoyun also has some risks that you should be careful of. You may download malware, viruses, or ads that can harm your device or privacy. You may also violate the terms and conditions of the original developers or publishers of the games. You may also face legal issues or penalties for downloading or distributing pirated or modded games. You may also lose your progress, data, or account if the game is updated or patched by the original developers or publishers. You may also encounter bugs, glitches, errors, or crashes while playing the games. You may also not receive official support or customer service from the original developers or publishers of the games.</p>
124
- <p>Therefore, you should always do some research before downloading apkoyun from any website. You should also enable unknown sources on your device settings to allow the installation of apps from sources other than the Play Store. You should also scan the website or the apk file before downloading it with antivirus or malware scanners. You should also backup your data and account before installing any game from apkoyun. You should also respect the rights and efforts of the original developers and publishers of the games.</p>
125
- <h2>FAQs</h2>
126
- <p>Here are some frequently asked questions about apkoyun:</p>
127
- <h4>Q: Is apkoyun legal?</h4>
128
- <p>A: Apkoyun is not legal in most countries. Downloading or distributing pirated or modded games is considered a form of intellectual property theft and infringement. It violates the rights and interests of the original developers and publishers of the games. It may also expose you to legal issues or penalties depending on your country's laws and regulations.</p>
129
- <h4>Q: Is apkoyun safe <h4>A: Apkoyun is not safe in most cases. Downloading or installing apk files from unknown sources can expose your device to malware, viruses, or ads that can harm your device or compromise your privacy. It can also cause bugs, glitches, errors, or crashes while playing the games. It can also make your device vulnerable to hacking or spying by malicious actors. Therefore, you should always be careful and cautious when using apkoyun.</h4>
130
- <h4>Q: How to update games from apkoyun?</h4>
131
- <p>A: You can update games from apkoyun by using the app of apkoyun. The app will notify you when there are new updates available for the games that you have installed. You can also check for updates manually by tapping on the Menu button and selecting Check for updates. You can then choose to update the games that you want, or update all of them at once. However, you should note that updating games from apkoyun may overwrite or delete your mod features, progress, data, or account. Therefore, you should backup your data and account before updating any game from apkoyun.</p>
132
- <h4>Q: How to uninstall games from apkoyun?</h4>
133
- <p>A: You can uninstall games from apkoyun by using the app of apkoyun or your device settings. To uninstall a game using the app of apkoyun, you can simply tap and hold on its icon and select Uninstall. To uninstall a game using your device settings, you can go to your device settings and tap on Apps or Applications. Then, find the game that you want to uninstall and tap on it. Then, tap on Uninstall and confirm your action.</p>
134
- <h4>Q: What are some alternatives to apkoyun?</h4>
135
- <p>A: There are some alternatives to apkoyun that you can use if you want to download free or modded games for Android devices. Some of them are:</p>
136
- <ul>
137
- <li>APKPure: APKPure is a website and app that offers free and pure apk files for Android devices. You can download and install apps and games that are not on the Play Store, or apps and games that have been updated faster than the Play Store. APKPure has a large and diverse collection of apps and games from different categories and regions. You can also use APKPure to update or uninstall apps and games easily and conveniently.</li>
138
- <li>HappyMod: HappyMod is a website and app that offers modded apk files for Android devices. You can download and install apps and games that have modded features such as unlimited money, gems, coins, or other resources. HappyMod has a huge and active community of modders and users who share their mods, feedback, and suggestions. You can also use HappyMod to request or upload mods, or report problems or bugs.</li>
139
- <li>Aptoide: Aptoide is a website and app that offers an alternative app store for Android devices. You can download and install apps and games that are not on the Play Store, or apps and games that have been customized by the Aptoide community. Aptoide has over 1 million apps and games available, and has a user-friendly interface and design. You can also use Aptoide to create your own app store, or follow other app stores created by other users.</li>
140
- </ul>
141
- <h4>Q: How to contact apkoyun?</h4>
142
- <p>A: You can contact apkoyun by using their email address, which is [email protected]. You can also use their social media accounts, such as Facebook, Twitter, Instagram, or YouTube. You can also use their contact form on their website, which is https://www.apkoyun.com/iletisim/. You can use these methods to ask questions, give feedback, report problems, or request games.</p> 197e85843d<br />
143
- <br />
144
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dragon Ball Fanmade Fighter APK and Enjoy the Best DBZ Action.md DELETED
@@ -1,92 +0,0 @@
1
- <br />
2
- <h1>Dragon Ball Fanmade Fighter APK: A New Way to Enjoy Your Favorite Anime</h1>
3
- <p>Are you a fan of Dragon Ball, the legendary anime series that has captivated millions of viewers around the world? Do you love watching Goku and his friends fight against powerful enemies and save the universe? Do you wish you could join them in their epic battles and unleash your own ki blasts and kamehamehas? If you answered yes to any of these questions, then you are in luck! There is a new way to enjoy your favorite anime on your Android device: Dragon Ball Fanmade Fighter APK!</p>
4
- <h2>dragon ball fanmade fighter apk</h2><br /><p><b><b>DOWNLOAD</b> &#10003; <a href="https://urlin.us/2uT0lG">https://urlin.us/2uT0lG</a></b></p><br /><br />
5
- <h2>What is Dragon Ball Fanmade Fighter APK?</h2>
6
- <p>Dragon Ball Fanmade Fighter APK is a fanmade game that lets you play as your favorite Dragon Ball characters in a 2D fighting game. It is not an official game from Bandai Namco or Toei Animation, but rather a passion project from a group of talented fans who wanted to create their own version of Dragon Ball. The game features:</p>
7
- <ul>
8
- <li>Over 100 characters from Dragon Ball, Dragon Ball Z, Dragon Ball GT, Dragon Ball Super, and Dragon Ball Heroes</li>
9
- <li>Multiple transformations for each character, such as Super Saiyan, Super Saiyan God, Super Saiyan Blue, Ultra Instinct, Golden Frieza, Majin Buu, etc.</li>
10
- <li>Different game modes, such as Arcade Mode, Story Mode, Survival Mode, Versus Mode, Training Mode, etc.</li>
11
- <li>Online multiplayer mode where you can challenge other players from around the world</li>
12
- <li>A ranking system that tracks your wins and losses</li>
13
- <li>A customization option where you can change your character's appearance, voice, skills, etc.</li>
14
- </ul> <p>So, how can you download and install Dragon Ball Fanmade Fighter APK on your Android device? It's very simple. Just follow these steps:</p>
15
- <ol>
16
- <li>Go to the official website of the game, which is <a href="">https://dbfanmadefighter.com</a> </li>
17
- <li>Click on the download button and choose the version that suits your device (there are different versions for different Android versions and devices)</li>
18
- <li>Wait for the download to finish and then locate the APK file on your device</li>
19
- <li>Tap on the APK file and allow the installation from unknown sources (this is necessary because the game is not available on the Google Play Store)</li>
20
- <li>Follow the instructions on the screen and wait for the installation to complete</li>
21
- <li>Launch the game and enjoy!</li>
22
- </ol>
23
- <h2>Why Should You Play Dragon Ball Fanmade Fighter APK?</h2>
24
- <p>You might be wondering why you should play this game over other official or unofficial Dragon Ball games. Well, there are many reasons why Dragon Ball Fanmade Fighter APK is a great choice for any Dragon Ball fan. Here are some of them:</p>
25
- <ul>
26
- <li>The game is free to play and does not require any in-app purchases or subscriptions. You can enjoy all the features and content without spending a dime.</li>
27
- <li>The game is constantly updated and improved by the developers, who listen to the feedback and suggestions of the players. You can expect new characters, stages, modes, and bug fixes regularly.</li>
28
- <li>The game has a unique gameplay mechanic that combines elements of classic 2D fighting games with modern 3D graphics and animations. You can move freely in a 3D environment, but you can also switch to a 2D perspective for more precise and strategic combat.</li>
29
- <li>The game has a variety of game modes that cater to different preferences and skill levels. You can play solo or with friends, online or offline, casually or competitively. You can also create your own custom matches and tournaments.</li>
30
- <li>The game has amazing graphics and sound effects that bring the anime to life. You can see every detail of your character's appearance, expression, and movement. You can also hear their original voice actors and iconic soundtracks from the anime.</li>
31
- </ul>
32
- <h2>How to Play Dragon Ball Fanmade Fighter APK?</h2>
33
- <p>If you are new to Dragon Ball Fanmade Fighter APK, you might need some help to get started. Don't worry, we got you covered. Here are some basic tips on how to play the game:</p>
34
- <ul>
- <li>The basic controls and commands for fighting and navigating are similar to most 2D fighting games. You can use the virtual joystick on the left side of the screen to move your character, and the buttons on the right side of the screen to perform attacks, blocks, dashes, jumps, etc.</li>
- <li>The different characters have different abilities and transformations that you can use in battle. Some abilities are passive, meaning they are always active or triggered by certain conditions. Some abilities are active, meaning you have to press a button or a combination of buttons to activate them. Some abilities require ki, which is the energy meter at the bottom of the screen. You can fill up your ki by attacking, blocking, or charging.</li>
- <li>The various stages and scenarios that you can explore and fight in are based on locations from the anime. Some stages have interactive elements that you can use to your advantage or disadvantage, such as destructible objects, environmental hazards, or power-ups.</li>
- </ul>
- <ol>
- <li><b>How can I support the developers of Dragon Ball Fanmade Fighter APK?</b></li>
- <p>The best way to support the developers of Dragon Ball Fanmade Fighter APK is to play the game and share it with your friends and family. You can also leave a positive rating and review on the website or on social media. You can also donate to the developers through their PayPal account: <a href="https://paypal.me/dbfanmadefighter">https://paypal.me/dbfanmadefighter</a>. Your support will help them continue working on the game and adding more features and content.</p>
- <li><b>Can I play Dragon Ball Fanmade Fighter APK offline?</b></li>
- <p>Yes, you can play Dragon Ball Fanmade Fighter APK offline. You can access most of the game modes and features without an internet connection. However, some features, such as online multiplayer, the ranking system, and updates, require an internet connection to function properly.</p>
- <li><b>Can I play Dragon Ball Fanmade Fighter APK with a controller?</b></li>
- <p>Yes, you can play Dragon Ball Fanmade Fighter APK with a controller. The game supports most Bluetooth controllers that are compatible with Android devices. You can also customize the controller layout and sensitivity in the settings menu.</p>
- </ol>

spaces/7hao/bingo/src/components/header.tsx DELETED
@@ -1,12 +0,0 @@
- import * as React from 'react'
- import { UserMenu } from './user-menu'
-
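- // Note: an async function component is valid here as a React Server Component
- // (e.g. under the Next.js App Router); it is rendered on the server.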
- export async function Header() {
-   return (
-     <header className="sticky top-0 z-50 flex items-center justify-between w-full h-16 px-4 border-b shrink-0 bg-gradient-to-b from-background/10 via-background/50 to-background/80 backdrop-blur-xl">
-       <div className="flex items-center justify-end space-x-2 w-full">
-         <UserMenu />
-       </div>
-     </header>
-   )
- }

spaces/AB-TW/team-ai/agents/tools/smart_domain/association_impl.py DELETED
@@ -1,97 +0,0 @@
- from langchain import LLMChain, PromptTemplate
- from agents.tools.smart_domain.common import getPrefix
- from models import llm
- from langchain.agents import tool
-
- association_impl_architecture = """* Association Implementation: This component provides the implementation of the Association in the domain layer, relying on a Repository.
- ---example code:
- @Component
- @RequiredArgsConstructor
- public class FeaturesImpl implements Features {{
-     private final FeatureDbRepository featureDbRepository;
-
-     Flux<Feature> findAll() {{
-         return featureDbRepository.findAll().map(FeatureDb::toFeature);
-     }}
-
-     Mono<Feature> save(Feature feature) {{
-         return featureDbRepository.save(FeatureDb.fromFeature(feature)).map(FeatureDb::toFeature);
-     }}
- }}
- ---end of example code"""
-
- association_impl_test_strategy = """For the Association Implementation, we write unit tests and stub repository methods with Mockito.
- ---example code:
- @ExtendWith(MockitoExtension.class)
- class FeatureImplTest {{
-     @Mock
-     FeatureDbRepository repository;
-
-     Features features;
-
-     @BeforeEach
-     void setUp() {{
-         features = new FeaturesImpl(repository);
-     }}
-
-     @Test
-     void should_add_success() {{
-         when(repository.save(any(FeatureDb.class))).thenAnswer(invocation -> {{
-             FeatureDb featureDb = invocation.getArgument(0);
-             return Mono.just(featureDb);
-         }});
-
-         features.add(createFeature("featureKey1"))
-                 .as(StepVerifier::create)
-                 .expectNextMatches(config -> config.getId().featureKey().equals("featureKey1")
-                         && config.getDescription().updatedAt() != null
-                         && config.getDescription().createdAt() != null
-                 )
-                 .verifyComplete();
-     }}
-
-     @Test
-     void should_add_return_error_when_repository_save_error() {{
-         Feature feature = createFeature("featureKey1");
-
-         when(repository.save(any(FeatureDb.class))).thenReturn(Mono.error(new DuplicateKeyException("save error")));
-
-         features.add(feature)
-                 .as(StepVerifier::create)
-                 .expectError()
-                 .verify();
-     }}
- }}"""
-
- association_impl_task = """Your task is to generate the Association Implementation tests and production code."""
-
- association_impl_tech_stack = """Java 17, Reactor, Lombok, JUnit 5, Reactor Test, Mockito, Spring Data Reactive Couchbase"""
-
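- # Note: the doubled braces ({{ and }}) in the strings above escape literal braces, so that
- # PromptTemplate's str.format-style substitution leaves them intact in the rendered prompt.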
- ASSOCIATION_IMPL = getPrefix(association_impl_task, association_impl_tech_stack, association_impl_architecture, association_impl_test_strategy) + """
-
- Use the following format:
- request: the request that you need to fulfill
-
- Association Implementation:
- ```
- the Association Implementation code that you write to fulfill the request, following the TechStack and Architecture
- ```
-
- Test:
- ```
- the test code of the Association Implementation that you write to fulfill the request, following the TechStack, Architecture and TestStrategy
- ```
-
- request: {input}
- """
-
- ASSOCIATION_IMPL_PROMPT = PromptTemplate(input_variables=["input"], template=ASSOCIATION_IMPL)
-
- associationChain = LLMChain(llm=llm(temperature=0.1), prompt=ASSOCIATION_IMPL_PROMPT)
-
-
- @tool("Generate Association Implementation Code", return_direct=True)
- def associationImplCodeGenerator(input: str) -> str:
-     '''useful for when you need to generate Association Implementation code'''
-     response = associationChain.run(input)
-     return response

spaces/AP123/ai-avatars/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Dreambooth Web UI
- emoji: ☁️
- colorFrom: pink
- colorTo: red
- sdk: gradio
- sdk_version: 3.11
- app_file: app.py
- pinned: false
- license: mit
- duplicated_from: MirageML/dreambooth
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/ASJMO/freegpt/g4f/Provider/Providers/Forefront.py DELETED
@@ -1,30 +0,0 @@
- import os
- import json
- import requests
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://forefront.com'
- model = ['gpt-3.5-turbo']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     json_data = {
-         'text': messages[-1]['content'],
-         'action': 'noauth',
-         'id': '',
-         'parentId': '',
-         'workspaceId': '',
-         'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
-         'model': 'gpt-4',
-         'messages': messages[:-1] if len(messages) > 1 else [],
-         'internetMode': 'auto'
-     }
-     response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
-                              json=json_data, stream=True)
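-     # The endpoint streams server-sent events; useful chunks look like
-     # 'data: {"delta": "..."}', so keep only lines carrying a 'delta' payload.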
-     for token in response.iter_lines():
-         if b'delta' in token:
-             token = json.loads(token.decode().split('data: ')[1])['delta']
-             yield (token)
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/schedules/.ipynb_checkpoints/custom_schedule-checkpoint.py DELETED
@@ -1,40 +0,0 @@
- optim_wrapper = dict(
-     # Optimize the parameters with the Adam optimizer
-     type='OptimWrapper',
-     optimizer=dict(
-         type='Adam',
-         lr=0.0001,
-         betas=(0.9, 0.999),
-         eps=1e-08,
-         weight_decay=0,
-         amsgrad=False),
-     accumulative_counts=4
- )
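- # With accumulative_counts=4, gradients are accumulated over 4 iterations before each
- # optimizer step, so the effective batch size is 4x the per-iteration batch size.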
-
- # Learning-rate adjustment policy
- param_scheduler = [
-     # Linear warm-up over the first 10 epochs, stepped per iteration
-     dict(type='LinearLR',
-          start_factor=0.00001,
-          by_epoch=True,
-          end=10,
-          convert_to_iter_based=True,  # update the learning rate per iteration
-          ),
-     # After epoch 10, decay the learning rate by `gamma` at each milestone
-     dict(type='MultiStepLR',
-          by_epoch=True,  # update the learning rate per epoch
-          milestones=[30, 120, 200, 270, 330, 390, 450, 510, 580, 660, 750, 840, 930],
-          gamma=0.9)
- ]
-
- # 'by_epoch=True' uses `EpochBaseLoop` by default; 'by_epoch=False' uses `IterBaseLoop`
- train_cfg = dict(by_epoch=True, max_epochs=1024, val_interval=16)
- # Use the default validation loop controller
- val_cfg = dict()
- # Use the default test loop controller
- test_cfg = dict()
-
- # Automatically scale the learning rate with the default policy, which assumes a total batch size of 256.
- # For a different total batch size, e.g. 512, with automatic scaling enabled,
- # the learning rate is scaled up by 2x.
- # auto_scale_lr = dict(base_batch_size=256)

spaces/Adapter/T2I-Adapter/ldm/modules/attention.py DELETED
@@ -1,344 +0,0 @@
- from inspect import isfunction
- import math
- import torch
- import torch.nn.functional as F
- from torch import nn, einsum
- from einops import rearrange, repeat
- from typing import Optional, Any
-
- from ldm.modules.diffusionmodules.util import checkpoint
-
-
- try:
-     import xformers
-     import xformers.ops
-     XFORMERS_IS_AVAILBLE = True
- except:
-     XFORMERS_IS_AVAILBLE = False
-
- # CrossAttn precision handling
- import os
- _ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
-
- if os.environ.get("DISABLE_XFORMERS", "false").lower() == 'true':
-     XFORMERS_IS_AVAILBLE = False
-
-
- def exists(val):
-     return val is not None
-
-
- def uniq(arr):
-     return {el: True for el in arr}.keys()
-
-
- def default(val, d):
-     if exists(val):
-         return val
-     return d() if isfunction(d) else d
-
-
- def max_neg_value(t):
-     return -torch.finfo(t.dtype).max
-
-
- def init_(tensor):
-     dim = tensor.shape[-1]
-     std = 1 / math.sqrt(dim)
-     tensor.uniform_(-std, std)
-     return tensor
-
-
- # feedforward
- class GEGLU(nn.Module):
-     def __init__(self, dim_in, dim_out):
-         super().__init__()
-         self.proj = nn.Linear(dim_in, dim_out * 2)
-
-     def forward(self, x):
-         x, gate = self.proj(x).chunk(2, dim=-1)
-         return x * F.gelu(gate)
-
-
- class FeedForward(nn.Module):
-     def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
-         super().__init__()
-         inner_dim = int(dim * mult)
-         dim_out = default(dim_out, dim)
-         project_in = nn.Sequential(
-             nn.Linear(dim, inner_dim),
-             nn.GELU()
-         ) if not glu else GEGLU(dim, inner_dim)
-
-         self.net = nn.Sequential(
-             project_in,
-             nn.Dropout(dropout),
-             nn.Linear(inner_dim, dim_out)
-         )
-
-     def forward(self, x):
-         return self.net(x)
-
-
- def zero_module(module):
-     """
-     Zero out the parameters of a module and return it.
-     """
-     for p in module.parameters():
-         p.detach().zero_()
-     return module
-
-
- def Normalize(in_channels):
-     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
-
- class SpatialSelfAttention(nn.Module):
-     def __init__(self, in_channels):
-         super().__init__()
-         self.in_channels = in_channels
-
-         self.norm = Normalize(in_channels)
-         self.q = torch.nn.Conv2d(in_channels,
-                                  in_channels,
-                                  kernel_size=1,
-                                  stride=1,
-                                  padding=0)
-         self.k = torch.nn.Conv2d(in_channels,
-                                  in_channels,
-                                  kernel_size=1,
-                                  stride=1,
-                                  padding=0)
-         self.v = torch.nn.Conv2d(in_channels,
-                                  in_channels,
-                                  kernel_size=1,
-                                  stride=1,
-                                  padding=0)
-         self.proj_out = torch.nn.Conv2d(in_channels,
-                                         in_channels,
-                                         kernel_size=1,
-                                         stride=1,
-                                         padding=0)
-
-     def forward(self, x):
-         h_ = x
-         h_ = self.norm(h_)
-         q = self.q(h_)
-         k = self.k(h_)
-         v = self.v(h_)
-
-         # compute attention
-         b, c, h, w = q.shape
-         q = rearrange(q, 'b c h w -> b (h w) c')
-         k = rearrange(k, 'b c h w -> b c (h w)')
-         w_ = torch.einsum('bij,bjk->bik', q, k)
-
-         w_ = w_ * (int(c)**(-0.5))
-         w_ = torch.nn.functional.softmax(w_, dim=2)
-
-         # attend to values
-         v = rearrange(v, 'b c h w -> b c (h w)')
-         w_ = rearrange(w_, 'b i j -> b j i')
-         h_ = torch.einsum('bij,bjk->bik', v, w_)
-         h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
-         h_ = self.proj_out(h_)
-
-         return x + h_
-
-
- class CrossAttention(nn.Module):
-     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
-         super().__init__()
-         inner_dim = dim_head * heads
-         context_dim = default(context_dim, query_dim)
-
-         self.scale = dim_head ** -0.5
-         self.heads = heads
-
-         self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
-         self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
-         self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
-
-         self.to_out = nn.Sequential(
-             nn.Linear(inner_dim, query_dim),
-             nn.Dropout(dropout)
-         )
-
-     def forward(self, x, context=None, mask=None):
-         h = self.heads
-
-         q = self.to_q(x)
-         context = default(context, x)
-         k = self.to_k(context)
-         v = self.to_v(context)
-
-         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-
-         # force cast to fp32 to avoid overflowing
-         if _ATTN_PRECISION == "fp32":
-             with torch.autocast(enabled=False, device_type='cuda'):
-                 q, k = q.float(), k.float()
-                 sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
-         else:
-             sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
-
-         del q, k
-
-         if exists(mask):
-             mask = rearrange(mask, 'b ... -> b (...)')
-             max_neg_value = -torch.finfo(sim.dtype).max
-             mask = repeat(mask, 'b j -> (b h) () j', h=h)
-             sim.masked_fill_(~mask, max_neg_value)
-
-         # attention, what we cannot get enough of
-         sim = sim.softmax(dim=-1)
-
-         out = einsum('b i j, b j d -> b i d', sim, v)
-         out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
-         return self.to_out(out)
-
-
- class MemoryEfficientCrossAttention(nn.Module):
-     # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
-     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
-         super().__init__()
-         print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
-               f"{heads} heads.")
-         inner_dim = dim_head * heads
-         context_dim = default(context_dim, query_dim)
-
-         self.heads = heads
-         self.dim_head = dim_head
-
-         self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
-         self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
-         self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
-
-         self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
-         self.attention_op: Optional[Any] = None
-
-     def forward(self, x, context=None, mask=None):
-         q = self.to_q(x)
-         context = default(context, x)
-         k = self.to_k(context)
-         v = self.to_v(context)
-
-         b, _, _ = q.shape
-         q, k, v = map(
-             lambda t: t.unsqueeze(3)
-             .reshape(b, t.shape[1], self.heads, self.dim_head)
-             .permute(0, 2, 1, 3)
-             .reshape(b * self.heads, t.shape[1], self.dim_head)
-             .contiguous(),
-             (q, k, v),
-         )
-
-         # actually compute the attention, what we cannot get enough of
-         out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
-
-         if exists(mask):
-             raise NotImplementedError
-         out = (
-             out.unsqueeze(0)
-             .reshape(b, self.heads, out.shape[1], self.dim_head)
-             .permute(0, 2, 1, 3)
-             .reshape(b, out.shape[1], self.heads * self.dim_head)
-         )
-         return self.to_out(out)
-
-
- class BasicTransformerBlock(nn.Module):
-     ATTENTION_MODES = {
-         "softmax": CrossAttention,  # vanilla attention
-         "softmax-xformers": MemoryEfficientCrossAttention
-     }
-     def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
-                  disable_self_attn=False):
-         super().__init__()
-         attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
-         assert attn_mode in self.ATTENTION_MODES
-         attn_cls = self.ATTENTION_MODES[attn_mode]
-         self.disable_self_attn = disable_self_attn
-         self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
-                               context_dim=context_dim if self.disable_self_attn else None)  # is a self-attention if not self.disable_self_attn
-         self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
-         self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim,
-                               heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
-         self.norm1 = nn.LayerNorm(dim)
-         self.norm2 = nn.LayerNorm(dim)
-         self.norm3 = nn.LayerNorm(dim)
-         self.checkpoint = checkpoint
-
-     def forward(self, x, context=None):
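-         # Gradient checkpointing (when self.checkpoint is True) recomputes this block's
-         # activations during the backward pass instead of storing them, trading compute for memory.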
-         return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
-
-     def _forward(self, x, context=None):
-         x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
-         x = self.attn2(self.norm2(x), context=context) + x
-         x = self.ff(self.norm3(x)) + x
-         return x
-
-
- class SpatialTransformer(nn.Module):
-     """
-     Transformer block for image-like data.
-     First, project the input (aka embedding)
-     and reshape to b, t, d.
-     Then apply standard transformer action.
-     Finally, reshape to image
-     NEW: use_linear for more efficiency instead of the 1x1 convs
-     """
-     def __init__(self, in_channels, n_heads, d_head,
-                  depth=1, dropout=0., context_dim=None,
-                  disable_self_attn=False, use_linear=False,
-                  use_checkpoint=True):
-         super().__init__()
-         if exists(context_dim) and not isinstance(context_dim, list):
-             context_dim = [context_dim]
-         self.in_channels = in_channels
-         inner_dim = n_heads * d_head
-         self.norm = Normalize(in_channels)
-         if not use_linear:
-             self.proj_in = nn.Conv2d(in_channels,
-                                      inner_dim,
-                                      kernel_size=1,
-                                      stride=1,
-                                      padding=0)
-         else:
-             self.proj_in = nn.Linear(in_channels, inner_dim)
-
-         self.transformer_blocks = nn.ModuleList(
-             [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
-                                    disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)
-              for d in range(depth)]
-         )
-         if not use_linear:
-             self.proj_out = zero_module(nn.Conv2d(inner_dim,
-                                                   in_channels,
-                                                   kernel_size=1,
-                                                   stride=1,
-                                                   padding=0))
-         else:
-             self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
-         self.use_linear = use_linear
-
-     def forward(self, x, context=None):
-         # note: if no context is given, cross-attention defaults to self-attention
-         if not isinstance(context, list):
-             context = [context]
-         b, c, h, w = x.shape
-         x_in = x
-         x = self.norm(x)
-         if not self.use_linear:
-             x = self.proj_in(x)
-         x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
-         if self.use_linear:
-             x = self.proj_in(x)
-         for i, block in enumerate(self.transformer_blocks):
-             x = block(x, context=context[i])
-         if self.use_linear:
-             x = self.proj_out(x)
-         x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
-         if not self.use_linear:
-             x = self.proj_out(x)
-         return x + x_in

spaces/Adapter/T2I-Adapter/ldm/modules/encoders/__init__.py DELETED
File without changes
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/midas/vit.py DELETED
@@ -1,491 +0,0 @@
- import torch
- import torch.nn as nn
- import timm
- import types
- import math
- import torch.nn.functional as F
-
-
- class Slice(nn.Module):
-     def __init__(self, start_index=1):
-         super(Slice, self).__init__()
-         self.start_index = start_index
-
-     def forward(self, x):
-         return x[:, self.start_index :]
-
-
- class AddReadout(nn.Module):
-     def __init__(self, start_index=1):
-         super(AddReadout, self).__init__()
-         self.start_index = start_index
-
-     def forward(self, x):
-         if self.start_index == 2:
-             readout = (x[:, 0] + x[:, 1]) / 2
-         else:
-             readout = x[:, 0]
-         return x[:, self.start_index :] + readout.unsqueeze(1)
-
-
- class ProjectReadout(nn.Module):
-     def __init__(self, in_features, start_index=1):
-         super(ProjectReadout, self).__init__()
-         self.start_index = start_index
-
-         self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
-
-     def forward(self, x):
-         readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
-         features = torch.cat((x[:, self.start_index :], readout), -1)
-
-         return self.project(features)
-
-
- class Transpose(nn.Module):
-     def __init__(self, dim0, dim1):
-         super(Transpose, self).__init__()
-         self.dim0 = dim0
-         self.dim1 = dim1
-
-     def forward(self, x):
-         x = x.transpose(self.dim0, self.dim1)
-         return x
-
-
- def forward_vit(pretrained, x):
-     b, c, h, w = x.shape
-
-     glob = pretrained.model.forward_flex(x)
-
-     layer_1 = pretrained.activations["1"]
-     layer_2 = pretrained.activations["2"]
-     layer_3 = pretrained.activations["3"]
-     layer_4 = pretrained.activations["4"]
-
-     layer_1 = pretrained.act_postprocess1[0:2](layer_1)
-     layer_2 = pretrained.act_postprocess2[0:2](layer_2)
-     layer_3 = pretrained.act_postprocess3[0:2](layer_3)
-     layer_4 = pretrained.act_postprocess4[0:2](layer_4)
-
-     unflatten = nn.Sequential(
-         nn.Unflatten(
-             2,
-             torch.Size(
-                 [
-                     h // pretrained.model.patch_size[1],
-                     w // pretrained.model.patch_size[0],
-                 ]
-             ),
-         )
-     )
-
-     if layer_1.ndim == 3:
-         layer_1 = unflatten(layer_1)
-     if layer_2.ndim == 3:
-         layer_2 = unflatten(layer_2)
-     if layer_3.ndim == 3:
-         layer_3 = unflatten(layer_3)
-     if layer_4.ndim == 3:
-         layer_4 = unflatten(layer_4)
-
-     layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
-     layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
-     layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
-     layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
-
-     return layer_1, layer_2, layer_3, layer_4
-
-
- def _resize_pos_embed(self, posemb, gs_h, gs_w):
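-     # Split the readout/class tokens from the patch-grid tokens, then bilinearly resize
-     # the grid part of the position embedding to the new (gs_h, gs_w) token grid.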
-     posemb_tok, posemb_grid = (
-         posemb[:, : self.start_index],
-         posemb[0, self.start_index :],
-     )
-
-     gs_old = int(math.sqrt(len(posemb_grid)))
-
-     posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
-     posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
-     posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
-
-     posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
-
-     return posemb
-
-
- def forward_flex(self, x):
-     b, c, h, w = x.shape
-
-     pos_embed = self._resize_pos_embed(
-         self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
-     )
-
-     B = x.shape[0]
-
-     if hasattr(self.patch_embed, "backbone"):
-         x = self.patch_embed.backbone(x)
-         if isinstance(x, (list, tuple)):
-             x = x[-1]  # last feature if backbone outputs list/tuple of features
-
-     x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
-
-     if getattr(self, "dist_token", None) is not None:
-         cls_tokens = self.cls_token.expand(
-             B, -1, -1
-         )  # stole cls_tokens impl from Phil Wang, thanks
-         dist_token = self.dist_token.expand(B, -1, -1)
-         x = torch.cat((cls_tokens, dist_token, x), dim=1)
-     else:
-         cls_tokens = self.cls_token.expand(
-             B, -1, -1
-         )  # stole cls_tokens impl from Phil Wang, thanks
-         x = torch.cat((cls_tokens, x), dim=1)
-
-     x = x + pos_embed
-     x = self.pos_drop(x)
-
-     for blk in self.blocks:
-         x = blk(x)
-
-     x = self.norm(x)
-
-     return x
-
-
- activations = {}
-
-
- def get_activation(name):
-     def hook(model, input, output):
-         activations[name] = output
-
-     return hook
-
-
- def get_readout_oper(vit_features, features, use_readout, start_index=1):
-     if use_readout == "ignore":
-         readout_oper = [Slice(start_index)] * len(features)
-     elif use_readout == "add":
-         readout_oper = [AddReadout(start_index)] * len(features)
-     elif use_readout == "project":
-         readout_oper = [
-             ProjectReadout(vit_features, start_index) for out_feat in features
-         ]
-     else:
-         assert (
-             False
-         ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
-
-     return readout_oper
-
-
- def _make_vit_b16_backbone(
-     model,
-     features=[96, 192, 384, 768],
-     size=[384, 384],
-     hooks=[2, 5, 8, 11],
-     vit_features=768,
-     use_readout="ignore",
-     start_index=1,
- ):
-     pretrained = nn.Module()
-
-     pretrained.model = model
-     pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
-     pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
-     pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
-     pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
-
-     pretrained.activations = activations
-
-     readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
-
-     # 32, 48, 136, 384
-     pretrained.act_postprocess1 = nn.Sequential(
-         readout_oper[0],
-         Transpose(1, 2),
-         nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-         nn.Conv2d(
-             in_channels=vit_features,
-             out_channels=features[0],
-             kernel_size=1,
-             stride=1,
-             padding=0,
-         ),
-         nn.ConvTranspose2d(
-             in_channels=features[0],
-             out_channels=features[0],
-             kernel_size=4,
-             stride=4,
-             padding=0,
-             bias=True,
-             dilation=1,
-             groups=1,
-         ),
-     )
-
-     pretrained.act_postprocess2 = nn.Sequential(
-         readout_oper[1],
-         Transpose(1, 2),
-         nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-         nn.Conv2d(
-             in_channels=vit_features,
-             out_channels=features[1],
-             kernel_size=1,
-             stride=1,
-             padding=0,
-         ),
-         nn.ConvTranspose2d(
-             in_channels=features[1],
-             out_channels=features[1],
-             kernel_size=2,
-             stride=2,
-             padding=0,
-             bias=True,
-             dilation=1,
-             groups=1,
-         ),
-     )
-
-     pretrained.act_postprocess3 = nn.Sequential(
-         readout_oper[2],
-         Transpose(1, 2),
-         nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-         nn.Conv2d(
-             in_channels=vit_features,
-             out_channels=features[2],
-             kernel_size=1,
-             stride=1,
-             padding=0,
-         ),
-     )
-
-     pretrained.act_postprocess4 = nn.Sequential(
-         readout_oper[3],
-         Transpose(1, 2),
-         nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-         nn.Conv2d(
-             in_channels=vit_features,
-             out_channels=features[3],
-             kernel_size=1,
-             stride=1,
-             padding=0,
-         ),
-         nn.Conv2d(
-             in_channels=features[3],
-             out_channels=features[3],
-             kernel_size=3,
-             stride=2,
-             padding=1,
-         ),
-     )
-
-     pretrained.model.start_index = start_index
-     pretrained.model.patch_size = [16, 16]
-
-     # We inject this function into the VisionTransformer instances so that
-     # we can use it with interpolated position embeddings without modifying the library source.
-     pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
-     pretrained.model._resize_pos_embed = types.MethodType(
-         _resize_pos_embed, pretrained.model
-     )
-
-     return pretrained
-
-
- def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
-     model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
-
-     hooks = [5, 11, 17, 23] if hooks is None else hooks
-     return _make_vit_b16_backbone(
-         model,
-         features=[256, 512, 1024, 1024],
-         hooks=hooks,
-         vit_features=1024,
-         use_readout=use_readout,
-     )
-
-
- def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
-     model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
-
-     hooks = [2, 5, 8, 11] if hooks is None else hooks
-     return _make_vit_b16_backbone(
-         model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
-     )
-
-
- def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
-     model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
-
-     hooks = [2, 5, 8, 11] if hooks is None else hooks
-     return _make_vit_b16_backbone(
-         model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
-     )
-
-
- def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
-     model = timm.create_model(
-         "vit_deit_base_distilled_patch16_384", pretrained=pretrained
-     )
-
-     hooks = [2, 5, 8, 11] if hooks is None else hooks
-     return _make_vit_b16_backbone(
-         model,
-         features=[96, 192, 384, 768],
-         hooks=hooks,
-         use_readout=use_readout,
-         start_index=2,
-     )
-
-
- def _make_vit_b_rn50_backbone(
-     model,
-     features=[256, 512, 768, 768],
-     size=[384, 384],
-     hooks=[0, 1, 8, 11],
-     vit_features=768,
-     use_vit_only=False,
-     use_readout="ignore",
-     start_index=1,
- ):
-     pretrained = nn.Module()
-
-     pretrained.model = model
-
-     if use_vit_only:
-         pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
-         pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
-     else:
-         pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
-             get_activation("1")
-         )
-         pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
-             get_activation("2")
-         )
-
-     pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
-     pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
-
-     pretrained.activations = activations
-
-     readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
-
-     if use_vit_only:
-         pretrained.act_postprocess1 = nn.Sequential(
-             readout_oper[0],
-             Transpose(1, 2),
-             nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-             nn.Conv2d(
-                 in_channels=vit_features,
-                 out_channels=features[0],
-                 kernel_size=1,
-                 stride=1,
-                 padding=0,
-             ),
-             nn.ConvTranspose2d(
-                 in_channels=features[0],
-                 out_channels=features[0],
-                 kernel_size=4,
-                 stride=4,
-                 padding=0,
-                 bias=True,
-                 dilation=1,
-                 groups=1,
-             ),
-         )
-
-         pretrained.act_postprocess2 = nn.Sequential(
-             readout_oper[1],
-             Transpose(1, 2),
-             nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-             nn.Conv2d(
-                 in_channels=vit_features,
-                 out_channels=features[1],
-                 kernel_size=1,
-                 stride=1,
-                 padding=0,
-             ),
-             nn.ConvTranspose2d(
-                 in_channels=features[1],
-                 out_channels=features[1],
-                 kernel_size=2,
-                 stride=2,
-                 padding=0,
-                 bias=True,
-                 dilation=1,
-                 groups=1,
-             ),
-         )
-     else:
-         pretrained.act_postprocess1 = nn.Sequential(
-             nn.Identity(), nn.Identity(), nn.Identity()
-         )
-         pretrained.act_postprocess2 = nn.Sequential(
-             nn.Identity(), nn.Identity(), nn.Identity()
-         )
-
-     pretrained.act_postprocess3 = nn.Sequential(
-         readout_oper[2],
-         Transpose(1, 2),
-         nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-         nn.Conv2d(
-             in_channels=vit_features,
-             out_channels=features[2],
-             kernel_size=1,
-             stride=1,
-             padding=0,
-         ),
-     )
-
-     pretrained.act_postprocess4 = nn.Sequential(
-         readout_oper[3],
-         Transpose(1, 2),
-         nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-         nn.Conv2d(
-             in_channels=vit_features,
-             out_channels=features[3],
-             kernel_size=1,
-             stride=1,
-             padding=0,
-         ),
-         nn.Conv2d(
-             in_channels=features[3],
-             out_channels=features[3],
-             kernel_size=3,
-             stride=2,
-             padding=1,
-         ),
-     )
-
-     pretrained.model.start_index = start_index
-     pretrained.model.patch_size = [16, 16]
-
-     # We inject this function into the VisionTransformer instances so that
-     # we can use it with interpolated position embeddings without modifying the library source.
-     pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
-
-     # We inject this function into the VisionTransformer instances so that
-     # we can use it with interpolated position embeddings without modifying the library source.
-     pretrained.model._resize_pos_embed = types.MethodType(
-         _resize_pos_embed, pretrained.model
-     )
-
-     return pretrained
-
-
- def _make_pretrained_vitb_rn50_384(
-     pretrained, use_readout="ignore", hooks=None, use_vit_only=False
- ):
-     model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
-
-     hooks = [0, 1, 8, 11] if hooks is None else hooks
-     return _make_vit_b_rn50_backbone(
-         model,
-         features=[256, 512, 768, 768],
-         size=[384, 384],
-         hooks=hooks,
-         use_vit_only=use_vit_only,
-         use_readout=use_readout,
-     )

spaces/Aditya9790/yolo7-object-tracking/utils/google_utils.py DELETED
@@ -1,123 +0,0 @@
- # Google utils: https://cloud.google.com/storage/docs/reference/libraries
-
- import os
- import platform
- import subprocess
- import time
- from pathlib import Path
-
- import requests
- import torch
-
-
- def gsutil_getsize(url=''):
-     # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
-     s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
-     return eval(s.split(' ')[0]) if len(s) else 0  # bytes
-
-
- def attempt_download(file, repo='WongKinYiu/yolov7'):
-     # Attempt file download if does not exist
-     file = Path(str(file).strip().replace("'", '').lower())
-
-     if not file.exists():
-         try:
-             response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
-             assets = [x['name'] for x in response['assets']]  # release assets
-             tag = response['tag_name']  # i.e. 'v1.0'
-         except:  # fallback plan
-             assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt',
-                       'yolov7-e6e.pt', 'yolov7-w6.pt']
-             tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]
-
-         name = file.name
-         if name in assets:
-             msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'
-             redundant = False  # second download option
-             try:  # GitHub
-                 url = f'https://github.com/{repo}/releases/download/{tag}/{name}'
-                 print(f'Downloading {url} to {file}...')
-                 torch.hub.download_url_to_file(url, file)
-                 assert file.exists() and file.stat().st_size > 1E6  # check
-             except Exception as e:  # GCP
-                 print(f'Download error: {e}')
-                 assert redundant, 'No secondary mirror'
-                 url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'
-                 print(f'Downloading {url} to {file}...')
-                 os.system(f'curl -L {url} -o {file}')  # torch.hub.download_url_to_file(url, weights)
-             finally:
-                 if not file.exists() or file.stat().st_size < 1E6:  # check
-                     file.unlink(missing_ok=True)  # remove partial downloads
-                     print(f'ERROR: Download failure: {msg}')
-                 print('')
-                 return
-
-
- def gdrive_download(id='', file='tmp.zip'):
-     # Downloads a file from Google Drive. from yolov7.utils.google_utils import *; gdrive_download()
-     t = time.time()
-     file = Path(file)
-     cookie = Path('cookie')  # gdrive cookie
-     print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
-     file.unlink(missing_ok=True)  # remove existing file
-     cookie.unlink(missing_ok=True)  # remove existing cookie
-
-     # Attempt file download
-     out = "NUL" if platform.system() == "Windows" else "/dev/null"
-     os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
-     if os.path.exists('cookie'):  # large file
-         s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
-     else:  # small file
-         s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
-     r = os.system(s)  # execute, capture return
-     cookie.unlink(missing_ok=True)  # remove existing cookie
-
-     # Error check
-     if r != 0:
-         file.unlink(missing_ok=True)  # remove partial
-         print('Download error ')  # raise Exception('Download error')
-         return r
-
-     # Unzip if archive
-     if file.suffix == '.zip':
-         print('unzipping... ', end='')
-         os.system(f'unzip -q {file}')  # unzip
-         file.unlink()  # remove zip to free space
-
-     print(f'Done ({time.time() - t:.1f}s)')
-     return r
-
-
- def get_token(cookie="./cookie"):
- with open(cookie) as f:
93
- for line in f:
94
- if "download" in line:
95
- return line.split()[-1]
96
- return ""
97
-
98
- # def upload_blob(bucket_name, source_file_name, destination_blob_name):
99
- # # Uploads a file to a bucket
100
- # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
101
- #
102
- # storage_client = storage.Client()
103
- # bucket = storage_client.get_bucket(bucket_name)
104
- # blob = bucket.blob(destination_blob_name)
105
- #
106
- # blob.upload_from_filename(source_file_name)
107
- #
108
- # print('File {} uploaded to {}.'.format(
109
- # source_file_name,
110
- # destination_blob_name))
111
- #
112
- #
113
- # def download_blob(bucket_name, source_blob_name, destination_file_name):
114
- # # Uploads a blob from a bucket
115
- # storage_client = storage.Client()
116
- # bucket = storage_client.get_bucket(bucket_name)
117
- # blob = bucket.blob(source_blob_name)
118
- #
119
- # blob.download_to_filename(destination_file_name)
120
- #
121
- # print('Blob {} downloaded to {}.'.format(
122
- # source_blob_name,
123
- # destination_file_name))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/order/sequential.py DELETED
@@ -1,28 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, List
-
- from . import order_registry as OrderRegistry
- from .base import BaseOrder
-
- if TYPE_CHECKING:
-     from agentverse.environments import BaseEnvironment
-
-
- @OrderRegistry.register("sequential")
- class SequentialOrder(BaseOrder):
-     """
-     Order for sequential conversation
-     The agents speak in a round-robin fashion
-     """
-
-     next_agent_idx: int = 0
-
-     def get_next_agent_idx(self, environment: BaseEnvironment) -> List[int]:
-         """Return the index of the next agent to speak"""
-         ret = self.next_agent_idx
-         self.next_agent_idx = (self.next_agent_idx + 1) % len(environment.agents)
-         return [ret]
-
-     def reset(self) -> None:
-         self.next_agent_idx = 0

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridbuttons/RemoveChildMethods.js DELETED
@@ -1,50 +0,0 @@
- import GridSizer from '../gridsizer/GridSizer.js';
- import IsArray from '../../../plugins/utils/object/IsArray.js';
-
- const SizerRemove = GridSizer.prototype.remove;
- const SizerClear = GridSizer.prototype.clear;
-
- var Remove = function (gameObject, destroyChild) {
-     gameObject = this.getButton(gameObject);
-     if (!gameObject) {
-         return this;
-     }
-
-     this.buttonGroup.remove(gameObject);
-     SizerRemove.call(this, gameObject, destroyChild);
-     return this;
- };
-
- export default {
-     remove(gameObject, destroyChild) {
-         if (IsArray(gameObject)) {
-             var gameObjects = gameObject;
-             for (var i = 0, cnt = gameObjects.length; i < cnt; i++) {
-                 Remove.call(this, gameObjects[i], destroyChild);
-             }
-         } else {
-             Remove.call(this, gameObject, destroyChild);
-         }
-         return this;
-     },
-
-     clear(destroyChild) {
-         var buttons = this.buttonGroup.buttons;
-         buttons.length = 0;
-         SizerClear.call(this, destroyChild);
-         return this;
-     },
-
-     removeButton(gameObject, destroyChild) {
-         this.remove(gameObject, destroyChild);
-         return this;
-     },
-
-     clearButtons(destroyChild) {
-         var buttons = this.buttonGroup.buttons;
-         for (var i = buttons.length - 1; i >= 0; i--) {
-             Remove.call(this, buttons[i], destroyChild);
-         }
-         return this;
-     }
- }

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/ResolveHeight.js DELETED
@@ -1,24 +0,0 @@
- import ResolveHeightBase from '../basesizer/ResolveHeight.js';
-
- var ResolveHeight = function (height) {
-     height = ResolveHeightBase.call(this, height);
-
-     // Get proportionLength
-     if (this.proportionHeightLength === undefined) {
-         var totalRowProportions = this.totalRowProportions;
-         if (totalRowProportions > 0) {
-             var remainder = height - this.getChildrenHeight(false);
-             if (remainder >= 0) {
-                 this.proportionHeightLength = remainder / totalRowProportions;
-             } else {
-                 // Warning
-             }
-         } else {
-             this.proportionHeightLength = 0;
-         }
-     }
-
-     return height;
- }
-
- export default ResolveHeight;

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/label/methods/ResetDisplayContent.js DELETED
@@ -1,53 +0,0 @@
- var ResetDisplayContent = function (config) {
-     if (config === undefined) {
-         config = {};
-     } else if (typeof (config) === 'string') {
-         config = {
-             text: config,
-         }
-     }
-
-     var text = config.text || '';
-     this.setText(text);
-
-     var iconGameObject = this.childrenMap.icon;
-     if (iconGameObject) {
-         if (config.icon === undefined) {
-             this.hide(iconGameObject);
-         } else {
-             this.show(iconGameObject);
-         }
-         var iconSize = config.iconSize;
-         if (iconSize) {
-             this.setChildDisplaySize(iconGameObject, iconSize, iconSize);
-
-             if (this.iconWidth !== undefined) {
-                 this.setIconSize(iconSize);
-             }
-         }
-         this.setIconTexture(config.icon, config.iconFrame);
-     }
-
-     var actionGameObject = this.childrenMap.action;
-     if (actionGameObject) {
-         if (config.action === undefined) {
-             this.hide(actionGameObject);
-         } else {
-             this.show(actionGameObject);
-         }
-         var actionSize = config.actionSize;
-         if (actionSize) {
-             this.setChildDisplaySize(actionGameObject, actionSize, actionSize);
-
-             if (this.actionWidth !== undefined) {
-                 this.setActionSize(actionSize);
-             }
-         }
-         this.setActionTexture(config.action, config.actionFrame);
-     }
-
-     return this;
- }
-
- export default ResetDisplayContent;

spaces/Ajay-user/Optical-Character-Recognition/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Optical Character Recognition
- emoji: 🌍
- colorFrom: gray
- colorTo: red
- sdk: streamlit
- sdk_version: 1.25.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Ajaymekala/gradiolangchainChatBotOpenAI-1/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: GradiolangchainChatBotOpenAI 1
- emoji: 🌖
- colorFrom: indigo
- colorTo: red
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/inference/image_to_image.py DELETED
@@ -1,9 +0,0 @@
- import warnings
-
- from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
-
-
- warnings.warn(
-     "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
-     " StableDiffusionImg2ImgPipeline` instead."
- )

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/controlnet/multicontrolnet.py DELETED
@@ -1,185 +0,0 @@
- import os
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
- import torch
- from torch import nn
-
- from ...models.controlnet import ControlNetModel, ControlNetOutput
- from ...models.modeling_utils import ModelMixin
- from ...utils import logging
-
-
- logger = logging.get_logger(__name__)
-
-
- class MultiControlNetModel(ModelMixin):
-     r"""
-     Multiple `ControlNetModel` wrapper class for Multi-ControlNet
-
-     This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be
-     compatible with `ControlNetModel`.
-
-     Args:
-         controlnets (`List[ControlNetModel]`):
-             Provides additional conditioning to the unet during the denoising process. You must set multiple
-             `ControlNetModel` as a list.
-     """
-
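-     # A hypothetical usage sketch: MultiControlNetModel([canny_controlnet, pose_controlnet])
-     # runs every wrapped net in forward() and sums their down/mid residuals elementwise.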
28
- def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
29
- super().__init__()
30
- self.nets = nn.ModuleList(controlnets)
31
-
32
- def forward(
33
- self,
34
- sample: torch.FloatTensor,
35
- timestep: Union[torch.Tensor, float, int],
36
- encoder_hidden_states: torch.Tensor,
37
- controlnet_cond: List[torch.tensor],
38
- conditioning_scale: List[float],
39
- class_labels: Optional[torch.Tensor] = None,
40
- timestep_cond: Optional[torch.Tensor] = None,
41
- attention_mask: Optional[torch.Tensor] = None,
42
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
43
- guess_mode: bool = False,
44
- return_dict: bool = True,
45
- ) -> Union[ControlNetOutput, Tuple]:
46
-        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
-            down_samples, mid_sample = controlnet(
-                sample=sample,
-                timestep=timestep,
-                encoder_hidden_states=encoder_hidden_states,
-                controlnet_cond=image,
-                conditioning_scale=scale,
-                class_labels=class_labels,
-                timestep_cond=timestep_cond,
-                attention_mask=attention_mask,
-                cross_attention_kwargs=cross_attention_kwargs,
-                guess_mode=guess_mode,
-                return_dict=return_dict,
-            )
-
-            # merge samples
-            if i == 0:
-                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
-            else:
-                down_block_res_samples = [
-                    samples_prev + samples_curr
-                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
-                ]
-                mid_block_res_sample += mid_sample
-
-        return down_block_res_samples, mid_block_res_sample
-
-    def save_pretrained(
-        self,
-        save_directory: Union[str, os.PathLike],
-        is_main_process: bool = True,
-        save_function: Callable = None,
-        safe_serialization: bool = False,
-        variant: Optional[str] = None,
-    ):
-        """
-        Save a model and its configuration file to a directory, so that it can be re-loaded using the
-        [`~pipelines.controlnet.MultiControlNetModel.from_pretrained`] class method.
-
-        Arguments:
-            save_directory (`str` or `os.PathLike`):
-                Directory to which to save. Will be created if it doesn't exist.
-            is_main_process (`bool`, *optional*, defaults to `True`):
-                Whether the process calling this is the main process or not. Useful during distributed training
-                (e.g., on TPUs) when you need to call this function on all processes. In this case, set
-                `is_main_process=True` only on the main process to avoid race conditions.
-            save_function (`Callable`):
-                The function to use to save the state dictionary. Useful during distributed training (e.g., on TPUs)
-                when you need to replace `torch.save` with another method. Can be configured with the environment
-                variable `DIFFUSERS_SAVE_MODE`.
-            safe_serialization (`bool`, *optional*, defaults to `False`):
-                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
-            variant (`str`, *optional*):
-                If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
-        """
-        idx = 0
-        model_path_to_save = save_directory
-        for controlnet in self.nets:
-            controlnet.save_pretrained(
-                model_path_to_save,
-                is_main_process=is_main_process,
-                save_function=save_function,
-                safe_serialization=safe_serialization,
-                variant=variant,
-            )
-
-            idx += 1
-            model_path_to_save = model_path_to_save + f"_{idx}"
-
-    @classmethod
-    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
-        r"""
-        Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models.
-
-        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
-        the model, you should first set it back in training mode with `model.train()`.
-
-        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
-        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
-        task.
-
-        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
-        weights are discarded.
-
-        Parameters:
-            pretrained_model_path (`os.PathLike`):
-                A path to a *directory* containing model weights saved using
-                [`~diffusers.pipelines.controlnet.MultiControlNetModel.save_pretrained`], e.g.,
-                `./my_model_directory/controlnet`.
-            torch_dtype (`str` or `torch.dtype`, *optional*):
-                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed, the
-                dtype will be automatically derived from the model's weights.
-            output_loading_info (`bool`, *optional*, defaults to `False`):
-                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
-            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
-                A map that specifies where each submodule should go. It doesn't need to be refined to each
-                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
-                same device.
-
-                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
-                more information about each option see [designing a device
-                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
-            max_memory (`Dict`, *optional*):
-                A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory
-                available for each GPU and the available CPU RAM if unset.
-            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
-                Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
-                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
-                model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
-                setting this argument to `True` will raise an error.
-            variant (`str`, *optional*):
-                If specified, load weights from the `variant` filename, *e.g.* `pytorch_model.<variant>.bin`.
-                `variant` is ignored when using `from_flax`.
-            use_safetensors (`bool`, *optional*, defaults to `None`):
-                If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
-                `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
-                `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
-        """
-        idx = 0
-        controlnets = []
-
-        # load controlnet and append to list until no controlnet directory exists anymore
-        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
-        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
-        model_path_to_load = pretrained_model_path
-        while os.path.isdir(model_path_to_load):
-            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
-            controlnets.append(controlnet)
-
-            idx += 1
-            model_path_to_load = pretrained_model_path + f"_{idx}"
-
-        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
-
-        if len(controlnets) == 0:
-            raise ValueError(
-                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path}."
-            )
-
-        return cls(controlnets)
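
A hypothetical usage sketch of the directory-suffix convention implemented above; the two hub model IDs and the local save path are illustrative assumptions, not part of this diff.

```python
# Sketch of MultiControlNetModel's save/load naming convention.
# The hub IDs and local directory below are assumptions for illustration.
from diffusers import ControlNetModel
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
multi = MultiControlNetModel([canny, pose])

# The first net is written to the directory itself, the second to "<dir>_1".
multi.save_pretrained("./my_model_directory/controlnet")

# from_pretrained() walks controlnet, controlnet_1, ... until a dir is missing.
reloaded = MultiControlNetModel.from_pretrained("./my_model_directory/controlnet")
assert len(reloaded.nets) == 2
```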
 
spaces/Andy1621/uniformer_image_detection/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py DELETED
@@ -1,7 +0,0 @@
- _base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
- model = dict(
-     pretrained='torchvision://resnet101',
-     backbone=dict(
-         depth=101,
-         dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
-         stage_with_dcn=(False, True, True, True)))
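
A small sketch, assuming an mmcv install and the usual config-tree layout, of how the `_base_` inheritance in this config resolves: keys set here are merged recursively over the r50 base config.

```python
# Sketch (mmcv install and config path assumed) of `_base_` resolution.
from mmcv import Config

cfg = Config.fromfile(
    'configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py')
print(cfg.model.backbone.depth)           # 101, overridden in this file
print(cfg.model.backbone.stage_with_dcn)  # (False, True, True, True)
```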
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/global_context_head.py DELETED
@@ -1,102 +0,0 @@
- import torch.nn as nn
- from mmcv.cnn import ConvModule
- from mmcv.runner import auto_fp16, force_fp32
- 
- from mmdet.models.builder import HEADS
- from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
- 
- 
- @HEADS.register_module()
- class GlobalContextHead(nn.Module):
-     """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
- 
-     Args:
-         num_convs (int, optional): number of convolutional layers in GlbCtxHead.
-             Default: 4.
-         in_channels (int, optional): number of input channels. Default: 256.
-         conv_out_channels (int, optional): number of output channels before
-             classification layer. Default: 256.
-         num_classes (int, optional): number of classes. Default: 80.
-         loss_weight (float, optional): global context loss weight. Default: 1.
-         conv_cfg (dict, optional): config to init conv layer. Default: None.
-         norm_cfg (dict, optional): config to init norm layer. Default: None.
-         conv_to_res (bool, optional): if True, 2 convs will be grouped into
-             1 `SimplifiedBasicBlock` using a skip connection. Default: False.
-     """
- 
-     def __init__(self,
-                  num_convs=4,
-                  in_channels=256,
-                  conv_out_channels=256,
-                  num_classes=80,
-                  loss_weight=1.0,
-                  conv_cfg=None,
-                  norm_cfg=None,
-                  conv_to_res=False):
-         super(GlobalContextHead, self).__init__()
-         self.num_convs = num_convs
-         self.in_channels = in_channels
-         self.conv_out_channels = conv_out_channels
-         self.num_classes = num_classes
-         self.loss_weight = loss_weight
-         self.conv_cfg = conv_cfg
-         self.norm_cfg = norm_cfg
-         self.conv_to_res = conv_to_res
-         self.fp16_enabled = False
- 
-         if self.conv_to_res:
-             num_res_blocks = num_convs // 2
-             self.convs = ResLayer(
-                 SimplifiedBasicBlock,
-                 in_channels,
-                 self.conv_out_channels,
-                 num_res_blocks,
-                 conv_cfg=self.conv_cfg,
-                 norm_cfg=self.norm_cfg)
-             self.num_convs = num_res_blocks
-         else:
-             self.convs = nn.ModuleList()
-             for i in range(self.num_convs):
-                 in_channels = self.in_channels if i == 0 else conv_out_channels
-                 self.convs.append(
-                     ConvModule(
-                         in_channels,
-                         conv_out_channels,
-                         3,
-                         padding=1,
-                         conv_cfg=self.conv_cfg,
-                         norm_cfg=self.norm_cfg))
- 
-         self.pool = nn.AdaptiveAvgPool2d(1)
-         self.fc = nn.Linear(conv_out_channels, num_classes)
- 
-         self.criterion = nn.BCEWithLogitsLoss()
- 
-     def init_weights(self):
-         """Init weights for the head."""
-         nn.init.normal_(self.fc.weight, 0, 0.01)
-         nn.init.constant_(self.fc.bias, 0)
- 
-     @auto_fp16()
-     def forward(self, feats):
-         """Forward function."""
-         x = feats[-1]
-         for i in range(self.num_convs):
-             x = self.convs[i](x)
-         x = self.pool(x)
- 
-         # multi-class prediction
-         mc_pred = x.reshape(x.size(0), -1)
-         mc_pred = self.fc(mc_pred)
- 
-         return mc_pred, x
- 
-     @force_fp32(apply_to=('pred', ))
-     def loss(self, pred, labels):
-         """Loss function."""
-         labels = [lbl.unique() for lbl in labels]
-         targets = pred.new_zeros(pred.size())
-         for i, label in enumerate(labels):
-             targets[i, label] = 1.0
-         loss = self.loss_weight * self.criterion(pred, targets)
-         return loss
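
An illustrative sketch, with dummy feature shapes assumed, of how this head is exercised: it pools the last FPN level into a per-image multi-label class prediction and trains it with BCE against a multi-hot target.

```python
# Sketch with assumed shapes; not taken from the Space itself.
import torch
from mmdet.models.roi_heads.mask_heads.global_context_head import GlobalContextHead

head = GlobalContextHead(num_convs=4, in_channels=256, num_classes=80)
head.init_weights()

feats = [torch.randn(2, 256, s, s) for s in (64, 32, 16, 8)]  # dummy FPN levels
mc_pred, pooled = head(feats)       # mc_pred: (2, 80), pooled: (2, 256, 1, 1)

gt_labels = [torch.tensor([3, 7]), torch.tensor([1])]  # per-image GT classes
loss = head.loss(mc_pred, gt_labels)                   # BCE vs. multi-hot targets
```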
 
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './apcnet_r50-d8_512x1024_80k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/AnimaLab/bias-test-gpt-pairs/error_messages.py DELETED
@@ -1,9 +0,0 @@
- NO_SENTENCES_ERROR = "No sentences were found for these terms. Please enter an OpenAI key and use ChatGPT to generate new test sentences, or change the bias specification!"
- NO_GEN_SENTENCES_ERROR = "No sentences were generated for these terms. Are these terms meaningful? Try requesting generation again."
- 
- OPENAI_INIT_ERROR = "Incorrect OpenAI key, got error from API: <ERR>."
- OPENAI_KEY_WRONG = "The OpenAI key appears incorrect."
- OPENAI_KEY_EMPTY = "You need to provide a valid OpenAI key to enable generation. Rest assured, we do not store the key you provide."
- NO_TERMS_ENTERED_ERROR = "Please first enter some terms to specify the social bias to test."
- BIAS_SENTENCES_MISMATCH_ERROR = "Terms from the bias specification don't correspond to the test sentences. Please make sure to find/regenerate test sentences after changing the bias specification!"
- MODEL_NOT_LOADED_ERROR = "Tested model [M] did not load correctly. Please try reloading the space."
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pipelines/loading.py DELETED
@@ -1,153 +0,0 @@
- import os.path as osp
- 
- import annotator.uniformer.mmcv as mmcv
- import numpy as np
- 
- from ..builder import PIPELINES
- 
- 
- @PIPELINES.register_module()
- class LoadImageFromFile(object):
-     """Load an image from file.
- 
-     Required keys are "img_prefix" and "img_info" (a dict that must contain the
-     key "filename"). Added or updated keys are "filename", "img", "img_shape",
-     "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
-     "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
- 
-     Args:
-         to_float32 (bool): Whether to convert the loaded image to a float32
-             numpy array. If set to False, the loaded image is a uint8 array.
-             Defaults to False.
-         color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
-             Defaults to 'color'.
-         file_client_args (dict): Arguments to instantiate a FileClient.
-             See :class:`mmcv.fileio.FileClient` for details.
-             Defaults to ``dict(backend='disk')``.
-         imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
-             'cv2'
-     """
- 
-     def __init__(self,
-                  to_float32=False,
-                  color_type='color',
-                  file_client_args=dict(backend='disk'),
-                  imdecode_backend='cv2'):
-         self.to_float32 = to_float32
-         self.color_type = color_type
-         self.file_client_args = file_client_args.copy()
-         self.file_client = None
-         self.imdecode_backend = imdecode_backend
- 
-     def __call__(self, results):
-         """Call functions to load image and get image meta information.
- 
-         Args:
-             results (dict): Result dict from :obj:`mmseg.CustomDataset`.
- 
-         Returns:
-             dict: The dict contains loaded image and meta information.
-         """
- 
-         if self.file_client is None:
-             self.file_client = mmcv.FileClient(**self.file_client_args)
- 
-         if results.get('img_prefix') is not None:
-             filename = osp.join(results['img_prefix'],
-                                 results['img_info']['filename'])
-         else:
-             filename = results['img_info']['filename']
-         img_bytes = self.file_client.get(filename)
-         img = mmcv.imfrombytes(
-             img_bytes, flag=self.color_type, backend=self.imdecode_backend)
-         if self.to_float32:
-             img = img.astype(np.float32)
- 
-         results['filename'] = filename
-         results['ori_filename'] = results['img_info']['filename']
-         results['img'] = img
-         results['img_shape'] = img.shape
-         results['ori_shape'] = img.shape
-         # Set initial values for default meta_keys
-         results['pad_shape'] = img.shape
-         results['scale_factor'] = 1.0
-         num_channels = 1 if len(img.shape) < 3 else img.shape[2]
-         results['img_norm_cfg'] = dict(
-             mean=np.zeros(num_channels, dtype=np.float32),
-             std=np.ones(num_channels, dtype=np.float32),
-             to_rgb=False)
-         return results
- 
-     def __repr__(self):
-         repr_str = self.__class__.__name__
-         repr_str += f'(to_float32={self.to_float32},'
-         repr_str += f"color_type='{self.color_type}',"
-         repr_str += f"imdecode_backend='{self.imdecode_backend}')"
-         return repr_str
- 
- 
- @PIPELINES.register_module()
- class LoadAnnotations(object):
-     """Load annotations for semantic segmentation.
- 
-     Args:
-         reduce_zero_label (bool): Whether to reduce all label values by 1.
-             Usually used for datasets where 0 is the background label.
-             Default: False.
-         file_client_args (dict): Arguments to instantiate a FileClient.
-             See :class:`mmcv.fileio.FileClient` for details.
-             Defaults to ``dict(backend='disk')``.
-         imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
-             'pillow'
-     """
- 
-     def __init__(self,
-                  reduce_zero_label=False,
-                  file_client_args=dict(backend='disk'),
-                  imdecode_backend='pillow'):
-         self.reduce_zero_label = reduce_zero_label
-         self.file_client_args = file_client_args.copy()
-         self.file_client = None
-         self.imdecode_backend = imdecode_backend
- 
-     def __call__(self, results):
-         """Call function to load multiple types annotations.
- 
-         Args:
-             results (dict): Result dict from :obj:`mmseg.CustomDataset`.
- 
-         Returns:
-             dict: The dict contains loaded semantic segmentation annotations.
-         """
- 
-         if self.file_client is None:
-             self.file_client = mmcv.FileClient(**self.file_client_args)
- 
-         if results.get('seg_prefix', None) is not None:
-             filename = osp.join(results['seg_prefix'],
-                                 results['ann_info']['seg_map'])
-         else:
-             filename = results['ann_info']['seg_map']
-         img_bytes = self.file_client.get(filename)
-         gt_semantic_seg = mmcv.imfrombytes(
-             img_bytes, flag='unchanged',
-             backend=self.imdecode_backend).squeeze().astype(np.uint8)
-         # modify if custom classes
-         if results.get('label_map', None) is not None:
-             for old_id, new_id in results['label_map'].items():
-                 gt_semantic_seg[gt_semantic_seg == old_id] = new_id
-         # reduce zero_label
-         if self.reduce_zero_label:
-             # avoid using underflow conversion
-             gt_semantic_seg[gt_semantic_seg == 0] = 255
-             gt_semantic_seg = gt_semantic_seg - 1
-             gt_semantic_seg[gt_semantic_seg == 254] = 255
-         results['gt_semantic_seg'] = gt_semantic_seg
-         results['seg_fields'].append('gt_semantic_seg')
-         return results
- 
-     def __repr__(self):
-         repr_str = self.__class__.__name__
-         repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
-         repr_str += f"imdecode_backend='{self.imdecode_backend}')"
-         return repr_str
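
A small standalone sketch of the `reduce_zero_label` arithmetic in `LoadAnnotations` above: label 0 (background) becomes the ignore index 255, every other label shifts down by one, and the uint8 underflow is patched back to 255.

```python
import numpy as np

gt = np.array([[0, 1, 2], [3, 0, 255]], dtype=np.uint8)
gt[gt == 0] = 255          # background -> ignore index
gt = gt - 1                # shift remaining labels down (255 wraps to 254)
gt[gt == 254] = 255        # restore the ignore index after the underflow
print(gt)                  # [[255   0   1]
                           #  [  2 255 255]]
```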
 
spaces/ArtGAN/Diffusion-API/diffusion_webui/utils/scheduler_list.py DELETED
@@ -1,39 +0,0 @@
- from diffusers import (
-     DDIMScheduler,
-     DDPMScheduler,
-     DEISMultistepScheduler,
-     DPMSolverMultistepScheduler,
-     DPMSolverSinglestepScheduler,
-     EulerAncestralDiscreteScheduler,
-     EulerDiscreteScheduler,
-     HeunDiscreteScheduler,
-     KDPM2AncestralDiscreteScheduler,
-     KDPM2DiscreteScheduler,
-     PNDMScheduler,
-     UniPCMultistepScheduler,
- )
- 
- SCHEDULER_MAPPING = {
-     "DDIM": DDIMScheduler,
-     "DDPMScheduler": DDPMScheduler,
-     "DEISMultistep": DEISMultistepScheduler,
-     "DPMSolverMultistep": DPMSolverMultistepScheduler,
-     "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
-     "EulerAncestralDiscrete": EulerAncestralDiscreteScheduler,
-     "EulerDiscrete": EulerDiscreteScheduler,
-     "HeunDiscrete": HeunDiscreteScheduler,
-     "KDPM2AncestralDiscrete": KDPM2AncestralDiscreteScheduler,
-     "KDPM2Discrete": KDPM2DiscreteScheduler,
-     "PNDMScheduler": PNDMScheduler,
-     "UniPCMultistep": UniPCMultistepScheduler,
- }
- 
- 
- def get_scheduler(pipe, scheduler):
-     if scheduler in SCHEDULER_MAPPING:
-         SchedulerClass = SCHEDULER_MAPPING[scheduler]
-         pipe.scheduler = SchedulerClass.from_config(pipe.scheduler.config)
-     else:
-         raise ValueError(f"Invalid scheduler name {scheduler}")
- 
-     return pipe
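
A hypothetical usage sketch of `get_scheduler()` above; the hub model ID is an assumption for illustration.

```python
# Sketch: swap a pipeline's sampler via the mapping above (model ID assumed).
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = get_scheduler(pipe, "EulerAncestralDiscrete")  # replaces pipe.scheduler
print(type(pipe.scheduler).__name__)  # EulerAncestralDiscreteScheduler
```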
 
spaces/Artrajz/vits-simple-api/vits/modules.py DELETED
@@ -1,387 +0,0 @@
- import math
- import torch
- from torch import nn
- from torch.nn import functional as F
- 
- from torch.nn import Conv1d
- from torch.nn.utils import weight_norm, remove_weight_norm
- 
- from vits import commons
- from vits.commons import init_weights, get_padding
- from vits.transforms import piecewise_rational_quadratic_transform
- 
- 
- LRELU_SLOPE = 0.1
- 
- 
- class LayerNorm(nn.Module):
-     def __init__(self, channels, eps=1e-5):
-         super().__init__()
-         self.channels = channels
-         self.eps = eps
- 
-         self.gamma = nn.Parameter(torch.ones(channels))
-         self.beta = nn.Parameter(torch.zeros(channels))
- 
-     def forward(self, x):
-         x = x.transpose(1, -1)
-         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-         return x.transpose(1, -1)
- 
- 
- class ConvReluNorm(nn.Module):
-     def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-         super().__init__()
-         self.in_channels = in_channels
-         self.hidden_channels = hidden_channels
-         self.out_channels = out_channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.p_dropout = p_dropout
-         assert n_layers > 1, "Number of layers should be larger than 1."
- 
-         self.conv_layers = nn.ModuleList()
-         self.norm_layers = nn.ModuleList()
-         self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
-         self.norm_layers.append(LayerNorm(hidden_channels))
-         self.relu_drop = nn.Sequential(
-             nn.ReLU(),
-             nn.Dropout(p_dropout))
-         for _ in range(n_layers - 1):
-             self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
-             self.norm_layers.append(LayerNorm(hidden_channels))
-         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-         self.proj.weight.data.zero_()
-         self.proj.bias.data.zero_()
- 
-     def forward(self, x, x_mask):
-         x_org = x
-         for i in range(self.n_layers):
-             x = self.conv_layers[i](x * x_mask)
-             x = self.norm_layers[i](x)
-             x = self.relu_drop(x)
-         x = x_org + self.proj(x)
-         return x * x_mask
- 
- 
- class DDSConv(nn.Module):
-     """
-     Dilated and Depth-Separable Convolution
-     """
-     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-         super().__init__()
-         self.channels = channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.p_dropout = p_dropout
- 
-         self.drop = nn.Dropout(p_dropout)
-         self.convs_sep = nn.ModuleList()
-         self.convs_1x1 = nn.ModuleList()
-         self.norms_1 = nn.ModuleList()
-         self.norms_2 = nn.ModuleList()
-         for i in range(n_layers):
-             dilation = kernel_size ** i
-             padding = (kernel_size * dilation - dilation) // 2
-             self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-                                             groups=channels, dilation=dilation, padding=padding))
-             self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-             self.norms_1.append(LayerNorm(channels))
-             self.norms_2.append(LayerNorm(channels))
- 
-     def forward(self, x, x_mask, g=None):
-         if g is not None:
-             x = x + g
-         for i in range(self.n_layers):
-             y = self.convs_sep[i](x * x_mask)
-             y = self.norms_1[i](y)
-             y = F.gelu(y)
-             y = self.convs_1x1[i](y)
-             y = self.norms_2[i](y)
-             y = F.gelu(y)
-             y = self.drop(y)
-             x = x + y
-         return x * x_mask
- 
- 
- class WN(torch.nn.Module):
-     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-         super(WN, self).__init__()
-         assert kernel_size % 2 == 1
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.gin_channels = gin_channels
-         self.p_dropout = p_dropout
- 
-         self.in_layers = torch.nn.ModuleList()
-         self.res_skip_layers = torch.nn.ModuleList()
-         self.drop = nn.Dropout(p_dropout)
- 
-         if gin_channels != 0:
-             cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
-             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
- 
-         for i in range(n_layers):
-             dilation = dilation_rate ** i
-             padding = int((kernel_size * dilation - dilation) / 2)
-             in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
-                                        dilation=dilation, padding=padding)
-             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
-             self.in_layers.append(in_layer)
- 
-             # last one is not necessary
-             if i < n_layers - 1:
-                 res_skip_channels = 2 * hidden_channels
-             else:
-                 res_skip_channels = hidden_channels
- 
-             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
-             self.res_skip_layers.append(res_skip_layer)
- 
-     def forward(self, x, x_mask, g=None, **kwargs):
-         output = torch.zeros_like(x)
-         n_channels_tensor = torch.IntTensor([self.hidden_channels])
- 
-         if g is not None:
-             g = self.cond_layer(g)
- 
-         for i in range(self.n_layers):
-             x_in = self.in_layers[i](x)
-             if g is not None:
-                 cond_offset = i * 2 * self.hidden_channels
-                 g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
-             else:
-                 g_l = torch.zeros_like(x_in)
- 
-             acts = commons.fused_add_tanh_sigmoid_multiply(
-                 x_in,
-                 g_l,
-                 n_channels_tensor)
-             acts = self.drop(acts)
- 
-             res_skip_acts = self.res_skip_layers[i](acts)
-             if i < self.n_layers - 1:
-                 res_acts = res_skip_acts[:, :self.hidden_channels, :]
-                 x = (x + res_acts) * x_mask
-                 output = output + res_skip_acts[:, self.hidden_channels:, :]
-             else:
-                 output = output + res_skip_acts
-         return output * x_mask
- 
-     def remove_weight_norm(self):
-         if self.gin_channels != 0:
-             torch.nn.utils.remove_weight_norm(self.cond_layer)
-         for l in self.in_layers:
-             torch.nn.utils.remove_weight_norm(l)
-         for l in self.res_skip_layers:
-             torch.nn.utils.remove_weight_norm(l)
- 
- 
- class ResBlock1(torch.nn.Module):
-     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
-         super(ResBlock1, self).__init__()
-         self.convs1 = nn.ModuleList([
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                                padding=get_padding(kernel_size, dilation[0]))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                                padding=get_padding(kernel_size, dilation[1]))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
-                                padding=get_padding(kernel_size, dilation[2])))
-         ])
-         self.convs1.apply(init_weights)
- 
-         self.convs2 = nn.ModuleList([
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                                padding=get_padding(kernel_size, 1))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                                padding=get_padding(kernel_size, 1))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
-                                padding=get_padding(kernel_size, 1)))
-         ])
-         self.convs2.apply(init_weights)
- 
-     def forward(self, x, x_mask=None):
-         for c1, c2 in zip(self.convs1, self.convs2):
-             xt = F.leaky_relu(x, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c1(xt)
-             xt = F.leaky_relu(xt, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c2(xt)
-             x = xt + x
-         if x_mask is not None:
-             x = x * x_mask
-         return x
- 
-     def remove_weight_norm(self):
-         for l in self.convs1:
-             remove_weight_norm(l)
-         for l in self.convs2:
-             remove_weight_norm(l)
- 
- 
- class ResBlock2(torch.nn.Module):
-     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
-         super(ResBlock2, self).__init__()
-         self.convs = nn.ModuleList([
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
-                                padding=get_padding(kernel_size, dilation[0]))),
-             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
-                                padding=get_padding(kernel_size, dilation[1])))
-         ])
-         self.convs.apply(init_weights)
- 
-     def forward(self, x, x_mask=None):
-         for c in self.convs:
-             xt = F.leaky_relu(x, LRELU_SLOPE)
-             if x_mask is not None:
-                 xt = xt * x_mask
-             xt = c(xt)
-             x = xt + x
-         if x_mask is not None:
-             x = x * x_mask
-         return x
- 
-     def remove_weight_norm(self):
-         for l in self.convs:
-             remove_weight_norm(l)
- 
- 
- class Log(nn.Module):
-     def forward(self, x, x_mask, reverse=False, **kwargs):
-         if not reverse:
-             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
-             logdet = torch.sum(-y, [1, 2])
-             return y, logdet
-         else:
-             x = torch.exp(x) * x_mask
-             return x
- 
- 
- class Flip(nn.Module):
-     def forward(self, x, *args, reverse=False, **kwargs):
-         x = torch.flip(x, [1])
-         if not reverse:
-             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
-             return x, logdet
-         else:
-             return x
- 
- 
- class ElementwiseAffine(nn.Module):
-     def __init__(self, channels):
-         super().__init__()
-         self.channels = channels
-         self.m = nn.Parameter(torch.zeros(channels, 1))
-         self.logs = nn.Parameter(torch.zeros(channels, 1))
- 
-     def forward(self, x, x_mask, reverse=False, **kwargs):
-         if not reverse:
-             y = self.m + torch.exp(self.logs) * x
-             y = y * x_mask
-             logdet = torch.sum(self.logs * x_mask, [1, 2])
-             return y, logdet
-         else:
-             x = (x - self.m) * torch.exp(-self.logs) * x_mask
-             return x
- 
- 
- class ResidualCouplingLayer(nn.Module):
-     def __init__(self,
-                  channels,
-                  hidden_channels,
-                  kernel_size,
-                  dilation_rate,
-                  n_layers,
-                  p_dropout=0,
-                  gin_channels=0,
-                  mean_only=False):
-         assert channels % 2 == 0, "channels should be divisible by 2"
-         super().__init__()
-         self.channels = channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.half_channels = channels // 2
-         self.mean_only = mean_only
- 
-         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-         self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
-         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-         self.post.weight.data.zero_()
-         self.post.bias.data.zero_()
- 
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0) * x_mask
-         h = self.enc(h, x_mask, g=g)
-         stats = self.post(h) * x_mask
-         if not self.mean_only:
-             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
-         else:
-             m = stats
-             logs = torch.zeros_like(m)
- 
-         if not reverse:
-             x1 = m + x1 * torch.exp(logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             logdet = torch.sum(logs, [1, 2])
-             return x, logdet
-         else:
-             x1 = (x1 - m) * torch.exp(-logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             return x
- 
- 
- class ConvFlow(nn.Module):
-     def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
-         super().__init__()
-         self.in_channels = in_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.num_bins = num_bins
-         self.tail_bound = tail_bound
-         self.half_channels = in_channels // 2
- 
-         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
-         self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-         self.proj.weight.data.zero_()
-         self.proj.bias.data.zero_()
- 
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0)
-         h = self.convs(h, x_mask, g=g)
-         h = self.proj(h) * x_mask
- 
-         b, c, t = x0.shape
-         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]
- 
-         unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-         unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
-         unnormalized_derivatives = h[..., 2 * self.num_bins:]
- 
-         x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-                                                                unnormalized_widths,
-                                                                unnormalized_heights,
-                                                                unnormalized_derivatives,
-                                                                inverse=reverse,
-                                                                tails='linear',
-                                                                tail_bound=self.tail_bound)
- 
-         x = torch.cat([x0, x1], 1) * x_mask
-         logdet = torch.sum(logabsdet * x_mask, [1, 2])
-         if not reverse:
-             return x, logdet
-         else:
-             return x
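
A quick sketch, with dummy shapes and assuming the Space's `vits` package is importable, showing that the coupling layers above are invertible: a forward pass followed by `reverse=True` recovers the input.

```python
# Invertibility check for ResidualCouplingLayer (shapes are assumptions).
import torch
from vits.modules import ResidualCouplingLayer

layer = ResidualCouplingLayer(channels=4, hidden_channels=8, kernel_size=5,
                              dilation_rate=1, n_layers=2, mean_only=True)
x = torch.randn(1, 4, 10)
x_mask = torch.ones(1, 1, 10)

y, logdet = layer(x, x_mask)                # forward returns (y, logdet)
x_rec = layer(y, x_mask, reverse=True)      # inverse returns x only
print(torch.allclose(x, x_rec, atol=1e-5))  # True, up to numerical error
```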
 
spaces/Aymene/FakeNewsDetector/app.py DELETED
@@ -1,59 +0,0 @@
- import gradio as gr
- import torch
- 
- import torch.nn.functional as F
- from transformers import BertTokenizer, BertForSequenceClassification
- 
- 
- tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
- model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
- 
- model.load_state_dict(torch.load('model_after_train.pt', map_location=torch.device('cpu')), strict=False)
- model.eval()
- device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
- 
- 
- def preprocess_text(text):
-     parts = []
- 
-     text_len = len(text.split(' '))
-     delta = 300
-     max_parts = 5
-     nb_cuts = int(text_len / delta)
-     nb_cuts = min(nb_cuts, max_parts)
- 
-     for i in range(nb_cuts + 1):
-         text_part = ' '.join(text.split(' ')[i * delta: (i + 1) * delta])
-         parts.append(tokenizer.encode(text_part, return_tensors="pt", max_length=500).to(device))
- 
-     return parts
- 
- def test(text):
-     text_parts = preprocess_text(text)
-     overall_output = torch.zeros((1, 2)).to(device)
-     try:
-         for part in text_parts:
-             if len(part) > 0:
-                 overall_output += model(part.reshape(1, -1))[0]
-     except RuntimeError:
-         print("GPU out of memory, skipping this entry.")
- 
-     overall_output = F.softmax(overall_output[0], dim=-1)
- 
-     value, result = overall_output.max(0)
- 
-     term = "fake"
-     if result.item() == 0:
-         term = "real"
- 
-     return term + " at " + str(int(value.item() * 100)) + " %"
- 
- 
- description = "Fake news detector trained using pre-trained model bert-base-uncased, fine-tuned on https://www.kaggle.com/clmentbisaillon/fake-and-real-news-dataset dataset"
- title = "Fake News Detector"
- 
- examples = ["BRUSSELS (Reuters) - Germany is urging the European Union to add up to four more Russian nationals and companies to the bloc’s sanctions blacklist over Siemens (SIEGn.DE) gas turbines delivered to Moscow-annexed Crimea, two sources in Brussels said. The EU has barred its firms from doing business with Crimea since the 2014 annexation, imposed sanctions on Russian individuals and entities, and curbed cooperation with Russia in energy, arms and finance over its role in the crisis in Ukraine. After it annexed Crimea from Kiev, Moscow threw its support behind a separatist rebellion in eastern Ukraine, which has killed more than 10,000 people and is still simmering. The EU’s blacklist comprises 150 people and 37 entities subject to an asset freeze and a travel ban. The restrictions are in place until Sept. 15. “The regular review would normally be the moment to look at who is on the list. In the past, when there were good grounds, we’ve added entries to the list,” an EU official said. Siemens, trying to distance itself from the scandal, last week said it was halting deliveries of power equipment to Russian state-controlled customers and reviewing supply deals. Russia’s Energy Minister Alexander Novak played down the potential consequences of a halt. “What Siemens supplies can be delivered by other companies,” Novak told reporters in St Petersburg. “As for electricity generation, we ... have now learnt to produce the necessary equipment,” he said, without referring to the prospect of additional sanctions. Siemens says it has evidence that all four turbines it delivered for a project in southern Russia had been illegally moved to Crimea. German government spokeswoman Ulrike Demmer said on Monday the turbines were delivered to Crimea against the terms of the contract and despite high-ranking assurances from Russian officials that this would not happen. Berlin was consulting on what consequences this “unacceptable” operation might have, she said, adding, however, that the onus was on companies to ensure they did not violate the sanctions regime. The proposed additions to the blacklist could include Russian Energy Ministry officials and the Russian company that moved the turbines to the Black Sea peninsula, one senior diplomatic source in Brussels said. Another source said representatives of all 28 EU member states could discuss the matter for the first time in Brussels as soon as Wednesday. The EU needs unanimity to impose or extend any sanctions. Hungary, Bulgaria, Italy and Cyprus are among EU states which are usually skeptical of Russia sanctions. They take the line that punitive measures have failed to force a change of course by Moscow while hurting European business. Reuters first reported a year ago on the Siemens case, which has exposed the difficulties of imposing EU sanctions."]
- 
- iface = gr.Interface(fn=test, inputs="text", outputs="text", title=title, description=description, examples=examples)
- iface.launch()
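
A minimal sketch of the chunking arithmetic in `preprocess_text()` above: a document is cut into at most `max_parts + 1` windows of `delta` words each before tokenization.

```python
# Standalone illustration of the word-window arithmetic (dummy input).
words = ["w"] * 950          # a dummy 950-word article
delta, max_parts = 300, 5
nb_cuts = min(int(len(words) / delta), max_parts)   # 950 // 300 -> 3
chunks = [" ".join(words[i * delta:(i + 1) * delta]) for i in range(nb_cuts + 1)]
print([len(c.split()) for c in chunks])             # [300, 300, 300, 50]
```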
 
spaces/Banbri/zcvzcv/src/lib/fonts.ts DELETED
@@ -1,119 +0,0 @@
- import {
-   Indie_Flower,
-   The_Girl_Next_Door,
- } from "next/font/google"
- import localFont from "next/font/local"
- 
- export const indieflower = Indie_Flower({
-   subsets: ["latin"],
-   weight: "400",
-   variable: "--font-indieflower",
- })
- 
- export const thegirlnextdoor = The_Girl_Next_Door({
-   subsets: ["latin"],
-   weight: "400",
-   variable: "--font-the-girl-next-door",
- })
- 
- export const komika = localFont({
-   src: "../fonts/Komika-Hand/Komika-Hand.woff2",
-   variable: "--font-komika"
- })
- 
- export const actionman = localFont({
-   src: "../fonts/Action-Man/Action-Man.woff2",
-   variable: "--font-action-man"
- })
- 
- export const karantula = localFont({
-   src: "../fonts/Karantula/Karantula.woff2",
-   variable: "--font-karantula"
- })
- 
- export const manoskope = localFont({
-   src: "../fonts/Manoskope/MANOSKOPE-Bold.woff2",
-   variable: "--font-manoskope"
- })
- 
- export const paeteround = localFont({
-   src: "../fonts/Paete-Round/Paete-Round.woff2",
-   variable: "--font-paete-round"
- })
- 
- export const qarmic = localFont({
-   src: "../fonts/Qarmic-Sans/Qarmic-Sans-Abridged.woff2",
-   variable: "--font-qarmic-sans"
- })
- 
- export const archrival = localFont({
-   src: "../fonts/SF-Arch-Rival/SF-Arch-Rival.woff2",
-   variable: "--font-sf-arch-rival"
- })
- 
- export const cartoonist = localFont({
-   src: "../fonts/SF-Cartoonist-Hand/SF-Cartoonist-Hand.woff2",
-   variable: "--font-sf-cartoonist-hand"
- })
- 
- export const toontime = localFont({
-   src: "../fonts/SF-Toontime/SF-Toontime.woff2",
-   variable: "--font-sf-toontime"
- })
- 
- export const vtc = localFont({
-   src: "../fonts/VTC-Letterer-Pro/VTC-Letterer-Pro.woff2",
-   variable: "--font-vtc-letterer-pro"
- })
- 
- export const digitalstrip = localFont({
-   src: "../fonts/DigitalStripBB/DigitalStripBB_Reg.woff2",
-   variable: "--font-digital-strip-bb"
- })
- 
- // https://nextjs.org/docs/pages/building-your-application/optimizing/fonts
- // If loading a variable font, you don't need to specify the font weight
- export const fonts = {
-   indieflower,
-   thegirlnextdoor,
-   // komika,
-   actionman,
-   karantula,
-   manoskope,
-   // paeteround,
-   // qarmic,
-   // archrival,
-   // cartoonist,
-   // toontime,
-   // vtc,
-   digitalstrip
- }
- 
- // https://nextjs.org/docs/pages/building-your-application/optimizing/fonts
- // If loading a variable font, you don't need to specify the font weight
- export const fontList = Object.keys(fonts)
- 
- export type FontName = keyof typeof fonts
- 
- export const defaultFont = "cartoonist" as FontName
- 
- export const classNames = Object.values(fonts).map(font => font.className)
- 
- export const className = classNames.join(" ")
- 
- export type FontClass =
-   | "font-indieflower"
-   | "font-thegirlnextdoor"
-   | "font-komika"
-   | "font-actionman"
-   | "font-karantula"
-   | "font-manoskope"
-   | "font-paeteround"
-   | "font-qarmic"
-   | "font-archrival"
-   | "font-cartoonist"
-   | "font-toontime"
-   | "font-vtc"
-   | "font-digitalstrip"
 
spaces/Banbri/zcvzcv/src/lib/replaceTextInSpeechBubbles.ts DELETED
@@ -1,98 +0,0 @@
- "use client"
- 
- import { createWorker } from "tesseract.js"
- import { loadImageToCanvas } from "./loadImageToCanvas";
- 
- export async function replaceTextInSpeechBubbles(image: string, customText: string) {
-   console.log('creating OCR worker to find bubbles inside', image);
- 
-   const worker = await createWorker({
-     logger: (info) => {
-       console.log(info)
-     },
-   });
- 
-   const canvas = await loadImageToCanvas(image)
- 
-   const ctx = canvas.getContext('2d')!;
- 
-   try {
-     await worker.load();
-     await worker.loadLanguage('eng');
-     await worker.initialize('eng');
- 
-     const { data } = await worker.recognize(canvas);
-     const lines = data.lines || [];
- 
-     // Draw the lines on the image
-     ctx.fillStyle = "white";
- 
-     lines.forEach((line) => {
-       ctx.fillRect(line.bbox.x0, line.bbox.y0, line.bbox.x1 - line.bbox.x0, line.bbox.y1 - line.bbox.y0);
- 
-       const bubbleWidth = line.bbox.x1 - line.bbox.x0;
-       const bubbleHeight = line.bbox.y1 - line.bbox.y0;
-       let fontSize = 18;
-       ctx.font = `${fontSize}px Arial`;
- 
-       /*
-       while (
-         ctx.measureText(customText).width > bubbleWidth || fontSize * 1.2 // line height
-         > bubbleHeight) {
-         fontSize -= 1;
-         ctx.font = `${fontSize}px Arial`;
-       }
- 
-       const lines = wrapText(ctx, customText, line.bbox.x0, line.bbox.y0, bubbleWidth, fontSize);
- 
-       ctx.fillStyle = "black";
-       lines.forEach((text, i) => {
-         ctx.fillText(text, line.bbox.x0, line.bbox.y0 + (i * fontSize * 1.2));
-       });
-       */
-     })
- 
-     await worker.terminate();
- 
-     // Convert the Canvas to image data
-     const imgAsDataURL = canvas.toDataURL('image/png');
- 
-     if (typeof window !== "undefined") {
-       const foo = (window as any)
-       if (!foo.debugJujul) {
-         foo.debugJujul = []
-       }
-       foo.debugJujul.push({
-         lines
-       })
-     }
-     console.log("lines:", lines)
- 
-     return imgAsDataURL;
- 
-   } catch (err) {
-     console.error(err);
-   }
-   return "";
- }
- 
- function wrapText(context: CanvasRenderingContext2D, text: string, x: number, y: number, maxWidth: number, lineHeight: number) {
-   const words = text.split(' ');
-   let line = '';
-   const lines = [];
- 
-   for (let n = 0; n < words.length; n++) {
-     let testLine = line + words[n] + ' ';
-     let metrics = context.measureText(testLine);
-     let testWidth = metrics.width;
-     if (testWidth > maxWidth && n > 0) {
-       lines.push(line);
-       line = words[n] + ' ';
-     }
-     else {
-       line = testLine;
-     }
-   }
-   lines.push(line);
-   return lines;
- }
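
The `wrapText()` helper above is a greedy line breaker. Here is the same logic re-sketched in Python, with a fixed per-character width standing in for the canvas `measureText()` call (an assumption made purely so the sketch runs standalone).

```python
# Greedy word-wrap sketch mirroring wrapText(); char_width replaces measureText.
def wrap_text(text, max_width, char_width=7):
    lines, line = [], ""
    for i, word in enumerate(text.split(" ")):
        test = line + word + " "
        if len(test) * char_width > max_width and i > 0:
            lines.append(line)        # current line is full; start a new one
            line = word + " "
        else:
            line = test
    lines.append(line)
    return lines

print(wrap_text("the quick brown fox jumps over the lazy dog", max_width=120))
```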
 
spaces/Benson/text-generation/Examples/Angry Birds Star Wars 2 Fotos.md DELETED
@@ -1,51 +0,0 @@
- 
- <h1>Angry Birds Star Wars 2 Apkmirror: A Guide for Fans of the Game</h1>
- <p>If you are a fan of Angry Birds and Star Wars, you may have heard of Angry Birds Star Wars 2, a puzzle video game that combines the two popular franchises. The game is based on the Star Wars prequel trilogy and lets you play as both the birds and the pigs, each with their own unique abilities and powers. You can also use telepods, physical toys that can be scanned into the game to unlock new characters. </p>
- <p>But where can you download this game for your Android device? One of the best options is apkmirror, a website that offers free and safe APK files for various apps and games. Apkmirror is a trusted source that verifies the authenticity and safety of APK files before uploading them. You can also find earlier versions of apps and games, in case you want to downgrade or try a different version. </p>
- <h2>angry birds star wars 2 photos</h2><br /><p><b><b>Download Zip</b> &rArr; <a href="https://bltlly.com/2v6LjM">https://bltlly.com/2v6LjM</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about Angry Birds Star Wars 2 apkmirror, including its features, tips and tricks, reviews and more. Read on to learn why this game is worth downloading and playing. </p>
- <h2>Features of Angry Birds Star Wars 2</h2>
- <p>Angry Birds Star Wars 2 has many features that make it fun and challenging for players of all ages. Here are some of them:</p>
- <h3>Characters</h3>
- <p>The game features more than 30 playable characters from the Star Wars prequel trilogy, such as Anakin Skywalker, Obi-Wan Kenobi, Darth Maul, Yoda, Count Dooku, General Grievous and more. Each character has a distinct appearance and power that match their role in the films. For example, Anakin can use his podracer to fly through obstacles, Obi-Wan can use his lightsaber to cut blocks, Darth Maul can use his double-bladed lightsaber to spin and destroy everything in his path, Yoda can use his Force powers to bounce off walls and objects, and so on. </p>
- 
- <h3>Levels</h3>
- <p>The game has hundreds of levels that test your skill and strategy. You have to use your slingshot to launch your characters at the structures and enemies standing in your way. You have to aim carefully and use your powers wisely to cause as much damage and destruction as possible. You also have to collect the stars and items hidden in each level. </p>
- <p>Some levels are set in outer space, where gravity has no effect and your characters float in a straight line unless they enter a gravity well. These gravity wells pull your characters toward their center and change their direction. You have to use these gravity wells to your advantage and avoid getting stuck in them. </p>
- <p>Some levels also have special elements that add more variety and challenge to the game. For example, some levels have magnets that attract metal objects and can be used to create chain reactions. Some levels have catapults that launch energy balls that destroy everything in their path. Some levels have switches that activate or deactivate traps or mechanisms. </p>
- <h3>Powers</h3>
- <p>Each character has a unique power that can be activated by tapping the screen while they are in flight. These powers can help you overcome obstacles or deal more damage to your enemies. For example, Anakin's podracer can be steered by swiping left or right, Obi-Wan's lightsaber can be swung by swiping up or down, Darth Maul's double-bladed lightsaber can be split in two by tapping again, Yoda's Force powers can be redirected by swiping in any direction, and so on. </p>
- <p>Some powers are more effective than others, depending on the situation and the target. You have to experiment and figure out which power works best for each level. You can also upgrade your powers by collecting the feathers or crystals scattered throughout the game. </p>
- <p></p>
- <h3>Telepods</h3>
- <p>Telepods are a great way to expand your character collection and try different combinations of powers. You can also use telepods to swap characters during a level, in case you want to change your strategy or try a different approach. However, telepods are not required to play or enjoy the game, since you can unlock most characters by playing the game normally or by using the in-game currency. </p>
- <h2>Tips and Tricks for Angry Birds Star Wars 2</h2>
- <p>If you want to master Angry Birds Star Wars 2 and get the most out of the game, here are some tips and tricks that can help you:</p>
- <h3>How to use telepods</h3>
- <p>If you have telepods, here are some tips on how to use them effectively:</p>
- - Make sure your device's camera is clean and clear before scanning a telepod. - Scan the telepod in a well-lit area and avoid any glare or reflections. - Hold the telepod steady and close to the camera, but not too close or too far away. - Wait for the scanning sound and animation to confirm that the telepod has been scanned successfully. - If the scan fails, try again or use a different telepod. - You can scan up to 10 telepods per day, but you can use them as many times as you want in the game. <h3>How to get three stars</h3>
- <p>If you want to get three stars on every level, here are some tips on how to do it:</p>
- - Aim for the weak points of structures and enemies, such as joints, supports, explosives, glass, wood, etc. - Use your powers wisely and at the right moment. Don't waste them on unnecessary targets or obstacles. - Try to cause as much destruction and damage as possible with each shot. The more you destroy, the higher your score. - Collect all the stars and items hidden in each level. They will increase your score and help you unlock bonus levels. - Replay levels you have already completed and try to improve your score. You may discover new strategies or tricks you missed before. <h3>How to unlock bonus levels</h3>
- - Collect all the stars in each chapter. There are 30 stars per chapter, and you need 10 stars to unlock the first bonus level, 20 stars to unlock the second bonus level, and 30 stars to unlock the third bonus level. - Collect all the items in each chapter. There are different items for each side of the Force, such as maps for the bird side and blueprints for the pork side. You need 5 items to unlock each bonus level. - Use telepods to scan characters that belong to a specific group. For example, scanning Star Wars Rebels characters will unlock a bonus level based on that series. <h2>Reviews of Angry Birds Star Wars 2</h2>
- <p>Angry Birds Star Wars 2 is one of the most popular and well-received games in the Angry Birds franchise. It has received positive reviews from critics and users alike, who praised its gameplay, graphics, sound, humor, variety and replay value. Here are some of the game's pros and cons:</p>
- <h3>Pros</h3>
- - It has a large and diverse roster of characters from the Star Wars universe, each with their own unique powers and personalities. - It has hundreds of levels that offer different challenges and scenarios based on the films and series. - It has impressive graphics and animations that capture the look and feel of Star Wars. - It has authentic sound effects and music that enhance the game's atmosphere and immersion. - It has plenty of humor and references that appeal to fans of both Angry Birds and Star Wars. - It has plenty of replay value and content, since you can play as both sides of the Force, collect stars and items, unlock bonus levels, use telepods and more. <h3>Cons</h3>
- <p>Angry Birds Star Wars 2 is a game that will appeal to fans of Angry Birds and Star Wars, as well as anyone who enjoys puzzle games with a twist. It has plenty of features, tips and tricks, reviews and more that make it worth downloading and playing. You can download it from apkmirror, a website that offers free and safe APK files for various apps and games. Apkmirror is a reliable source that checks the authenticity and safety of APK files before uploading them. </p>
- <p>If you are looking for a game that combines fun, challenge, humor and nostalgia, Angry Birds Star Wars 2 is the game for you. Download it today and join the epic adventure of the birds and the pigs in a galaxy far, far away. </p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about Angry Birds Star Wars 2:</p>
- <h4>Q: How do I download Angry Birds Star Wars 2 from apkmirror? </h4>
- <p>A: To download Angry Birds Star Wars 2 from apkmirror, you need to follow these steps:</p>
- - Go to the apkmirror website and search for Angry Birds Star Wars 2. - Choose the version of the game you want to download. You can also compare versions by clicking the "See Available APKs" button. - Click the "Download APK" button and wait for the file to be downloaded to your device. - Open the file and install the game. You may need to allow installing apps from unknown sources in your device settings. - Enjoy the game! <h4>Q: How do I update Angry Birds Star Wars 2?</h4>
- <p>A: To update Angry Birds Star Wars 2, you need to follow these steps:</p>
- <p>A: To uninstall Angry Birds Star Wars 2, you need to follow these steps:</p>
- - Go to your device settings and find the apps or applications section. - Find Angry Birds Star Wars 2 in the list of apps and tap on it. - Tap the "Uninstall" button and confirm your choice. - Wait for the app to be uninstalled from your device. <h4>Q: Is it safe to download Angry Birds Star Wars 2 from apkmirror? </h4>
- <p>A: Yes, Angry Birds Star Wars 2 is safe to download from apkmirror. Apkmirror is a reputable website that verifies the authenticity and safety of APK files before uploading them. You can also check user reviews and app ratings on apkmirror to see what other users think of it. </p>
- <h4>Q: Is Angry Birds Star Wars 2 free to play? </h4>
- <p>A: Yes, Angry Birds Star Wars 2 is free. However, it has some optional in-app purchases that can enhance your gaming experience. You can buy more telepods, power-ups, coins or crystals using real money. You can also disable these purchases in your device settings if you don't want them. </p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Descarga Fuente Kruti Dev 21.md DELETED
@@ -1,71 +0,0 @@
- 
- <h1>Video Poker for PC Free Download: How to Play and Win</h1>
- <p>Do you like playing poker but don't have the time or money to visit a casino? Do you want to enjoy the thrill of winning real money without risking any of your own? If you answered yes to either of these questions, then you should try playing video poker on your PC. In this article, we will show you what video poker is, why you should play it, how to download and install video poker games on your PC, and how to play and win video poker on your PC.</p>
- <h2>download kruti dev 21 font</h2><br /><p><b><b>Download Zip</b> &#10031; <a href="https://bltlly.com/2v6JFR">https://bltlly.com/2v6JFR</a></b></p><br /><br />
- <h2>What is video poker and why should you play it? </h2>
- <p>Video poker is a casino game that combines elements of poker and slot machines. It is played on a computerized console that simulates a five-card draw poker game. You can choose from different video poker variations, such as Jacks or Better, Deuces Wild, Joker Poker and more. Each variation has its own rules and pay table, which determine how much you can win for each hand.</p>
- <p>Video poker is a game of skill and luck, where you have to make the best possible hand out of the five cards you are dealt. You can choose to hold or discard any of the cards, then draw new cards to replace the ones you discarded. The final hand is compared against the pay table, and if you have a winning hand, you are paid according to the payout ratio. For example, if you have a pair of jacks or better in Jacks or Better video poker, you are paid 1:1, but if you have a royal flush, you are paid 250:1.</p>
- <p>There are many reasons why you should play video poker on your PC. Here are some of them:</p>
- <ul>
- <li>Video poker is easy to learn and play. You don't need any prior poker knowledge or experience to play video poker. You only need to know the basic rules and the poker hand rankings, which are displayed on the screen. </li>
- <li>Video poker is rewarding and profitable. You can win real money playing video poker on your PC, if you choose to play for real money. You can also win bonuses and jackpots by playing certain video poker games. Moreover, video poker has a low house edge, which means you have a better chance of winning than in other casino games. </li>
- </ul>
- <h2>How to download and install video poker games on PC</h2>
- <h3>The best video poker games for PC</h3>
- <p>There are many video poker games available for PC, but not all of them are worth your time and attention. Some of them may have poor graphics, sound effects or gameplay. Some of them may also have hidden charges, ads or malware that can harm your PC. To help you avoid these problems, we have selected three of the best video poker games for PC that you can download and play for free. They are:</p>
- <h4>Free Video Poker by YCS</h4>
- <p>This is a simple, straightforward video poker game similar to Jacks or Better poker. It is completely free, with no in-app purchases or ads. You can play with free credits that are replenished every two hours. You can also see your name on the worldwide high-score board if you do well. <h4>PlayPoker - Texas Hold'em - Free Version by Ivonne Roedter-Geisler</h4>
- <p>This is a video poker game based on Texas Hold'em poker, the most popular poker variant in the world. You can play against up to eight opponents, each with their own personality and skill level. You can also customize the game settings, such as the number of rounds, the blinds, the betting limits and the card deck. The game is free, but you can buy additional chips or remove ads with in-app purchases. </p>
- <p></p>
- <h4>Video Poker Classic by Tapinator, Inc.</h4>
- <h3>The steps to download and install video poker games on PC</h3>
- <p>To download and install video poker games on your PC, you need to follow these steps:</p>
- <ol>
- <li>Choose the video poker game you want to play from the list above. Click the link to go to the download page. </li>
- <li>On the download page, click the "Download" button to start downloading the game file. The file size may vary depending on the game. </li>
- <li>Once the download is complete, locate the game file on your PC and double-click it to run it. This will launch the installation wizard. </li>
- <li>Follow the instructions of the installation wizard to install the game on your PC. You may need to accept the terms and conditions, choose a destination folder and create a shortcut. </li>
- <li>After installation, you can launch the game from your desktop or start menu. You may need to sign in with your email or Facebook account to play some games. </li>
- </ol>
- <h2>How to play and win at Video Poker on PC</h2>
- <h3>The rules and strategies of video poker</h3>
- <p>The rules and strategies of video poker vary depending on the variation you are playing. However, there are some general principles that apply to most video poker games. Here are some of them:</p>
- <ul>
- <li>The goal of video poker is to make the best possible five-card hand out of the five cards you are dealt.</li>
- <li>You can choose to hold or discard any of the cards by clicking on them. You can also use the "Hold" or "Deal" buttons to do so.</li>
- <li>You can draw new cards to replace the ones you discarded by clicking the "Draw" button. You can only do this once per hand.</li>
- <li>The final hand is compared against the pay table, which shows how much you can win for each hand. The pay table is usually displayed on the screen or above the console.</li>
- <li>You can also win a progressive jackpot by playing certain video poker games or by betting the maximum number of coins per hand.</li>
- </ul>
- <p>To improve your chances of winning at video poker, you need to use some basic strategies that depend on the cards you are dealt and the pay table you are playing with. These strategies tell you which cards to hold or discard in order to maximize your expected return. You can find these strategies online or in books, or you can use a strategy card or a software program that tells you what to do with each hand.</p>
42
- </ul>
43
- <p>Para mejorar tus posibilidades de ganar en el video poker, necesitas usar algunas estrategias básicas que dependen de las cartas que se te reparten y la tabla de pagos con la que estás jugando. Estas estrategias te dicen qué cartas conservar o descartar para maximizar tu retorno esperado. Puedes encontrar estas estrategias en línea o en libros, o puedes usar una tarjeta de estrategia o un programa de software que te diga qué hacer con cada mano. </p>
44
- <h3> Los consejos y trucos para aumentar sus posibilidades de ganar</h3>
45
- <p>Además de usar estrategias, hay algunos consejos y trucos que pueden ayudarte a aumentar tus posibilidades de ganar en video poker en PC. Estos son algunos de ellos:</p>
46
- <ul>
47
- <li>Elija un juego de póquer de vídeo que tiene una alta relación de pagos y una ventaja baja de la casa. Puedes comprobarlos mirando la tabla de pagos y comparándola con otros juegos. </li>
48
- <li>Juega con dinero real solo si estás seguro de tus habilidades y tienes un presupuesto que puedes permitirte perder. De lo contrario, juega con créditos gratis o monedas que te son dadas por el juego. </li>
49
- <li>Juega con el número máximo de monedas por mano si es posible. Esto te permitirá calificar para los pagos y botes más altos. </li>
50
- <li>Practica tus habilidades y aprende de tus errores jugando gratis o con apuestas bajas antes de jugar con dinero real o altas apuestas. También puedes usar un entrenador de video poker o un simulador que te da retroalimentación sobre tus decisiones y te dice cómo mejorar. </li>
51
- <li>Administre su bankroll y tiempo sabiamente. Establezca un límite en cuánto está dispuesto a gastar y perder, y apéguese a él. Además, establece un límite sobre cuánto tiempo vas a jugar y tómate descansos regularmente. </li>
52
-
53
- </ul>
54
- <h2>Conclusión</h2>
55
- <p>Video poker es un gran juego que combina la habilidad del póquer y la suerte de las máquinas tragamonedas. Puedes jugar video poker en tu PC gratis o por dinero real, y disfrutar de los beneficios de jugar en una pantalla más grande, con mejores gráficos, efectos de sonido y jugabilidad. También puedes elegir entre diferentes variaciones de video poker, como Jacks o Better, Deuces Wild, Joker Poker y más. Para jugar al video poker en tu PC, necesitas descargar e instalar un juego de video poker de una fuente confiable, como las que recomendamos arriba. También necesitas aprender las reglas y estrategias del video poker, y usar algunos consejos y trucos para aumentar tus posibilidades de ganar. El video poker es un juego divertido y emocionante que puede recompensarte con dinero real si juegas de forma inteligente y responsable. </p>
56
- <h3>Preguntas frecuentes</h3>
57
- <p>Aquí hay algunas preguntas frecuentes sobre el video poker para PC descarga gratuita:</p>
58
- <ol>
59
- <li>Q: ¿Es el video poker amañado o justo? </li>
60
- <li>A: El póker video es justo si usted juega en un casino reputable y licenciado o abastecedor del juego. El video póquer utiliza un generador de números aleatorios (RNG) que asegura que cada carta se reparte de forma aleatoria e independiente. El RNG también es auditado y probado por agencias independientes para asegurar su equidad y precisión. </li>
61
- <li>Q: ¿Cuál es el mejor juego de video poker para jugar? </li>
62
- <li>A: El mejor juego de video poker para jugar depende de su preferencia personal y nivel de habilidad. Sin embargo, algunos de los juegos de video poker más populares y rentables son Jacks or Better, Deuces Wild, Joker Poker, Bonus Poker, Double Bonus Poker y Double Double Bonus Poker.</li>
63
- <li>Q: ¿Cómo puedo ganar en el video poker? </li>
64
- <li>A: Usted puede ganar en el póker video haciendo la mejor mano posible de las cinco tarjetas que se reparten. También puede ganar jugando una ronda de bonos o una función de apuesta, si está disponible. También puedes ganar un jackpot progresivo jugando ciertos juegos de video poker o apostando la cantidad máxima de monedas por mano. </li>
65
-
66
- <li>A: Puedes mejorar tus habilidades de video poker aprendiendo las reglas y estrategias del video poker, y practicándolas en juegos gratis o de apuestas bajas. También puedes usar un entrenador de video poker o un simulador que te da retroalimentación sobre tus decisiones y te dice cómo mejorar. </li>
67
- <li>Q: ¿Puedo jugar video poker en mi dispositivo móvil? </li>
68
- <li>A: Sí, puedes jugar video poker en tu dispositivo móvil, como tu smartphone o tablet. Hay muchas aplicaciones de video poker disponibles para dispositivos Android e iOS que puedes descargar desde la Google Play Store o la App Store. Sin embargo, jugar video poker en tu PC puede ofrecerte una mejor experiencia de juego que jugar en tu dispositivo móvil. </li>
69
- </ol></p> 64aa2da5cf<br />
70
- <br />
71
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/utils.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/distributions/__init__.py DELETED
@@ -1,21 +0,0 @@
- from pip._internal.distributions.base import AbstractDistribution
- from pip._internal.distributions.sdist import SourceDistribution
- from pip._internal.distributions.wheel import WheelDistribution
- from pip._internal.req.req_install import InstallRequirement
-
-
- def make_distribution_for_install_requirement(
-     install_req: InstallRequirement,
- ) -> AbstractDistribution:
-     """Returns a Distribution for the given InstallRequirement"""
-     # Editable requirements will always be source distributions. They use the
-     # legacy logic until we create a modern standard for them.
-     if install_req.editable:
-         return SourceDistribution(install_req)
-
-     # If it's a wheel, it's a WheelDistribution
-     if install_req.is_wheel:
-         return WheelDistribution(install_req)
-
-     # Otherwise, a SourceDistribution
-     return SourceDistribution(install_req)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/testing.py DELETED
@@ -1,331 +0,0 @@
- # testing.py
-
- from contextlib import contextmanager
- import typing
-
- from .core import (
-     ParserElement,
-     ParseException,
-     Keyword,
-     __diag__,
-     __compat__,
- )
-
-
- class pyparsing_test:
-     """
-     namespace class for classes useful in writing unit tests
-     """
-
-     class reset_pyparsing_context:
-         """
-         Context manager to be used when writing unit tests that modify pyparsing config values:
-         - packrat parsing
-         - bounded recursion parsing
-         - default whitespace characters.
-         - default keyword characters
-         - literal string auto-conversion class
-         - __diag__ settings
-
-         Example::
-
-             with reset_pyparsing_context():
-                 # test that literals used to construct a grammar are automatically suppressed
-                 ParserElement.inlineLiteralsUsing(Suppress)
-
-                 term = Word(alphas) | Word(nums)
-                 group = Group('(' + term[...] + ')')
-
-                 # assert that the '()' characters are not included in the parsed tokens
-                 self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
-
-             # after exiting context manager, literals are converted to Literal expressions again
-         """
-
-         def __init__(self):
-             self._save_context = {}
-
-         def save(self):
-             self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
-             self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
-
-             self._save_context[
-                 "literal_string_class"
-             ] = ParserElement._literalStringClass
-
-             self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
-
-             self._save_context["packrat_enabled"] = ParserElement._packratEnabled
-             if ParserElement._packratEnabled:
-                 self._save_context[
-                     "packrat_cache_size"
-                 ] = ParserElement.packrat_cache.size
-             else:
-                 self._save_context["packrat_cache_size"] = None
-             self._save_context["packrat_parse"] = ParserElement._parse
-             self._save_context[
-                 "recursion_enabled"
-             ] = ParserElement._left_recursion_enabled
-
-             self._save_context["__diag__"] = {
-                 name: getattr(__diag__, name) for name in __diag__._all_names
-             }
-
-             self._save_context["__compat__"] = {
-                 "collect_all_And_tokens": __compat__.collect_all_And_tokens
-             }
-
-             return self
-
-         def restore(self):
-             # reset pyparsing global state
-             if (
-                 ParserElement.DEFAULT_WHITE_CHARS
-                 != self._save_context["default_whitespace"]
-             ):
-                 ParserElement.set_default_whitespace_chars(
-                     self._save_context["default_whitespace"]
-                 )
-
-             ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
-
-             Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
-             ParserElement.inlineLiteralsUsing(
-                 self._save_context["literal_string_class"]
-             )
-
-             for name, value in self._save_context["__diag__"].items():
-                 (__diag__.enable if value else __diag__.disable)(name)
-
-             ParserElement._packratEnabled = False
-             if self._save_context["packrat_enabled"]:
-                 ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
-             else:
-                 ParserElement._parse = self._save_context["packrat_parse"]
-             ParserElement._left_recursion_enabled = self._save_context[
-                 "recursion_enabled"
-             ]
-
-             __compat__.collect_all_And_tokens = self._save_context["__compat__"]
-
-             return self
-
-         def copy(self):
-             ret = type(self)()
-             ret._save_context.update(self._save_context)
-             return ret
-
-         def __enter__(self):
-             return self.save()
-
-         def __exit__(self, *args):
-             self.restore()
-
-     class TestParseResultsAsserts:
-         """
-         A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
-         """
-
-         def assertParseResultsEquals(
-             self, result, expected_list=None, expected_dict=None, msg=None
-         ):
-             """
-             Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
-             and compare any defined results names with an optional ``expected_dict``.
-             """
-             if expected_list is not None:
-                 self.assertEqual(expected_list, result.as_list(), msg=msg)
-             if expected_dict is not None:
-                 self.assertEqual(expected_dict, result.as_dict(), msg=msg)
-
-         def assertParseAndCheckList(
-             self, expr, test_string, expected_list, msg=None, verbose=True
-         ):
-             """
-             Convenience wrapper assert to test a parser element and input string, and assert that
-             the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
-             """
-             result = expr.parse_string(test_string, parse_all=True)
-             if verbose:
-                 print(result.dump())
-             else:
-                 print(result.as_list())
-             self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
-
-         def assertParseAndCheckDict(
-             self, expr, test_string, expected_dict, msg=None, verbose=True
-         ):
-             """
-             Convenience wrapper assert to test a parser element and input string, and assert that
-             the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
-             """
-             result = expr.parse_string(test_string, parseAll=True)
-             if verbose:
-                 print(result.dump())
-             else:
-                 print(result.as_list())
-             self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
-
-         def assertRunTestResults(
-             self, run_tests_report, expected_parse_results=None, msg=None
-         ):
-             """
-             Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
-             list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
-             with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
-             Finally, asserts that the overall ``runTests()`` success value is ``True``.
-
-             :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
-             :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
-             """
-             run_test_success, run_test_results = run_tests_report
-
-             if expected_parse_results is not None:
-                 merged = [
-                     (*rpt, expected)
-                     for rpt, expected in zip(run_test_results, expected_parse_results)
-                 ]
-                 for test_string, result, expected in merged:
-                     # expected should be a tuple containing a list and/or a dict or an exception,
-                     # and optional failure message string
-                     # an empty tuple will skip any result validation
-                     fail_msg = next(
-                         (exp for exp in expected if isinstance(exp, str)), None
-                     )
-                     expected_exception = next(
-                         (
-                             exp
-                             for exp in expected
-                             if isinstance(exp, type) and issubclass(exp, Exception)
-                         ),
-                         None,
-                     )
-                     if expected_exception is not None:
-                         with self.assertRaises(
-                             expected_exception=expected_exception, msg=fail_msg or msg
-                         ):
-                             if isinstance(result, Exception):
-                                 raise result
-                     else:
-                         expected_list = next(
-                             (exp for exp in expected if isinstance(exp, list)), None
-                         )
-                         expected_dict = next(
-                             (exp for exp in expected if isinstance(exp, dict)), None
-                         )
-                         if (expected_list, expected_dict) != (None, None):
-                             self.assertParseResultsEquals(
-                                 result,
-                                 expected_list=expected_list,
-                                 expected_dict=expected_dict,
-                                 msg=fail_msg or msg,
-                             )
-                         else:
-                             # warning here maybe?
-                             print("no validation for {!r}".format(test_string))
-
-             # do this last, in case some specific test results can be reported instead
-             self.assertTrue(
-                 run_test_success, msg=msg if msg is not None else "failed runTests"
-             )
-
-         @contextmanager
-         def assertRaisesParseException(self, exc_type=ParseException, msg=None):
-             with self.assertRaises(exc_type, msg=msg):
-                 yield
-
-     @staticmethod
-     def with_line_numbers(
-         s: str,
-         start_line: typing.Optional[int] = None,
-         end_line: typing.Optional[int] = None,
-         expand_tabs: bool = True,
-         eol_mark: str = "|",
-         mark_spaces: typing.Optional[str] = None,
-         mark_control: typing.Optional[str] = None,
-     ) -> str:
-         """
-         Helpful method for debugging a parser - prints a string with line and column numbers.
-         (Line and column numbers are 1-based.)
-
-         :param s: tuple(bool, str - string to be printed with line and column numbers
-         :param start_line: int - (optional) starting line number in s to print (default=1)
-         :param end_line: int - (optional) ending line number in s to print (default=len(s))
-         :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
-         :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
-         :param mark_spaces: str - (optional) special character to display in place of spaces
-         :param mark_control: str - (optional) convert non-printing control characters to a placeholding
-                              character; valid values:
-                              - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
-                              - any single character string - replace control characters with given string
-                              - None (default) - string is displayed as-is
-
-         :return: str - input string with leading line numbers and column number headers
-         """
-         if expand_tabs:
-             s = s.expandtabs()
-         if mark_control is not None:
-             if mark_control == "unicode":
-                 tbl = str.maketrans(
-                     {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
-                     | {127: 0x2421}
-                 )
-                 eol_mark = ""
-             else:
-                 tbl = str.maketrans(
-                     {c: mark_control for c in list(range(0, 32)) + [127]}
-                 )
-             s = s.translate(tbl)
-         if mark_spaces is not None and mark_spaces != " ":
-             if mark_spaces == "unicode":
-                 tbl = str.maketrans({9: 0x2409, 32: 0x2423})
-                 s = s.translate(tbl)
-             else:
-                 s = s.replace(" ", mark_spaces)
-         if start_line is None:
-             start_line = 1
-         if end_line is None:
-             end_line = len(s)
-         end_line = min(end_line, len(s))
-         start_line = min(max(1, start_line), end_line)
-
-         if mark_control != "unicode":
-             s_lines = s.splitlines()[start_line - 1 : end_line]
-         else:
-             s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
-         if not s_lines:
-             return ""
-
-         lineno_width = len(str(end_line))
-         max_line_len = max(len(line) for line in s_lines)
-         lead = " " * (lineno_width + 1)
-         if max_line_len >= 99:
-             header0 = (
-                 lead
-                 + "".join(
-                     "{}{}".format(" " * 99, (i + 1) % 100)
-                     for i in range(max(max_line_len // 100, 1))
-                 )
-                 + "\n"
-             )
-         else:
-             header0 = ""
-         header1 = (
-             header0
-             + lead
-             + "".join(
-                 "         {}".format((i + 1) % 10)
-                 for i in range(-(-max_line_len // 10))
-             )
-             + "\n"
-         )
-         header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
-         return (
-             header1
-             + header2
-             + "\n".join(
-                 "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
-                 for i, line in enumerate(s_lines, start=start_line)
-             )
-             + "\n"
-         )
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/stop.py DELETED
@@ -1,103 +0,0 @@
- # Copyright 2016–2021 Julien Danjou
- # Copyright 2016 Joshua Harlow
- # Copyright 2013-2014 Ray Holder
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import abc
- import typing
-
- from pip._vendor.tenacity import _utils
-
- if typing.TYPE_CHECKING:
-     import threading
-
-     from pip._vendor.tenacity import RetryCallState
-
-
- class stop_base(abc.ABC):
-     """Abstract base class for stop strategies."""
-
-     @abc.abstractmethod
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         pass
-
-     def __and__(self, other: "stop_base") -> "stop_all":
-         return stop_all(self, other)
-
-     def __or__(self, other: "stop_base") -> "stop_any":
-         return stop_any(self, other)
-
-
- StopBaseT = typing.Union[stop_base, typing.Callable[["RetryCallState"], bool]]
-
-
- class stop_any(stop_base):
-     """Stop if any of the stop condition is valid."""
-
-     def __init__(self, *stops: stop_base) -> None:
-         self.stops = stops
-
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         return any(x(retry_state) for x in self.stops)
-
-
- class stop_all(stop_base):
-     """Stop if all the stop conditions are valid."""
-
-     def __init__(self, *stops: stop_base) -> None:
-         self.stops = stops
-
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         return all(x(retry_state) for x in self.stops)
-
-
- class _stop_never(stop_base):
-     """Never stop."""
-
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         return False
-
-
- stop_never = _stop_never()
-
-
- class stop_when_event_set(stop_base):
-     """Stop when the given event is set."""
-
-     def __init__(self, event: "threading.Event") -> None:
-         self.event = event
-
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         return self.event.is_set()
-
-
- class stop_after_attempt(stop_base):
-     """Stop when the previous attempt >= max_attempt."""
-
-     def __init__(self, max_attempt_number: int) -> None:
-         self.max_attempt_number = max_attempt_number
-
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         return retry_state.attempt_number >= self.max_attempt_number
-
-
- class stop_after_delay(stop_base):
-     """Stop when the time from the first attempt >= limit."""
-
-     def __init__(self, max_delay: _utils.time_unit_type) -> None:
-         self.max_delay = _utils.to_seconds(max_delay)
-
-     def __call__(self, retry_state: "RetryCallState") -> bool:
-         if retry_state.seconds_since_start is None:
-             raise RuntimeError("__call__() called but seconds_since_start is not set")
-         return retry_state.seconds_since_start >= self.max_delay
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_common.py DELETED
@@ -1,104 +0,0 @@
- import os
- import pathlib
- import tempfile
- import functools
- import contextlib
- import types
- import importlib
-
- from typing import Union, Optional
- from .abc import ResourceReader, Traversable
-
- from ._compat import wrap_spec
-
- Package = Union[types.ModuleType, str]
-
-
- def files(package):
-     # type: (Package) -> Traversable
-     """
-     Get a Traversable resource from a package
-     """
-     return from_package(get_package(package))
-
-
- def get_resource_reader(package):
-     # type: (types.ModuleType) -> Optional[ResourceReader]
-     """
-     Return the package's loader if it's a ResourceReader.
-     """
-     # We can't use
-     # a issubclass() check here because apparently abc.'s __subclasscheck__()
-     # hook wants to create a weak reference to the object, but
-     # zipimport.zipimporter does not support weak references, resulting in a
-     # TypeError. That seems terrible.
-     spec = package.__spec__
-     reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
-     if reader is None:
-         return None
-     return reader(spec.name)  # type: ignore
-
-
- def resolve(cand):
-     # type: (Package) -> types.ModuleType
-     return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
- def get_package(package):
-     # type: (Package) -> types.ModuleType
-     """Take a package name or module object and return the module.
-
-     Raise an exception if the resolved module is not a package.
-     """
-     resolved = resolve(package)
-     if wrap_spec(resolved).submodule_search_locations is None:
-         raise TypeError(f'{package!r} is not a package')
-     return resolved
-
-
- def from_package(package):
-     """
-     Return a Traversable object for the given package.
-
-     """
-     spec = wrap_spec(package)
-     reader = spec.loader.get_resource_reader(spec.name)
-     return reader.files()
-
-
- @contextlib.contextmanager
- def _tempfile(reader, suffix=''):
-     # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
-     # blocks due to the need to close the temporary file to work on Windows
-     # properly.
-     fd, raw_path = tempfile.mkstemp(suffix=suffix)
-     try:
-         try:
-             os.write(fd, reader())
-         finally:
-             os.close(fd)
-         del reader
-         yield pathlib.Path(raw_path)
-     finally:
-         try:
-             os.remove(raw_path)
-         except FileNotFoundError:
-             pass
-
-
- @functools.singledispatch
- def as_file(path):
-     """
-     Given a Traversable object, return that object as a
-     path on the local file system in a context manager.
-     """
-     return _tempfile(path.read_bytes, suffix=path.name)
-
-
- @as_file.register(pathlib.Path)
- @contextlib.contextmanager
- def _(path):
-     """
-     Degenerate behavior for pathlib.Path objects.
-     """
-     yield path
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/ssl_.py DELETED
@@ -1,495 +0,0 @@
- from __future__ import absolute_import
-
- import hmac
- import os
- import sys
- import warnings
- from binascii import hexlify, unhexlify
- from hashlib import md5, sha1, sha256
-
- from ..exceptions import (
-     InsecurePlatformWarning,
-     ProxySchemeUnsupported,
-     SNIMissingWarning,
-     SSLError,
- )
- from ..packages import six
- from .url import BRACELESS_IPV6_ADDRZ_RE, IPV4_RE
-
- SSLContext = None
- SSLTransport = None
- HAS_SNI = False
- IS_PYOPENSSL = False
- IS_SECURETRANSPORT = False
- ALPN_PROTOCOLS = ["http/1.1"]
-
- # Maps the length of a digest to a possible hash function producing this digest
- HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
-
-
- def _const_compare_digest_backport(a, b):
-     """
-     Compare two digests of equal length in constant time.
-
-     The digests must be of type str/bytes.
-     Returns True if the digests match, and False otherwise.
-     """
-     result = abs(len(a) - len(b))
-     for left, right in zip(bytearray(a), bytearray(b)):
-         result |= left ^ right
-     return result == 0
-
-
- _const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
-
- try:  # Test for SSL features
-     import ssl
-     from ssl import CERT_REQUIRED, wrap_socket
- except ImportError:
-     pass
-
- try:
-     from ssl import HAS_SNI  # Has SNI?
- except ImportError:
-     pass
-
- try:
-     from .ssltransport import SSLTransport
- except ImportError:
-     pass
-
-
- try:  # Platform-specific: Python 3.6
-     from ssl import PROTOCOL_TLS
-
-     PROTOCOL_SSLv23 = PROTOCOL_TLS
- except ImportError:
-     try:
-         from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
-
-         PROTOCOL_SSLv23 = PROTOCOL_TLS
-     except ImportError:
-         PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
-
- try:
-     from ssl import PROTOCOL_TLS_CLIENT
- except ImportError:
-     PROTOCOL_TLS_CLIENT = PROTOCOL_TLS
-
-
- try:
-     from ssl import OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3
- except ImportError:
-     OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
-     OP_NO_COMPRESSION = 0x20000
-
-
- try:  # OP_NO_TICKET was added in Python 3.6
-     from ssl import OP_NO_TICKET
- except ImportError:
-     OP_NO_TICKET = 0x4000
-
-
- # A secure default.
- # Sources for more information on TLS ciphers:
- #
- # - https://wiki.mozilla.org/Security/Server_Side_TLS
- # - https://www.ssllabs.com/projects/best-practices/index.html
- # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
- #
- # The general intent is:
- # - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
- # - prefer ECDHE over DHE for better performance,
- # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
- #   security,
- # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
- # - disable NULL authentication, MD5 MACs, DSS, and other
- #   insecure ciphers for security reasons.
- # - NOTE: TLS 1.3 cipher suites are managed through a different interface
- #   not exposed by CPython (yet!) and are enabled by default if they're available.
- DEFAULT_CIPHERS = ":".join(
-     [
-         "ECDHE+AESGCM",
-         "ECDHE+CHACHA20",
-         "DHE+AESGCM",
-         "DHE+CHACHA20",
-         "ECDH+AESGCM",
-         "DH+AESGCM",
-         "ECDH+AES",
-         "DH+AES",
-         "RSA+AESGCM",
-         "RSA+AES",
-         "!aNULL",
-         "!eNULL",
-         "!MD5",
-         "!DSS",
-     ]
- )
-
- try:
-     from ssl import SSLContext  # Modern SSL?
- except ImportError:
-
-     class SSLContext(object):  # Platform-specific: Python 2
-         def __init__(self, protocol_version):
-             self.protocol = protocol_version
-             # Use default values from a real SSLContext
-             self.check_hostname = False
-             self.verify_mode = ssl.CERT_NONE
-             self.ca_certs = None
-             self.options = 0
-             self.certfile = None
-             self.keyfile = None
-             self.ciphers = None
-
-         def load_cert_chain(self, certfile, keyfile):
-             self.certfile = certfile
-             self.keyfile = keyfile
-
-         def load_verify_locations(self, cafile=None, capath=None, cadata=None):
-             self.ca_certs = cafile
-
-             if capath is not None:
-                 raise SSLError("CA directories not supported in older Pythons")
-
-             if cadata is not None:
-                 raise SSLError("CA data not supported in older Pythons")
-
-         def set_ciphers(self, cipher_suite):
-             self.ciphers = cipher_suite
-
-         def wrap_socket(self, socket, server_hostname=None, server_side=False):
-             warnings.warn(
-                 "A true SSLContext object is not available. This prevents "
-                 "urllib3 from configuring SSL appropriately and may cause "
-                 "certain SSL connections to fail. You can upgrade to a newer "
-                 "version of Python to solve this. For more information, see "
-                 "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
-                 "#ssl-warnings",
-                 InsecurePlatformWarning,
-             )
-             kwargs = {
-                 "keyfile": self.keyfile,
-                 "certfile": self.certfile,
-                 "ca_certs": self.ca_certs,
-                 "cert_reqs": self.verify_mode,
-                 "ssl_version": self.protocol,
-                 "server_side": server_side,
-             }
-             return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
-
-
- def assert_fingerprint(cert, fingerprint):
-     """
-     Checks if given fingerprint matches the supplied certificate.
-
-     :param cert:
-         Certificate as bytes object.
-     :param fingerprint:
-         Fingerprint as string of hexdigits, can be interspersed by colons.
-     """
-
-     fingerprint = fingerprint.replace(":", "").lower()
-     digest_length = len(fingerprint)
-     hashfunc = HASHFUNC_MAP.get(digest_length)
-     if not hashfunc:
-         raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
-
-     # We need encode() here for py32; works on py2 and p33.
-     fingerprint_bytes = unhexlify(fingerprint.encode())
-
-     cert_digest = hashfunc(cert).digest()
-
-     if not _const_compare_digest(cert_digest, fingerprint_bytes):
-         raise SSLError(
-             'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
-                 fingerprint, hexlify(cert_digest)
-             )
-         )
-
-
- def resolve_cert_reqs(candidate):
-     """
-     Resolves the argument to a numeric constant, which can be passed to
-     the wrap_socket function/method from the ssl module.
-     Defaults to :data:`ssl.CERT_REQUIRED`.
-     If given a string it is assumed to be the name of the constant in the
-     :mod:`ssl` module or its abbreviation.
-     (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
-     If it's neither `None` nor a string we assume it is already the numeric
-     constant which can directly be passed to wrap_socket.
-     """
-     if candidate is None:
-         return CERT_REQUIRED
-
-     if isinstance(candidate, str):
-         res = getattr(ssl, candidate, None)
-         if res is None:
-             res = getattr(ssl, "CERT_" + candidate)
-         return res
-
-     return candidate
-
-
- def resolve_ssl_version(candidate):
-     """
-     like resolve_cert_reqs
-     """
-     if candidate is None:
-         return PROTOCOL_TLS
-
-     if isinstance(candidate, str):
-         res = getattr(ssl, candidate, None)
-         if res is None:
-             res = getattr(ssl, "PROTOCOL_" + candidate)
-         return res
-
-     return candidate
-
-
- def create_urllib3_context(
-     ssl_version=None, cert_reqs=None, options=None, ciphers=None
- ):
-     """All arguments have the same meaning as ``ssl_wrap_socket``.
-
-     By default, this function does a lot of the same work that
-     ``ssl.create_default_context`` does on Python 3.4+. It:
-
-     - Disables SSLv2, SSLv3, and compression
-     - Sets a restricted set of server ciphers
-
-     If you wish to enable SSLv3, you can do::
-
-         from urllib3.util import ssl_
-         context = ssl_.create_urllib3_context()
-         context.options &= ~ssl_.OP_NO_SSLv3
-
-     You can do the same to enable compression (substituting ``COMPRESSION``
-     for ``SSLv3`` in the last line above).
-
-     :param ssl_version:
-         The desired protocol version to use. This will default to
-         PROTOCOL_SSLv23 which will negotiate the highest protocol that both
-         the server and your installation of OpenSSL support.
-     :param cert_reqs:
-         Whether to require the certificate verification. This defaults to
-         ``ssl.CERT_REQUIRED``.
-     :param options:
-         Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
-         ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.
-     :param ciphers:
-         Which cipher suites to allow the server to select.
-     :returns:
-         Constructed SSLContext object with specified options
-     :rtype: SSLContext
-     """
-     # PROTOCOL_TLS is deprecated in Python 3.10
-     if not ssl_version or ssl_version == PROTOCOL_TLS:
-         ssl_version = PROTOCOL_TLS_CLIENT
-
-     context = SSLContext(ssl_version)
-
-     context.set_ciphers(ciphers or DEFAULT_CIPHERS)
-
-     # Setting the default here, as we may have no ssl module on import
-     cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
-
-     if options is None:
-         options = 0
-         # SSLv2 is easily broken and is considered harmful and dangerous
-         options |= OP_NO_SSLv2
-         # SSLv3 has several problems and is now dangerous
-         options |= OP_NO_SSLv3
-         # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
-         # (issue #309)
-         options |= OP_NO_COMPRESSION
-         # TLSv1.2 only. Unless set explicitly, do not request tickets.
-         # This may save some bandwidth on wire, and although the ticket is encrypted,
-         # there is a risk associated with it being on wire,
-         # if the server is not rotating its ticketing keys properly.
-         options |= OP_NO_TICKET
-
-     context.options |= options
-
-     # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
-     # necessary for conditional client cert authentication with TLS 1.3.
-     # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
-     # versions of Python. We only enable on Python 3.7.4+ or if certificate
-     # verification is enabled to work around Python issue #37428
-     # See: https://bugs.python.org/issue37428
-     if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
-         context, "post_handshake_auth", None
-     ) is not None:
-         context.post_handshake_auth = True
-
-     def disable_check_hostname():
-         if (
-             getattr(context, "check_hostname", None) is not None
-         ):  # Platform-specific: Python 3.2
-             # We do our own verification, including fingerprints and alternative
-             # hostnames. So disable it here
-             context.check_hostname = False
-
-     # The order of the below lines setting verify_mode and check_hostname
-     # matter due to safe-guards SSLContext has to prevent an SSLContext with
-     # check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more
-     # complex because we don't know whether PROTOCOL_TLS_CLIENT will be used
-     # or not so we don't know the initial state of the freshly created SSLContext.
-     if cert_reqs == ssl.CERT_REQUIRED:
-         context.verify_mode = cert_reqs
-         disable_check_hostname()
-     else:
-         disable_check_hostname()
-         context.verify_mode = cert_reqs
-
-     # Enable logging of TLS session keys via defacto standard environment variable
-     # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
-     if hasattr(context, "keylog_filename"):
-         sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
-         if sslkeylogfile:
-             context.keylog_filename = sslkeylogfile
-
-     return context
-
-
- def ssl_wrap_socket(
-     sock,
-     keyfile=None,
-     certfile=None,
-     cert_reqs=None,
-     ca_certs=None,
-     server_hostname=None,
-     ssl_version=None,
-     ciphers=None,
-     ssl_context=None,
-     ca_cert_dir=None,
-     key_password=None,
-     ca_cert_data=None,
-     tls_in_tls=False,
- ):
-     """
-     All arguments except for server_hostname, ssl_context, and ca_cert_dir have
-     the same meaning as they do when using :func:`ssl.wrap_socket`.
-
-     :param server_hostname:
-         When SNI is supported, the expected hostname of the certificate
-     :param ssl_context:
-         A pre-made :class:`SSLContext` object. If none is provided, one will
-         be created using :func:`create_urllib3_context`.
-     :param ciphers:
-         A string of ciphers we wish the client to support.
-     :param ca_cert_dir:
-         A directory containing CA certificates in multiple separate files, as
-         supported by OpenSSL's -CApath flag or the capath argument to
-         SSLContext.load_verify_locations().
-     :param key_password:
-         Optional password if the keyfile is encrypted.
-     :param ca_cert_data:
-         Optional string containing CA certificates in PEM format suitable for
-         passing as the cadata parameter to SSLContext.load_verify_locations()
-     :param tls_in_tls:
-         Use SSLTransport to wrap the existing socket.
-     """
-     context = ssl_context
-     if context is None:
-         # Note: This branch of code and all the variables in it are no longer
-         # used by urllib3 itself. We should consider deprecating and removing
-         # this code.
-         context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
-
-     if ca_certs or ca_cert_dir or ca_cert_data:
-         try:
-             context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
-         except (IOError, OSError) as e:
-             raise SSLError(e)
-
-     elif ssl_context is None and hasattr(context, "load_default_certs"):
-         # try to load OS default certs; works well on Windows (require Python3.4+)
-         context.load_default_certs()
-
-     # Attempt to detect if we get the goofy behavior of the
-     # keyfile being encrypted and OpenSSL asking for the
-     # passphrase via the terminal and instead error out.
-     if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
-         raise SSLError("Client private key is encrypted, password is required")
-
-     if certfile:
-         if key_password is None:
-             context.load_cert_chain(certfile, keyfile)
-         else:
-             context.load_cert_chain(certfile, keyfile, key_password)
-
-     try:
-         if hasattr(context, "set_alpn_protocols"):
-             context.set_alpn_protocols(ALPN_PROTOCOLS)
-     except NotImplementedError:  # Defensive: in CI, we always have set_alpn_protocols
-         pass
-
-     # If we detect server_hostname is an IP address then the SNI
-     # extension should not be used according to RFC3546 Section 3.1
-     use_sni_hostname = server_hostname and not is_ipaddress(server_hostname)
-     # SecureTransport uses server_hostname in certificate verification.
-     send_sni = (use_sni_hostname and HAS_SNI) or (
-         IS_SECURETRANSPORT and server_hostname
-     )
-     # Do not warn the user if server_hostname is an invalid SNI hostname.
-     if not HAS_SNI and use_sni_hostname:
-         warnings.warn(
-             "An HTTPS request has been made, but the SNI (Server Name "
-             "Indication) extension to TLS is not available on this platform. "
-             "This may cause the server to present an incorrect TLS "
-             "certificate, which can cause validation failures. You can upgrade to "
-             "a newer version of Python to solve this. For more information, see "
-             "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
-             "#ssl-warnings",
-             SNIMissingWarning,
-         )
-
-     if send_sni:
-         ssl_sock = _ssl_wrap_socket_impl(
-             sock, context, tls_in_tls, server_hostname=server_hostname
-         )
-     else:
-         ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls)
-     return ssl_sock
-
-
- def is_ipaddress(hostname):
-     """Detects whether the hostname given is an IPv4 or IPv6 address.
-     Also detects IPv6 addresses with Zone IDs.
-
-     :param str hostname: Hostname to examine.
-     :return: True if the hostname is an IP address, False otherwise.
-     """
-     if not six.PY2 and isinstance(hostname, bytes):
-         # IDN A-label bytes are ASCII compatible.
-         hostname = hostname.decode("ascii")
-     return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
-
-
- def _is_key_file_encrypted(key_file):
-     """Detects if a key file is encrypted or not."""
-     with open(key_file, "r") as f:
-         for line in f:
-             # Look for Proc-Type: 4,ENCRYPTED
-             if "ENCRYPTED" in line:
-                 return True
-
-     return False
-
-
- def _ssl_wrap_socket_impl(sock, ssl_context, tls_in_tls, server_hostname=None):
-     if tls_in_tls:
-         if not SSLTransport:
-             # Import error, ssl is not available.
-             raise ProxySchemeUnsupported(
-                 "TLS in TLS requires support for the 'ssl' module"
-             )
-
-         SSLTransport._validate_ssl_context_for_tls_in_tls(ssl_context)
-         return SSLTransport(sock, ssl_context, server_hostname)
-
-     if server_hostname:
-         return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
-     else:
-         return ssl_context.wrap_socket(sock)
 
spaces/BreadBytes1/CC-Dashboard/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: CC Dashboard
- emoji: 🏃
- colorFrom: gray
- colorTo: yellow
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- license: gpl
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CVPR/GFPGAN-example/gfpgan/archs/stylegan2_clean_arch.py DELETED
@@ -1,368 +0,0 @@
1
- import math
2
- import random
3
- import torch
4
- from basicsr.archs.arch_util import default_init_weights
5
- from basicsr.utils.registry import ARCH_REGISTRY
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
-
10
- class NormStyleCode(nn.Module):
11
-
12
- def forward(self, x):
13
- """Normalize the style codes.
14
-
15
- Args:
16
- x (Tensor): Style codes with shape (b, c).
17
-
18
- Returns:
19
- Tensor: Normalized tensor.
20
- """
21
- return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
22
-
23
-
24
- class ModulatedConv2d(nn.Module):
25
- """Modulated Conv2d used in StyleGAN2.
26
-
27
- There is no bias in ModulatedConv2d.
28
-
29
- Args:
30
- in_channels (int): Channel number of the input.
31
- out_channels (int): Channel number of the output.
32
- kernel_size (int): Size of the convolving kernel.
33
- num_style_feat (int): Channel number of style features.
34
- demodulate (bool): Whether to demodulate in the conv layer. Default: True.
35
- sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None.
36
- eps (float): A value added to the denominator for numerical stability. Default: 1e-8.
37
- """
38
-
39
- def __init__(self,
40
- in_channels,
41
- out_channels,
42
- kernel_size,
43
- num_style_feat,
44
- demodulate=True,
45
- sample_mode=None,
46
- eps=1e-8):
47
- super(ModulatedConv2d, self).__init__()
48
- self.in_channels = in_channels
49
- self.out_channels = out_channels
50
- self.kernel_size = kernel_size
51
- self.demodulate = demodulate
52
- self.sample_mode = sample_mode
53
- self.eps = eps
54
-
55
- # modulation inside each modulated conv
56
- self.modulation = nn.Linear(num_style_feat, in_channels, bias=True)
57
- # initialization
58
- default_init_weights(self.modulation, scale=1, bias_fill=1, a=0, mode='fan_in', nonlinearity='linear')
59
-
60
- self.weight = nn.Parameter(
61
- torch.randn(1, out_channels, in_channels, kernel_size, kernel_size) /
62
- math.sqrt(in_channels * kernel_size**2))
63
- self.padding = kernel_size // 2
64
-
65
- def forward(self, x, style):
66
- """Forward function.
67
-
68
- Args:
69
- x (Tensor): Tensor with shape (b, c, h, w).
70
- style (Tensor): Tensor with shape (b, num_style_feat).
71
-
72
- Returns:
73
- Tensor: Modulated tensor after convolution.
74
- """
75
- b, c, h, w = x.shape # c = c_in
76
- # weight modulation
77
- style = self.modulation(style).view(b, 1, c, 1, 1)
78
- # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
79
- weight = self.weight * style # (b, c_out, c_in, k, k)
80
-
81
- if self.demodulate:
82
- demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
83
- weight = weight * demod.view(b, self.out_channels, 1, 1, 1)
84
-
85
- weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)
86
-
87
- # upsample or downsample if necessary
88
- if self.sample_mode == 'upsample':
89
- x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
90
- elif self.sample_mode == 'downsample':
91
- x = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=False)
92
-
93
- b, c, h, w = x.shape
94
- x = x.view(1, b * c, h, w)
95
- # weight: (b*c_out, c_in, k, k), groups=b
96
- out = F.conv2d(x, weight, padding=self.padding, groups=b)
97
- out = out.view(b, self.out_channels, *out.shape[2:4])
98
-
99
- return out
100
-
101
- def __repr__(self):
102
- return (f'{self.__class__.__name__}(in_channels={self.in_channels}, out_channels={self.out_channels}, '
103
- f'kernel_size={self.kernel_size}, demodulate={self.demodulate}, sample_mode={self.sample_mode})')
104
-
105
-
106
- class StyleConv(nn.Module):
107
- """Style conv used in StyleGAN2.
108
-
109
- Args:
110
- in_channels (int): Channel number of the input.
111
- out_channels (int): Channel number of the output.
112
- kernel_size (int): Size of the convolving kernel.
113
- num_style_feat (int): Channel number of style features.
114
- demodulate (bool): Whether demodulate in the conv layer. Default: True.
115
- sample_mode (str | None): Indicating 'upsample', 'downsample' or None. Default: None.
116
- """
117
-
118
- def __init__(self, in_channels, out_channels, kernel_size, num_style_feat, demodulate=True, sample_mode=None):
119
- super(StyleConv, self).__init__()
120
- self.modulated_conv = ModulatedConv2d(
121
- in_channels, out_channels, kernel_size, num_style_feat, demodulate=demodulate, sample_mode=sample_mode)
122
- self.weight = nn.Parameter(torch.zeros(1)) # for noise injection
123
- self.bias = nn.Parameter(torch.zeros(1, out_channels, 1, 1))
124
- self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
125
-
126
- def forward(self, x, style, noise=None):
127
- # modulate
128
- out = self.modulated_conv(x, style) * 2**0.5 # for conversion
129
- # noise injection
130
- if noise is None:
131
- b, _, h, w = out.shape
132
- noise = out.new_empty(b, 1, h, w).normal_()
133
- out = out + self.weight * noise
134
- # add bias
135
- out = out + self.bias
136
- # activation
137
- out = self.activate(out)
138
- return out
139
-
140
-
141
- class ToRGB(nn.Module):
142
- """To RGB (image space) from features.
143
-
144
- Args:
145
- in_channels (int): Channel number of input.
146
- num_style_feat (int): Channel number of style features.
147
- upsample (bool): Whether to upsample. Default: True.
148
- """
149
-
150
- def __init__(self, in_channels, num_style_feat, upsample=True):
151
- super(ToRGB, self).__init__()
152
- self.upsample = upsample
153
- self.modulated_conv = ModulatedConv2d(
154
- in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None)
155
- self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
156
-
157
- def forward(self, x, style, skip=None):
158
- """Forward function.
159
-
160
- Args:
161
- x (Tensor): Feature tensor with shape (b, c, h, w).
162
- style (Tensor): Tensor with shape (b, num_style_feat).
163
- skip (Tensor): Base/skip tensor. Default: None.
164
-
165
- Returns:
166
- Tensor: RGB images.
167
- """
168
- out = self.modulated_conv(x, style)
169
- out = out + self.bias
170
- if skip is not None:
171
- if self.upsample:
172
- skip = F.interpolate(skip, scale_factor=2, mode='bilinear', align_corners=False)
173
- out = out + skip
174
- return out
175
-
176
-
177
- class ConstantInput(nn.Module):
178
- """Constant input.
179
-
180
- Args:
181
- num_channel (int): Channel number of constant input.
182
- size (int): Spatial size of constant input.
183
- """
184
-
185
- def __init__(self, num_channel, size):
186
- super(ConstantInput, self).__init__()
187
- self.weight = nn.Parameter(torch.randn(1, num_channel, size, size))
188
-
189
- def forward(self, batch):
190
- out = self.weight.repeat(batch, 1, 1, 1)
191
- return out
192
-
193
-
194
- @ARCH_REGISTRY.register()
195
- class StyleGAN2GeneratorClean(nn.Module):
196
- """Clean version of StyleGAN2 Generator.
197
-
198
- Args:
199
- out_size (int): The spatial size of outputs.
200
- num_style_feat (int): Channel number of style features. Default: 512.
201
- num_mlp (int): Layer number of MLP style layers. Default: 8.
202
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
203
- narrow (float): Narrow ratio for channels. Default: 1.0.
204
- """
205
-
206
- def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1):
207
- super(StyleGAN2GeneratorClean, self).__init__()
208
- # Style MLP layers
209
- self.num_style_feat = num_style_feat
210
- style_mlp_layers = [NormStyleCode()]
211
- for i in range(num_mlp):
212
- style_mlp_layers.extend(
213
- [nn.Linear(num_style_feat, num_style_feat, bias=True),
214
- nn.LeakyReLU(negative_slope=0.2, inplace=True)])
215
- self.style_mlp = nn.Sequential(*style_mlp_layers)
216
- # initialization
217
- default_init_weights(self.style_mlp, scale=1, bias_fill=0, a=0.2, mode='fan_in', nonlinearity='leaky_relu')
218
-
219
- # channel list
220
- channels = {
221
- '4': int(512 * narrow),
222
- '8': int(512 * narrow),
223
- '16': int(512 * narrow),
224
- '32': int(512 * narrow),
225
- '64': int(256 * channel_multiplier * narrow),
226
- '128': int(128 * channel_multiplier * narrow),
227
- '256': int(64 * channel_multiplier * narrow),
228
- '512': int(32 * channel_multiplier * narrow),
229
- '1024': int(16 * channel_multiplier * narrow)
230
- }
231
- self.channels = channels
232
-
233
- self.constant_input = ConstantInput(channels['4'], size=4)
234
- self.style_conv1 = StyleConv(
235
- channels['4'],
236
- channels['4'],
237
- kernel_size=3,
238
- num_style_feat=num_style_feat,
239
- demodulate=True,
240
- sample_mode=None)
241
- self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False)
242
-
243
- self.log_size = int(math.log(out_size, 2))
244
- self.num_layers = (self.log_size - 2) * 2 + 1
245
- self.num_latent = self.log_size * 2 - 2
246
-
247
- self.style_convs = nn.ModuleList()
248
- self.to_rgbs = nn.ModuleList()
249
- self.noises = nn.Module()
250
-
251
- in_channels = channels['4']
252
- # noise
253
- for layer_idx in range(self.num_layers):
254
- resolution = 2**((layer_idx + 5) // 2)
255
- shape = [1, 1, resolution, resolution]
256
- self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape))
257
- # style convs and to_rgbs
258
- for i in range(3, self.log_size + 1):
259
- out_channels = channels[f'{2**i}']
260
- self.style_convs.append(
261
- StyleConv(
262
- in_channels,
263
- out_channels,
264
- kernel_size=3,
265
- num_style_feat=num_style_feat,
266
- demodulate=True,
267
- sample_mode='upsample'))
268
- self.style_convs.append(
269
- StyleConv(
270
- out_channels,
271
- out_channels,
272
- kernel_size=3,
273
- num_style_feat=num_style_feat,
274
- demodulate=True,
275
- sample_mode=None))
276
- self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True))
277
- in_channels = out_channels
278
-
279
- def make_noise(self):
280
- """Make noise for noise injection."""
281
- device = self.constant_input.weight.device
282
- noises = [torch.randn(1, 1, 4, 4, device=device)]
283
-
284
- for i in range(3, self.log_size + 1):
285
- for _ in range(2):
286
- noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))
287
-
288
- return noises
289
-
290
- def get_latent(self, x):
291
- return self.style_mlp(x)
292
-
293
- def mean_latent(self, num_latent):
294
- latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device)
295
- latent = self.style_mlp(latent_in).mean(0, keepdim=True)
296
- return latent
297
-
298
- def forward(self,
299
- styles,
300
- input_is_latent=False,
301
- noise=None,
302
- randomize_noise=True,
303
- truncation=1,
304
- truncation_latent=None,
305
- inject_index=None,
306
- return_latents=False):
307
- """Forward function for StyleGAN2GeneratorClean.
308
-
309
- Args:
310
- styles (list[Tensor]): Sample codes of styles.
311
- input_is_latent (bool): Whether input is latent style. Default: False.
312
- noise (Tensor | None): Input noise or None. Default: None.
313
- randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
314
- truncation (float): The truncation ratio. Default: 1.
315
- truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
316
- inject_index (int | None): The injection index for mixing noise. Default: None.
317
- return_latents (bool): Whether to return style latents. Default: False.
318
- """
319
- # style codes -> latents with Style MLP layer
320
- if not input_is_latent:
321
- styles = [self.style_mlp(s) for s in styles]
322
- # noises
323
- if noise is None:
324
- if randomize_noise:
325
- noise = [None] * self.num_layers # for each style conv layer
326
- else: # use the stored noise
327
- noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
328
- # style truncation
329
- if truncation < 1:
330
- style_truncation = []
331
- for style in styles:
332
- style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
333
- styles = style_truncation
334
- # get style latents with injection
335
- if len(styles) == 1:
336
- inject_index = self.num_latent
337
-
338
- if styles[0].ndim < 3:
339
- # repeat latent code for all the layers
340
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
341
- else: # used for encoder with different latent code for each layer
342
- latent = styles[0]
343
- elif len(styles) == 2: # mixing noises
344
- if inject_index is None:
345
- inject_index = random.randint(1, self.num_latent - 1)
346
- latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
347
- latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
348
- latent = torch.cat([latent1, latent2], 1)
349
-
350
- # main generation
351
- out = self.constant_input(latent.shape[0])
352
- out = self.style_conv1(out, latent[:, 0], noise=noise[0])
353
- skip = self.to_rgb1(out, latent[:, 1])
354
-
355
- i = 1
356
- for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
357
- noise[2::2], self.to_rgbs):
358
- out = conv1(out, latent[:, i], noise=noise1)
359
- out = conv2(out, latent[:, i + 1], noise=noise2)
360
- skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
361
- i += 2
362
-
363
- image = skip
364
-
365
- if return_latents:
366
- return image, latent
367
- else:
368
- return image, None
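
For reference, a minimal usage sketch of the deleted generator above (a sketch only: the import path, output size, and the usual 3-channel ToRGB output are assumptions, not taken from this diff):

# Hedged usage sketch -- names marked "hypothetical" are not from the file above.
import torch
# from basicsr.archs.stylegan2_clean_arch import StyleGAN2GeneratorClean  # hypothetical path

gen = StyleGAN2GeneratorClean(out_size=256, num_style_feat=512, num_mlp=8)
z = torch.randn(4, 512)                    # one z code per sample
img, _ = gen([z], input_is_latent=False)   # forward() expects a *list* of style codes
print(img.shape)                           # expected: torch.Size([4, 3, 256, 256])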
 
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/join_iterator.h DELETED
@@ -1,134 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/iterator/iterator_facade.h>
21
- #include <thrust/iterator/detail/minimum_system.h>
22
- #include <thrust/iterator/counting_iterator.h>
23
- #include <thrust/detail/type_traits.h>
24
-
25
-
26
- namespace thrust
27
- {
28
- namespace detail
29
- {
30
-
31
-
32
- template<typename RandomAccessIterator1,
33
- typename RandomAccessIterator2,
34
- typename Difference,
35
- typename Reference>
36
- class join_iterator;
37
-
38
-
39
- namespace join_iterator_detail
40
- {
41
-
42
-
43
- template<typename RandomAccessIterator1,
44
- typename RandomAccessIterator2,
45
- typename Difference,
46
- typename Reference>
47
- struct join_iterator_base
48
- {
49
- typedef typename thrust::detail::remove_reference<Reference>::type value_type;
50
-
51
- typedef typename thrust::iterator_system<RandomAccessIterator1>::type system1;
52
- typedef typename thrust::iterator_system<RandomAccessIterator2>::type system2;
53
- typedef typename thrust::detail::minimum_system<system1,system2>::type system;
54
-
55
- typedef thrust::iterator_adaptor<
56
- join_iterator<RandomAccessIterator1,RandomAccessIterator2,Difference,Reference>,
57
- thrust::counting_iterator<Difference>,
58
- value_type,
59
- system,
60
- thrust::random_access_traversal_tag,
61
- Reference,
62
- Difference
63
- > type;
64
- }; // end join_iterator_base
65
-
66
-
67
- } // end join_iterator_detail
68
-
69
-
70
- template<typename RandomAccessIterator1,
71
- typename RandomAccessIterator2,
72
- typename Difference = typename thrust::iterator_difference<RandomAccessIterator1>::type,
73
- typename Reference = typename thrust::iterator_value<RandomAccessIterator1>::type>
74
- class join_iterator
75
- : public join_iterator_detail::join_iterator_base<RandomAccessIterator1, RandomAccessIterator2, Difference, Reference>::type
76
- {
77
- private:
78
- typedef typename join_iterator_detail::join_iterator_base<RandomAccessIterator1, RandomAccessIterator2, Difference, Reference>::type super_t;
79
- typedef typename super_t::difference_type size_type;
80
-
81
- public:
82
- inline __host__ __device__
83
- join_iterator(RandomAccessIterator1 first1, size_type n, RandomAccessIterator2 first2)
84
- : super_t(thrust::counting_iterator<size_type>(0)),
85
- m_n1(n),
86
- m_iter1(first1),
87
- m_iter2(first2 - m_n1)
88
- {}
89
-
90
-
91
- inline __host__ __device__
92
- join_iterator(const join_iterator &other)
93
- : super_t(other),
94
- m_n1(other.m_n1),
95
- m_iter1(other.m_iter1),
96
- m_iter2(other.m_iter2)
97
- {}
98
-
99
-
100
- private:
101
- friend class thrust::iterator_core_access;
102
-
103
- // MSVC 2013 and 2015 incorrectly warn about returning a reference to
104
- // a local/temporary here.
105
- // See goo.gl/LELTNp
106
- THRUST_DISABLE_MSVC_WARNING_BEGIN(4172)
107
-
108
- __host__ __device__
109
- typename super_t::reference dereference() const
110
- {
111
- size_type i = *super_t::base();
112
- return (i < m_n1) ? m_iter1[i] : static_cast<typename super_t::reference>(m_iter2[i]);
113
- } // end dereference()
114
-
115
- THRUST_DISABLE_MSVC_WARNING_END(4172)
116
-
117
-
118
- size_type m_n1;
119
- RandomAccessIterator1 m_iter1;
120
- RandomAccessIterator2 m_iter2;
121
- }; // end join_iterator
122
-
123
-
124
- template<typename RandomAccessIterator1, typename Size, typename RandomAccessIterator2>
125
- __host__ __device__
126
- join_iterator<RandomAccessIterator1,RandomAccessIterator2,Size> make_join_iterator(RandomAccessIterator1 first1, Size n1, RandomAccessIterator2 first2)
127
- {
128
- return join_iterator<RandomAccessIterator1,RandomAccessIterator2,Size>(first1, n1, first2);
129
- } // end make_join_iterator()
130
-
131
-
132
- } // end detail
133
- } // end thrust
134
-
 
spaces/CVPR/transfiner/demo/predictor.py DELETED
@@ -1,220 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import atexit
3
- import bisect
4
- import multiprocessing as mp
5
- from collections import deque
6
- import cv2
7
- import torch
8
-
9
- from detectron2.data import MetadataCatalog
10
- from detectron2.engine.defaults import DefaultPredictor
11
- from detectron2.utils.video_visualizer import VideoVisualizer
12
- from detectron2.utils.visualizer import ColorMode, Visualizer
13
-
14
-
15
- class VisualizationDemo(object):
16
- def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
17
- """
18
- Args:
19
- cfg (CfgNode):
20
- instance_mode (ColorMode):
21
- parallel (bool): whether to run the model in different processes from visualization.
22
- Useful since the visualization logic can be slow.
23
- """
24
- self.metadata = MetadataCatalog.get(
25
- cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
26
- )
27
- self.cpu_device = torch.device("cpu")
28
- self.instance_mode = instance_mode
29
-
30
- self.parallel = parallel
31
- if parallel:
32
- num_gpu = torch.cuda.device_count()
33
- self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
34
- else:
35
- self.predictor = DefaultPredictor(cfg)
36
-
37
- def run_on_image(self, image):
38
- """
39
- Args:
40
- image (np.ndarray): an image of shape (H, W, C) (in BGR order).
41
- This is the format used by OpenCV.
42
-
43
- Returns:
44
- predictions (dict): the output of the model.
45
- vis_output (VisImage): the visualized image output.
46
- """
47
- vis_output = None
48
- predictions = self.predictor(image)
49
- # Convert image from OpenCV BGR format to Matplotlib RGB format.
50
- image = image[:, :, ::-1]
51
- visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
52
- if "panoptic_seg" in predictions:
53
- panoptic_seg, segments_info = predictions["panoptic_seg"]
54
- vis_output = visualizer.draw_panoptic_seg_predictions(
55
- panoptic_seg.to(self.cpu_device), segments_info
56
- )
57
- else:
58
- if "sem_seg" in predictions:
59
- vis_output = visualizer.draw_sem_seg(
60
- predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
61
- )
62
- if "instances" in predictions:
63
- instances = predictions["instances"].to(self.cpu_device)
64
- vis_output = visualizer.draw_instance_predictions(predictions=instances)
65
-
66
- return predictions, vis_output
67
-
68
- def _frame_from_video(self, video):
69
- while video.isOpened():
70
- success, frame = video.read()
71
- if success:
72
- yield frame
73
- else:
74
- break
75
-
76
- def run_on_video(self, video):
77
- """
78
- Visualizes predictions on frames of the input video.
79
-
80
- Args:
81
- video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
82
- either a webcam or a video file.
83
-
84
- Yields:
85
- ndarray: BGR visualizations of each video frame.
86
- """
87
- video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
88
-
89
- def process_predictions(frame, predictions):
90
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
91
- if "panoptic_seg" in predictions:
92
- panoptic_seg, segments_info = predictions["panoptic_seg"]
93
- vis_frame = video_visualizer.draw_panoptic_seg_predictions(
94
- frame, panoptic_seg.to(self.cpu_device), segments_info
95
- )
96
- elif "instances" in predictions:
97
- predictions = predictions["instances"].to(self.cpu_device)
98
- vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
99
- elif "sem_seg" in predictions:
100
- vis_frame = video_visualizer.draw_sem_seg(
101
- frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
102
- )
103
-
104
- # Converts Matplotlib RGB format to OpenCV BGR format
105
- vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
106
- return vis_frame
107
-
108
- frame_gen = self._frame_from_video(video)
109
- if self.parallel:
110
- buffer_size = self.predictor.default_buffer_size
111
-
112
- frame_data = deque()
113
-
114
- for cnt, frame in enumerate(frame_gen):
115
- frame_data.append(frame)
116
- self.predictor.put(frame)
117
-
118
- if cnt >= buffer_size:
119
- frame = frame_data.popleft()
120
- predictions = self.predictor.get()
121
- yield process_predictions(frame, predictions)
122
-
123
- while len(frame_data):
124
- frame = frame_data.popleft()
125
- predictions = self.predictor.get()
126
- yield process_predictions(frame, predictions)
127
- else:
128
- for frame in frame_gen:
129
- yield process_predictions(frame, self.predictor(frame))
130
-
131
-
132
- class AsyncPredictor:
133
- """
134
- A predictor that runs the model asynchronously, possibly on more than one GPU.
135
- Because rendering the visualization takes a considerable amount of time,
136
- this helps improve throughput a little bit when rendering videos.
137
- """
138
-
139
- class _StopToken:
140
- pass
141
-
142
- class _PredictWorker(mp.Process):
143
- def __init__(self, cfg, task_queue, result_queue):
144
- self.cfg = cfg
145
- self.task_queue = task_queue
146
- self.result_queue = result_queue
147
- super().__init__()
148
-
149
- def run(self):
150
- predictor = DefaultPredictor(self.cfg)
151
-
152
- while True:
153
- task = self.task_queue.get()
154
- if isinstance(task, AsyncPredictor._StopToken):
155
- break
156
- idx, data = task
157
- result = predictor(data)
158
- self.result_queue.put((idx, result))
159
-
160
- def __init__(self, cfg, num_gpus: int = 1):
161
- """
162
- Args:
163
- cfg (CfgNode):
164
- num_gpus (int): if 0, will run on CPU
165
- """
166
- num_workers = max(num_gpus, 1)
167
- self.task_queue = mp.Queue(maxsize=num_workers * 3)
168
- self.result_queue = mp.Queue(maxsize=num_workers * 3)
169
- self.procs = []
170
- for gpuid in range(max(num_gpus, 1)):
171
- cfg = cfg.clone()
172
- cfg.defrost()
173
- cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
174
- self.procs.append(
175
- AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
176
- )
177
-
178
- self.put_idx = 0
179
- self.get_idx = 0
180
- self.result_rank = []
181
- self.result_data = []
182
-
183
- for p in self.procs:
184
- p.start()
185
- atexit.register(self.shutdown)
186
-
187
- def put(self, image):
188
- self.put_idx += 1
189
- self.task_queue.put((self.put_idx, image))
190
-
191
- def get(self):
192
- self.get_idx += 1 # the index needed for this request
193
- if len(self.result_rank) and self.result_rank[0] == self.get_idx:
194
- res = self.result_data[0]
195
- del self.result_data[0], self.result_rank[0]
196
- return res
197
-
198
- while True:
199
- # make sure the results are returned in the correct order
200
- idx, res = self.result_queue.get()
201
- if idx == self.get_idx:
202
- return res
203
- insert = bisect.bisect(self.result_rank, idx)
204
- self.result_rank.insert(insert, idx)
205
- self.result_data.insert(insert, res)
206
-
207
- def __len__(self):
208
- return self.put_idx - self.get_idx
209
-
210
- def __call__(self, image):
211
- self.put(image)
212
- return self.get()
213
-
214
- def shutdown(self):
215
- for _ in self.procs:
216
- self.task_queue.put(AsyncPredictor._StopToken())
217
-
218
- @property
219
- def default_buffer_size(self):
220
- return len(self.procs) * 5
 
 
spaces/Cat125/text-generator-v2/train.py DELETED
@@ -1,67 +0,0 @@
1
- import argparse
2
- import re
3
- from pprint import pprint
4
-
5
- from tqdm import tqdm
6
-
7
- from classes import Token
8
- from datamanager import get_data, get_text, models, set_data, set_data_v3
9
-
10
- turbo = False
11
-
12
- def normalize_text(sentence):
13
- sentence = sentence.strip()
14
- sentence = re.sub(r'(\s+|\n+)', ' ', sentence)
15
- sentence = re.sub(r'\s+([.,!?;:])', r'\1', sentence)
16
- sentence = re.sub(r'([.,!?;:])(\S)', r'\1 \2', sentence)
17
- sentence = re.sub(r'\s+\'|\'\s+', '\'', sentence)
18
- sentence = re.sub(r'\s+', ' ', sentence)
19
- return sentence
20
-
21
- def process_sentence(db3, sentence, text):
22
- words = sentence.strip().split()
23
- for i in range(len(words)):
24
- word = words[i].strip()
25
- prev_word = "" if i == 0 else words[i - 1]
26
- token = Token(word, prev_word, text, sentence, i == 0, turbo)
27
- if prev_word not in db3:
28
- db3[prev_word] = []
29
- db3[prev_word].append(token)
30
-
31
- def process_text(db3, text):
32
- sentences = re.findall(r'[^.!?]+[.!?]+', text)
33
- for sentence in tqdm(sentences, desc='Training', colour="green", unit="sen"):
34
- process_sentence(db3, sentence, text)
35
-
36
- def train(model_name):
37
- db3 = {}
38
- print(f'Rebuilding database for "{model_name}"...')
39
- text = get_text(model_name)
40
- text = normalize_text(text)
41
- process_text(db3, text)
42
- set_data_v3(model_name, db3)
43
- models[model_name]["db3"] = db3
44
-
45
- if __name__ == '__main__':
46
- parser = argparse.ArgumentParser(
47
- prog='Train',
48
- description='Training system for Text Generator v2')
49
- parser.add_argument('-r', '--rebuild', action='extend', nargs="+", type=str)
50
- parser.add_argument('-l', '--log', action='extend', nargs="+", type=str)
51
- parser.add_argument('-t', '--turbo', action='store_true')
52
- args = parser.parse_args()
53
-
54
- if args.rebuild:
55
- models_to_rebuild = args.rebuild
56
- if args.rebuild[0] in ('*', 'all'):
57
- models_to_rebuild = list(models.keys())
58
- for model in models_to_rebuild:
59
- if model not in models:
60
- raise ValueError("Model '%s' not found" % model)
61
- turbo = args.turbo
62
- train(model)
63
- if args.log:
64
- for model in args.log:
65
- if model not in models:
66
- raise ValueError("Model '%s' not found" % model)
67
- pprint(get_data(model))
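
From the shell this script is invoked as, e.g., `python train.py --rebuild all --turbo` (per the argparse setup above). The normalization step can also be sanity-checked in isolation (a sketch, assuming the file above is importable as `train` and its `datamanager` dependency is available):

# Hedged sketch -- the import is an assumption about the Space's layout.
from train import normalize_text

print(normalize_text("Hello ,  world !It's  fine"))
# -> "Hello, world! It's fine"  (whitespace collapsed, punctuation re-spaced)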
 
spaces/CorvaeOboro/gen_ability_icon/torch_utils/ops/conv2d_resample.py DELETED
@@ -1,156 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """2D convolution with optional up/downsampling."""
10
-
11
- import torch
12
-
13
- from .. import misc
14
- from . import conv2d_gradfix
15
- from . import upfirdn2d
16
- from .upfirdn2d import _parse_padding
17
- from .upfirdn2d import _get_filter_size
18
-
19
- #----------------------------------------------------------------------------
20
-
21
- def _get_weight_shape(w):
22
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
23
- shape = [int(sz) for sz in w.shape]
24
- misc.assert_shape(w, shape)
25
- return shape
26
-
27
- #----------------------------------------------------------------------------
28
-
29
- def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
30
- """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
31
- """
32
- out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
33
-
34
- # Flip weight if requested.
35
- if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
36
- w = w.flip([2, 3])
37
-
38
- # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
39
- # 1x1 kernel + memory_format=channels_last + less than 64 channels.
40
- if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
41
- if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
42
- if out_channels <= 4 and groups == 1:
43
- in_shape = x.shape
44
- x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
45
- x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
46
- else:
47
- x = x.to(memory_format=torch.contiguous_format)
48
- w = w.to(memory_format=torch.contiguous_format)
49
- x = conv2d_gradfix.conv2d(x, w, groups=groups)
50
- return x.to(memory_format=torch.channels_last)
51
-
52
- # Otherwise => execute using conv2d_gradfix.
53
- op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
54
- return op(x, w, stride=stride, padding=padding, groups=groups)
55
-
56
- #----------------------------------------------------------------------------
57
-
58
- @misc.profiled_function
59
- def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
60
- r"""2D convolution with optional up/downsampling.
61
-
62
- Padding is performed only once at the beginning, not between the operations.
63
-
64
- Args:
65
- x: Input tensor of shape
66
- `[batch_size, in_channels, in_height, in_width]`.
67
- w: Weight tensor of shape
68
- `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
69
- f: Low-pass filter for up/downsampling. Must be prepared beforehand by
70
- calling upfirdn2d.setup_filter(). None = identity (default).
71
- up: Integer upsampling factor (default: 1).
72
- down: Integer downsampling factor (default: 1).
73
- padding: Padding with respect to the upsampled image. Can be a single number
74
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
75
- (default: 0).
76
- groups: Split input channels into N groups (default: 1).
77
- flip_weight: False = convolution, True = correlation (default: True).
78
- flip_filter: False = convolution, True = correlation (default: False).
79
-
80
- Returns:
81
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
82
- """
83
- # Validate arguments.
84
- assert isinstance(x, torch.Tensor) and (x.ndim == 4)
85
- assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
86
- assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
87
- assert isinstance(up, int) and (up >= 1)
88
- assert isinstance(down, int) and (down >= 1)
89
- assert isinstance(groups, int) and (groups >= 1)
90
- out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
91
- fw, fh = _get_filter_size(f)
92
- px0, px1, py0, py1 = _parse_padding(padding)
93
-
94
- # Adjust padding to account for up/downsampling.
95
- if up > 1:
96
- px0 += (fw + up - 1) // 2
97
- px1 += (fw - up) // 2
98
- py0 += (fh + up - 1) // 2
99
- py1 += (fh - up) // 2
100
- if down > 1:
101
- px0 += (fw - down + 1) // 2
102
- px1 += (fw - down) // 2
103
- py0 += (fh - down + 1) // 2
104
- py1 += (fh - down) // 2
105
-
106
- # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
107
- if kw == 1 and kh == 1 and (down > 1 and up == 1):
108
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
109
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
110
- return x
111
-
112
- # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
113
- if kw == 1 and kh == 1 and (up > 1 and down == 1):
114
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
115
- x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
116
- return x
117
-
118
- # Fast path: downsampling only => use strided convolution.
119
- if down > 1 and up == 1:
120
- x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
121
- x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
122
- return x
123
-
124
- # Fast path: upsampling with optional downsampling => use transpose strided convolution.
125
- if up > 1:
126
- if groups == 1:
127
- w = w.transpose(0, 1)
128
- else:
129
- w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
130
- w = w.transpose(1, 2)
131
- w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
132
- px0 -= kw - 1
133
- px1 -= kw - up
134
- py0 -= kh - 1
135
- py1 -= kh - up
136
- pxt = max(min(-px0, -px1), 0)
137
- pyt = max(min(-py0, -py1), 0)
138
- x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
139
- x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
140
- if down > 1:
141
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
142
- return x
143
-
144
- # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
145
- if up == 1 and down == 1:
146
- if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
147
- return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
148
-
149
- # Fallback: Generic reference implementation.
150
- x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
151
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
152
- if down > 1:
153
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
154
- return x
155
-
156
- #----------------------------------------------------------------------------
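
A minimal sketch of calling the resampling convolution above (a sketch: the package import is hypothetical, and it assumes the sibling `upfirdn2d` module referenced by the file's own imports is in scope):

# Hedged usage sketch -- shapes are illustrative.
import torch
# from torch_utils.ops.conv2d_resample import conv2d_resample  # hypothetical path
# from torch_utils.ops import upfirdn2d                        # hypothetical path

x = torch.randn(1, 64, 32, 32)            # [batch, in_channels, H, W]
w = torch.randn(128, 64, 3, 3)            # [out_channels, in_channels//groups, kh, kw]
f = upfirdn2d.setup_filter([1, 3, 3, 1])  # low-pass filter, prepared as the docstring requires
y = conv2d_resample(x, w, f=f, up=2, padding=1)
print(y.shape)                            # expected: torch.Size([1, 128, 64, 64])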
 
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/models/diffusion/dpm_solver/dpm_solver.py DELETED
@@ -1,1154 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- import math
4
- from tqdm import tqdm
5
-
6
-
7
- class NoiseScheduleVP:
8
- def __init__(
9
- self,
10
- schedule='discrete',
11
- betas=None,
12
- alphas_cumprod=None,
13
- continuous_beta_0=0.1,
14
- continuous_beta_1=20.,
15
- ):
16
- """Create a wrapper class for the forward SDE (VP type).
17
- ***
18
- Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
- We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
20
- ***
21
- The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
22
- We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
23
- Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
24
- log_alpha_t = self.marginal_log_mean_coeff(t)
25
- sigma_t = self.marginal_std(t)
26
- lambda_t = self.marginal_lambda(t)
27
- Moreover, as lambda(t) is an invertible function, we also support its inverse function:
28
- t = self.inverse_lambda(lambda_t)
29
- ===============================================================
30
- We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
31
- 1. For discrete-time DPMs:
32
- For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
33
- t_i = (i + 1) / N
34
- e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
35
- We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
36
- Args:
37
- betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
38
- alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
39
- Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
40
- **Important**: Please pay special attention for the args for `alphas_cumprod`:
41
- The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
42
- q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
43
- Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
44
- alpha_{t_n} = \sqrt{\hat{alpha_n}},
45
- and
46
- log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
47
- 2. For continuous-time DPMs:
48
- We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
49
- schedule are the default settings in DDPM and improved-DDPM:
50
- Args:
51
- beta_min: A `float` number. The smallest beta for the linear schedule.
52
- beta_max: A `float` number. The largest beta for the linear schedule.
53
- cosine_s: A `float` number. The hyperparameter in the cosine schedule.
54
- cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
55
- T: A `float` number. The ending time of the forward process.
56
- ===============================================================
57
- Args:
58
- schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
59
- 'linear' or 'cosine' for continuous-time DPMs.
60
- Returns:
61
- A wrapper object of the forward SDE (VP type).
62
-
63
- ===============================================================
64
- Example:
65
- # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
66
- >>> ns = NoiseScheduleVP('discrete', betas=betas)
67
- # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
68
- >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
69
- # For continuous-time DPMs (VPSDE), linear schedule:
70
- >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
71
- """
72
-
73
- if schedule not in ['discrete', 'linear', 'cosine']:
74
- raise ValueError(
75
- "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
76
- schedule))
77
-
78
- self.schedule = schedule
79
- if schedule == 'discrete':
80
- if betas is not None:
81
- log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
82
- else:
83
- assert alphas_cumprod is not None
84
- log_alphas = 0.5 * torch.log(alphas_cumprod)
85
- self.total_N = len(log_alphas)
86
- self.T = 1.
87
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
88
- self.log_alpha_array = log_alphas.reshape((1, -1,))
89
- else:
90
- self.total_N = 1000
91
- self.beta_0 = continuous_beta_0
92
- self.beta_1 = continuous_beta_1
93
- self.cosine_s = 0.008
94
- self.cosine_beta_max = 999.
95
- self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
96
- 1. + self.cosine_s) / math.pi - self.cosine_s
97
- self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
98
- self.schedule = schedule
99
- if schedule == 'cosine':
100
- # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
101
- # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
102
- self.T = 0.9946
103
- else:
104
- self.T = 1.
105
-
106
- def marginal_log_mean_coeff(self, t):
107
- """
108
- Compute log(alpha_t) of a given continuous-time label t in [0, T].
109
- """
110
- if self.schedule == 'discrete':
111
- return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
112
- self.log_alpha_array.to(t.device)).reshape((-1))
113
- elif self.schedule == 'linear':
114
- return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
115
- elif self.schedule == 'cosine':
116
- log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
117
- log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
118
- return log_alpha_t
119
-
120
- def marginal_alpha(self, t):
121
- """
122
- Compute alpha_t of a given continuous-time label t in [0, T].
123
- """
124
- return torch.exp(self.marginal_log_mean_coeff(t))
125
-
126
- def marginal_std(self, t):
127
- """
128
- Compute sigma_t of a given continuous-time label t in [0, T].
129
- """
130
- return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
131
-
132
- def marginal_lambda(self, t):
133
- """
134
- Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
135
- """
136
- log_mean_coeff = self.marginal_log_mean_coeff(t)
137
- log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
138
- return log_mean_coeff - log_std
139
-
140
- def inverse_lambda(self, lamb):
141
- """
142
- Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
143
- """
144
- if self.schedule == 'linear':
145
- tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
146
- Delta = self.beta_0 ** 2 + tmp
147
- return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
148
- elif self.schedule == 'discrete':
149
- log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
150
- t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
151
- torch.flip(self.t_array.to(lamb.device), [1]))
152
- return t.reshape((-1,))
153
- else:
154
- log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
155
- t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
156
- 1. + self.cosine_s) / math.pi - self.cosine_s
157
- t = t_fn(log_alpha)
158
- return t
159
-
160
-
161
- def model_wrapper(
162
- model,
163
- noise_schedule,
164
- model_type="noise",
165
- model_kwargs={},
166
- guidance_type="uncond",
167
- condition=None,
168
- unconditional_condition=None,
169
- guidance_scale=1.,
170
- classifier_fn=None,
171
- classifier_kwargs={},
172
- ):
173
- """Create a wrapper function for the noise prediction model.
174
- DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
175
- first wrap the model function into a noise prediction model that accepts the continuous time as input.
176
- We support four types of the diffusion model by setting `model_type`:
177
- 1. "noise": noise prediction model. (Trained by predicting noise).
178
- 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
179
- 3. "v": velocity prediction model. (Trained by predicting the velocity).
180
- The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
181
- [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
182
- arXiv preprint arXiv:2202.00512 (2022).
183
- [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
184
- arXiv preprint arXiv:2210.02303 (2022).
185
-
186
- 4. "score": marginal score function. (Trained by denoising score matching).
187
- Note that the score function and the noise prediction model follows a simple relationship:
188
- ```
189
- noise(x_t, t) = -sigma_t * score(x_t, t)
190
- ```
191
- We support three types of guided sampling by DPMs by setting `guidance_type`:
192
- 1. "uncond": unconditional sampling by DPMs.
193
- The input `model` has the following format:
194
- ``
195
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
196
- ``
197
- 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
198
- The input `model` has the following format:
199
- ``
200
- model(x, t_input, **model_kwargs) -> noise | x_start | v | score
201
- ``
202
- The input `classifier_fn` has the following format:
203
- ``
204
- classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
205
- ``
206
- [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
207
- in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
208
- 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
209
- The input `model` has the following format:
210
- ``
211
- model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
212
- ``
213
- And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
214
- [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
215
- arXiv preprint arXiv:2207.12598 (2022).
216
-
217
- The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
218
- or continuous-time labels (i.e. epsilon to T).
219
- We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
220
- ``
221
- def model_fn(x, t_continuous) -> noise:
222
- t_input = get_model_input_time(t_continuous)
223
- return noise_pred(model, x, t_input, **model_kwargs)
224
- ``
225
- where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
226
- ===============================================================
227
- Args:
228
- model: A diffusion model with the corresponding format described above.
229
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
230
- model_type: A `str`. The parameterization type of the diffusion model.
231
- "noise" or "x_start" or "v" or "score".
232
- model_kwargs: A `dict`. A dict for the other inputs of the model function.
233
- guidance_type: A `str`. The type of the guidance for sampling.
234
- "uncond" or "classifier" or "classifier-free".
235
- condition: A pytorch tensor. The condition for the guided sampling.
236
- Only used for "classifier" or "classifier-free" guidance type.
237
- unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
238
- Only used for "classifier-free" guidance type.
239
- guidance_scale: A `float`. The scale for the guided sampling.
240
- classifier_fn: A classifier function. Only used for the classifier guidance.
241
- classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
242
- Returns:
243
- A noise prediction model that accepts the noised data and the continuous time as the inputs.
244
- """
245
-
246
- def get_model_input_time(t_continuous):
247
- """
248
- Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
249
- For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
250
- For continuous-time DPMs, we just use `t_continuous`.
251
- """
252
- if noise_schedule.schedule == 'discrete':
253
- return (t_continuous - 1. / noise_schedule.total_N) * 1000.
254
- else:
255
- return t_continuous
256
-
257
- def noise_pred_fn(x, t_continuous, cond=None):
258
- if t_continuous.reshape((-1,)).shape[0] == 1:
259
- t_continuous = t_continuous.expand((x.shape[0]))
260
- t_input = get_model_input_time(t_continuous)
261
- if cond is None:
262
- output = model(x, t_input, **model_kwargs)
263
- else:
264
- output = model(x, t_input, cond, **model_kwargs)
265
- if model_type == "noise":
266
- return output
267
- elif model_type == "x_start":
268
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
269
- dims = x.dim()
270
- return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
271
- elif model_type == "v":
272
- alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
273
- dims = x.dim()
274
- return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
275
- elif model_type == "score":
276
- sigma_t = noise_schedule.marginal_std(t_continuous)
277
- dims = x.dim()
278
- return -expand_dims(sigma_t, dims) * output
279
-
280
- def cond_grad_fn(x, t_input):
281
- """
282
- Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
283
- """
284
- with torch.enable_grad():
285
- x_in = x.detach().requires_grad_(True)
286
- log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
287
- return torch.autograd.grad(log_prob.sum(), x_in)[0]
288
-
289
- def model_fn(x, t_continuous):
290
- """
291
- The noise prediction model function that is used for DPM-Solver.
292
- """
293
- if t_continuous.reshape((-1,)).shape[0] == 1:
294
- t_continuous = t_continuous.expand((x.shape[0]))
295
- if guidance_type == "uncond":
296
- return noise_pred_fn(x, t_continuous)
297
- elif guidance_type == "classifier":
298
- assert classifier_fn is not None
299
- t_input = get_model_input_time(t_continuous)
300
- cond_grad = cond_grad_fn(x, t_input)
301
- sigma_t = noise_schedule.marginal_std(t_continuous)
302
- noise = noise_pred_fn(x, t_continuous)
303
- return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
304
- elif guidance_type == "classifier-free":
305
- if guidance_scale == 1. or unconditional_condition is None:
306
- return noise_pred_fn(x, t_continuous, cond=condition)
307
- else:
308
- x_in = torch.cat([x] * 2)
309
- t_in = torch.cat([t_continuous] * 2)
310
- c_in = torch.cat([unconditional_condition, condition])
311
- noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
312
- return noise_uncond + guidance_scale * (noise - noise_uncond)
313
-
314
- assert model_type in ["noise", "x_start", "v"]
315
- assert guidance_type in ["uncond", "classifier", "classifier-free"]
316
- return model_fn
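
# --- Hedged usage sketch (editorial note, not part of the deleted file) ------
# `betas` and `model` below are placeholders for an existing discrete-time DPM
# and its epsilon-prediction network:
#
#   ns = NoiseScheduleVP('discrete', betas=betas)
#   model_fn = model_wrapper(model, ns, model_type="noise", guidance_type="uncond")
#   eps = model_fn(x_t, t_continuous)  # t_continuous in (1/N, 1]
# ------------------------------------------------------------------------------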
317
-
318
-
319
- class DPM_Solver:
320
- def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
321
- """Construct a DPM-Solver.
322
- We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
323
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
324
- If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
325
- In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
326
- The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
327
- Args:
328
- model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
329
- ``
330
- def model_fn(x, t_continuous):
331
- return noise
332
- ``
333
- noise_schedule: A noise schedule object, such as NoiseScheduleVP.
334
- predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
335
- thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
336
- max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
337
-
338
- [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
339
- """
340
- self.model = model_fn
341
- self.noise_schedule = noise_schedule
342
- self.predict_x0 = predict_x0
343
- self.thresholding = thresholding
344
- self.max_val = max_val
345
-
346
- def noise_prediction_fn(self, x, t):
347
- """
348
- Return the noise prediction model.
349
- """
350
- return self.model(x, t)
351
-
352
- def data_prediction_fn(self, x, t):
353
- """
354
- Return the data prediction model (with thresholding).
355
- """
356
- noise = self.noise_prediction_fn(x, t)
357
- dims = x.dim()
358
- alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
359
- x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
360
- if self.thresholding:
361
- p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
362
- s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
363
- s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
364
- x0 = torch.clamp(x0, -s, s) / s
365
- return x0
366
-
367
- def model_fn(self, x, t):
368
- """
369
- Convert the model to the noise prediction model or the data prediction model.
370
- """
371
- if self.predict_x0:
372
- return self.data_prediction_fn(x, t)
373
- else:
374
- return self.noise_prediction_fn(x, t)
375
-
376
- def get_time_steps(self, skip_type, t_T, t_0, N, device):
377
- """Compute the intermediate time steps for sampling.
378
- Args:
379
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
380
- - 'logSNR': uniform logSNR for the time steps.
381
- - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
382
- - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
383
- t_T: A `float`. The starting time of the sampling (default is T).
384
- t_0: A `float`. The ending time of the sampling (default is epsilon).
385
- N: An `int`. The total number of time steps.
386
- device: A torch device.
387
- Returns:
388
- A pytorch tensor of the time steps, with the shape (N + 1,).
389
- """
390
- if skip_type == 'logSNR':
391
- lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
392
- lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
393
- logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
394
- return self.noise_schedule.inverse_lambda(logSNR_steps)
395
- elif skip_type == 'time_uniform':
396
- return torch.linspace(t_T, t_0, N + 1).to(device)
397
- elif skip_type == 'time_quadratic':
398
- t_order = 2
399
- t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
400
- return t
401
- else:
402
- raise ValueError(
403
- "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
404
-
405
- def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
406
- """
407
- Get the order of each step for sampling by the singlestep DPM-Solver.
408
- We combine DPM-Solver-1, 2, and 3 to use all of the function evaluations; this combination is called "DPM-Solver-fast".
409
- Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
410
- - If order == 1:
411
- We take `steps` of DPM-Solver-1 (i.e. DDIM).
412
- - If order == 2:
413
- - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
414
- - If steps % 2 == 0, we use K steps of DPM-Solver-2.
415
- - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
416
- - If order == 3:
417
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
418
- - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
419
- - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
420
- - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
421
- ============================================
422
- Args:
423
- order: An `int`. The max order for the solver (2 or 3).
424
- steps: An `int`. The total number of function evaluations (NFE).
425
- skip_type: A `str`. The type for the spacing of the time steps. We support three types:
426
- - 'logSNR': uniform logSNR for the time steps.
427
- - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
428
- - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
429
- t_T: A `float`. The starting time of the sampling (default is T).
430
- t_0: A `float`. The ending time of the sampling (default is epsilon).
431
- device: A torch device.
432
- Returns:
433
- orders: A list of the solver order of each step.
434
- """
435
- if order == 3:
436
- K = steps // 3 + 1
437
- if steps % 3 == 0:
438
- orders = [3, ] * (K - 2) + [2, 1]
439
- elif steps % 3 == 1:
440
- orders = [3, ] * (K - 1) + [1]
441
- else:
442
- orders = [3, ] * (K - 1) + [2]
443
- elif order == 2:
444
- if steps % 2 == 0:
445
- K = steps // 2
446
- orders = [2, ] * K
447
- else:
448
- K = steps // 2 + 1
449
- orders = [2, ] * (K - 1) + [1]
450
- elif order == 1:
451
- K = 1
452
- orders = [1, ] * steps
453
- else:
454
- raise ValueError("'order' must be '1' or '2' or '3'.")
455
- if skip_type == 'logSNR':
456
- # To reproduce the results in DPM-Solver paper
457
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
458
- else:
459
- timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
460
- torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
461
- return timesteps_outer, orders
462
-
463
- def denoise_to_zero_fn(self, x, s):
464
- """
465
- Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
466
- """
467
- return self.data_prediction_fn(x, s)
468
-
469
- def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
470
- """
471
- DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
472
- Args:
473
- x: A pytorch tensor. The initial value at time `s`.
474
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
475
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
476
- model_s: A pytorch tensor. The model function evaluated at time `s`.
477
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
478
- return_intermediate: A `bool`. If true, also return the model value at time `s`.
479
- Returns:
480
- x_t: A pytorch tensor. The approximated solution at time `t`.
481
- """
482
- ns = self.noise_schedule
483
- dims = x.dim()
484
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
485
- h = lambda_t - lambda_s
486
- log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
487
- sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
488
- alpha_t = torch.exp(log_alpha_t)
489
-
490
- if self.predict_x0:
491
- phi_1 = torch.expm1(-h)
492
- if model_s is None:
493
- model_s = self.model_fn(x, s)
494
- x_t = (
495
- expand_dims(sigma_t / sigma_s, dims) * x
496
- - expand_dims(alpha_t * phi_1, dims) * model_s
497
- )
498
- if return_intermediate:
499
- return x_t, {'model_s': model_s}
500
- else:
501
- return x_t
502
- else:
503
- phi_1 = torch.expm1(h)
504
- if model_s is None:
505
- model_s = self.model_fn(x, s)
506
- x_t = (
507
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
508
- - expand_dims(sigma_t * phi_1, dims) * model_s
509
- )
510
- if return_intermediate:
511
- return x_t, {'model_s': model_s}
512
- else:
513
- return x_t
514
-
515
- def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
516
- solver_type='dpm_solver'):
517
- """
518
- Singlestep solver DPM-Solver-2 from time `s` to time `t`.
519
- Args:
520
- x: A pytorch tensor. The initial value at time `s`.
521
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
522
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
523
- r1: A `float`. The hyperparameter of the second-order solver.
524
- model_s: A pytorch tensor. The model function evaluated at time `s`.
525
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
526
- return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
527
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
528
- The type slightly impacts the performance. We recommend the 'dpm_solver' type.
529
- Returns:
530
- x_t: A pytorch tensor. The approximated solution at time `t`.
531
- """
532
- if solver_type not in ['dpm_solver', 'taylor']:
533
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
534
- if r1 is None:
535
- r1 = 0.5
536
- ns = self.noise_schedule
537
- dims = x.dim()
538
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
539
- h = lambda_t - lambda_s
540
- lambda_s1 = lambda_s + r1 * h
541
- s1 = ns.inverse_lambda(lambda_s1)
542
- log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
543
- s1), ns.marginal_log_mean_coeff(t)
544
- sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
545
- alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
546
-
547
- if self.predict_x0:
548
- phi_11 = torch.expm1(-r1 * h)
549
- phi_1 = torch.expm1(-h)
550
-
551
- if model_s is None:
552
- model_s = self.model_fn(x, s)
553
- x_s1 = (
554
- expand_dims(sigma_s1 / sigma_s, dims) * x
555
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
556
- )
557
- model_s1 = self.model_fn(x_s1, s1)
558
- if solver_type == 'dpm_solver':
559
- x_t = (
560
- expand_dims(sigma_t / sigma_s, dims) * x
561
- - expand_dims(alpha_t * phi_1, dims) * model_s
562
- - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
563
- )
564
- elif solver_type == 'taylor':
565
- x_t = (
566
- expand_dims(sigma_t / sigma_s, dims) * x
567
- - expand_dims(alpha_t * phi_1, dims) * model_s
568
- + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
569
- model_s1 - model_s)
570
- )
571
- else:
572
- phi_11 = torch.expm1(r1 * h)
573
- phi_1 = torch.expm1(h)
574
-
575
- if model_s is None:
576
- model_s = self.model_fn(x, s)
577
- x_s1 = (
578
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
579
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
580
- )
581
- model_s1 = self.model_fn(x_s1, s1)
582
- if solver_type == 'dpm_solver':
583
- x_t = (
584
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
585
- - expand_dims(sigma_t * phi_1, dims) * model_s
586
- - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
587
- )
588
- elif solver_type == 'taylor':
589
- x_t = (
590
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
591
- - expand_dims(sigma_t * phi_1, dims) * model_s
592
- - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
593
- )
594
- if return_intermediate:
595
- return x_t, {'model_s': model_s, 'model_s1': model_s1}
596
- else:
597
- return x_t
598
-
599
- def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
600
- return_intermediate=False, solver_type='dpm_solver'):
601
- """
602
- Singlestep solver DPM-Solver-3 from time `s` to time `t`.
603
- Args:
604
- x: A pytorch tensor. The initial value at time `s`.
605
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
606
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
607
- r1: A `float`. The hyperparameter of the third-order solver.
608
- r2: A `float`. The hyperparameter of the third-order solver.
609
- model_s: A pytorch tensor. The model function evaluated at time `s`.
610
- If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
611
- model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
612
- If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
613
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
614
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
615
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
616
- Returns:
617
- x_t: A pytorch tensor. The approximated solution at time `t`.
618
- """
619
- if solver_type not in ['dpm_solver', 'taylor']:
620
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
621
- if r1 is None:
622
- r1 = 1. / 3.
623
- if r2 is None:
624
- r2 = 2. / 3.
625
- ns = self.noise_schedule
626
- dims = x.dim()
627
- lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
628
- h = lambda_t - lambda_s
629
- lambda_s1 = lambda_s + r1 * h
630
- lambda_s2 = lambda_s + r2 * h
631
- s1 = ns.inverse_lambda(lambda_s1)
632
- s2 = ns.inverse_lambda(lambda_s2)
633
- log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
634
- s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
635
- sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
636
- s2), ns.marginal_std(t)
637
- alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
638
-
639
- if self.predict_x0:
640
- phi_11 = torch.expm1(-r1 * h)
641
- phi_12 = torch.expm1(-r2 * h)
642
- phi_1 = torch.expm1(-h)
643
- phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
644
- phi_2 = phi_1 / h + 1.
645
- phi_3 = phi_2 / h - 0.5
646
-
647
- if model_s is None:
648
- model_s = self.model_fn(x, s)
649
- if model_s1 is None:
650
- x_s1 = (
651
- expand_dims(sigma_s1 / sigma_s, dims) * x
652
- - expand_dims(alpha_s1 * phi_11, dims) * model_s
653
- )
654
- model_s1 = self.model_fn(x_s1, s1)
655
- x_s2 = (
656
- expand_dims(sigma_s2 / sigma_s, dims) * x
657
- - expand_dims(alpha_s2 * phi_12, dims) * model_s
658
- + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
659
- )
660
- model_s2 = self.model_fn(x_s2, s2)
661
- if solver_type == 'dpm_solver':
662
- x_t = (
663
- expand_dims(sigma_t / sigma_s, dims) * x
664
- - expand_dims(alpha_t * phi_1, dims) * model_s
665
- + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
666
- )
667
- elif solver_type == 'taylor':
668
- D1_0 = (1. / r1) * (model_s1 - model_s)
669
- D1_1 = (1. / r2) * (model_s2 - model_s)
670
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
671
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
672
- x_t = (
673
- expand_dims(sigma_t / sigma_s, dims) * x
674
- - expand_dims(alpha_t * phi_1, dims) * model_s
675
- + expand_dims(alpha_t * phi_2, dims) * D1
676
- - expand_dims(alpha_t * phi_3, dims) * D2
677
- )
678
- else:
679
- phi_11 = torch.expm1(r1 * h)
680
- phi_12 = torch.expm1(r2 * h)
681
- phi_1 = torch.expm1(h)
682
- phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
683
- phi_2 = phi_1 / h - 1.
684
- phi_3 = phi_2 / h - 0.5
685
-
686
- if model_s is None:
687
- model_s = self.model_fn(x, s)
688
- if model_s1 is None:
689
- x_s1 = (
690
- expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
691
- - expand_dims(sigma_s1 * phi_11, dims) * model_s
692
- )
693
- model_s1 = self.model_fn(x_s1, s1)
694
- x_s2 = (
695
- expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
696
- - expand_dims(sigma_s2 * phi_12, dims) * model_s
697
- - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
698
- )
699
- model_s2 = self.model_fn(x_s2, s2)
700
- if solver_type == 'dpm_solver':
701
- x_t = (
702
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
703
- - expand_dims(sigma_t * phi_1, dims) * model_s
704
- - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
705
- )
706
- elif solver_type == 'taylor':
707
- D1_0 = (1. / r1) * (model_s1 - model_s)
708
- D1_1 = (1. / r2) * (model_s2 - model_s)
709
- D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
710
- D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
711
- x_t = (
712
- expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
713
- - expand_dims(sigma_t * phi_1, dims) * model_s
714
- - expand_dims(sigma_t * phi_2, dims) * D1
715
- - expand_dims(sigma_t * phi_3, dims) * D2
716
- )
717
-
718
- if return_intermediate:
719
- return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
720
- else:
721
- return x_t
722
-
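As a hedged sanity check (not from the original code), the `phi` coefficients in the `predict_x0` branch reduce to their Taylor limits as h -> 0: phi_1 ~ -h, phi_2 ~ h/2, phi_3 ~ -h/6.

import torch

h = torch.tensor(1e-3, dtype=torch.float64)
phi_1 = torch.expm1(-h)
phi_2 = phi_1 / h + 1.
phi_3 = phi_2 / h - 0.5
print(phi_1 / -h, phi_2 / (h / 2), phi_3 / (-h / 6))  # each ratio is ~1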
723
- def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
724
- """
725
- Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
726
- Args:
727
- x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
728
- model_prev_list: A list of pytorch tensors. The previously computed model values.
729
- t_prev_list: A list of pytorch tensors. The previous times; each has the shape (x.shape[0],).
730
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
731
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
732
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
733
- Returns:
734
- x_t: A pytorch tensor. The approximated solution at time `t`.
735
- """
736
- if solver_type not in ['dpm_solver', 'taylor']:
737
- raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
738
- ns = self.noise_schedule
739
- dims = x.dim()
740
- model_prev_1, model_prev_0 = model_prev_list
741
- t_prev_1, t_prev_0 = t_prev_list
742
- lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
743
- t_prev_0), ns.marginal_lambda(t)
744
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
745
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
746
- alpha_t = torch.exp(log_alpha_t)
747
-
748
- h_0 = lambda_prev_0 - lambda_prev_1
749
- h = lambda_t - lambda_prev_0
750
- r0 = h_0 / h
751
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
752
- if self.predict_x0:
753
- if solver_type == 'dpm_solver':
754
- x_t = (
755
- expand_dims(sigma_t / sigma_prev_0, dims) * x
756
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
757
- - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
758
- )
759
- elif solver_type == 'taylor':
760
- x_t = (
761
- expand_dims(sigma_t / sigma_prev_0, dims) * x
762
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
763
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
764
- )
765
- else:
766
- if solver_type == 'dpm_solver':
767
- x_t = (
768
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
769
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
770
- - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
771
- )
772
- elif solver_type == 'taylor':
773
- x_t = (
774
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
775
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
776
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
777
- )
778
- return x_t
779
-
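The correction term `D1_0` is a backward-difference estimate of the model's derivative in logSNR, scaled by the current step. A small illustrative check with a scalar toy model `g` (an assumption, not the real network):

import torch

g = lambda lam: torch.sin(lam)  # toy model value as a function of logSNR
lam_prev_1, lam_prev_0, lam_t = torch.tensor(0.0), torch.tensor(0.4), torch.tensor(1.0)
h_0, h = lam_prev_0 - lam_prev_1, lam_t - lam_prev_0
r0 = h_0 / h
D1_0 = (1. / r0) * (g(lam_prev_0) - g(lam_prev_1))
print(D1_0, h * torch.cos(lam_prev_0))  # both roughly h * g'(lam_prev_0)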
780
- def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
781
- """
782
- Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
783
- Args:
784
- x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
785
- model_prev_list: A list of pytorch tensors. The previously computed model values.
786
- t_prev_list: A list of pytorch tensors. The previous times; each has the shape (x.shape[0],).
787
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
788
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
789
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
790
- Returns:
791
- x_t: A pytorch tensor. The approximated solution at time `t`.
792
- """
793
- ns = self.noise_schedule
794
- dims = x.dim()
795
- model_prev_2, model_prev_1, model_prev_0 = model_prev_list
796
- t_prev_2, t_prev_1, t_prev_0 = t_prev_list
797
- lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
798
- t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
799
- log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
800
- sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
801
- alpha_t = torch.exp(log_alpha_t)
802
-
803
- h_1 = lambda_prev_1 - lambda_prev_2
804
- h_0 = lambda_prev_0 - lambda_prev_1
805
- h = lambda_t - lambda_prev_0
806
- r0, r1 = h_0 / h, h_1 / h
807
- D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
808
- D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
809
- D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
810
- D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
811
- if self.predict_x0:
812
- x_t = (
813
- expand_dims(sigma_t / sigma_prev_0, dims) * x
814
- - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
815
- + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
816
- - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
817
- )
818
- else:
819
- x_t = (
820
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
821
- - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
822
- - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
823
- - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
824
- )
825
- return x_t
826
-
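`D1` and `D2` above are scaled first- and second-order divided differences in logSNR. For a toy quadratic `g` (an illustrative assumption), `D2` recovers `h^2 * g''/2` exactly:

import torch

g = lambda lam: lam ** 2
lam2, lam1, lam0, lam_t = map(torch.tensor, (0.0, 0.3, 0.7, 1.2))
h_1, h_0, h = lam1 - lam2, lam0 - lam1, lam_t - lam0
r0, r1 = h_0 / h, h_1 / h
D1_0 = (1. / r0) * (g(lam0) - g(lam1))
D1_1 = (1. / r1) * (g(lam1) - g(lam2))
D2 = (1. / (r0 + r1)) * (D1_0 - D1_1)
print(D2, h ** 2)  # both 0.25: D2 == h^2 * g''/2 for g'' == 2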
827
- def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
828
- r2=None):
829
- """
830
- Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
831
- Args:
832
- x: A pytorch tensor. The initial value at time `s`.
833
- s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
834
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
835
- order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
836
- return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
837
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
838
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
839
- r1: A `float`. The hyperparameter of the second-order or third-order solver.
840
- r2: A `float`. The hyperparameter of the third-order solver.
841
- Returns:
842
- x_t: A pytorch tensor. The approximated solution at time `t`.
843
- """
844
- if order == 1:
845
- return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
846
- elif order == 2:
847
- return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
848
- solver_type=solver_type, r1=r1)
849
- elif order == 3:
850
- return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
851
- solver_type=solver_type, r1=r1, r2=r2)
852
- else:
853
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
854
-
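A hypothetical usage sketch (the names `solver`, `x`, `s`, `t` are illustrative assumptions; `solver` must already be a constructed DPM_Solver):

# x_t = solver.singlestep_dpm_solver_update(
#     x, s, t, order=2, solver_type='dpm_solver', r1=0.5)
# With r1/r2 left as None, the defaults 0.5 and (1/3, 2/3) are used.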
855
- def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
856
- """
857
- Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
858
- Args:
859
- x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
860
- model_prev_list: A list of pytorch tensors. The previously computed model values.
861
- t_prev_list: A list of pytorch tensors. The previous times; each has the shape (x.shape[0],).
862
- t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
863
- order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
864
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
865
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
866
- Returns:
867
- x_t: A pytorch tensor. The approximated solution at time `t`.
868
- """
869
- if order == 1:
870
- return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
871
- elif order == 2:
872
- return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
873
- elif order == 3:
874
- return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
875
- else:
876
- raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
877
-
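Correspondingly, a hypothetical multistep call (again assuming a constructed `solver` and previously cached model values; all names are illustrative):

# x_t = solver.multistep_dpm_solver_update(
#     x, model_prev_list, t_prev_list, t, order=2, solver_type='dpm_solver')
# model_prev_list / t_prev_list must hold the last `order` evaluations.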
878
- def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
879
- solver_type='dpm_solver'):
880
- """
881
- The adaptive step size solver based on singlestep DPM-Solver.
882
- Args:
883
- x: A pytorch tensor. The initial value at time `t_T`.
884
- order: An `int`. The (higher) order of the solver. We only support order == 2 or 3.
885
- t_T: A `float`. The starting time of the sampling (default is T).
886
- t_0: A `float`. The ending time of the sampling (default is epsilon).
887
- h_init: A `float`. The initial step size (for logSNR).
888
- atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
889
- rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
890
- theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
891
- t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
892
- current time and `t_0` is less than `t_err`. The default setting is 1e-5.
893
- solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
894
- The type slightly impacts the performance. We recommend using the 'dpm_solver' type.
895
- Returns:
896
- x_0: A pytorch tensor. The approximated solution at time `t_0`.
897
- [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
898
- """
899
- ns = self.noise_schedule
900
- s = t_T * torch.ones((x.shape[0],)).to(x)
901
- lambda_s = ns.marginal_lambda(s)
902
- lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
903
- h = h_init * torch.ones_like(s).to(x)
904
- x_prev = x
905
- nfe = 0
906
- if order == 2:
907
- r1 = 0.5
908
- lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
909
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
910
- solver_type=solver_type,
911
- **kwargs)
912
- elif order == 3:
913
- r1, r2 = 1. / 3., 2. / 3.
914
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
915
- return_intermediate=True,
916
- solver_type=solver_type)
917
- higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
918
- solver_type=solver_type,
919
- **kwargs)
920
- else:
921
- raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
922
- while torch.abs(s - t_0).mean() > t_err:
923
- t = ns.inverse_lambda(lambda_s + h)
924
- x_lower, lower_noise_kwargs = lower_update(x, s, t)
925
- x_higher = higher_update(x, s, t, **lower_noise_kwargs)
926
- delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
927
- norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
928
- E = norm_fn((x_higher - x_lower) / delta).max()
929
- if torch.all(E <= 1.):
930
- x = x_higher
931
- s = t
932
- x_prev = x_lower
933
- lambda_s = ns.marginal_lambda(s)
934
- h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
935
- nfe += order
936
- print('adaptive solver nfe', nfe)
937
- return x
938
-
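A minimal, self-contained sketch (not from the original file) of the step-size controller used above: the next step scales by `theta * E**(-1/order)`, growing when the error estimate `E` is below 1 and shrinking when it exceeds 1.

import torch

theta, order = 0.9, 3
for E in (0.25, 1.0, 4.0):
    factor = theta * torch.float_power(torch.tensor(E), -1. / order).float()
    print(E, float(factor))  # 0.25 -> ~1.43 (grow), 4.0 -> ~0.57 (shrink)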
939
- def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
940
- method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
941
- atol=0.0078, rtol=0.05,
942
- ):
943
- """
944
- Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
945
- =====================================================
946
- We support the following algorithms for both noise prediction model and data prediction model:
947
- - 'singlestep':
948
- Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
949
- We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
950
- The total number of function evaluations (NFE) == `steps`.
951
- Given a fixed NFE == `steps`, the sampling procedure is:
952
- - If `order` == 1:
953
- - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
954
- - If `order` == 2:
955
- - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
956
- - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
957
- - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
958
- - If `order` == 3:
959
- - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
960
- - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
961
- - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
962
- - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
963
- - 'multistep':
964
- Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
965
- We initialize the first `order` values by lower order multistep solvers.
966
- Given a fixed NFE == `steps`, the sampling procedure is:
967
- Denote K = steps.
968
- - If `order` == 1:
969
- - We use K steps of DPM-Solver-1 (i.e. DDIM).
970
- - If `order` == 2:
971
- - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
972
- - If `order` == 3:
973
- - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
974
- - 'singlestep_fixed':
975
- Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
976
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
977
- - 'adaptive':
978
- Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
979
- We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
980
- You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
981
- (NFE) and the sample quality.
982
- - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
983
- - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
984
- =====================================================
985
- Some advice on choosing the algorithm:
986
- - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
987
- Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
988
- e.g.
989
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
990
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
991
- skip_type='time_uniform', method='singlestep')
992
- - For **guided sampling with large guidance scale** by DPMs:
993
- Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
994
- e.g.
995
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
996
- >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
997
- skip_type='time_uniform', method='multistep')
998
- We support three types of `skip_type`:
999
- 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
1000
- 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1001
- - 'time_quadratic': quadratic time for the time steps.
1002
- =====================================================
1003
- Args:
1004
- x: A pytorch tensor. The initial value at time `t_start`
1005
- e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1006
- steps: An `int`. The total number of function evaluations (NFE).
1007
- t_start: A `float`. The starting time of the sampling.
1008
- If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1009
- t_end: A `float`. The ending time of the sampling.
1010
- If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1011
- e.g. if total_N == 1000, we have `t_end` == 1e-3.
1012
- For discrete-time DPMs:
1013
- - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1014
- For continuous-time DPMs:
1015
- - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1016
- order: An `int`. The order of DPM-Solver.
1017
- skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1018
- method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1019
- denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1020
- Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1021
- This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1022
- score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID
1023
- when sampling diffusion models by diffusion SDEs on low-resolution images
1024
- (such as CIFAR-10). However, we observed that this trick does not matter for
1025
- high-resolution images. As it needs an additional NFE, we do not recommend
1026
- it for high-resolution images.
1027
- lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1028
- Only valid for `method=multistep` and `steps < 15`. We empirically find that
1029
- this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
1030
- (especially for steps <= 10). So we recommend setting it to `True`.
1031
- solver_type: A `str`. The Taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
1032
- atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1033
- rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1034
- Returns:
1035
- x_end: A pytorch tensor. The approximated solution at time `t_end`.
1036
- """
1037
- t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1038
- t_T = self.noise_schedule.T if t_start is None else t_start
1039
- device = x.device
1040
- if method == 'adaptive':
1041
- with torch.no_grad():
1042
- x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
1043
- solver_type=solver_type)
1044
- elif method == 'multistep':
1045
- assert steps >= order
1046
- timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1047
- assert timesteps.shape[0] - 1 == steps
1048
- with torch.no_grad():
1049
- vec_t = timesteps[0].expand((x.shape[0]))
1050
- model_prev_list = [self.model_fn(x, vec_t)]
1051
- t_prev_list = [vec_t]
1052
- # Init the first `order` values by lower order multistep DPM-Solver.
1053
- for init_order in tqdm(range(1, order), desc="DPM init order"):
1054
- vec_t = timesteps[init_order].expand(x.shape[0])
1055
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
1056
- solver_type=solver_type)
1057
- model_prev_list.append(self.model_fn(x, vec_t))
1058
- t_prev_list.append(vec_t)
1059
- # Compute the remaining values by `order`-th order multistep DPM-Solver.
1060
- for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
1061
- vec_t = timesteps[step].expand(x.shape[0])
1062
- if lower_order_final and steps < 15:
1063
- step_order = min(order, steps + 1 - step)
1064
- else:
1065
- step_order = order
1066
- x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
1067
- solver_type=solver_type)
1068
- for i in range(order - 1):
1069
- t_prev_list[i] = t_prev_list[i + 1]
1070
- model_prev_list[i] = model_prev_list[i + 1]
1071
- t_prev_list[-1] = vec_t
1072
- # We do not need to evaluate the final model value.
1073
- if step < steps:
1074
- model_prev_list[-1] = self.model_fn(x, vec_t)
1075
- elif method in ['singlestep', 'singlestep_fixed']:
1076
- if method == 'singlestep':
1077
- timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
1078
- skip_type=skip_type,
1079
- t_T=t_T, t_0=t_0,
1080
- device=device)
1081
- elif method == 'singlestep_fixed':
1082
- K = steps // order
1083
- orders = [order, ] * K
1084
- timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1085
- for i, order in enumerate(orders):
1086
- t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
1087
- timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
1088
- N=order, device=device)
1089
- lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1090
- vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
1091
- h = lambda_inner[-1] - lambda_inner[0]
1092
- r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1093
- r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1094
- x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
1095
- if denoise_to_zero:
1096
- x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
1097
- return x
1098
-
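The order/steps bookkeeping for `method='singlestep'` described in the docstring can be reproduced with a small illustrative helper (an approximation written here from the docstring alone, not the repo's actual `get_orders_and_timesteps_for_singlestep_solver`):

def singlestep_orders(steps, order):
    # Mirrors the splitting rules in the docstring above.
    if order == 3:
        K = steps // 3 + 1
        if steps % 3 == 0:
            return [3] * (K - 2) + [2, 1]
        elif steps % 3 == 1:
            return [3] * (K - 1) + [1]
        return [3] * (K - 1) + [2]
    if order == 2:
        K = steps // 2 + steps % 2
        return [2] * K if steps % 2 == 0 else [2] * (K - 1) + [1]
    return [1] * steps

print(singlestep_orders(20, 3), sum(singlestep_orders(20, 3)))  # NFE sums to 20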
1099
-
1100
- #############################################################
1101
- # other utility functions
1102
- #############################################################
1103
-
1104
- def interpolate_fn(x, xp, yp):
1105
- """
1106
- A piecewise linear function y = f(x), using xp and yp as keypoints.
1107
- We implement f(x) in a differentiable way (i.e. applicable for autograd).
1108
- The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
1109
- Args:
1110
- x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1111
- xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1112
- yp: PyTorch tensor with shape [C, K].
1113
- Returns:
1114
- The function values f(x), with shape [N, C].
1115
- """
1116
- N, K = x.shape[0], xp.shape[1]
1117
- all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1118
- sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1119
- x_idx = torch.argmin(x_indices, dim=2)
1120
- cand_start_idx = x_idx - 1
1121
- start_idx = torch.where(
1122
- torch.eq(x_idx, 0),
1123
- torch.tensor(1, device=x.device),
1124
- torch.where(
1125
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1126
- ),
1127
- )
1128
- end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1129
- start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1130
- end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1131
- start_idx2 = torch.where(
1132
- torch.eq(x_idx, 0),
1133
- torch.tensor(0, device=x.device),
1134
- torch.where(
1135
- torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1136
- ),
1137
- )
1138
- y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1139
- start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1140
- end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1141
- cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1142
- return cand
1143
-
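A quick illustrative check of `interpolate_fn` (shapes follow the docstring; the values are arbitrary):

import torch

xp = torch.tensor([[0.0, 1.0, 2.0]])     # keypoint x values, shape [C, K]
yp = torch.tensor([[0.0, 10.0, 20.0]])   # keypoint y values, shape [C, K]
x = torch.tensor([[0.5], [1.5], [3.0]])  # queries, shape [N, C]
print(interpolate_fn(x, xp, yp))  # [[5.], [15.], [30.]]; x=3.0 extrapolates linearly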
1144
-
1145
- def expand_dims(v, dims):
1146
- """
1147
- Expand the tensor `v` to the dim `dims`.
1148
- Args:
1149
- `v`: a PyTorch tensor with shape [N].
1150
- `dim`: a `int`.
1151
- Returns:
1152
- a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1153
- """
1154
- return v[(...,) + (None,) * (dims - 1)]
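For example, `expand_dims` lets a per-sample scalar broadcast over an image batch:

import torch

v = torch.rand(4)             # one scalar per sample, shape [N]
x = torch.rand(4, 3, 32, 32)  # image batch
print(expand_dims(v, x.dim()).shape)        # torch.Size([4, 1, 1, 1])
print((expand_dims(v, x.dim()) * x).shape)  # broadcasts to [4, 3, 32, 32]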
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/freetypePen.py DELETED
@@ -1,458 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- """Pen to rasterize paths with FreeType."""
4
-
5
- __all__ = ["FreeTypePen"]
6
-
7
- import os
8
- import ctypes
9
- import platform
10
- import subprocess
11
- import collections
12
- import math
13
-
14
- import freetype
15
- from freetype.raw import FT_Outline_Get_Bitmap, FT_Outline_Get_BBox, FT_Outline_Get_CBox
16
- from freetype.ft_types import FT_Pos
17
- from freetype.ft_structs import FT_Vector, FT_BBox, FT_Bitmap, FT_Outline
18
- from freetype.ft_enums import (
19
- FT_OUTLINE_NONE,
20
- FT_OUTLINE_EVEN_ODD_FILL,
21
- FT_PIXEL_MODE_GRAY,
22
- FT_CURVE_TAG_ON,
23
- FT_CURVE_TAG_CONIC,
24
- FT_CURVE_TAG_CUBIC,
25
- )
26
- from freetype.ft_errors import FT_Exception
27
-
28
- from fontTools.pens.basePen import BasePen, PenError
29
- from fontTools.misc.roundTools import otRound
30
- from fontTools.misc.transform import Transform
31
-
32
- Contour = collections.namedtuple("Contour", ("points", "tags"))
33
-
34
-
35
- class FreeTypePen(BasePen):
36
- """Pen to rasterize paths with FreeType. Requires `freetype-py` module.
37
-
38
- Constructs ``FT_Outline`` from the paths, and renders it within a bitmap
39
- buffer.
40
-
41
- For ``array()`` and ``show()``, `numpy` and `matplotlib` must be installed.
42
- For ``image()``, `Pillow` is required. Each module is lazily loaded when the
43
- corresponding method is called.
44
-
45
- Args:
46
- glyphSet: a dictionary of drawable glyph objects keyed by name
47
- used to resolve component references in composite glyphs.
48
-
49
- :Examples:
50
- If `numpy` and `matplotlib` is available, the following code will
51
- show the glyph image of `fi` in a new window::
52
-
53
- from fontTools.ttLib import TTFont
54
- from fontTools.pens.freetypePen import FreeTypePen
55
- from fontTools.misc.transform import Offset
56
- pen = FreeTypePen(None)
57
- font = TTFont('SourceSansPro-Regular.otf')
58
- glyph = font.getGlyphSet()['fi']
59
- glyph.draw(pen)
60
- width, ascender, descender = glyph.width, font['OS/2'].usWinAscent, -font['OS/2'].usWinDescent
61
- height = ascender - descender
62
- pen.show(width=width, height=height, transform=Offset(0, -descender))
63
-
64
- Combining with `uharfbuzz`, you can typeset a chunk of glyphs in a pen::
65
-
66
- import uharfbuzz as hb
67
- from fontTools.pens.freetypePen import FreeTypePen
68
- from fontTools.pens.transformPen import TransformPen
69
- from fontTools.misc.transform import Offset
70
-
71
- en1, en2, ar, ja = 'Typesetting', 'Jeff', 'صف الحروف', 'たいぷせっと'
72
- for text, font_path, direction, typo_ascender, typo_descender, vhea_ascender, vhea_descender, contain, features in (
73
- (en1, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, False, {"kern": True, "liga": True}),
74
- (en2, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, True, {"kern": True, "liga": True}),
75
- (ar, 'NotoSansArabic-Regular.ttf', 'rtl', 1374, -738, None, None, False, {"kern": True, "liga": True}),
76
- (ja, 'NotoSansJP-Regular.otf', 'ltr', 880, -120, 500, -500, False, {"palt": True, "kern": True}),
77
- (ja, 'NotoSansJP-Regular.otf', 'ttb', 880, -120, 500, -500, False, {"vert": True, "vpal": True, "vkrn": True})
78
- ):
79
- blob = hb.Blob.from_file_path(font_path)
80
- face = hb.Face(blob)
81
- font = hb.Font(face)
82
- buf = hb.Buffer()
83
- buf.direction = direction
84
- buf.add_str(text)
85
- buf.guess_segment_properties()
86
- hb.shape(font, buf, features)
87
-
88
- x, y = 0, 0
89
- pen = FreeTypePen(None)
90
- for info, pos in zip(buf.glyph_infos, buf.glyph_positions):
91
- gid = info.codepoint
92
- transformed = TransformPen(pen, Offset(x + pos.x_offset, y + pos.y_offset))
93
- font.draw_glyph_with_pen(gid, transformed)
94
- x += pos.x_advance
95
- y += pos.y_advance
96
-
97
- offset, width, height = None, None, None
98
- if direction in ('ltr', 'rtl'):
99
- offset = (0, -typo_descender)
100
- width = x
101
- height = typo_ascender - typo_descender
102
- else:
103
- offset = (-vhea_descender, -y)
104
- width = vhea_ascender - vhea_descender
105
- height = -y
106
- pen.show(width=width, height=height, transform=Offset(*offset), contain=contain)
107
-
108
- For Jupyter Notebook, the rendered image will be displayed in a cell if
109
- you replace ``show()`` with ``image()`` in the examples.
110
- """
111
-
112
- def __init__(self, glyphSet):
113
- BasePen.__init__(self, glyphSet)
114
- self.contours = []
115
-
116
- def outline(self, transform=None, evenOdd=False):
117
- """Converts the current contours to ``FT_Outline``.
118
-
119
- Args:
120
- transform: An optional 6-tuple containing an affine transformation,
121
- or a ``Transform`` object from the ``fontTools.misc.transform``
122
- module.
123
- evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
124
- """
125
- transform = transform or Transform()
126
- if not hasattr(transform, "transformPoint"):
127
- transform = Transform(*transform)
128
- n_contours = len(self.contours)
129
- n_points = sum((len(contour.points) for contour in self.contours))
130
- points = []
131
- for contour in self.contours:
132
- for point in contour.points:
133
- point = transform.transformPoint(point)
134
- points.append(
135
- FT_Vector(
136
- FT_Pos(otRound(point[0] * 64)), FT_Pos(otRound(point[1] * 64))
137
- )
138
- )
139
- tags = []
140
- for contour in self.contours:
141
- for tag in contour.tags:
142
- tags.append(tag)
143
- contours = []
144
- contours_sum = 0
145
- for contour in self.contours:
146
- contours_sum += len(contour.points)
147
- contours.append(contours_sum - 1)
148
- flags = FT_OUTLINE_EVEN_ODD_FILL if evenOdd else FT_OUTLINE_NONE
149
- return FT_Outline(
150
- (ctypes.c_short)(n_contours),
151
- (ctypes.c_short)(n_points),
152
- (FT_Vector * n_points)(*points),
153
- (ctypes.c_ubyte * n_points)(*tags),
154
- (ctypes.c_short * n_contours)(*contours),
155
- (ctypes.c_int)(flags),
156
- )
157
-
158
- def buffer(
159
- self, width=None, height=None, transform=None, contain=False, evenOdd=False
160
- ):
161
- """Renders the current contours within a bitmap buffer.
162
-
163
- Args:
164
- width: Image width of the bitmap in pixels. If omitted, it
165
- automatically fits to the bounding box of the contours.
166
- height: Image height of the bitmap in pixels. If omitted, it
167
- automatically fits to the bounding box of the contours.
168
- transform: An optional 6-tuple containing an affine transformation,
169
- or a ``Transform`` object from the ``fontTools.misc.transform``
170
- module. The bitmap size is not affected by this matrix.
171
- contain: If ``True``, the image size will be automatically expanded
172
- so that it fits to the bounding box of the paths. Useful for
173
- rendering glyphs with negative sidebearings without clipping.
174
- evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
175
-
176
- Returns:
177
- A tuple of ``(buffer, size)``, where ``buffer`` is a ``bytes``
178
- object of the resulted bitmap and ``size`` is a 2-tuple of its
179
- dimension.
180
-
181
- :Notes:
182
- The image size should always be given explicitly if you need to get
183
- a proper glyph image. When ``width`` and ``height`` are omitted, it
184
- forcifully fits to the bounding box and the side bearings get
185
- cropped. If you pass ``0`` to both ``width`` and ``height`` and set
186
- ``contain`` to ``True``, it expands to the bounding box while
187
- maintaining the origin of the contours, meaning that LSB will be
188
- maintained but RSB won’t. The difference between the two becomes
189
- more obvious when rotate or skew transformation is applied.
190
-
191
- :Example:
192
- .. code-block::
193
-
194
- >> pen = FreeTypePen(None)
195
- >> glyph.draw(pen)
196
- >> buf, size = pen.buffer(width=500, height=1000)
197
- >> type(buf), len(buf), size
198
- (<class 'bytes'>, 500000, (500, 1000))
199
-
200
- """
201
- transform = transform or Transform()
202
- if not hasattr(transform, "transformPoint"):
203
- transform = Transform(*transform)
204
- contain_x, contain_y = contain or width is None, contain or height is None
205
- if contain_x or contain_y:
206
- dx, dy = transform.dx, transform.dy
207
- bbox = self.bbox
208
- p1, p2, p3, p4 = (
209
- transform.transformPoint((bbox[0], bbox[1])),
210
- transform.transformPoint((bbox[2], bbox[1])),
211
- transform.transformPoint((bbox[0], bbox[3])),
212
- transform.transformPoint((bbox[2], bbox[3])),
213
- )
214
- px, py = (p1[0], p2[0], p3[0], p4[0]), (p1[1], p2[1], p3[1], p4[1])
215
- if contain_x:
216
- if width is None:
217
- dx = dx - min(*px)
218
- width = max(*px) - min(*px)
219
- else:
220
- dx = dx - min(min(*px), 0.0)
221
- width = max(width, max(*px) - min(min(*px), 0.0))
222
- if contain_y:
223
- if height is None:
224
- dy = dy - min(*py)
225
- height = max(*py) - min(*py)
226
- else:
227
- dy = dy - min(min(*py), 0.0)
228
- height = max(height, max(*py) - min(min(*py), 0.0))
229
- transform = Transform(*transform[:4], dx, dy)
230
- width, height = math.ceil(width), math.ceil(height)
231
- buf = ctypes.create_string_buffer(width * height)
232
- bitmap = FT_Bitmap(
233
- (ctypes.c_int)(height),
234
- (ctypes.c_int)(width),
235
- (ctypes.c_int)(width),
236
- (ctypes.POINTER(ctypes.c_ubyte))(buf),
237
- (ctypes.c_short)(256),
238
- (ctypes.c_ubyte)(FT_PIXEL_MODE_GRAY),
239
- (ctypes.c_char)(0),
240
- (ctypes.c_void_p)(None),
241
- )
242
- outline = self.outline(transform=transform, evenOdd=evenOdd)
243
- err = FT_Outline_Get_Bitmap(
244
- freetype.get_handle(), ctypes.byref(outline), ctypes.byref(bitmap)
245
- )
246
- if err != 0:
247
- raise FT_Exception(err)
248
- return buf.raw, (width, height)
249
-
250
- def array(
251
- self, width=None, height=None, transform=None, contain=False, evenOdd=False
252
- ):
253
- """Returns the rendered contours as a numpy array. Requires `numpy`.
254
-
255
- Args:
256
- width: Image width of the bitmap in pixels. If omitted, it
257
- automatically fits to the bounding box of the contours.
258
- height: Image height of the bitmap in pixels. If omitted, it
259
- automatically fits to the bounding box of the contours.
260
- transform: An optional 6-tuple containing an affine transformation,
261
- or a ``Transform`` object from the ``fontTools.misc.transform``
262
- module. The bitmap size is not affected by this matrix.
263
- contain: If ``True``, the image size will be automatically expanded
264
- so that it fits to the bounding box of the paths. Useful for
265
- rendering glyphs with negative sidebearings without clipping.
266
- evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
267
-
268
- Returns:
269
- A ``numpy.ndarray`` object with a shape of ``(height, width)``.
270
- Each element takes a value in the range of ``[0.0, 1.0]``.
271
-
272
- :Notes:
273
- The image size should always be given explicitly if you need to get
274
- a proper glyph image. When ``width`` and ``height`` are omitted, it
275
- forcifully fits to the bounding box and the side bearings get
276
- cropped. If you pass ``0`` to both ``width`` and ``height`` and set
277
- ``contain`` to ``True``, it expands to the bounding box while
278
- maintaining the origin of the contours, meaning that LSB will be
279
- maintained but RSB won’t. The difference between the two becomes
280
- more obvious when rotate or skew transformation is applied.
281
-
282
- :Example:
283
- .. code-block::
284
-
285
- >> pen = FreeTypePen(None)
286
- >> glyph.draw(pen)
287
- >> arr = pen.array(width=500, height=1000)
288
- >> type(a), a.shape
289
- (<class 'numpy.ndarray'>, (1000, 500))
290
- """
291
- import numpy as np
292
-
293
- buf, size = self.buffer(
294
- width=width,
295
- height=height,
296
- transform=transform,
297
- contain=contain,
298
- evenOdd=evenOdd,
299
- )
300
- return np.frombuffer(buf, "B").reshape((size[1], size[0])) / 255.0
301
-
302
- def show(
303
- self, width=None, height=None, transform=None, contain=False, evenOdd=False
304
- ):
305
- """Plots the rendered contours with `pyplot`. Requires `numpy` and
306
- `matplotlib`.
307
-
308
- Args:
309
- width: Image width of the bitmap in pixels. If omitted, it
310
- automatically fits to the bounding box of the contours.
311
- height: Image height of the bitmap in pixels. If omitted, it
312
- automatically fits to the bounding box of the contours.
313
- transform: An optional 6-tuple containing an affine transformation,
314
- or a ``Transform`` object from the ``fontTools.misc.transform``
315
- module. The bitmap size is not affected by this matrix.
316
- contain: If ``True``, the image size will be automatically expanded
317
- so that it fits to the bounding box of the paths. Useful for
318
- rendering glyphs with negative sidebearings without clipping.
319
- evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
320
-
321
- :Notes:
322
- The image size should always be given explicitly if you need to get
323
- a proper glyph image. When ``width`` and ``height`` are omitted, it
324
- forcifully fits to the bounding box and the side bearings get
325
- cropped. If you pass ``0`` to both ``width`` and ``height`` and set
326
- ``contain`` to ``True``, it expands to the bounding box while
327
- maintaining the origin of the contours, meaning that LSB will be
328
- maintained but RSB won’t. The difference between the two becomes
329
- more obvious when rotate or skew transformation is applied.
330
-
331
- :Example:
332
- .. code-block::
333
-
334
- >> pen = FreeTypePen(None)
335
- >> glyph.draw(pen)
336
- >> pen.show(width=500, height=1000)
337
- """
338
- from matplotlib import pyplot as plt
339
-
340
- a = self.array(
341
- width=width,
342
- height=height,
343
- transform=transform,
344
- contain=contain,
345
- evenOdd=evenOdd,
346
- )
347
- plt.imshow(a, cmap="gray_r", vmin=0, vmax=1)
348
- plt.show()
349
-
350
- def image(
351
- self, width=None, height=None, transform=None, contain=False, evenOdd=False
352
- ):
353
- """Returns the rendered contours as a PIL image. Requires `Pillow`.
354
- Can be used to display a glyph image in Jupyter Notebook.
355
-
356
- Args:
357
- width: Image width of the bitmap in pixels. If omitted, it
358
- automatically fits to the bounding box of the contours.
359
- height: Image height of the bitmap in pixels. If omitted, it
360
- automatically fits to the bounding box of the contours.
361
- transform: An optional 6-tuple containing an affine transformation,
362
- or a ``Transform`` object from the ``fontTools.misc.transform``
363
- module. The bitmap size is not affected by this matrix.
364
- contain: If ``True``, the image size will be automatically expanded
365
- so that it fits to the bounding box of the paths. Useful for
366
- rendering glyphs with negative sidebearings without clipping.
367
- evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
368
-
369
- Returns:
370
- A ``PIL.image`` object. The image is filled in black with alpha
371
- channel obtained from the rendered bitmap.
372
-
373
- :Notes:
374
- The image size should always be given explicitly if you need to get
375
- a proper glyph image. When ``width`` and ``height`` are omitted, it
376
- forcifully fits to the bounding box and the side bearings get
377
- cropped. If you pass ``0`` to both ``width`` and ``height`` and set
378
- ``contain`` to ``True``, it expands to the bounding box while
379
- maintaining the origin of the contours, meaning that LSB will be
380
- maintained but RSB won’t. The difference between the two becomes
381
- more obvious when rotate or skew transformation is applied.
382
-
383
- :Example:
384
- .. code-block::
385
-
386
- >> pen = FreeTypePen(None)
387
- >> glyph.draw(pen)
388
- >> img = pen.image(width=500, height=1000)
389
- >> type(img), img.size
390
- (<class 'PIL.Image.Image'>, (500, 1000))
391
- """
392
- from PIL import Image
393
-
394
- buf, size = self.buffer(
395
- width=width,
396
- height=height,
397
- transform=transform,
398
- contain=contain,
399
- evenOdd=evenOdd,
400
- )
401
- img = Image.new("L", size, 0)
402
- img.putalpha(Image.frombuffer("L", size, buf))
403
- return img
404
-
405
- @property
406
- def bbox(self):
407
- """Computes the exact bounding box of an outline.
408
-
409
- Returns:
410
- A tuple of ``(xMin, yMin, xMax, yMax)``.
411
- """
412
- bbox = FT_BBox()
413
- outline = self.outline()
414
- FT_Outline_Get_BBox(ctypes.byref(outline), ctypes.byref(bbox))
415
- return (bbox.xMin / 64.0, bbox.yMin / 64.0, bbox.xMax / 64.0, bbox.yMax / 64.0)
416
-
417
- @property
418
- def cbox(self):
419
- """Returns an outline's ‘control box’.
420
-
421
- Returns:
422
- A tuple of ``(xMin, yMin, xMax, yMax)``.
423
- """
424
- cbox = FT_BBox()
425
- outline = self.outline()
426
- FT_Outline_Get_CBox(ctypes.byref(outline), ctypes.byref(cbox))
427
- return (cbox.xMin / 64.0, cbox.yMin / 64.0, cbox.xMax / 64.0, cbox.yMax / 64.0)
428
-
429
- def _moveTo(self, pt):
430
- contour = Contour([], [])
431
- self.contours.append(contour)
432
- contour.points.append(pt)
433
- contour.tags.append(FT_CURVE_TAG_ON)
434
-
435
- def _lineTo(self, pt):
436
- if not (self.contours and len(self.contours[-1].points) > 0):
437
- raise PenError("Contour missing required initial moveTo")
438
- contour = self.contours[-1]
439
- contour.points.append(pt)
440
- contour.tags.append(FT_CURVE_TAG_ON)
441
-
442
- def _curveToOne(self, p1, p2, p3):
443
- if not (self.contours and len(self.contours[-1].points) > 0):
444
- raise PenError("Contour missing required initial moveTo")
445
- t1, t2, t3 = FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_CUBIC, FT_CURVE_TAG_ON
446
- contour = self.contours[-1]
447
- for p, t in ((p1, t1), (p2, t2), (p3, t3)):
448
- contour.points.append(p)
449
- contour.tags.append(t)
450
-
451
- def _qCurveToOne(self, p1, p2):
452
- if not (self.contours and len(self.contours[-1].points) > 0):
453
- raise PenError("Contour missing required initial moveTo")
454
- t1, t2 = FT_CURVE_TAG_CONIC, FT_CURVE_TAG_ON
455
- contour = self.contours[-1]
456
- for p, t in ((p1, t1), (p2, t2)):
457
- contour.points.append(p)
458
- contour.tags.append(t)