parquet-converter committed
Commit f55cb4b · 1 parent: 26baced

Update parquet files (step 68 of 397)

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/aicolors/__init__.py +0 -30
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fl Depth Of Field Plugin For After Effects Free A Must-Have for Any Motion Designer.md +0 -100
  3. spaces/1gistliPinn/ChatGPT4/Examples/Chemdraw 12 Crack ((NEW)).md +0 -18
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bhop Go The Ultimate Guide to Jumping and Surfing Online.md +0 -183
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale for Android The Most Fun and Addictive Strategy Game Ever.md +0 -99
  6. spaces/1phancelerku/anime-remove-background/APK 5play Download How to Access the Latest and Greatest Apps and Games for Free.md +0 -93
  7. spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/batchnorm.py +0 -315
  8. spaces/4com/4com-license/app.py +0 -94
  9. spaces/52Hz/SRMNet_thesis/README.md +0 -13
  10. spaces/801artistry/RVC801/Applio-RVC-Fork/utils/i18n.py +0 -28
  11. spaces/801artistry/RVC801/diffq/utils.py +0 -37
  12. spaces/AI-Zero-to-Hero/08-GR-Chatbot-Blenderbot/README.md +0 -12
  13. spaces/AIConsultant/MusicGen/scripts/__init__.py +0 -5
  14. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/__init__.py +0 -0
  15. spaces/AIZeroToHero/README/README.md +0 -10
  16. spaces/ASJMO/freegpt/client/js/change-language.js +0 -47
  17. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/switchTheme.ts +0 -10
  18. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/callback/updateUser.ts +0 -12
  19. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/You.py +0 -40
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileChooserMethods.js +0 -21
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/MergeStyle.js +0 -33
  22. spaces/Agusbs98/automatic-ecg-diagnosis/nets/modules.py +0 -33
  23. spaces/AkashKhamkar/Job_Search_Engine/app.py +0 -56
  24. spaces/AliSaria/MilitarEye/README.md +0 -13
  25. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/生成函数注释.py +0 -54
  26. spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/libJPG/jpge.cpp +0 -1049
  27. spaces/Amon1/ChatGPTForAcadamic/crazy_functions/生成函数注释.py +0 -57
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/quicktour.md +0 -314
  29. spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py +0 -51
  30. spaces/Apex-X/Tm/roop/__init__.py +0 -0
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/freeze.py +0 -97
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/README.md +0 -4
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py +0 -120
  34. spaces/AxelBell/EasyOCR_text_recognition/assets/header.html +0 -13
  35. spaces/Ayakasuki/anime-ai-detect/README.md +0 -13
  36. spaces/Banbri/zcvzcv/src/app/interface/panel/index.tsx +0 -347
  37. spaces/Benson/text-generation/Examples/Descargar Apkpro.me Carx Calle.md +0 -141
  38. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_instances.py +0 -25
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/manage_models.py +0 -563
  40. spaces/CVPR/GroupViT/app.py +0 -169
  41. spaces/CVPR/WALT/mmdet/datasets/pipelines/test_time_aug.py +0 -119
  42. spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/dataloader.py +0 -425
  43. spaces/CVPR/regionclip-demo/detectron2/utils/file_io.py +0 -37
  44. spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/clean_corpus.py +0 -88
  45. spaces/Curranj/Regex_Generator/README.md +0 -12
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-34e368b6.js +0 -2
  47. spaces/DaleChen/AutoGPT/autogpt/config/ai_config.py +0 -121
  48. spaces/Detomo/ai-comic-generation/src/lib/uploadToHuggingFace.ts +0 -16
  49. spaces/DiffusionArtco/Diffusion50/README.md +0 -13
  50. spaces/DragGan/DragGan-Inversion/gen_images.py +0 -160
spaces/101-5/gpt4free/g4f/.v1/gpt4free/aicolors/__init__.py DELETED
@@ -1,30 +0,0 @@
- import fake_useragent
- import requests
- import json
- from .typings import AiColorsResponse
-
-
- class Completion:
-     @staticmethod
-     def create(
-         query: str = "",
-     ) -> AiColorsResponse:
-         headers = {
-             "authority": "jsuifmbqefnxytqwmaoy.functions.supabase.co",
-             "accept": "*/*",
-             "accept-language": "en-US,en;q=0.5",
-             "cache-control": "no-cache",
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "user-agent": fake_useragent.UserAgent().random,
-         }
-
-         json_data = {"query": query}
-
-         url = "https://jsuifmbqefnxytqwmaoy.functions.supabase.co/chatgpt"
-         request = requests.post(url, headers=headers, json=json_data, timeout=30)
-         data = request.json().get("text").get("content")
-         json_data = json.loads(data.replace("\n ", ""))
-
-         return AiColorsResponse(**json_data)
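For readers skimming the diff: the removed module exposed a single static helper, Completion.create(query), which POSTed the query to a Supabase function and pulled a JSON payload out of the reply. The sketch below is a self-contained re-creation of that flow for illustration only. The endpoint URL, request shape, and parsing come from the deleted file above; the static user-agent string and the plain-dict return value (instead of the package's AiColorsResponse dataclass, whose fields are not shown in the diff) are assumptions made to keep the example dependency-light.

```python
import json

import requests

# Endpoint taken from the deleted aicolors/__init__.py shown above.
SUPABASE_URL = "https://jsuifmbqefnxytqwmaoy.functions.supabase.co/chatgpt"


def fetch_ai_colors(query: str = "") -> dict:
    """Rough stand-in for the removed Completion.create().

    Returns the parsed JSON payload as a plain dict; the original wrapped it
    in an AiColorsResponse object defined in the package's .typings module.
    """
    headers = {
        "accept": "*/*",
        # The original drew a random UA from fake_useragent; a static one is
        # used here so the sketch only depends on requests.
        "user-agent": "Mozilla/5.0",
    }
    resp = requests.post(SUPABASE_URL, headers=headers, json={"query": query}, timeout=30)
    data = resp.json().get("text", {}).get("content", "{}")
    return json.loads(data.replace("\n ", ""))
```

Whether the Supabase endpoint still answers in this shape is unverified; the point is only to document what functionality this commit removes.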
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fl Depth Of Field Plugin For After Effects Free A Must-Have for Any Motion Designer.md DELETED
@@ -1,100 +0,0 @@
1
- <br />
2
- <h1>Fl Depth Of Field Plugin For After Effects: A Review</h1>
3
- <p>If you are looking for a way to add realistic and cinematic depth of field effects to your 3D footage in After Effects, you may want to check out Fl Depth Of Field Plugin. This plugin is designed to move depth of field and out of focus generation to post-production, saving you time and resources from rendering them in your 3D app. In this article, we will review the features, pros and cons of Fl Depth Of Field Plugin and see how it can help you create stunning visuals.</p>
4
- <h2>Introduction</h2>
5
- <p>Fl Depth Of Field Plugin is a plugin for Adobe After Effects that allows you to create high-quality camera blurs with the flexibility of 2D post-processing. It is developed by Frischluft, a company that specializes in lens effects for computer graphics. According to their website, "The key aspect during the development of these filters was to match the real thing as good as possible."</p>
6
- <h2>Fl Depth Of Field Plugin For After Effects Free</h2><br /><p><b><b>Download Zip</b> &#9881; <a href="https://byltly.com/2uKwFO">https://byltly.com/2uKwFO</a></b></p><br /><br />
7
- <p>Depth of field is a phenomenon that occurs in real optical devices, such as cameras, where objects that are closer or farther away from the focal point appear blurred, while objects at the focal point appear sharp. This effect is used in photography and film as a style element, to draw attention to certain subjects, create a sense of depth and realism, or evoke a mood or atmosphere.</p>
8
- <p>However, generating depth of field effects in computer graphics can be challenging and time-consuming, as it usually requires ray tracing techniques that increase rendering times considerably. Fl Depth Of Field Plugin solves this problem by generating depth of field effects fast as a post-process, using a depth buffer for its calculations. It can also create out of focus effects without depth information, using a constant blur radius over the entire image.</p>
9
- <p>Fl Depth Of Field Plugin is not the only plugin that offers depth of field effects for After Effects. There are other plugins, such as DOF PRO, that also claim to provide photorealistic depth of field effects. However, Fl Depth Of Field Plugin has some advantages over other plugins, such as its ability to simulate different lens apertures, its highlights and brightness boost features, and its background distortion option.</p>
10
- <h2>Features of Fl Depth Of Field Plugin</h2>
11
- <h3>Depth of Field</h3>
12
- <p>The main feature of Fl Depth Of Field Plugin is its depth of field effect, which blurs pixels based on their depth value. To use this effect, you need a depth buffer for your 3D footage, which is an image that stores the distance information for each pixel. You can either render a depth buffer in your 3D app or use a plugin like ZbornToy to generate one in After Effects.</p>
13
- <p>Once you have a depth buffer, you can apply Fl Depth Of Field Plugin to your footage layer and adjust the parameters according to your needs. You can control the focal point, the focal range, the blur amount, the blur quality, and more. You can also adjust the lens aperture shape and size, which greatly defines the look of the blur.</p>
14
- <p>The lens aperture is the opening in the camera lens that controls how much light enters the camera. The shape and size of the aperture affect how the out-of-focus areas look like in an image. For example, a circular aperture produces circular bokeh (the aesthetic quality of the blur), while a hexagonal aperture produces hexagonal bokeh.</p>
15
- <p>Fl Depth Of Field Plugin allows you to simulate different kinds of real cameras by altering the lens aperture shape and size. You can choose from several presets or create your own custom shape using bezier curves. You can also animate the aperture shape and size over time for dynamic effects.</p>
16
- <p>How to use Fl Depth Of Field Plugin in After Effects<br />
17
- Fl Depth Of Field Plugin tutorial for After Effects beginners<br />
18
- Best settings for Fl Depth Of Field Plugin in After Effects<br />
19
- Fl Depth Of Field Plugin review and comparison with other plugins<br />
20
- Fl Depth Of Field Plugin download link and installation guide<br />
21
- Fl Depth Of Field Plugin alternatives and similar plugins<br />
22
- Fl Depth Of Field Plugin license and activation code<br />
23
- Fl Depth Of Field Plugin tips and tricks for realistic results<br />
24
- Fl Depth Of Field Plugin examples and showcase of projects<br />
25
- Fl Depth Of Field Plugin compatibility and system requirements<br />
26
- Fl Depth Of Field Plugin update and new features<br />
27
- Fl Depth Of Field Plugin support and customer service<br />
28
- Fl Depth Of Field Plugin discount and coupon code<br />
29
- Fl Depth Of Field Plugin pros and cons and user feedback<br />
30
- Fl Depth Of Field Plugin vs native After Effects depth of field<br />
31
- How to create cinematic depth of field with Fl Plugin<br />
32
- How to animate depth of field with Fl Plugin in After Effects<br />
33
- How to adjust depth of field with Fl Plugin in After Effects<br />
34
- How to blur background with Fl Plugin in After Effects<br />
35
- How to add bokeh effects with Fl Plugin in After Effects<br />
36
- How to control focus with Fl Plugin in After Effects<br />
37
- How to optimize performance with Fl Plugin in After Effects<br />
38
- How to fix errors and bugs with Fl Plugin in After Effects<br />
39
- How to customize depth of field with Fl Plugin in After Effects<br />
40
- How to use expressions with Fl Plugin in After Effects<br />
41
- How to use masks with Fl Plugin in After Effects<br />
42
- How to use 3D layers with Fl Plugin in After Effects<br />
43
- How to use cameras with Fl Plugin in After Effects<br />
44
- How to use lights with Fl Plugin in After Effects<br />
45
- How to use presets with Fl Plugin in After Effects<br />
46
- How to use keyframes with Fl Plugin in After Effects<br />
47
- How to use motion blur with Fl Plugin in After Effects<br />
48
- How to use color grading with Fl Plugin in After Effects<br />
49
- How to use noise reduction with Fl Plugin in After Effects<br />
50
- How to use lens distortion with Fl Plugin in After Effects<br />
51
- How to use chromatic aberration with Fl Plugin in After Effects<br />
52
- How to use vignette with Fl Plugin in After Effects<br />
53
- How to use grain with Fl Plugin in After Effects<br />
54
- How to use glow with Fl Plugin in After Effects<br />
55
- How to use lens flare with Fl Plugin in After Effects<br />
56
- How to use depth map with Fl Plugin in After Effects<br />
57
- How to use Z-depth pass with Fl Plugin in After Effects<br />
58
- How to use depth matte with Fl Plugin in After Effects<br />
59
- How to use depth channel with Fl Plugin in After Effects<br />
60
- How to use depth buffer with Fl Plugin in After Effects<br />
61
- How to use depth data with Fl Plugin in After Effects<br />
62
- How to use depth information with Fl Plugin in After Effects<br />
63
- How to use depth values with Fl Plugin in After Effects</p>
64
- <h3>Out of Focus</h3>
65
- <p>The other feature of Fl Depth Of Field Plugin is its out of focus effect, which creates a blur with a constant radius over the entire image. This effect does not require a depth buffer and can be used as a complement or an alternative to the depth of field effect.</p>
66
- <p>The out of focus effect can be useful when you want to create a shallow depth of field look without having accurate depth information or when you want to add some extra blur to your footage for artistic reasons. You can control the blur amount, quality, threshold, gamma correction, and more.</p>
67
- <p>One unique feature of the out of focus effect is that it allows you to use a custom image as a lens aperture instead of generating one. This means that you can use any image layer in your composition as an aperture texture and create interesting shapes and patterns in your blur. For example, you can use an image of a star or a heart as an aperture texture and create star-shaped or heart-shaped bokeh.</p>
68
- <p>Another unique feature of the out of focus effect is that it offers background distortion for semi-transparent areas. This means that when you look through a blurred object in your footage, such as glass or smoke, the background behind it will be distorted due to refraction. This effect is subtle but adds realism and believability to your comp.</p>
69
- <h3>Highlights and Brightness Boost</h3>
70
- <p>A common characteristic of real camera blurs is that very bright image parts are predominant when being out-of-focus. This is especially noticeable in highlights or light sources that appear as bright spots or discs in blurred areas. However, most graphic formats cut off bright parts above a certain threshold, resulting in dull or flat-looking blurs.</p>
71
- <p>To solve this problem, Fl Depth Of Field Plugin offers two features: highlights and brightness boost. The highlights feature allows you to simulate realistic highlights in out-of-focus areas by selecting parts that are supposed to be brighter than normal and giving them an extra boost. You can control the threshold, amount, saturation, tint color, blend mode, and more.</p>
72
- <p>The brightness boost feature allows you to select parts that are supposed to be brighter than normal but are not necessarily highlights (such as reflections or glows) and give them an extra boost as well. You can control the threshold, amount, gamma correction, saturation limit, blend mode, and more.</p>
73
- <h2>Pros and Cons of Fl Depth Of Field Plugin</h2>
74
- <h3>Pros</h3>
75
- <ul>
76
- <li>It is fast and easy to use compared to rendering depth-of-field effects in 3D apps.</li>
77
- <li>It produces high-quality and realistic results that match real cameras.</li>
78
- <li>It offers flexible and customizable options for different styles and scenarios.</li>
79
- </ul>
80
- <h3>Cons</h3>
81
- <ul>
82
- <li>It requires a depth buffer for depth-of-field effect which may not be available or accurate for some footage.</li>
83
- <li>It may not work well with motion blur or complex scenes with overlapping objects or transparency.</li>
84
- </ul>
85
- <h2>Conclusion</h2>
86
- <p>In conclusion, Fl Depth Of Field Plugin For After Effects is a powerful plugin that allows you to create realistic and cinematic depth-of-field effects fast as a post-process. It has many features that make it stand out from other plugins, such as its ability to simulate different lens apertures, its highlights and brightness boost features, and its background distortion option. It is suitable for anyone who wants to add some extra polish and realism to their 3D footage without spending too much time or resources on rendering. If you are interested in trying out Fl Depth Of Field Plugin, from their official website and see for yourself how it can improve your visuals.</p>
87
- <h2>FAQs</h2>
88
- <h4>What is the difference between depth of field and out of focus effects?</h4>
89
- <p>Depth of field effects blur pixels based on their distance from the focal point, creating a realistic and cinematic look. Out of focus effects blur pixels with a constant radius over the entire image, creating a simple and artistic look.</p>
90
- <h4>How can I get a depth buffer for my 3D footage?</h4>
91
- <p>You can either render a depth buffer in your 3D app or use a plugin like ZbornToy to generate one in After Effects.</p>
92
- <h4>How can I create custom lens apertures for my blur effects?</h4>
93
- <p>You can either use the built-in presets or create your own custom shape using bezier curves in the depth of field effect. You can also use any image layer in your composition as an aperture texture in the out of focus effect.</p>
94
- <h4>How can I simulate realistic highlights in out of focus areas?</h4>
95
- <p>You can use the highlights and brightness boost features to select and enhance bright parts of the image that are supposed to be brighter than normal when being out of focus.</p>
96
- <h4>How can I add background distortion for semi-transparent areas?</h4>
97
- <p>You can use the background distortion option in the out of focus effect to distort the background behind blurred objects such as glass or smoke.</p>
98
- </p> 0a6ba089eb<br />
99
- <br />
100
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Chemdraw 12 Crack ((NEW)).md DELETED
@@ -1,18 +0,0 @@
- <h2>Chemdraw 12 crack</h2><br /><p><b><b>Download File</b> >>> <a href="https://imgfil.com/2uxXgn">https://imgfil.com/2uxXgn</a></b></p><br /><br />
- <br />
- MDL MOLD CHEMICS
-
- ---------------
-
- The MDL MOLD CHEMICS software is a one-step or two-step process to molecular modeling. Two main classes of software, one is CASMI, Carbon Multi-purpose Individualized MOLD CHEMICS (CASMI/MOLD CHEMICS). CASMI is a one-step software to model molecules with MOLD CHEMICS that generates the molecules . This one-step approach is very time efficient as there is no need for computational chemistry, and a user is required to enter the chemical name and atom types of the target molecule only once. The time factor of CASMI/MOLD CHEMICS is the most important feature of this software.
-
- The other class is called CASMI/MOLD CHEMICS integration. CASMI/MOLD CHEMICS integration is a two-step process where CASMI or CASMI/MOLD CHEMICS performs a partial quantum mechanical calculation and the output can be fed to MOLD CHEMICS for the rest of the calculation. CASMI/MOLD CHEMICS integration is more time-consuming than CASMI.
-
- For CASMI, this software is available in three different flavors: Basic, MOLPRO, and GASP. GASP is now only available as CASMI, whereas MOLPRO and Basic are both available in CASMI/MOLD CHEMICS integration as well as CASMI.
-
- In order to simplify the process of quantum mechanical calculations with CASMI/MOLD CHEMICS, this software provides the user interface and the option of automatic input/output to a common directory or a database (PostgreSQL \[PC\]/SQLite3 \[iPhone/iPad\]). All calculations are performed using the MOLCAS 6.2 \[PC\]/MOLCAS 8.0 \[iPhone/iPad\] program. It is a software that employs the density functional theory (DFT) method for calculation of both the geometry and harmonic vibrational frequencies. DFT is computationally demanding, but in practice is still the method of choice for prediction of the structures and properties of organic and inorganic molecules. The software uses the Gaussian 09 package \[PC\]/Gaussian 16 package \[iPhone/iPad\] and is under the GNU General Public License.
-
- ![The user interface of CASMI (carbon multi-purpose individualized MOLD CHEM 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bhop Go The Ultimate Guide to Jumping and Surfing Online.md DELETED
@@ -1,183 +0,0 @@
1
-
2
- <h1>Bhop Go: A Fun and Challenging Parkour Game for Mobile Devices</h1>
3
- <p>If you are looking for a game that can test your skills, speed, and reflexes, you might want to try Bhop Go. Bhop Go is a mobile game that lets you experience the thrill and challenge of bhop, a skill that involves jumping faster in first-person shooter and simulation games. In this article, we will tell you everything you need to know about Bhop Go, including what it is, how to play it, and how to download it.</p>
4
- <h2>bhop go no download</h2><br /><p><b><b>Download</b> ->>> <a href="https://urlin.us/2uT0Zy">https://urlin.us/2uT0Zy</a></b></p><br /><br />
5
- <h2>What is Bhop Go?</h2>
6
- <p>Bhop Go is a game developed by Shockapp, a studio that specializes in creating casual and action games for mobile devices. Bhop Go is one of their most popular games, with over 5 million downloads on Google Play Store and 4.8 stars rating on App Store. But what exactly is bhop, and why is it so fun and addictive?</p>
7
- <h3>The history and origin of bhop</h3>
8
- <p>Bhop stands for bunny hop, a term that refers to a technique that allows players to move faster in first-person shooter and simulation games. Bhop was first discovered in the late 1990s in Quake, a game that used the same engine as Half-Life and Counter-Strike. By turning left and right (strafing) while jumping, players could gain more speed and momentum than running normally. This gave them an advantage in combat, movement, and exploration.</p>
9
- <p>Bhop soon became a popular skill among gamers, especially in the Counter-Strike series. Many players practiced bhop to improve their movement skills and compete with other players. However, bhop was also considered a form of cheating by some developers, who tried to patch it or limit it in their games. For example, Valve introduced a stamina system in Counter-Strike: Source that prevented players from bhopping continuously.</p>
10
- <p>Despite these attempts, bhop remained a beloved skill among many gamers, who continued to find ways to perform it or create mods that enabled it. Bhop also spawned a subculture of speedrunners, map makers, and community servers that focused on bhop as a form of art and entertainment. Some examples of these are KZ (climbing), surf (sliding), and bhop (jumping) servers.</p>
11
- <h3>The features and gameplay of Bhop Go</h3>
12
- <p>Bhop Go is a game that brings the essence of bhop to mobile devices. It is a parkour game that challenges players to jump on blocks and go as far as possible. It is not a realistic simulation of bhop, but rather a simplified and stylized version that is easy to play but hard to master.</p>
13
- <p>bhop go online free<br />
14
- bhop go web app<br />
15
- bhop go play store<br />
16
- bhop go y8 game<br />
17
- bhop go construct 3<br />
18
- bhop go bunny hop skill<br />
19
- bhop go parkour game<br />
20
- bhop go air strafes<br />
21
- bhop go speed run<br />
22
- bhop go offline mode<br />
23
- bhop go progressive web app<br />
24
- bhop go install chrome<br />
25
- bhop go fps game<br />
26
- bhop go simulation game<br />
27
- bhop go wall jump<br />
28
- bhop go blue and green portal<br />
29
- bhop go level warp<br />
30
- bhop go alphabuild20.c3p<br />
31
- bhop go controller support<br />
32
- bhop go main menu<br />
33
- bhop go screenshots<br />
34
- bhop go version history<br />
35
- bhop expert game mode<br />
36
- bhop expert no download<br />
37
- bhop expert y8.com<br />
38
- bhop expert software update needed<br />
39
- bhop expert webgl missing features<br />
40
- bhop expert game controls<br />
41
- bhop expert report a bug<br />
42
- bhop expert did you like this game?<br />
43
- bhop expert tags 1 player 3d free html5 jumping platform trap<br />
44
- bhop expert add this game to your web page by embedding the simple code line<br />
45
- bhop expert join other players talking about games in the y8 forum<br />
46
- bhop expert try cryptoserval game nft game backed by y8.com<br />
47
- bhop expert game details no victories and no winners almost all cards used in this mod are designed for fun walkthrough only so enjoy them as it is category action & adventure added on 25 jan 2022 <br />
48
- construct.net free online games bhop 17219 play <br />
49
- construct.net build and publish your own games just like bhop to this arcade with construct 3 full game <br />
50
- construct.net embed share non_performing published on 5 sep 2020 <br />
51
- construct.net bunny hop your way to victory in this game high speeds deadly black floors and wall jumping included touch the blue and green portal to go to the next level <br />
52
- construct.net why you should use bhop-c3.web.app ability to install bhop as a progressive web app on your device imagine a bhop icon on your home screen slightly better performance than playing on the construct arcade play offline or on an unstable connection auto updates just like the construct arcade cleaner interface and ui <br />
53
- construct.net how to install bhop as a game on your device with 5 easy steps you must be using a fairly modern version of google chrome for this to work head to bhop-c3.web.app wait a few seconds 5 10 seconds click the plus icon on the top right of the address bar press install finished <br />
54
- construct.net instructions simple keyboard and mouse w or space bunny hop wall jump a or left arrow move left d or right arrow move right advanced keyboard and mouse up arrow jump once wall jump once a or left arrow move left d or right arrow move right mobile up arrow bunny hop wall jump left arrow move left right arrow move right controller left stick up dpad up a or b bunny hop wall jump left stick left or dpad left move left left stick right or dpad right move right use your mouse or touchscreen to select buttons in the main menu to warp to levels <br />
55
- construct.net screenshots version history id date size engine plays </p>
56
- <p>Bhop Go has many features that make it fun and engaging, such as:</p>
57
- <ul>
58
- <li>Multiplayer mode: You can play online with friends or strangers in different maps and modes.</li>
59
- <li>Single player mode: You can play offline without internet connection in various maps.</li>
60
- <li>Collecting loot: You can find trampolines, bounce pads, knives, weapons, skins, gloves, and other items on maps.</li>
61
- <li>Jumping bounce pads: You can use these pads to boost your speed and height.</li>
62
- <li>Moving 3D obstacles: You can avoid or interact with these obstacles that can slow you down or help you.</li>
63
- <li>Racing for world records: You can compete with other players for the best time and distance on each map.</li>
64
- <li>Customizing your character: You can change your appearance, outfit, and accessories.</li>
65
- <li>Creating your own maps: You can design and share your own maps with other players.</li>
66
- </ul>
67
- <p>The gameplay of Bhop Go is simple but challenging. You have to tap the screen to jump and tilt your device to strafe. You have to time your jumps and strafes correctly to maintain your speed and direction. You also have to avoid falling off the blocks or hitting the obstacles. The game requires skill, concentration, and practice to master.</p>
68
- <h3>The benefits and challenges of bhop</h3>
69
- <p>Bhop is not only a fun and exciting game, but also a skill that can benefit you in many ways. Some of the benefits of bhop are:</p>
70
- <ul>
71
- <li>It improves your hand-eye coordination and reaction time.</li>
72
- <li>It enhances your spatial awareness and navigation skills.</li>
73
- <li>It stimulates your brain and creativity.</li>
74
- <li>It boosts your confidence and self-esteem.</li>
75
- <li>It relieves your stress and boredom.</li>
76
- </ul>
77
- <p>However, bhop also has some challenges that you need to overcome. Some of the challenges of bhop are:</p>
78
- <ul>
79
- <li>It can be frustrating and discouraging at first.</li>
80
- <li>It can be addictive and time-consuming.</li>
81
- <li>It can cause motion sickness or eye strain.</li>
82
- <li>It can be hard to find suitable games or servers that support bhop.</li>
83
- <li>It can be seen as cheating or unfair by some players or developers.</li>
84
- </ul>
85
- <p>Therefore, you need to balance your bhop experience with moderation, patience, and respect. You also need to find the right game or platform that suits your preferences and goals.</p>
86
- <h2>How to Play Bhop Go?</h2>
87
- <p>Now that you know what Bhop Go is and why it is fun and beneficial, you might want to try it yourself. But how do you play Bhop Go? Here are some tips and instructions that can help you get started.</p>
88
- <h3>The controls and interface of Bhop Go</h3>
89
- <p>The controls and interface of Bhop Go are simple and intuitive. You can see them in the screenshot below.</p>
90
- <img src="(^i^)" alt="Bhop Go controls and interface">
91
- <p>The controls are as follows:</p>
92
- <ul>
93
- <li>To jump, tap the screen with your right thumb.</li>
94
- <li>To strafe left or right, tilt your device left or right with your left hand.</li>
95
- <li>To look around, swipe the screen with your left thumb.</li>
96
- <li>To use items, tap the icons on the bottom left corner of the screen.</li>
97
- </ul>
98
- <p>The interface shows the following information:</p>
99
- <ul>
100
- <li>Your speed in units per second (UPS).</li>
101
- <li>Your distance in meters (M).</li>
102
- <li>Your time in seconds (S).</li>
103
- <li>Your rank among other players (R).</li>
104
- <li>Your health points (HP).</li>
105
- </ul>
106
- <h3>The modes and maps of Bhop Go</h3>
107
- <p>Bhop Go has two modes: multiplayer and single player. In multiplayer mode, you can play online with other players in different maps and modes. You can choose from casual, competitive, deathmatch, race, or custom modes. You can also chat with other players, join clans, or create private rooms.</p>
108
- <p>In single player mode, you can play offline without internet connection in various maps. You can choose from easy, medium, hard, or extreme maps. You can also create your own maps using the map editor.</p>
109
- <p>Bhop Go has over 100 maps that you can play on. Each map has a different theme, layout, difficulty, and length. Some maps are based on real locations, such as Paris, Tokyo, New York, or Dubai. Some maps are inspired by other games, such as Minecraft, Portal, or Half-Life. Some maps are original creations by the developers or the community.</p>
110
- <h3>The tips and tricks for bhop</h3>
111
- <p>Bhop is a skill that requires practice and patience to master. However, there are some tips and tricks that can help you improve your bhop performance. Here are some of them: </p>
112
- <ul>
113
- <li>Practice on easy maps first before moving on to harder ones.</li>
114
- <li>Adjust your sensitivity and tilt settings to suit your preference.</li>
115
- <li>Use headphones or earphones to hear the sound cues for jumping and landing.</li>
116
- <li>Aim for smooth and consistent jumps rather than fast and erratic ones.</li>
117
- <li>Use the bounce pads and trampolines to gain more speed and height.</li>
118
- <li>Use the knives and weapons to slash or shoot the blocks or obstacles.</li>
119
- <li>Collect the loot and skins to customize your character and items.</li>
120
- <li>Watch videos or streams of other players to learn from their techniques and strategies.</li>
121
- <li>Have fun and enjoy the game!</li>
122
- </ul>
123
- <h2>How to Download Bhop Go?</h2>
124
- <p>Bhop Go is a free game that you can download and play on your mobile device. However, you need to make sure that your device meets the requirements and compatibility of the game. You also need to follow the steps and sources for downloading the game. Here are some details that can help you with that.</p>
125
- <h3>The requirements and compatibility of Bhop Go</h3>
126
- <p>Bhop Go is a game that requires a decent device to run smoothly and properly. The minimum requirements for Bhop Go are:</p>
127
- <ul>
128
- <li>Android 4.4 or higher, or iOS 10.0 or higher.</li>
129
- <li>At least 1 GB of RAM.</li>
130
- <li>At least 100 MB of free storage space.</li>
131
- <li>A stable internet connection (for multiplayer mode).</li>
132
- </ul>
133
- <p>Bhop Go is compatible with most mobile devices, such as smartphones, tablets, or iPads. However, some devices may experience lag, glitches, or crashes due to hardware or software issues. If you encounter any problems with Bhop Go, you can try the following solutions:</p>
134
- <ul>
135
- <li>Update your device's operating system and apps.</li>
136
- <li>Clear your device's cache and memory.</li>
137
- <li>Restart your device or the game.</li>
138
- <li>Contact the developer's support team or report a bug.</li>
139
- </ul>
140
- <h3>The steps and sources for downloading Bhop Go</h3>
141
- <p>Bhop Go is a game that you can download from official and trusted sources, such as Google Play Store or App Store. You can also download it from third-party websites or platforms, but you need to be careful of malware or viruses. Here are the steps and sources for downloading Bhop Go:</p>
142
- <table>
143
- <tr><th>Source</th><th>Steps</th></tr>
144
- <tr><td>Google Play Store</td><td><ol><li>Open Google Play Store on your Android device.</li><li>Search for "Bhop Go" in the search bar.</li><li>Select the game from the results and tap "Install".</li><li>Wait for the game to download and install on your device.</li><li>Tap "Open" to launch the game and enjoy!</li></ol></td></tr>
145
- <tr><td>App Store</td><td><ol><li>Open App Store on your iOS device.</li><li>Search for "Bhop Go" in the search bar.</li><li>Select the game from the results and tap "Get".</li><li>Enter your Apple ID password or use Touch ID or Face ID to confirm.</li><li>Wait for the game to download and install on your device.</li><li>Tap "Open" to launch the game and enjoy!</li></ol></td></tr>
146
- <tr><td>Aptoide</td><td><ol><li>Open Aptoide on your Android device. If you don't have it, you can download it from .</li><li>Search for "Bhop Go" in the search bar.</li><li>Select the game from the results and tap "Install".</li><li>Wait for the game to download and install on your device.</li><li>Tap "Open" to launch the game and enjoy!</li></ol></td></tr>
147
- <tr><td>TweakBox</td><td><ol><li>Open TweakBox on your iOS device. If you don't have it, you can download it from .</li><li>Search for "Bhop Go" in the search bar.</li><li>Select the game from the results and tap "Install".</li><li>Wait for the game to download and install on your device.</li><li>Tap "Open" to launch the game and enjoy!</li></ol></td></tr>
148
- </table>
149
- <h3>The alternatives and similar games to Bhop Go</h3>
150
- <p>Bhop Go is a great game that can provide you with hours of fun and challenge. However, if you want to try something different or explore other options, there are some alternatives and similar games to Bhop Go that you can check out. Here are some of them:</p>
151
- <table>
152
- <tr><th>Name</th><th>Description</th></tr>
153
- <tr><td>Bunny Hop League</td><td>A game that combines bhop with soccer. You can play online with other players in different stadiums and score goals by jumping and kicking the ball.</td></tr>
154
- <tr><td>Bhop Jump</td><td>A game that simulates bhop in a realistic way. You can play on different maps and modes, such as speedrun, freestyle, or multiplayer. You can also customize your character and settings.</td></tr>
155
- <tr><td>Bhop Pro</td><td>A game that teaches you how to bhop in a step-by-step way. You can learn the basics and advanced techniques of bhop, such as strafing, air strafing, or sync. You can also practice on various maps and modes.</td></tr>
156
- <tr><td>Surf VPN</td><td>A game that lets you surf on ramps and slides in a 3D environment. You can play online with other players or offline on different maps. You can also collect coins and skins to upgrade your character.</td></tr>
157
- <tr><td>Flip Runner</td><td>A game that lets you perform parkour stunts and flips in a cityscape. You can run, jump, flip, and slide on buildings, cars, or objects. You can also unlock new characters and locations.</td></tr>
158
- </table>
159
- <h2>Conclusion</h2>
160
- <p>Bhop Go is a game that can provide you with a fun and challenging experience of bhop, a skill that involves jumping faster in first-person shooter and simulation games. Bhop Go has many features and gameplay options that make it engaging and enjoyable, such as multiplayer mode, single player mode, collecting loot, jumping bounce pads, moving 3D obstacles, racing for world records, customizing your character, and creating your own maps.</p>
161
- <p>Bhop Go is also a game that can benefit you in many ways, such as improving your hand-eye coordination, spatial awareness, creativity, confidence, and stress relief. However, bhop Go also has some challenges that you need to overcome, such as frustration, addiction, motion sickness, compatibility issues, and cheating accusations.</p>
162
- <p>Bhop Go is a game that you can download and play on your mobile device for free. However, you need to make sure that your device meets the requirements and compatibility of the game. You also need to follow the steps and sources for downloading the game from official and trusted platforms.</p>
163
- <p>Bhop Go is a great game that can satisfy your bhop cravings and curiosity. However, if you want to try something different or explore other options, there are some alternatives and similar games to Bhop Go that you can check out.</p>
164
- <p>If you are looking for a game that can test your skills, speed, and reflexes, you might want to try Bhop Go. Bhop Go is a game that lets you experience the thrill and challenge of bhop on your mobile device. Download Bhop Go today and see how far you can go!</p>
165
- <h3>FAQs</h3>
166
- <p>Here are some frequently asked questions about Bhop Go:</p>
167
- <ol>
168
- <li>What is the difference between bhop and surf?</li>
169
- <li>How do I get more skins and items in Bhop Go?</li>
170
- <li>How do I share my maps with other players in Bhop Go?</li>
171
- <li>How do I report a bug or a cheater in Bhop Go?</li>
172
- <li>How do I join a clan or a private room in Bhop Go?</li>
173
- </ol>
174
- <p>The answers are:</p>
175
- <ol>
176
- <li>Bhop and surf are both skills that involve moving faster in first-person shooter and simulation games. However, bhop is about jumping on blocks while surf is about sliding on ramps.</li>
177
- <li>You can get more skins and items in Bhop Go by finding them on maps, buying them with coins or real money, or watching ads.</li>
178
- <li>You can share your maps with other players in Bhop Go by uploading them to the cloud server or sending them via email or social media.</li>
179
- <li>You can report a bug or a cheater in Bhop Go by contacting the developer's support team via email or social media.</li>
180
- <li>You can join a clan or a private room in Bhop Go by tapping the clan or room icon on the main menu or the multiplayer mode.</li>
181
- </ol></p> 197e85843d<br />
182
- <br />
183
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale for Android The Most Fun and Addictive Strategy Game Ever.md DELETED
@@ -1,99 +0,0 @@
1
-
2
- <h1>Clash Royale Unblocked Download: How to Play the Epic Real-Time Card Battle Game for Free</h1>
3
- <p>Clash Royale is one of the most successful free-to-play mobile games available in the market. It's more than just a card game, it's also a multiplayer tower-defense game that is super fun to play either solo or with a friend. If you are a fan of Clash of Clans, you will love Clash Royale as it features the same characters and spells, as well as new ones. But what if you want to play Clash Royale without any restrictions or limitations? What if you want to enjoy the game without spending any money or waiting for chests to open? Well, there is a way to do that. It's called Clash Royale unblocked download.</p>
4
- <h2>clash royale unblocked download</h2><br /><p><b><b>Download File</b> &#187;&#187;&#187; <a href="https://urlin.us/2uSWEZ">https://urlin.us/2uSWEZ</a></b></p><br /><br />
5
- <p>Clash Royale unblocked download is a way to play the game without having to go through the Google Play Store or any other official app store. It involves downloading an APK file from a third-party website and installing it on your Android device. This way, you can bypass any regional or device restrictions, as well as get access to the latest updates and features before anyone else. You can also play the game without any ads or in-app purchases, making it completely free and fair.</p>
6
- <p>In this article, we will show you how to play Clash Royale unblocked download, what you need to do it, what you can expect from it, and some tips and tricks to improve your gameplay. Let's get started!</p>
7
- <h2>What You Need to Play Clash Royale Unblocked</h2>
8
- <p>To play Clash Royale unblocked download, you will need the following things:</p>
9
- <ul>
10
- <li>An Android device that can run the game. The minimum requirements are Android 5.0 or higher, 150 MB of free storage space, and an internet connection.</li>
11
- <li>An APK file of Clash Royale. This is a file that contains the game's data and can be installed on your device. You can get it from various websites that offer APK downloads, but one of the most reliable and safe ones is Uptodown. Uptodown is a website that offers APK downloads for thousands of Android games and apps, including Clash Royale. It also has a user-friendly interface, a rating system, and a blog that covers the latest news and updates about the game.</li>
12
- </ul>
13
- <h3>How to Download and Install Clash Royale APK from Uptodown</h3>
14
- <p>Here are the steps you need to follow to download and install Clash Royale APK from Uptodown:</p>
15
- <ol>
16
- <li>Go to the Uptodown website and search for Clash Royale in the search bar. You can also use this link to go directly to the game's page.</li>
17
- <li>On the game's page, you will see a green button that says "Download". Click on it and wait for the download to start. You may need to allow your browser to download files from unknown sources.</li>
18
- <li>Once the download is complete, locate the APK file on your device's file manager and tap on it. You may need to enable the installation of apps from unknown sources on your device's settings.</li>
19
- <li>Follow the instructions on the screen and wait for the installation to finish. You may see a warning message that says "This app was built for an older version of Android and may not work properly". Ignore it and tap on "Install anyway".</li>
20
- <li>After the installation is done, you can launch the game from your app drawer or home screen. You may need to grant some permissions to the game, such as access to your storage, contacts, and location.</li>
21
- </ol>
22
- <h4>How to Update Clash Royale APK</h4>
23
- <p>One of the advantages of using Uptodown to download Clash Royale APK is that you can get the latest updates as soon as they are released by the developers. Here are some tips on how to update Clash Royale APK:</p>
24
- <p>clash royale apk download for android<br />
25
- clash royale free download for pc<br />
26
- clash royale online play without download<br />
27
- clash royale mod apk unlimited gems and coins<br />
28
- clash royale hack version download 2023<br />
29
- clash royale private server download ios<br />
30
- clash royale emulator for windows 10<br />
31
- clash royale best deck for arena 13<br />
32
- clash royale tips and tricks for beginners<br />
33
- clash royale latest update download apk<br />
34
- clash royale tournaments with prizes 2023<br />
35
- clash royale clan wars 2 strategy guide<br />
36
- clash royale season 25 rewards and skins<br />
37
- clash royale legendary cards list and ranking<br />
38
- clash royale fan art and wallpapers download<br />
39
- clash royale characters names and abilities<br />
40
- clash royale gameplay videos and live streams<br />
41
- clash royale memes and jokes funny images<br />
42
- clash royale reddit community and discussions<br />
43
- clash royale wiki and database of cards<br />
44
- clash royale support and customer service<br />
45
- clash royale official website and blog<br />
46
- clash royale merchandise and gift cards<br />
47
- clash royale esports and competitive scene<br />
48
- clash royale history and development story</p>
49
- <ul>
50
- <li>Check for updates regularly on the Uptodown website or app. You can also enable notifications to get alerted when a new version is available.</li>
51
- <li>To update Clash Royale APK, you just need to repeat the same steps as downloading and installing it. You don't need to uninstall the previous version or lose your progress.</li>
52
- <li>If you encounter any problems or errors while updating, you can try clearing the cache and data of the game or reinstalling it from scratch.</li>
53
- </ul>
54
- <h2>What You Can Expect from Clash Royale Unblocked</h2>
55
- <p>Clash Royale unblocked download is not much different from the official version of the game, except that it has no ads or in-app purchases. You can still enjoy all the features and content that make Clash Royale one of the best mobile games ever. Here are some of them:</p> such as skins, emotes, and magic items. You can also take part in fun and challenging events that test your skills and creativity. For example, you can play with a random deck, a special card, or a different set of rules. These events are a great way to earn more rewards and have fun.</p>
56
- <h2>Tips and Tricks to Improve Your Gameplay in Clash Royale Unblocked</h2>
57
- <p>Clash Royale unblocked download is not an easy game to master. It requires a lot of practice, patience, and learning. Here are some tips and tricks that can help you improve your gameplay and win more matches:</p>
58
- <h3>Don't Waste Gold or Gems</h3>
59
- <p>Gold and gems are the two main currencies in Clash Royale unblocked download. You can use them to buy cards, chests, upgrades, and more. However, they are not easy to come by, so you should use them wisely and avoid unnecessary purchases. For example, you should not buy cards from the shop unless you really need them or they are on sale. You should also not spend gems on speeding up chests or buying low-quality chests. Instead, you should save them for special offers or high-value chests.</p>
60
- <h3>Create a Versatile and Powerful Deck</h3>
61
- <p>Your deck is the key to your success in Clash Royale unblocked download. You should create a deck that suits your playstyle and strategy, as well as the current meta and trends. You should also make sure that your deck is versatile and powerful enough to deal with different situations and opponents. A good deck should have the following characteristics:</p>
62
- <ul>
63
- <li>A balance of elixir cost. Your average elixir cost should be between 3.0 and 4.0, depending on your deck type. You don't want to have a deck that is too expensive or too cheap, as it will affect your elixir management and tempo.</li>
64
- <li>A balance of card types. Your deck should have a mix of different card types, such as troops, spells, buildings, and win conditions. You don't want to have a deck that is too weak or too strong against certain cards or strategies.</li>
65
- <li>A balance of roles. Your deck should have cards that can perform different roles, such as offense, defense, support, control, and cycle. You don't want to have a deck that is too one-dimensional or too dependent on certain cards.</li>
66
- <li>A synergy of cards. Your deck should have cards that work well together and complement each other's strengths and weaknesses. You don't want to have a deck that is too random or too predictable.</li>
67
- </ul>
68
- <h3>Don't Waste Elixir</h3>
69
- <p>Elixir is the resource that you use to play cards in Clash Royale unblocked download. It regenerates at a constant rate of 1 elixir per 2.8 seconds (or 1 elixir per 1.4 seconds in double elixir time). Elixir management is one of the most important skills in the game, as it determines how much you can do in each match. Here are some tips on how to manage your elixir efficiently:</p>
70
- <ul>
71
- <li>Don't overcommit or underdefend. You should always try to spend less elixir than your opponent while defending or attacking, unless you have a clear advantage or opportunity. You should also avoid playing unnecessary cards or wasting elixir on low-value targets.</li>
72
- <li>Don't leak elixir. You should always try to keep your elixir bar full or near full, unless you are waiting for a specific card or situation. You should also avoid playing cards too early or too late, as it will affect your elixir flow and timing.</li>
73
- <li>Don't ignore elixir trades. You should always pay attention to how much elixir you and your opponent spend on each interaction and try to gain an elixir advantage whenever possible. You should also use spells wisely and only when they can give you a positive or equal elixir trade.</li>
74
- </ul>
75
- <h3>Aim for Princess Towers First</h3>
76
- <p>The main objective of Clash Royale unblocked download is to destroy your opponent's towers while protecting your own. There are three types of towers in the game: the king tower, which is located at the center of each side; and the two princess towers, which are located at the corners of each side. The princess towers have less health and damage than the king tower, but they also shoot faster and farther. Here are some tips on how to target the enemy's towers strategically:</p>
77
- <ul>
78
- <li>Aim for the princess towers first. You should always try to destroy at least one princess tower before going for the king tower, as it will give you more space, options, and pressure on the enemy's side. You should also avoid activating the king tower prematurely by hitting it with spells or troops that have splash damage or area damage, such as fireball, rocket, or balloon. Activating the king tower will make it join the defense and make it harder for you to win.</li>
79
- <li>Aim for the weaker princess tower. You should always try to focus your attacks on the princess tower that has less health or is more vulnerable to your deck, as it will make it easier for you to destroy it and gain an advantage. You should also avoid splitting your attacks or switching targets too often, as it will make it harder for you to finish off a tower and waste your elixir.</li>
80
- <li>Aim for the opposite princess tower. You should always try to attack the princess tower that is opposite to the one that your opponent is attacking, as it will create a counter-push and force your opponent to defend both sides. You should also avoid attacking the same princess tower as your opponent, as it will create a stalemate and give your opponent more time to recover and counterattack.</li>
81
- </ul>
82
- <h2>Conclusion</h2>
83
- <p>Clash Royale unblocked download is a great way to play the epic real-time card battle game for free and without any restrictions or limitations. You can download and install the APK file from Uptodown, a reliable and safe website that offers APK downloads for thousands of Android games and apps. You can enjoy all the features and content that make Clash Royale one of the best mobile games ever, such as collecting and upgrading cards, battling in real-time duels, joining clans and participating in clan wars, and enjoying seasonal events and challenges. You can also improve your gameplay by following some tips and tricks, such as not wasting gold or gems, creating a versatile and powerful deck, not wasting elixir, and aiming for princess towers first. If you are looking for a fun and addictive game that will keep you entertained for hours, you should definitely try Clash Royale unblocked download. You won't regret it!</p>
84
- <h2>FAQs</h2>
85
- <p>Here are some common questions and answers about Clash Royale unblocked download:</p>
86
- <ul>
87
- <li><b>Q: Is Clash Royale unblocked download safe?</b></li>
88
- <li>A: Yes, Clash Royale unblocked download is safe as long as you download the APK file from a trusted website like Uptodown. Uptodown scans all the APK files for viruses and malware before uploading them to their website. However, you should always be careful when downloading files from unknown sources and check the permissions and reviews before installing them.</li>
89
- <li><b>Q: Is Clash Royale unblocked download legal?</b></li>
90
- <li>A: Yes, Clash Royale unblocked download is legal as long as you don't use it for any illegal or unethical purposes. Clash Royale is a free-to-play game that does not require any license or registration to play. However, you should respect the intellectual property rights of the developers and not use any hacks or cheats that may harm the game or other players.</li>
91
- <li><b>Q: Is Clash Royale unblocked download compatible with my device?</b></li>
92
- <li>A: Clash Royale unblocked download is compatible with most Android devices that can run the game. The minimum requirements are Android 5.0 or higher, 150 MB of free storage space, and an internet connection. However, some devices may have issues with performance or compatibility due to different hardware or software specifications. If you encounter any problems or errors while playing Clash Royale unblocked download, you can try clearing the cache and data of the game or reinstalling it from scratch.</li>
93
- <li><b>Q: Can I play Clash Royale unblocked download with my friends?</b></li>
94
- <li>A: Yes, you can play Clash Royale unblocked download with your friends either online or offline. You can invite your friends to join your clan or challenge them to friendly battles. You can also play with random players from around the world in 1v1 or 2v2 matches. However, you may not be able to play with players who are using the official version of the game or a different version of the APK file.</li>
95
- <li><b>Q: Can I transfer my progress from Clash Royale unblocked download to the official version of the game?</b></li>
96
- <li>A: Yes, you can transfer your progress from Clash Royale unblocked download to the official version of the game by using your Google Play Games account or your Supercell ID. You can link your account to either of these services in the game's settings menu. However, you may lose some of your progress or rewards if you switch between different versions of the game frequently.</li>
97
- </ul></p> 197e85843d<br />
98
- <br />
99
- <br />
spaces/1phancelerku/anime-remove-background/APK 5play Download How to Access the Latest and Greatest Apps and Games for Free.md DELETED
@@ -1,93 +0,0 @@
1
-
2
- <h1>Download APK 5play: A Guide to Free Apps and Games</h1>
3
- <p>Do you love playing mobile games and using mobile apps, but hate paying for them? If yes, then you should try <strong>5play</strong>, a platform where you can find thousands of free APK and Mod APK games and apps for Android devices. In this article, we will show you how to download and install 5play on your device, how to use it to find and download free apps and games, and what are the benefits and drawbacks of using it.</p>
4
- <h2>How to download and install 5play on your device</h2>
5
- <p>Downloading and installing 5play on your device is very easy. Just follow these simple steps:</p>
6
- <h2>download apk 5play</h2><br /><p><b><b>Download File</b> &raquo; <a href="https://jinyurl.com/2uNMF6">https://jinyurl.com/2uNMF6</a></b></p><br /><br />
7
- <ol>
8
- <li>Visit the official website of <a href="(^1^)">5play</a>, where you can find all the latest and popular apps and games for Android.</li>
9
- <li>Choose the app or game you want to download from the categories or use the search bar to find what you are looking for.</li>
10
- <li>Click on the download button and wait for the APK file to be downloaded on your device.</li>
11
- <li>Enable unknown sources in your device settings. This will allow you to install apps and games from sources other than Google Play Store or App Store.</li>
12
- <li>Locate the APK file on your device storage and tap on it to install it.</li>
13
- </ol>
14
- <p>Congratulations! You have successfully downloaded and installed 5play on your device. There are many benefits of using 5play for free apps and games. Here are some of them:</p>
15
- <ul>
16
- <li>Access to thousands of apps and games that are not available on Google Play Store or App Store. You can find apps and games that are banned, removed, or restricted in your region or country.</li>
17
- <li>Access to modded versions of apps and games that have premium features unlocked or unlimited resources. You can enjoy the full potential of your favorite apps and games without spending any money.</li>
18
- <li>Access to updated versions of apps and games that have bug fixes and new features. You can always get the latest and best version of your apps and games from 5play.</li>
19
- <li>Access to safe and reliable downloads that are tested and verified by the 5play team. You can download apps and games without worrying about malware or viruses that can harm your device or data.</li>
20
- <li>Access to a user-friendly interface that is easy to navigate and use. You can find what you are looking for in a matter of seconds and download it with a single tap.</li>
21
- </ul>
22
- <p>These are some of the benefits of using 5play for free apps and games. However, there are also some drawbacks that you should be aware of.</p>
23
- <h2>The drawbacks of using 5play for free apps and games</h2>
24
- <p>Using 5play for free apps and games is not without risks. Here are some of the drawbacks that you should be aware of:</p>
25
- <ul>
26
- <li>The risk of downloading malware or viruses that can harm your device or data. Although the 5play team tries to ensure the safety and reliability of the downloads, there is no guarantee that they are 100% secure. You should always scan the files before installing them and use a reputable antivirus software on your device.</li>
27
- <li>The risk of violating the terms and conditions of the original developers or publishers of the apps and games. By downloading and using apps and games from 5play, you may be infringing on their intellectual property rights or breaking their rules. This may result in legal issues or penalties if you are caught.</li>
28
- <li>The risk of losing your progress or data if you uninstall or update the app or game from another source. If you download an app or game from 5play, you may not be able to sync your progress or data with the original version from Google Play Store or App Store. This means that if you uninstall or update the app or game from another source, you may lose your progress or data.</li>
29
- <li>The risk of facing legal issues or penalties if you use pirated or cracked apps and games. Some of the apps and games on 5play may be pirated or cracked, which means that they are illegally obtained or modified. This may violate the laws of your country or region, and you may face legal issues or penalties if you are caught.</li>
30
- </ul>
31
- <p>These are some of the drawbacks of using 5play for free apps and games. You should weigh the pros and cons before deciding to use it.</p>
32
- <h2>Conclusion and FAQs</h2>
33
- <p>In conclusion, 5play is a platform where you can find thousands of free APK and Mod APK games and apps for Android devices. It has many benefits, such as access to apps and games that are not available on Google Play Store or App Store, access to modded versions of apps and games that have premium features unlocked or unlimited resources, access to updated versions of apps and games that have bug fixes and new features, access to safe and reliable downloads that are tested and verified by the 5play team, and access to a user-friendly interface that is easy to navigate and use. However, it also has some drawbacks, such as the risk of downloading malware or viruses that can harm your device or data, the risk of violating the terms and conditions of the original developers or publishers of the apps and games, the risk of losing your progress or data if you uninstall or update the app or game from another source, and the risk of facing legal issues or penalties if you use pirated or cracked apps and games.</p>
34
- <p>If you want to download free APK and Mod APK games and apps for Android devices, you can try 5play at your own risk. However, you should always be careful about what you download and install on your device, and respect the rights of the original developers or publishers of the apps and games.</p>
35
- <p>download apk 5play app<br />
36
- download apk 5play mod<br />
37
- download apk 5play games<br />
38
- download apk 5play free<br />
39
- download apk 5play android<br />
40
- download apk 5play toca boca<br />
41
- download apk 5play minecraft<br />
42
- download apk 5play brawl stars<br />
43
- download apk 5play prequel<br />
44
- download apk 5play inshot<br />
45
- download apk 5play chikii<br />
46
- download apk 5play drastic<br />
47
- download apk 5play true skate<br />
48
- download apk 5play papers please<br />
49
- download apk 5play top war<br />
50
- download apk 5play evony<br />
51
- download apk 5play klondike<br />
52
- download apk 5play moon reader<br />
53
- download apk 5play torque pro<br />
54
- download apk 5play hotschedules<br />
55
- download apk 5play simple gallery<br />
56
- download apk 5play getapps<br />
57
- download apk 5play ultimate guitar<br />
58
- download apk 5play my radio<br />
59
- download apk 5play ibomma<br />
60
- download apk 5play nova chatgpt<br />
61
- download apk 5play chatgpt pro<br />
62
- download apk 5play camera translator<br />
63
- download apk 5play swing vpn<br />
64
- download apk 5play rebahin<br />
65
- download apk 5play meitu vip<br />
66
- download apk 5play resso premium<br />
67
- download apk 5play pixiv ads removed<br />
68
- download apk 5play pandora plus unlocked<br />
69
- download apk 5play world of tanks blitz mod money and gold<br />
70
- download apk 5play nuls brawl private server gems unlimited <br />
71
- download apk 5play chicken gun mega menu coins unlimited <br />
72
- download apk 5play bendy and the ink machine full game obb <br />
73
- download apk 5play super meat boy forever full game paid <br />
74
- download apk 5play legend of slime god mode money unlimited <br />
75
- download apk 5play incredibox unlocked money unlimited <br />
76
- download apk 5play dawncaster deckbuilding rpg full game paid <br />
77
- download apk 5play last viking god of valhalla money unlimited <br />
78
- download apk 5play mob control menu money unlimited <br />
79
- download apk 5play truckers of europe menu money unlimited</p>
80
- <p>Here are some FAQs that may help you understand more about 5play:</p>
81
- <h3>What is an APK file?</h3>
82
- <p>An APK file is an Android Package file that contains all the files needed to install an app or game on an Android device. It is similar to an EXE file on Windows computers.</p>
83
- <h3>What is a Mod APK file?</h3>
84
- <p>A Mod APK file is a modified version of an original APK file that has premium features unlocked or unlimited resources. It is created by third-party developers or hackers who modify the original code of the app or game.</p>
85
- <h3>How can I update the apps and games downloaded from 5play?</h3>
86
- <p>You can update the apps and games downloaded from 5play by using the 5play app itself. You can check for updates from the downloads, updates, or favorites section of the app. You can also enable the auto-update feature to get the latest versions of your apps and games automatically.</p>
87
- <h3>How can I contact the support team of 5play?</h3>
88
- <p>You can contact the support team of 5play by using the feedback or contact us option in the 5play app. You can also visit their <a href="">Facebook page</a> or <a href="">Twitter account</a> to get in touch with them. They are always ready to help you with any issues or queries you may have.</p>
89
- <h3>Is it legal to use 5play?</h3>
90
- <p>The legality of using 5play depends on your country or region's laws and regulations regarding downloading and using apps and games from unofficial sources. Some countries or regions may allow it, while others may prohibit it. You should always check your local laws and regulations before using 5play, and use it at your own risk.</p>
91
- <p>I hope you enjoyed reading this article and learned something new. If you have any questions or comments, please feel free to leave them below. Thank you for your time and attention.</p>
92
- <br />
93
- <br />
 
 
 
 
spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/batchnorm.py DELETED
@@ -1,315 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # File : batchnorm.py
3
- # Author : Jiayuan Mao
4
- # Email : [email protected]
5
- # Date : 27/01/2018
6
- #
7
- # This file is part of Synchronized-BatchNorm-PyTorch.
8
- # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9
- # Distributed under MIT License.
10
-
11
- import collections
12
-
13
- import torch
14
- import torch.nn.functional as F
15
-
16
- from torch.nn.modules.batchnorm import _BatchNorm
17
- from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
18
-
19
- from .comm import SyncMaster
20
-
21
- __all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d']
22
-
23
-
24
- def _sum_ft(tensor):
25
- """sum over the first and last dimention"""
26
- return tensor.sum(dim=0).sum(dim=-1)
27
-
28
-
29
- def _unsqueeze_ft(tensor):
30
- """add new dementions at the front and the tail"""
31
- return tensor.unsqueeze(0).unsqueeze(-1)
32
-
33
-
34
- _ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
35
- _MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
36
-
37
-
38
- class _SynchronizedBatchNorm(_BatchNorm):
39
- def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
40
- super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
41
-
42
- self._sync_master = SyncMaster(self._data_parallel_master)
43
-
44
- self._is_parallel = False
45
- self._parallel_id = None
46
- self._slave_pipe = None
47
-
48
- def forward(self, input):
49
- # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
50
- if not (self._is_parallel and self.training):
51
- return F.batch_norm(
52
- input, self.running_mean, self.running_var, self.weight, self.bias,
53
- self.training, self.momentum, self.eps)
54
-
55
- # Resize the input to (B, C, -1).
56
- input_shape = input.size()
57
- input = input.view(input.size(0), self.num_features, -1)
58
-
59
- # Compute the sum and square-sum.
60
- sum_size = input.size(0) * input.size(2)
61
- input_sum = _sum_ft(input)
62
- input_ssum = _sum_ft(input ** 2)
63
-
64
- # Reduce-and-broadcast the statistics.
65
- if self._parallel_id == 0:
66
- mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
67
- else:
68
- mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
69
-
70
- # Compute the output.
71
- if self.affine:
72
- # MJY:: Fuse the multiplication for speed.
73
- output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
74
- else:
75
- output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
76
-
77
- # Reshape it.
78
- return output.view(input_shape)
79
-
80
- def __data_parallel_replicate__(self, ctx, copy_id):
81
- self._is_parallel = True
82
- self._parallel_id = copy_id
83
-
84
- # parallel_id == 0 means master device.
85
- if self._parallel_id == 0:
86
- ctx.sync_master = self._sync_master
87
- else:
88
- self._slave_pipe = ctx.sync_master.register_slave(copy_id)
89
-
90
- def _data_parallel_master(self, intermediates):
91
- """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
92
-
93
- # Always using same "device order" makes the ReduceAdd operation faster.
94
- # Thanks to:: Tete Xiao (http://tetexiao.com/)
95
- intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
96
-
97
- to_reduce = [i[1][:2] for i in intermediates]
98
- to_reduce = [j for i in to_reduce for j in i] # flatten
99
- target_gpus = [i[1].sum.get_device() for i in intermediates]
100
-
101
- sum_size = sum([i[1].sum_size for i in intermediates])
102
- sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
103
- mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
104
-
105
- broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
106
-
107
- outputs = []
108
- for i, rec in enumerate(intermediates):
109
- outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
110
-
111
- return outputs
112
-
113
- def _compute_mean_std(self, sum_, ssum, size):
114
- """Compute the mean and standard-deviation with sum and square-sum. This method
115
- also maintains the moving average on the master device."""
116
- assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
117
- mean = sum_ / size
118
- sumvar = ssum - sum_ * mean
119
- unbias_var = sumvar / (size - 1)
120
- bias_var = sumvar / size
121
-
122
- self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
123
- self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
124
-
125
- return mean, bias_var.clamp(self.eps) ** -0.5
126
-
127
-
128
- class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
129
- r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
130
- mini-batch.
131
-
132
- .. math::
133
-
134
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
135
-
136
- This module differs from the built-in PyTorch BatchNorm1d as the mean and
137
- standard-deviation are reduced across all devices during training.
138
-
139
- For example, when one uses `nn.DataParallel` to wrap the network during
140
- training, PyTorch's implementation normalizes the tensor on each device using
141
- the statistics only on that device, which accelerates the computation and
142
- is also easy to implement, but the statistics might be inaccurate.
143
- Instead, in this synchronized version, the statistics will be computed
144
- over all training samples distributed on multiple devices.
145
-
146
- Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
147
- as the built-in PyTorch implementation.
148
-
149
- The mean and standard-deviation are calculated per-dimension over
150
- the mini-batches and gamma and beta are learnable parameter vectors
151
- of size C (where C is the input size).
152
-
153
- During training, this layer keeps a running estimate of its computed mean
154
- and variance. The running sum is kept with a default momentum of 0.1.
155
-
156
- During evaluation, this running mean/variance is used for normalization.
157
-
158
- Because the BatchNorm is done over the `C` dimension, computing statistics
159
- on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
160
-
161
- Args:
162
- num_features: num_features from an expected input of size
163
- `batch_size x num_features [x width]`
164
- eps: a value added to the denominator for numerical stability.
165
- Default: 1e-5
166
- momentum: the value used for the running_mean and running_var
167
- computation. Default: 0.1
168
- affine: a boolean value that when set to ``True``, gives the layer learnable
169
- affine parameters. Default: ``True``
170
-
171
- Shape:
172
- - Input: :math:`(N, C)` or :math:`(N, C, L)`
173
- - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
174
-
175
- Examples:
176
- >>> # With Learnable Parameters
177
- >>> m = SynchronizedBatchNorm1d(100)
178
- >>> # Without Learnable Parameters
179
- >>> m = SynchronizedBatchNorm1d(100, affine=False)
180
- >>> input = torch.autograd.Variable(torch.randn(20, 100))
181
- >>> output = m(input)
182
- """
183
-
184
- def _check_input_dim(self, input):
185
- if input.dim() != 2 and input.dim() != 3:
186
- raise ValueError('expected 2D or 3D input (got {}D input)'
187
- .format(input.dim()))
188
- super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
189
-
190
-
191
- class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
192
- r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
193
- of 3d inputs
194
-
195
- .. math::
196
-
197
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
198
-
199
- This module differs from the built-in PyTorch BatchNorm2d as the mean and
200
- standard-deviation are reduced across all devices during training.
201
-
202
- For example, when one uses `nn.DataParallel` to wrap the network during
203
- training, PyTorch's implementation normalizes the tensor on each device using
204
- the statistics only on that device, which accelerates the computation and
205
- is also easy to implement, but the statistics might be inaccurate.
206
- Instead, in this synchronized version, the statistics will be computed
207
- over all training samples distributed on multiple devices.
208
-
209
- Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
210
- as the built-in PyTorch implementation.
211
-
212
- The mean and standard-deviation are calculated per-dimension over
213
- the mini-batches and gamma and beta are learnable parameter vectors
214
- of size C (where C is the input size).
215
-
216
- During training, this layer keeps a running estimate of its computed mean
217
- and variance. The running sum is kept with a default momentum of 0.1.
218
-
219
- During evaluation, this running mean/variance is used for normalization.
220
-
221
- Because the BatchNorm is done over the `C` dimension, computing statistics
222
- on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
223
-
224
- Args:
225
- num_features: num_features from an expected input of
226
- size batch_size x num_features x height x width
227
- eps: a value added to the denominator for numerical stability.
228
- Default: 1e-5
229
- momentum: the value used for the running_mean and running_var
230
- computation. Default: 0.1
231
- affine: a boolean value that when set to ``True``, gives the layer learnable
232
- affine parameters. Default: ``True``
233
-
234
- Shape:
235
- - Input: :math:`(N, C, H, W)`
236
- - Output: :math:`(N, C, H, W)` (same shape as input)
237
-
238
- Examples:
239
- >>> # With Learnable Parameters
240
- >>> m = SynchronizedBatchNorm2d(100)
241
- >>> # Without Learnable Parameters
242
- >>> m = SynchronizedBatchNorm2d(100, affine=False)
243
- >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
244
- >>> output = m(input)
245
- """
246
-
247
- def _check_input_dim(self, input):
248
- if input.dim() != 4:
249
- raise ValueError('expected 4D input (got {}D input)'
250
- .format(input.dim()))
251
- super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
252
-
253
-
254
- class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
255
- r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
256
- of 4d inputs
257
-
258
- .. math::
259
-
260
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
261
-
262
- This module differs from the built-in PyTorch BatchNorm3d as the mean and
263
- standard-deviation are reduced across all devices during training.
264
-
265
- For example, when one uses `nn.DataParallel` to wrap the network during
266
- training, PyTorch's implementation normalizes the tensor on each device using
267
- the statistics only on that device, which accelerates the computation and
268
- is also easy to implement, but the statistics might be inaccurate.
269
- Instead, in this synchronized version, the statistics will be computed
270
- over all training samples distributed on multiple devices.
271
-
272
- Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
273
- as the built-in PyTorch implementation.
274
-
275
- The mean and standard-deviation are calculated per-dimension over
276
- the mini-batches and gamma and beta are learnable parameter vectors
277
- of size C (where C is the input size).
278
-
279
- During training, this layer keeps a running estimate of its computed mean
280
- and variance. The running sum is kept with a default momentum of 0.1.
281
-
282
- During evaluation, this running mean/variance is used for normalization.
283
-
284
- Because the BatchNorm is done over the `C` dimension, computing statistics
285
- on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
286
- or Spatio-temporal BatchNorm
287
-
288
- Args:
289
- num_features: num_features from an expected input of
290
- size batch_size x num_features x depth x height x width
291
- eps: a value added to the denominator for numerical stability.
292
- Default: 1e-5
293
- momentum: the value used for the running_mean and running_var
294
- computation. Default: 0.1
295
- affine: a boolean value that when set to ``True``, gives the layer learnable
296
- affine parameters. Default: ``True``
297
-
298
- Shape:
299
- - Input: :math:`(N, C, D, H, W)`
300
- - Output: :math:`(N, C, D, H, W)` (same shape as input)
301
-
302
- Examples:
303
- >>> # With Learnable Parameters
304
- >>> m = SynchronizedBatchNorm3d(100)
305
- >>> # Without Learnable Parameters
306
- >>> m = SynchronizedBatchNorm3d(100, affine=False)
307
- >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
308
- >>> output = m(input)
309
- """
310
-
311
- def _check_input_dim(self, input):
312
- if input.dim() != 5:
313
- raise ValueError('expected 5D input (got {}D input)'
314
- .format(input.dim()))
315
- super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
 
 
 
 
spaces/4com/4com-license/app.py DELETED
@@ -1,94 +0,0 @@
1
- import gradio as gr
2
-
3
- with gr.Blocks() as demo:
4
-
5
- gr.Markdown("""
6
-
7
- <h1><center>License</center></h1>
8
-
9
- Copyright (c) 2023 **4COM** and **OpenSkyML** organizations
10
-
11
- **CreativeML Open RAIL-M**
12
- dated September 1, 2023
13
-
14
- Section I: PREAMBLE
15
-
16
- Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
17
-
18
- Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
19
-
20
- In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
21
-
22
- Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
23
-
24
- This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
25
-
26
- NOW THEREFORE, You and Licensor agree as follows:
27
-
28
- 1. Definitions
29
-
30
- - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
31
- - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
32
- - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
33
- - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
34
- - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
35
- - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
36
- - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
37
- - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
38
- - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
39
- - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
40
- - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
41
- - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
42
-
43
- Section II: INTELLECTUAL PROPERTY RIGHTS
44
-
45
- Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
46
-
47
- 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
48
- 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
49
-
50
- Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
51
-
52
- 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
53
- Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
54
- You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
55
- You must cause any modified files to carry prominent notices stating that You changed the files;
56
- You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
57
- You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
58
- 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
59
- 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
60
-
61
- Section IV: OTHER PROVISIONS
62
-
63
- 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
64
- 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
65
- 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
66
- 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
67
- 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
68
- 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
69
-
70
- END OF TERMS AND CONDITIONS
71
-
72
-
73
-
74
-
75
- Attachment A
76
-
77
- Use Restrictions
78
-
79
- You agree not to use the Model or Derivatives of the Model:
80
- - In any way that violates any applicable national, federal, state, local or international law or regulation;
81
- - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
82
- - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
83
- - To generate or disseminate personal identifiable information that can be used to harm an individual;
84
- - To defame, disparage or otherwise harass others;
85
- - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
86
- - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
87
- - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
88
- - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
89
- - To provide medical advice and medical results interpretation;
90
- - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
91
-
92
- """)
93
-
94
- demo.launch()
 
 
 
 
spaces/52Hz/SRMNet_thesis/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: SRMNet_Image_Restoration
3
- emoji: ✨
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 2.8.12
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/Applio-RVC-Fork/utils/i18n.py DELETED
@@ -1,28 +0,0 @@
1
- import locale
2
- import json
3
- import os
4
-
5
-
6
- def load_language_list(language):
7
- with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f:
8
- language_list = json.load(f)
9
- return language_list
10
-
11
-
12
- class I18nAuto:
13
- def __init__(self, language=None):
14
- if language in ["Auto", None]:
15
- language = "es_ES"
16
- if not os.path.exists(f"./i18n/{language}.json"):
17
- language = "es_ES"
18
- language = "es_ES"
19
- self.language = language
20
- # print("Use Language:", language)
21
- self.language_map = load_language_list(language)
22
-
23
- def __call__(self, key):
24
- return self.language_map.get(key, key)
25
-
26
- def print(self):
27
- # print("Use Language:", self.language)
28
- print("")
 
 
 
 
spaces/801artistry/RVC801/diffq/utils.py DELETED
@@ -1,37 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import inspect
8
- from typing import Optional, List
9
-
10
-
11
- def simple_repr(obj, attrs: Optional[List[str]] = None, overrides={}):
12
- """
13
- Return a simple representation string for `obj`.
14
- If `attrs` is not None, it should be a list of attributes to include.
15
- """
16
- params = inspect.signature(obj.__class__).parameters
17
- attrs_repr = []
18
- if attrs is None:
19
- attrs = params.keys()
20
- for attr in attrs:
21
- display = False
22
- if attr in overrides:
23
- value = overrides[attr]
24
- elif hasattr(obj, attr):
25
- value = getattr(obj, attr)
26
- else:
27
- continue
28
- if attr in params:
29
- param = params[attr]
30
- if param.default is inspect._empty or value != param.default:
31
- display = True
32
- else:
33
- display = True
34
-
35
- if display:
36
- attrs_repr.append(f"{attr}={value}")
37
- return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
 
 
 
 
spaces/AI-Zero-to-Hero/08-GR-Chatbot-Blenderbot/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: 08 GR Chatbot Blenderbot
3
- emoji: 👀
4
- colorFrom: purple
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.4
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/scripts/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/__init__.py DELETED
File without changes
spaces/AIZeroToHero/README/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: 🧑🏻‍🤝‍🧑🏾Org-AI-Zero-To-Hero
3
- emoji: 🧑🏻‍🤝‍🧑🏾
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- AI Zero to Hero - Classroom Learning Examples
 
 
 
 
 
 
 
 
 
 
 
spaces/ASJMO/freegpt/client/js/change-language.js DELETED
@@ -1,47 +0,0 @@
1
- document.addEventListener('DOMContentLoaded', fetchLanguages);
2
-
3
- async function fetchLanguages() {
4
- try {
5
- const [languagesResponse, currentLanguageResponse] = await Promise.all([
6
- fetch(`${url_prefix}/get-languages`),
7
- fetch(`${url_prefix}/get-locale`)
8
- ]);
9
-
10
- const languages = await languagesResponse.json();
11
- const currentLanguage = await currentLanguageResponse.text();
12
-
13
- const languageSelect = document.getElementById('language');
14
- languages.forEach(lang => {
15
- const option = document.createElement('option');
16
- option.value = lang;
17
- option.textContent = lang;
18
- languageSelect.appendChild(option);
19
- });
20
-
21
- const savedLanguage = localStorage.getItem("language") || currentLanguage;
22
- setLanguageOnPageLoad(savedLanguage);
23
- } catch (error) {
24
- console.error("Failed to fetch languages or current language");
25
- }
26
- }
27
-
28
- function setLanguageOnPageLoad(language) {
29
- document.getElementById("language").value = language;
30
- }
31
-
32
- function changeLanguage(lang) {
33
- fetch(`${url_prefix}/change-language`, {
34
- method: "POST",
35
- headers: {
36
- "Content-Type": "application/json",
37
- },
38
- body: JSON.stringify({ language: lang }),
39
- }).then((response) => {
40
- if (response.ok) {
41
- localStorage.setItem("language", lang);
42
- location.reload();
43
- } else {
44
- console.error("Failed to change language");
45
- }
46
- });
47
- }
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/switchTheme.ts DELETED
@@ -1,10 +0,0 @@
1
- export function switchTheme() {
2
- const { classList } = document.querySelector("html") as HTMLElement;
3
- if (classList.contains("dark")) {
4
- classList.remove("dark");
5
- localStorage.theme = "light";
6
- } else {
7
- classList.add("dark");
8
- localStorage.theme = "dark";
9
- }
10
- }
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/callback/updateUser.ts DELETED
@@ -1,12 +0,0 @@
1
- import { authCondition, refreshSessionCookie } from "$lib/server/auth";
2
- import { collections } from "$lib/server/database";
3
- import { DEFAULT_SETTINGS } from "$lib/types/Settings";
4
- import { z } from "zod";
5
- import type { UserinfoResponse } from "openid-client";
6
- import type { Cookies } from "@sveltejs/kit";
7
-
8
- export async function updateUser(params: {
9
- userData: UserinfoResponse;
10
- locals: App.Locals;
11
- cookies: Cookies;
12
- }) {}
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/You.py DELETED
@@ -1,40 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
-
5
- from ..requests import StreamSession
6
- from ..typing import AsyncGenerator, Messages
7
- from .base_provider import AsyncGeneratorProvider, format_prompt
8
-
9
-
10
- class You(AsyncGeneratorProvider):
11
- url = "https://you.com"
12
- working = True
13
- supports_gpt_35_turbo = True
14
-
15
-
16
- @classmethod
17
- async def create_async_generator(
18
- cls,
19
- model: str,
20
- messages: Messages,
21
- proxy: str = None,
22
- timeout: int = 120,
23
- **kwargs,
24
- ) -> AsyncGenerator:
25
- async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
26
- headers = {
27
- "Accept": "text/event-stream",
28
- "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
29
- }
30
- data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""}
31
- async with session.get(
32
- f"{cls.url}/api/streamingSearch",
33
- params=data,
34
- headers=headers
35
- ) as response:
36
- response.raise_for_status()
37
- start = b'data: {"youChatToken": '
38
- async for line in response.iter_lines():
39
- if line.startswith(start):
40
- yield json.loads(line[len(start):-1])
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fileselectorbutton/FileChooserMethods.js DELETED
@@ -1,21 +0,0 @@
1
- export default {
2
- setAccept(accept) {
3
- this.childrenMap.fileChooser.setAccept(accept);
4
- return this;
5
- },
6
-
7
- setMultiple(enabled) {
8
- this.childrenMap.fileChooser.setMultiple(enabled);
9
- return this;
10
- },
11
-
12
- loadFile(file, loaderType, key, cacheType, onComplete) {
13
- this.childrenMap.fileChooser.loadFile(file, loaderType, key, cacheType, onComplete);
14
- return this;
15
- },
16
-
17
- loadFilePromise(file, loaderType, key, cacheType) {
18
- return this.childrenMap.fileChooser.loadFilePromise(file, loaderType, key, cacheType);
19
- }
20
-
21
- }
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/MergeStyle.js DELETED
@@ -1,33 +0,0 @@
1
- import DeepMerge from "../../../../../plugins/utils/object/DeepMerge";
2
-
3
- /*
4
- Priority of styles : name, $class, $type
5
- 1. name (#name)
6
- 2. $class (.class)
7
- 3. $type (type)
8
- */
9
-
10
- var MergeStyle = function (data, styles) {
11
- if (styles === undefined) {
12
- return data;
13
- }
14
-
15
- if (data.hasOwnProperty('name')) {
16
- DeepMerge(data, styles[`#${data.name}`]);
17
- }
18
-
19
- if (data.hasOwnProperty('$class')) {
20
- var clasKeys = data.$class.split(' ');
21
- for (var i = 0, cnt = clasKeys.length; i < cnt; i++) {
22
- DeepMerge(data, styles[`.${clasKeys[i]}`]);
23
- }
24
- }
25
-
26
- if (data.hasOwnProperty('$type')) {
27
- DeepMerge(data, styles[data.$type]);
28
- }
29
-
30
- return data;
31
- }
32
-
33
- export default MergeStyle;
 
 
 
 
spaces/Agusbs98/automatic-ecg-diagnosis/nets/modules.py DELETED
@@ -1,33 +0,0 @@
1
-
2
- import os, sys
3
- from libs import *
4
- from .layers import *
5
-
6
- class LightSEModule(nn.Module):
7
- def __init__(self,
8
- in_channels,
9
- reduction = 16,
10
- ):
11
- super(LightSEModule, self).__init__()
12
- self.pool = nn.AdaptiveAvgPool1d(1)
13
-
14
- self.s_conv = DSConv1d(
15
- in_channels, in_channels//reduction,
16
- kernel_size = 1,
17
- )
18
- self.act_fn = nn.ReLU()
19
- self.e_conv = DSConv1d(
20
- in_channels//reduction, in_channels,
21
- kernel_size = 1,
22
- )
23
-
24
- def forward(self,
25
- input,
26
- ):
27
- attention_scores = self.pool(input)
28
-
29
- attention_scores = self.s_conv(attention_scores)
30
- attention_scores = self.act_fn(attention_scores)
31
- attention_scores = self.e_conv(attention_scores)
32
-
33
- return input*torch.sigmoid(attention_scores)
 
 
 
 
spaces/AkashKhamkar/Job_Search_Engine/app.py DELETED
@@ -1,56 +0,0 @@
1
- import streamlit as st
2
- import torch
3
- import pickle
4
- import time
5
- import pandas as pd
6
- from iteration_utilities import unique_everseen
7
- from sentence_transformers import util
8
- from loader import bi_encoder, cross_encoder, df, job_corpus_ecoded, job_corpus
9
-
10
-
11
- def jobsearch(query,df, top_k=100):
12
- #print("Answer by NinjaBot : ")
13
- ans = []
14
- question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
15
- hits = util.semantic_search(question_embedding, job_corpus_ecoded, top_k=top_k)
16
- hits = hits[0]
17
- cross_inp = [[query, job_corpus[hit['corpus_id']]] for hit in hits]
18
- cross_scores = cross_encoder.predict(cross_inp)
19
- for idx in range(len(cross_scores)):
20
- hits[idx]['cross-score'] = cross_scores[idx]
21
-
22
- hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
23
- #indexes = []
24
- search_result = []
25
- for idx, hit in enumerate(hits[0:10]):
26
- obj = {}
27
- ans.append(job_corpus[hit['corpus_id']])
28
- #indexes.append(job_corpus.index(job_corpus[hit['corpus_id']]))
29
- obj['title'] = df.at[job_corpus.index(job_corpus[hit['corpus_id']]),'title']
30
- obj['link'] = df.at[job_corpus.index(job_corpus[hit['corpus_id']]),'url']
31
- search_result.append(obj)
32
- final_search_result = list(unique_everseen(search_result))
33
- return final_search_result
34
- #return df.at[indexes[0],'title'],df.at[indexes[1],'title'],df.at[indexes[2],'title'],df.at[indexes[3],'title'],df.at[indexes[4],'title']
35
- #return ans[0],ans[1],ans[2],ans[3],ans[4]
36
-
37
- def main():
38
- if 'submitted' not in st.session_state:
39
- st.session_state.submitted = False
40
-
41
- def callback():
42
- st.session_state.submitted = True
43
-
44
- st.title('Job Search Engine 💼')
45
- st.text("")
46
- st.text("")
47
- query = st.text_input('Enter your job query here ! ')
48
- if (st.button("Search", on_click=callback) and query) :
49
- with st.spinner('Fetching the best jobs for you!...'):
50
- #time.sleep(10)
51
- result = jobsearch(query, df)
52
- #result = jobsearch(query, df)
53
- st.success('NinjaBot : Here are a few suggestions')
54
- #st.write(f"This is the query : {query}")
55
- st.write(result)
56
- main()
 
 
 
 
spaces/AliSaria/MilitarEye/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: MilitarEye
3
- emoji: 🐨
4
- colorFrom: purple
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 4.1.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/生成函数注释.py DELETED
@@ -1,54 +0,0 @@
1
- from toolbox import update_ui
2
- from toolbox import CatchException, report_execption, write_results_to_file
3
- from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
4
- fast_debug = False
5
-
6
- def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
7
- import time, os
8
- print('begin analysis on:', file_manifest)
9
- for index, fp in enumerate(file_manifest):
10
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
11
- file_content = f.read()
12
-
13
- i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
14
- i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
15
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
16
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
17
-
18
- if not fast_debug:
19
- msg = '正常'
20
- # ** gpt request **
21
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
22
- i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # with timeout countdown
23
-
24
- chatbot[-1] = (i_say_show_user, gpt_say)
25
- history.append(i_say_show_user); history.append(gpt_say)
26
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
27
- if not fast_debug: time.sleep(2)
28
-
29
- if not fast_debug:
30
- res = write_results_to_file(history)
31
- chatbot.append(("完成了吗?", res))
32
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
33
-
34
-
35
-
36
- @CatchException
37
- def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
38
- history = [] # clear the history to avoid input overflow
39
- import glob, os
40
- if os.path.exists(txt):
41
- project_folder = txt
42
- else:
43
- if txt == "": txt = '空空如也的输入栏'
44
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
45
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
46
- return
47
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
48
- [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
49
-
50
- if len(file_manifest) == 0:
51
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
52
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
53
- return
54
- yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
 
 
 
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/cpp/libJPG/jpge.cpp DELETED
@@ -1,1049 +0,0 @@
1
- // jpge.cpp - C++ class for JPEG compression.
2
- // Public domain, Rich Geldreich <[email protected]>
3
- // v1.01, Dec. 18, 2010 - Initial release
4
- // v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
5
- // v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
6
- // Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
7
- // v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
8
- // Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
9
- // Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.
10
-
11
- #include "jpge.h"
12
-
13
- #include <stdlib.h>
14
- #include <string.h>
15
- #if PLATFORM_WINDOWS
16
- #include <malloc.h>
17
- #endif
18
-
19
- #define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
20
- #define JPGE_MIN(a,b) (((a)<(b))?(a):(b))
21
-
22
- namespace jpge {
23
-
24
- static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
25
- static inline void jpge_free(void *p) { FMemory::Free(p);; }
26
-
27
- // Various JPEG enums and tables.
28
- enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 };
29
- enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 };
30
-
31
- static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
32
- static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 };
33
- static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 };
34
- static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
35
- static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
36
- static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d };
37
- static uint8 s_ac_lum_val[AC_LUM_CODES] =
38
- {
39
- 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
40
- 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
41
- 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
42
- 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
43
- 0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
44
- 0xf9,0xfa
45
- };
46
- static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 };
47
- static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
48
- static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 };
49
- static uint8 s_ac_chroma_val[AC_CHROMA_CODES] =
50
- {
51
- 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
52
- 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
53
- 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
54
- 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
55
- 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
56
- 0xf9,0xfa
57
- };
58
-
59
- // Low-level helper functions.
60
- template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }
61
-
62
- const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
63
- static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); }
64
-
65
- static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
66
- {
67
- for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--)
68
- {
69
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
70
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
71
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
72
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
73
- }
74
- }
75
-
76
- static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
77
- {
78
- for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--)
79
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
80
- }
81
-
82
- static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
83
- {
84
- for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--)
85
- {
86
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
87
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
88
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
89
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
90
- }
91
- }
92
-
93
- static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
94
- {
95
- for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--)
96
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
97
- }
98
-
99
- static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
100
- {
101
- for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; }
102
- }
103
-
104
- // Forward DCT - DCT derived from jfdctint.
105
- #define CONST_BITS 13
106
- #define ROW_BITS 2
107
- #define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
108
- #define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
109
- #define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
110
- int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
111
- int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
112
- int32 u1 = DCT_MUL(t12 + t13, 4433); \
113
- s2 = u1 + DCT_MUL(t13, 6270); \
114
- s6 = u1 + DCT_MUL(t12, -15137); \
115
- u1 = t4 + t7; \
116
- int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
117
- int32 z5 = DCT_MUL(u3 + u4, 9633); \
118
- t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
119
- t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
120
- u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
121
- u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
122
- u3 += z5; u4 += z5; \
123
- s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;
124
-
125
- static void DCT2D(int32 *p)
126
- {
127
- int32 c, *q = p;
128
- for (c = 7; c >= 0; c--, q += 8)
129
- {
130
- int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
131
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
132
- q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
133
- q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
134
- }
135
- for (q = p, c = 7; c >= 0; c--, q++)
136
- {
137
- int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
138
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
139
- q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
140
- q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
141
- }
142
- }
143
-
144
- struct sym_freq { uint m_key, m_sym_index; };
145
-
146
- // Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
147
- static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
148
- {
149
- const uint cMaxPasses = 4;
150
- uint32 hist[256 * cMaxPasses]; clear_obj(hist);
151
- for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
152
- sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
153
- uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
154
- for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
155
- {
156
- const uint32* pHist = &hist[pass << 8];
157
- uint offsets[256], cur_ofs = 0;
158
- for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
159
- for (uint i = 0; i < num_syms; i++)
160
- pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
161
- sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
162
- }
163
- return pCur_syms;
164
- }
165
-
166
- // calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996.
167
- static void calculate_minimum_redundancy(sym_freq *A, int n)
168
- {
169
- int root, leaf, next, avbl, used, dpth;
170
- if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
171
- A[0].m_key += A[1].m_key; root = 0; leaf = 2;
172
- for (next=1; next < n-1; next++)
173
- {
174
- if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
175
- if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
176
- }
177
- A[n-2].m_key = 0;
178
- for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
179
- avbl = 1; used = dpth = 0; root = n-2; next = n-1;
180
- while (avbl>0)
181
- {
182
- while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
183
- while (avbl>used) { A[next--].m_key = dpth; avbl--; }
184
- avbl = 2*used; dpth++; used = 0;
185
- }
186
- }
187
-
188
- // Limits canonical Huffman code table's max code size to max_code_size.
189
- static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
190
- {
191
- if (code_list_len <= 1) return;
192
-
193
- for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
194
-
195
- uint32 total = 0;
196
- for (int i = max_code_size; i > 0; i--)
197
- total += (((uint32)pNum_codes[i]) << (max_code_size - i));
198
-
199
- while (total != (1UL << max_code_size))
200
- {
201
- pNum_codes[max_code_size]--;
202
- for (int i = max_code_size - 1; i > 0; i--)
203
- {
204
- if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
205
- }
206
- total--;
207
- }
208
- }
209
-
210
- // Generates an optimized Huffman table.
211
- void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
212
- {
213
- sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
214
- syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
215
- int num_used_syms = 1;
216
- const uint32 *pSym_count = &m_huff_count[table_num][0];
217
- for (int i = 0; i < table_len; i++)
218
- if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
219
- sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
220
- calculate_minimum_redundancy(pSyms, num_used_syms);
221
-
222
- // Count the # of symbols of each code size.
223
- int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
224
- for (int i = 0; i < num_used_syms; i++)
225
- num_codes[pSyms[i].m_key]++;
226
-
227
- const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
228
- huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);
229
-
230
- // Compute m_huff_bits array, which contains the # of symbols per code size.
231
- clear_obj(m_huff_bits[table_num]);
232
- for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
233
- m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);
234
-
235
- // Remove the dummy symbol added above, which must be in largest bucket.
236
- for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
237
- {
238
- if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
239
- }
240
-
241
- // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
242
- for (int i = num_used_syms - 1; i >= 1; i--)
243
- m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
244
- }
245
-
246
- // JPEG marker generation.
247
- void jpeg_encoder::emit_byte(uint8 i)
248
- {
249
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
250
- }
251
-
252
- void jpeg_encoder::emit_word(uint i)
253
- {
254
- emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF));
255
- }
256
-
257
- void jpeg_encoder::emit_marker(int marker)
258
- {
259
- emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
260
- }
261
-
262
- // Emit JFIF marker
263
- void jpeg_encoder::emit_jfif_app0()
264
- {
265
- emit_marker(M_APP0);
266
- emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1);
267
- emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
268
- emit_byte(0);
269
- emit_byte(1); /* Major version */
270
- emit_byte(1); /* Minor version */
271
- emit_byte(0); /* Density unit */
272
- emit_word(1);
273
- emit_word(1);
274
- emit_byte(0); /* No thumbnail image */
275
- emit_byte(0);
276
- }
277
-
278
- // Emit quantization tables
279
- void jpeg_encoder::emit_dqt()
280
- {
281
- for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
282
- {
283
- emit_marker(M_DQT);
284
- emit_word(64 + 1 + 2);
285
- emit_byte(static_cast<uint8>(i));
286
- for (int j = 0; j < 64; j++)
287
- emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
288
- }
289
- }
290
-
291
- // Emit start of frame marker
292
- void jpeg_encoder::emit_sof()
293
- {
294
- emit_marker(M_SOF0); /* baseline */
295
- emit_word(3 * m_num_components + 2 + 5 + 1);
296
- emit_byte(8); /* precision */
297
- emit_word(m_image_y);
298
- emit_word(m_image_x);
299
- emit_byte(m_num_components);
300
- for (int i = 0; i < m_num_components; i++)
301
- {
302
- emit_byte(static_cast<uint8>(i + 1)); /* component ID */
303
- emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
304
- emit_byte(i > 0); /* quant. table num */
305
- }
306
- }
307
-
308
- // Emit Huffman table.
309
- void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
310
- {
311
- emit_marker(M_DHT);
312
-
313
- int length = 0;
314
- for (int i = 1; i <= 16; i++)
315
- length += bits[i];
316
-
317
- emit_word(length + 2 + 1 + 16);
318
- emit_byte(static_cast<uint8>(index + (ac_flag << 4)));
319
-
320
- for (int i = 1; i <= 16; i++)
321
- emit_byte(bits[i]);
322
-
323
- for (int i = 0; i < length; i++)
324
- emit_byte(val[i]);
325
- }
326
-
327
- // Emit all Huffman tables.
328
- void jpeg_encoder::emit_dhts()
329
- {
330
- emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
331
- emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
332
- if (m_num_components == 3)
333
- {
334
- emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
335
- emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
336
- }
337
- }
338
-
339
- // emit start of scan
340
- void jpeg_encoder::emit_sos()
341
- {
342
- emit_marker(M_SOS);
343
- emit_word(2 * m_num_components + 2 + 1 + 3);
344
- emit_byte(m_num_components);
345
- for (int i = 0; i < m_num_components; i++)
346
- {
347
- emit_byte(static_cast<uint8>(i + 1));
348
- if (i == 0)
349
- emit_byte((0 << 4) + 0);
350
- else
351
- emit_byte((1 << 4) + 1);
352
- }
353
- emit_byte(0); /* spectral selection */
354
- emit_byte(63);
355
- emit_byte(0);
356
- }
357
-
358
- // Emit all markers at beginning of image file.
359
- void jpeg_encoder::emit_markers()
360
- {
361
- emit_marker(M_SOI);
362
- emit_jfif_app0();
363
- emit_dqt();
364
- emit_sof();
365
- emit_dhts();
366
- emit_sos();
367
- }
368
-
369
- // Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
370
- void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
371
- {
372
- int i, l, last_p, si;
373
- uint8 huff_size[257];
374
- uint huff_code[257];
375
- uint code;
376
-
377
- int p = 0;
378
- for (l = 1; l <= 16; l++)
379
- for (i = 1; i <= bits[l]; i++)
380
- huff_size[p++] = (char)l;
381
-
382
- huff_size[p] = 0; last_p = p; // write sentinel
383
-
384
- code = 0; si = huff_size[0]; p = 0;
385
-
386
- while (huff_size[p])
387
- {
388
- while (huff_size[p] == si)
389
- huff_code[p++] = code++;
390
- code <<= 1;
391
- si++;
392
- }
393
-
394
- memset(codes, 0, sizeof(codes[0])*256);
395
- memset(code_sizes, 0, sizeof(code_sizes[0])*256);
396
- for (p = 0; p < last_p; p++)
397
- {
398
- codes[val[p]] = huff_code[p];
399
- code_sizes[val[p]] = huff_size[p];
400
- }
401
- }
402
-
403
- // Quantization table generation.
404
- void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
405
- {
406
- int32 q;
407
- if (m_params.m_quality < 50)
408
- q = 5000 / m_params.m_quality;
409
- else
410
- q = 200 - m_params.m_quality * 2;
411
- for (int i = 0; i < 64; i++)
412
- {
413
- int32 j = *pSrc++; j = (j * q + 50L) / 100L;
414
- *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
415
- }
416
- }
417
-
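As a quick numeric check of the quality scaling above, here is a hedged Python sketch that reproduces the same integer arithmetic (16 is the first entry of `s_std_lum_quant`; the function name is illustrative):

```python
def scaled_quant_entry(entry, quality):
    # Same integer arithmetic as compute_quant_table: pick the divisor from the
    # quality setting, rescale the base table entry, then clamp to [1, 255].
    q = 5000 // quality if quality < 50 else 200 - quality * 2
    return min(max((entry * q + 50) // 100, 1), 255)

print(scaled_quant_entry(16, 75))  # 8  -> higher quality keeps finer quantization
print(scaled_quant_entry(16, 10))  # 80 -> lower quality quantizes much more coarsely
```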
418
- // Higher-level methods.
419
- void jpeg_encoder::first_pass_init()
420
- {
421
- m_bit_buffer = 0; m_bits_in = 0;
422
- memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
423
- m_mcu_y_ofs = 0;
424
- m_pass_num = 1;
425
- }
426
-
427
- bool jpeg_encoder::second_pass_init()
428
- {
429
- compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
430
- compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
431
- if (m_num_components > 1)
432
- {
433
- compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
434
- compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
435
- }
436
- first_pass_init();
437
- emit_markers();
438
- m_pass_num = 2;
439
- return true;
440
- }
441
-
442
- bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
443
- {
444
- m_num_components = 3;
445
- switch (m_params.m_subsampling)
446
- {
447
- case Y_ONLY:
448
- {
449
- m_num_components = 1;
450
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
451
- m_mcu_x = 8; m_mcu_y = 8;
452
- break;
453
- }
454
- case H1V1:
455
- {
456
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
457
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
458
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
459
- m_mcu_x = 8; m_mcu_y = 8;
460
- break;
461
- }
462
- case H2V1:
463
- {
464
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
465
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
466
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
467
- m_mcu_x = 16; m_mcu_y = 8;
468
- break;
469
- }
470
- case H2V2:
471
- {
472
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
473
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
474
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
475
- m_mcu_x = 16; m_mcu_y = 16;
476
- }
477
- }
478
-
479
- m_image_x = p_x_res; m_image_y = p_y_res;
480
- m_image_bpp = src_channels;
481
- m_image_bpl = m_image_x * src_channels;
482
- m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
483
- m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
484
- m_image_bpl_xlt = m_image_x * m_num_components;
485
- m_image_bpl_mcu = m_image_x_mcu * m_num_components;
486
- m_mcus_per_row = m_image_x_mcu / m_mcu_x;
487
-
488
- if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
489
- for (int i = 1; i < m_mcu_y; i++)
490
- m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;
491
-
492
- compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
493
- compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? s_std_lum_quant : s_std_croma_quant);
494
-
495
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
496
- m_pOut_buf = m_out_buf;
497
-
498
- if (m_params.m_two_pass_flag)
499
- {
500
- clear_obj(m_huff_count);
501
- first_pass_init();
502
- }
503
- else
504
- {
505
- memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES);
506
- memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES);
507
- memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES);
508
- memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES);
509
- if (!second_pass_init()) return false; // in effect, skip over the first pass
510
- }
511
- return m_all_stream_writes_succeeded;
512
- }
513
-
514
- void jpeg_encoder::load_block_8_8_grey(int x)
515
- {
516
- uint8 *pSrc;
517
- sample_array_t *pDst = m_sample_array;
518
- x <<= 3;
519
- for (int i = 0; i < 8; i++, pDst += 8)
520
- {
521
- pSrc = m_mcu_lines[i] + x;
522
- pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128;
523
- pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128;
524
- }
525
- }
526
-
527
- void jpeg_encoder::load_block_8_8(int x, int y, int c)
528
- {
529
- uint8 *pSrc;
530
- sample_array_t *pDst = m_sample_array;
531
- x = (x * (8 * 3)) + c;
532
- y <<= 3;
533
- for (int i = 0; i < 8; i++, pDst += 8)
534
- {
535
- pSrc = m_mcu_lines[y + i] + x;
536
- pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128;
537
- pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128;
538
- }
539
- }
540
-
541
- void jpeg_encoder::load_block_16_8(int x, int c)
542
- {
543
- uint8 *pSrc1, *pSrc2;
544
- sample_array_t *pDst = m_sample_array;
545
- x = (x * (16 * 3)) + c;
546
- int a = 0, b = 2;
547
- for (int i = 0; i < 16; i += 2, pDst += 8)
548
- {
549
- pSrc1 = m_mcu_lines[i + 0] + x;
550
- pSrc2 = m_mcu_lines[i + 1] + x;
551
- pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128;
552
- pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128;
553
- pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128;
554
- pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128;
555
- int temp = a; a = b; b = temp;
556
- }
557
- }
558
-
559
- void jpeg_encoder::load_block_16_8_8(int x, int c)
560
- {
561
- uint8 *pSrc1;
562
- sample_array_t *pDst = m_sample_array;
563
- x = (x * (16 * 3)) + c;
564
- for (int i = 0; i < 8; i++, pDst += 8)
565
- {
566
- pSrc1 = m_mcu_lines[i + 0] + x;
567
- pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128;
568
- pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128;
569
- pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128;
570
- pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128;
571
- }
572
- }
573
-
574
- void jpeg_encoder::load_quantized_coefficients(int component_num)
575
- {
576
- int32 *q = m_quantization_tables[component_num > 0];
577
- int16 *pDst = m_coefficient_array;
578
- for (int i = 0; i < 64; i++)
579
- {
580
- sample_array_t j = m_sample_array[s_zag[i]];
581
- if (j < 0)
582
- {
583
- if ((j = -j + (*q >> 1)) < *q)
584
- *pDst++ = 0;
585
- else
586
- *pDst++ = static_cast<int16>(-(j / *q));
587
- }
588
- else
589
- {
590
- if ((j = j + (*q >> 1)) < *q)
591
- *pDst++ = 0;
592
- else
593
- *pDst++ = static_cast<int16>((j / *q));
594
- }
595
- q++;
596
- }
597
- }
598
-
599
- void jpeg_encoder::flush_output_buffer()
600
- {
601
- if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
602
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
603
- m_pOut_buf = m_out_buf;
604
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
605
- }
606
-
607
- void jpeg_encoder::put_bits(uint bits, uint len)
608
- {
609
- m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
610
- while (m_bits_in >= 8)
611
- {
612
- uint8 c;
613
- #define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
614
- JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
615
- if (c == 0xFF) JPGE_PUT_BYTE(0);
616
- m_bit_buffer <<= 8;
617
- m_bits_in -= 8;
618
- }
619
- }
620
-
621
- void jpeg_encoder::code_coefficients_pass_one(int component_num)
622
- {
623
- if (component_num >= 3) return; // just to shut up static analysis
624
- int i, run_len, nbits, temp1;
625
- int16 *src = m_coefficient_array;
626
- uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];
627
-
628
- temp1 = src[0] - m_last_dc_val[component_num];
629
- m_last_dc_val[component_num] = src[0];
630
- if (temp1 < 0) temp1 = -temp1;
631
-
632
- nbits = 0;
633
- while (temp1)
634
- {
635
- nbits++; temp1 >>= 1;
636
- }
637
-
638
- dc_count[nbits]++;
639
- for (run_len = 0, i = 1; i < 64; i++)
640
- {
641
- if ((temp1 = m_coefficient_array[i]) == 0)
642
- run_len++;
643
- else
644
- {
645
- while (run_len >= 16)
646
- {
647
- ac_count[0xF0]++;
648
- run_len -= 16;
649
- }
650
- if (temp1 < 0) temp1 = -temp1;
651
- nbits = 1;
652
- while (temp1 >>= 1) nbits++;
653
- ac_count[(run_len << 4) + nbits]++;
654
- run_len = 0;
655
- }
656
- }
657
- if (run_len) ac_count[0]++;
658
- }
659
-
660
- void jpeg_encoder::code_coefficients_pass_two(int component_num)
661
- {
662
- int i, j, run_len, nbits, temp1, temp2;
663
- int16 *pSrc = m_coefficient_array;
664
- uint *codes[2];
665
- uint8 *code_sizes[2];
666
-
667
- if (component_num == 0)
668
- {
669
- codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
670
- code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
671
- }
672
- else
673
- {
674
- codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
675
- code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
676
- }
677
-
678
- temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
679
- m_last_dc_val[component_num] = pSrc[0];
680
-
681
- if (temp1 < 0)
682
- {
683
- temp1 = -temp1; temp2--;
684
- }
685
-
686
- nbits = 0;
687
- while (temp1)
688
- {
689
- nbits++; temp1 >>= 1;
690
- }
691
-
692
- put_bits(codes[0][nbits], code_sizes[0][nbits]);
693
- if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);
694
-
695
- for (run_len = 0, i = 1; i < 64; i++)
696
- {
697
- if ((temp1 = m_coefficient_array[i]) == 0)
698
- run_len++;
699
- else
700
- {
701
- while (run_len >= 16)
702
- {
703
- put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
704
- run_len -= 16;
705
- }
706
- if ((temp2 = temp1) < 0)
707
- {
708
- temp1 = -temp1;
709
- temp2--;
710
- }
711
- nbits = 1;
712
- while (temp1 >>= 1)
713
- nbits++;
714
- j = (run_len << 4) + nbits;
715
- put_bits(codes[1][j], code_sizes[1][j]);
716
- put_bits(temp2 & ((1 << nbits) - 1), nbits);
717
- run_len = 0;
718
- }
719
- }
720
- if (run_len)
721
- put_bits(codes[1][0], code_sizes[1][0]);
722
- }
723
-
724
- void jpeg_encoder::code_block(int component_num)
725
- {
726
- DCT2D(m_sample_array);
727
- load_quantized_coefficients(component_num);
728
- if (m_pass_num == 1)
729
- code_coefficients_pass_one(component_num);
730
- else
731
- code_coefficients_pass_two(component_num);
732
- }
733
-
734
- void jpeg_encoder::process_mcu_row()
735
- {
736
- if (m_num_components == 1)
737
- {
738
- for (int i = 0; i < m_mcus_per_row; i++)
739
- {
740
- load_block_8_8_grey(i); code_block(0);
741
- }
742
- }
743
- else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
744
- {
745
- for (int i = 0; i < m_mcus_per_row; i++)
746
- {
747
- load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
748
- }
749
- }
750
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
751
- {
752
- for (int i = 0; i < m_mcus_per_row; i++)
753
- {
754
- load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
755
- load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
756
- }
757
- }
758
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
759
- {
760
- for (int i = 0; i < m_mcus_per_row; i++)
761
- {
762
- load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
763
- load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
764
- load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
765
- }
766
- }
767
- }
768
-
769
- bool jpeg_encoder::terminate_pass_one()
770
- {
771
- optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
772
- if (m_num_components > 1)
773
- {
774
- optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
775
- }
776
- return second_pass_init();
777
- }
778
-
779
- bool jpeg_encoder::terminate_pass_two()
780
- {
781
- put_bits(0x7F, 7);
782
- flush_output_buffer();
783
- emit_marker(M_EOI);
784
- m_pass_num++; // purposely bump up m_pass_num, for debugging
785
- return true;
786
- }
787
-
788
- bool jpeg_encoder::process_end_of_image()
789
- {
790
- if (m_mcu_y_ofs)
791
- {
792
- if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
793
- {
794
- for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
795
- memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
796
- }
797
-
798
- process_mcu_row();
799
- }
800
-
801
- if (m_pass_num == 1)
802
- return terminate_pass_one();
803
- else
804
- return terminate_pass_two();
805
- }
806
-
807
- void jpeg_encoder::load_mcu(const void *pSrc)
808
- {
809
- const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);
810
-
811
- uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst
812
-
813
- if (m_num_components == 1)
814
- {
815
- if (m_image_bpp == 4)
816
- RGBA_to_Y(pDst, Psrc, m_image_x);
817
- else if (m_image_bpp == 3)
818
- RGB_to_Y(pDst, Psrc, m_image_x);
819
- else
820
- memcpy(pDst, Psrc, m_image_x);
821
- }
822
- else
823
- {
824
- if (m_image_bpp == 4)
825
- RGBA_to_YCC(pDst, Psrc, m_image_x);
826
- else if (m_image_bpp == 3)
827
- RGB_to_YCC(pDst, Psrc, m_image_x);
828
- else
829
- Y_to_YCC(pDst, Psrc, m_image_x);
830
- }
831
-
832
- // Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
833
- if (m_num_components == 1)
834
- memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
835
- else
836
- {
837
- const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
838
- uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
839
- for (int i = m_image_x; i < m_image_x_mcu; i++)
840
- {
841
- *q++ = y; *q++ = cb; *q++ = cr;
842
- }
843
- }
844
-
845
- if (++m_mcu_y_ofs == m_mcu_y)
846
- {
847
- process_mcu_row();
848
- m_mcu_y_ofs = 0;
849
- }
850
- }
851
-
852
- void jpeg_encoder::clear()
853
- {
854
- m_mcu_lines[0] = NULL;
855
- m_pass_num = 0;
856
- m_all_stream_writes_succeeded = true;
857
- }
858
-
859
- jpeg_encoder::jpeg_encoder()
860
- {
861
- clear();
862
- }
863
-
864
- jpeg_encoder::~jpeg_encoder()
865
- {
866
- deinit();
867
- }
868
-
869
- bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
870
- {
871
- deinit();
872
- if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
873
- m_pStream = pStream;
874
- m_params = comp_params;
875
- return jpg_open(width, height, src_channels);
876
- }
877
-
878
- void jpeg_encoder::deinit()
879
- {
880
- jpge_free(m_mcu_lines[0]);
881
- clear();
882
- }
883
-
884
- bool jpeg_encoder::process_scanline(const void* pScanline)
885
- {
886
- if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
887
- if (m_all_stream_writes_succeeded)
888
- {
889
- if (!pScanline)
890
- {
891
- if (!process_end_of_image()) return false;
892
- }
893
- else
894
- {
895
- load_mcu(pScanline);
896
- }
897
- }
898
- return m_all_stream_writes_succeeded;
899
- }
900
-
901
- // Higher level wrappers/examples (optional).
902
- #include <stdio.h>
903
-
904
- class cfile_stream : public output_stream
905
- {
906
- cfile_stream(const cfile_stream &);
907
- cfile_stream &operator= (const cfile_stream &);
908
-
909
- FILE* m_pFile;
910
- bool m_bStatus;
911
-
912
- public:
913
- cfile_stream() : m_pFile(NULL), m_bStatus(false) { }
914
-
915
- virtual ~cfile_stream()
916
- {
917
- close();
918
- }
919
-
920
- bool open(const char *pFilename)
921
- {
922
- close();
923
- #if defined(_MSC_VER)
924
- if (fopen_s(&m_pFile, pFilename, "wb") != 0)
925
- {
926
- return false;
927
- }
928
- #else
929
- m_pFile = fopen(pFilename, "wb");
930
- #endif
931
- m_bStatus = (m_pFile != NULL);
932
- return m_bStatus;
933
- }
934
-
935
- bool close()
936
- {
937
- if (m_pFile)
938
- {
939
- if (fclose(m_pFile) == EOF)
940
- {
941
- m_bStatus = false;
942
- }
943
- m_pFile = NULL;
944
- }
945
- return m_bStatus;
946
- }
947
-
948
- virtual bool put_buf(const void* pBuf, int64_t len)
949
- {
950
- m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
951
- return m_bStatus;
952
- }
953
-
954
- uint get_size() const
955
- {
956
- return m_pFile ? ftell(m_pFile) : 0;
957
- }
958
- };
959
-
960
- // Writes JPEG image to file.
961
- bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
962
- {
963
- cfile_stream dst_stream;
964
- if (!dst_stream.open(pFilename))
965
- return false;
966
-
967
- jpge::jpeg_encoder dst_image;
968
- if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
969
- return false;
970
-
971
- for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
972
- {
973
- for (int64_t i = 0; i < height; i++)
974
- {
975
- // i, width, and num_channels are all 64bit
976
- const uint8* pBuf = pImage_data + i * width * num_channels;
977
- if (!dst_image.process_scanline(pBuf))
978
- return false;
979
- }
980
- if (!dst_image.process_scanline(NULL))
981
- return false;
982
- }
983
-
984
- dst_image.deinit();
985
-
986
- return dst_stream.close();
987
- }
988
-
989
- class memory_stream : public output_stream
990
- {
991
- memory_stream(const memory_stream &);
992
- memory_stream &operator= (const memory_stream &);
993
-
994
- uint8 *m_pBuf;
995
- uint64_t m_buf_size, m_buf_ofs;
996
-
997
- public:
998
- memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }
999
-
1000
- virtual ~memory_stream() { }
1001
-
1002
- virtual bool put_buf(const void* pBuf, int64_t len)
1003
- {
1004
- uint64_t buf_remaining = m_buf_size - m_buf_ofs;
1005
- if ((uint64_t)len > buf_remaining)
1006
- return false;
1007
- memcpy(m_pBuf + m_buf_ofs, pBuf, len);
1008
- m_buf_ofs += len;
1009
- return true;
1010
- }
1011
-
1012
- uint64_t get_size() const
1013
- {
1014
- return m_buf_ofs;
1015
- }
1016
- };
1017
-
1018
- bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
1019
- {
1020
- if ((!pDstBuf) || (!buf_size))
1021
- return false;
1022
-
1023
- memory_stream dst_stream(pDstBuf, buf_size);
1024
-
1025
- buf_size = 0;
1026
-
1027
- jpge::jpeg_encoder dst_image;
1028
- if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
1029
- return false;
1030
-
1031
- for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
1032
- {
1033
- for (int64_t i = 0; i < height; i++)
1034
- {
1035
- const uint8* pScanline = pImage_data + i * width * num_channels;
1036
- if (!dst_image.process_scanline(pScanline))
1037
- return false;
1038
- }
1039
- if (!dst_image.process_scanline(NULL))
1040
- return false;
1041
- }
1042
-
1043
- dst_image.deinit();
1044
-
1045
- buf_size = dst_stream.get_size();
1046
- return true;
1047
- }
1048
-
1049
- } // namespace jpge
 
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/生成函数注释.py DELETED
@@ -1,57 +0,0 @@
1
- from predict import predict_no_ui
2
- from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
3
- fast_debug = False
4
-
5
-
6
- def 生成函数注释(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
7
- import time, glob, os
8
- print('begin analysis on:', file_manifest)
9
- for index, fp in enumerate(file_manifest):
10
- with open(fp, 'r', encoding='utf-8') as f:
11
- file_content = f.read()
12
-
13
- i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
14
- i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
15
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
16
- print('[1] yield chatbot, history')
17
- yield chatbot, history, '正常'
18
-
19
- if not fast_debug:
20
- msg = '正常'
21
- # ** gpt request **
22
- gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # request with a timeout countdown
23
-
24
- print('[2] end gpt req')
25
- chatbot[-1] = (i_say_show_user, gpt_say)
26
- history.append(i_say_show_user); history.append(gpt_say)
27
- print('[3] yield chatbot, history')
28
- yield chatbot, history, msg
29
- print('[4] next')
30
- if not fast_debug: time.sleep(2)
31
-
32
- if not fast_debug:
33
- res = write_results_to_file(history)
34
- chatbot.append(("完成了吗?", res))
35
- yield chatbot, history, msg
36
-
37
-
38
-
39
- @CatchException
40
- def 批量生成函数注释(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
41
- history = [] # clear the chat history to avoid overflowing the model input
42
- import glob, os
43
- if os.path.exists(txt):
44
- project_folder = txt
45
- else:
46
- if txt == "": txt = '空空如也的输入栏'
47
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
48
- yield chatbot, history, '正常'
49
- return
50
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
51
- [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
52
-
53
- if len(file_manifest) == 0:
54
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.py或.cpp文件: {txt}")
55
- yield chatbot, history, '正常'
56
- return
57
- yield from 生成函数注释(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/quicktour.md DELETED
@@ -1,314 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- [[open-in-colab]]
14
-
15
- # Quicktour
16
-
17
- Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. This has sparked a tremendous amount of interest in generative AI, and you have probably seen examples of diffusion generated images on the internet. 🧨 Diffusers is a library aimed at making diffusion models widely accessible to everyone.
18
-
19
- Whether you're a developer or an everyday user, this quicktour will introduce you to 🧨 Diffusers and help you get up and generating quickly! There are three main components of the library to know about:
20
-
21
- * The [`DiffusionPipeline`] is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference.
22
- * Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems.
23
- * Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training, and how to generate denoised images during inference.
24
-
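A minimal sketch tying the three components above to the public classes used later in this quicktour (each import also appears on its own in the sections below):

```py
from diffusers import DiffusionPipeline  # end-to-end inference pipeline
from diffusers import UNet2DModel        # a pretrained model building block
from diffusers import DDPMScheduler      # a denoising / noise-schedule algorithm
```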
25
- The quicktour will show you how to use the [`DiffusionPipeline`] for inference, and then walk you through how to combine a model and scheduler to replicate what's happening inside the [`DiffusionPipeline`].
26
-
27
- <Tip>
28
-
29
- The quicktour is a simplified version of the introductory 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about 🧨 Diffusers' goals, design philosophy, and additional details about its core API, check out the notebook!
30
-
31
- </Tip>
32
-
33
- Before you begin, make sure you have all the necessary libraries installed:
34
-
35
- ```py
36
- # uncomment to install the necessary libraries in Colab
37
- #!pip install --upgrade diffusers accelerate transformers
38
- ```
39
-
40
- - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training.
41
- - [🤗 Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).
42
-
43
- ## DiffusionPipeline
44
-
45
- The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks. Take a look at the table below for some supported tasks, and for a complete list of supported tasks, check out the [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) table.
46
-
47
- | **Task** | **Description** | **Pipeline** |
48
- |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|
49
- | Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) |
50
- | Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) |
51
- | Text-Guided Image-to-Image Translation | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) |
52
- | Text-Guided Image-Inpainting | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) |
53
- | Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) |
54
-
55
- Start by creating an instance of a [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download.
56
- You can use the [`DiffusionPipeline`] for any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub.
57
- In this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image generation.
58
-
59
- <Tip warning={true}>
60
-
61
- For [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) first before running the model. 🧨 Diffusers implements a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities can still produce potentially harmful content.
62
-
63
- </Tip>
64
-
65
- Load the model with the [`~DiffusionPipeline.from_pretrained`] method:
66
-
67
- ```python
68
- >>> from diffusers import DiffusionPipeline
69
-
70
- >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
71
- ```
72
-
73
- The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. You'll see that the Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and [`PNDMScheduler`] among other things:
74
-
75
- ```py
76
- >>> pipeline
77
- StableDiffusionPipeline {
78
- "_class_name": "StableDiffusionPipeline",
79
- "_diffusers_version": "0.13.1",
80
- ...,
81
- "scheduler": [
82
- "diffusers",
83
- "PNDMScheduler"
84
- ],
85
- ...,
86
- "unet": [
87
- "diffusers",
88
- "UNet2DConditionModel"
89
- ],
90
- "vae": [
91
- "diffusers",
92
- "AutoencoderKL"
93
- ]
94
- }
95
- ```
96
-
97
- We strongly recommend running the pipeline on a GPU because the model consists of roughly 1.4 billion parameters.
98
- You can move the generator object to a GPU, just like you would in PyTorch:
99
-
100
- ```python
101
- >>> pipeline.to("cuda")
102
- ```
103
-
104
- Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.
105
-
106
- ```python
107
- >>> image = pipeline("An image of a squirrel in Picasso style").images[0]
108
- >>> image
109
- ```
110
-
111
- <div class="flex justify-center">
112
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/>
113
- </div>
114
-
115
- Save the image by calling `save`:
116
-
117
- ```python
118
- >>> image.save("image_of_squirrel_painting.png")
119
- ```
120
-
121
- ### Local pipeline
122
-
123
- You can also use the pipeline locally. The only difference is you need to download the weights first:
124
-
125
- ```bash
126
- !git lfs install
127
- !git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
128
- ```
129
-
130
- Then load the saved weights into the pipeline:
131
-
132
- ```python
133
- >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
134
- ```
135
-
136
- Now you can run the pipeline as you would in the section above.
137
-
138
- ### Swapping schedulers
139
-
140
- Different schedulers come with different denoising speeds and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of 🧨 Diffusers is to allow you to easily switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method:
141
-
142
- ```py
143
- >>> from diffusers import EulerDiscreteScheduler
144
-
145
- >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
146
- >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
147
- ```
148
-
149
- Try generating an image with the new scheduler and see if you notice a difference!
150
-
151
- In the next section, you'll take a closer look at the components - the model and scheduler - that make up the [`DiffusionPipeline`] and learn how to use these components to generate an image of a cat.
152
-
153
- ## Models
154
-
155
- Most models take a noisy sample, and at each timestep they predict the *noise residual* (other models learn to predict the previous sample directly or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)), the difference between a less noisy image and the input image. You can mix and match models to create other diffusion systems.
156
-
157
- Models are instantiated with the [`~ModelMixin.from_pretrained`] method which also locally caches the model weights so it is faster the next time you load the model. For the quicktour, you'll load the [`UNet2DModel`], a basic unconditional image generation model with a checkpoint trained on cat images:
158
-
159
- ```py
160
- >>> from diffusers import UNet2DModel
161
-
162
- >>> repo_id = "google/ddpm-cat-256"
163
- >>> model = UNet2DModel.from_pretrained(repo_id)
164
- ```
165
-
166
- To access the model parameters, call `model.config`:
167
-
168
- ```py
169
- >>> model.config
170
- ```
171
-
172
- The model configuration is a 🧊 frozen 🧊 dictionary, which means those parameters can't be changed after the model is created. This is intentional and ensures that the parameters used to define the model architecture at the start remain the same, while other parameters can still be adjusted during inference.
173
-
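A small read-only sketch of inspecting that frozen configuration, assuming the `google/ddpm-cat-256` checkpoint loaded above (the values shown match the `torch.Size([1, 3, 256, 256])` noisy sample built later in this section):

```py
>>> model.config.sample_size   # height/width the checkpoint was trained on
256
>>> model.config.in_channels   # number of input channels
3
>>> # The config is frozen, so these fields can be read but not reassigned.
```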
174
- Some of the most important parameters are:
175
-
176
- * `sample_size`: the height and width dimension of the input sample.
177
- * `in_channels`: the number of input channels of the input sample.
178
- * `down_block_types` and `up_block_types`: the type of down- and upsampling blocks used to create the UNet architecture.
179
- * `block_out_channels`: the number of output channels of the downsampling blocks; also used in reverse order for the number of input channels of the upsampling blocks.
180
- * `layers_per_block`: the number of ResNet blocks present in each UNet block.
181
-
182
- To use the model for inference, create the image shape with random Gaussian noise. It should have a `batch` axis because the model can receive multiple random noises, a `channel` axis corresponding to the number of input channels, and a `sample_size` axis for the height and width of the image:
183
-
184
- ```py
185
- >>> import torch
186
-
187
- >>> torch.manual_seed(0)
188
-
189
- >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
190
- >>> noisy_sample.shape
191
- torch.Size([1, 3, 256, 256])
192
- ```
193
-
194
- For inference, pass the noisy image to the model and a `timestep`. The `timestep` indicates how noisy the input image is, with more noise at the beginning and less at the end. This helps the model determine its position in the diffusion process, whether it is closer to the start or the end. Use the `sample` method to get the model output:
195
-
196
- ```py
197
- >>> with torch.no_grad():
198
- ... noisy_residual = model(sample=noisy_sample, timestep=2).sample
199
- ```
200
-
201
- To generate actual examples though, you'll need a scheduler to guide the denoising process. In the next section, you'll learn how to couple a model with a scheduler.
202
-
203
- ## Schedulers
204
-
205
- Schedulers manage going from a noisy sample to a less noisy sample given the model output - in this case, it is the `noisy_residual`.
206
-
207
- <Tip>
208
-
209
- 🧨 Diffusers is a toolbox for building diffusion systems. While the [`DiffusionPipeline`] is a convenient way to get started with a pre-built diffusion system, you can also choose your own model and scheduler components separately to build a custom diffusion system.
210
-
211
- </Tip>
212
-
213
- For the quicktour, you'll instantiate the [`DDPMScheduler`] with its [`~diffusers.ConfigMixin.from_config`] method:
214
-
215
- ```py
216
- >>> from diffusers import DDPMScheduler
217
-
218
- >>> scheduler = DDPMScheduler.from_config(repo_id)
219
- >>> scheduler
220
- DDPMScheduler {
221
- "_class_name": "DDPMScheduler",
222
- "_diffusers_version": "0.13.1",
223
- "beta_end": 0.02,
224
- "beta_schedule": "linear",
225
- "beta_start": 0.0001,
226
- "clip_sample": true,
227
- "clip_sample_range": 1.0,
228
- "num_train_timesteps": 1000,
229
- "prediction_type": "epsilon",
230
- "trained_betas": null,
231
- "variance_type": "fixed_small"
232
- }
233
- ```
234
-
235
- <Tip>
236
-
237
- 💡 Notice how the scheduler is instantiated from a configuration. Unlike a model, a scheduler does not have trainable weights and is parameter-free!
238
-
239
- </Tip>
240
-
241
- Some of the most important parameters are:
242
-
243
- * `num_train_timesteps`: the length of the denoising process or in other words, the number of timesteps required to process random Gaussian noise into a data sample.
244
- * `beta_schedule`: the type of noise schedule to use for inference and training.
245
- * `beta_start` and `beta_end`: the start and end noise values for the noise schedule.
246
-
247
- To predict a slightly less noisy image, pass the following to the scheduler's [`~diffusers.DDPMScheduler.step`] method: model output, `timestep`, and current `sample`.
248
-
249
- ```py
250
- >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample
251
- >>> less_noisy_sample.shape
252
- ```
253
-
254
- The `less_noisy_sample` can be passed to the next `timestep` where it'll get even less noisy! Let's bring it all together now and visualize the entire denoising process.
255
-
256
- First, create a function that postprocesses and displays the denoised image as a `PIL.Image`:
257
-
258
- ```py
259
- >>> import PIL.Image
260
- >>> import numpy as np
261
-
262
-
263
- >>> def display_sample(sample, i):
264
- ... image_processed = sample.cpu().permute(0, 2, 3, 1)
265
- ... image_processed = (image_processed + 1.0) * 127.5
266
- ... image_processed = image_processed.numpy().astype(np.uint8)
267
-
268
- ... image_pil = PIL.Image.fromarray(image_processed[0])
269
- ... display(f"Image at step {i}")
270
- ... display(image_pil)
271
- ```
-
- To speed up the denoising process, move the input and model to a GPU:
-
- ```py
- >>> model.to("cuda")
- >>> noisy_sample = noisy_sample.to("cuda")
- ```
-
- Now create a denoising loop that predicts the noise residual of the current sample and computes a less noisy sample with the scheduler at each timestep:
-
- ```py
- >>> import tqdm
-
- >>> sample = noisy_sample
-
- >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
- ...     # 1. predict noise residual
- ...     with torch.no_grad():
- ...         residual = model(sample, t).sample
-
- ...     # 2. compute less noisy image and set x_t -> x_t-1
- ...     sample = scheduler.step(residual, t, sample).prev_sample
-
- ...     # 3. optionally look at image
- ...     if (i + 1) % 50 == 0:
- ...         display_sample(sample, i + 1)
- ```
-
- Sit back and watch as a cat is generated from nothing but noise! 😻
-
- <div class="flex justify-center">
-     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/>
- </div>
-
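If you'd like to keep the final result, the same post-processing steps can be reused to write it to disk (an optional sketch; the output filename is arbitrary):

```py
>>> final = sample.cpu().permute(0, 2, 3, 1)
>>> final = ((final + 1.0) * 127.5).numpy().astype(np.uint8)
>>> PIL.Image.fromarray(final[0]).save("ddpm-cat.png")
```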
- ## Next steps
-
- Hopefully you generated some cool images with 🧨 Diffusers in this quicktour! For your next steps, you can:
-
- * Train or finetune a model to generate your own images in the [training](./tutorials/basic_training) tutorial.
- * See example official and community [training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) for a variety of use cases.
- * Learn more about loading, accessing, changing and comparing schedulers in the [Using different Schedulers](./using-diffusers/schedulers) guide.
- * Explore prompt engineering, speed and memory optimizations, and tips and tricks for generating higher quality images with the [Stable Diffusion](./stable_diffusion) guide.
- * Dive deeper into speeding up 🧨 Diffusers with guides on [optimized PyTorch on a GPU](./optimization/fp16), and inference guides for running [Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps) and [ONNX Runtime](./optimization/onnx).
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py DELETED
@@ -1,51 +0,0 @@
- _base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
-
- model = dict(
-     pretrained='open-mmlab://detectron2/resnet50_caffe',
-     bbox_head=dict(
-         norm_on_bbox=True,
-         centerness_on_reg=True,
-         dcn_on_last_conv=False,
-         center_sampling=True,
-         conv_bias=True,
-         loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
-     # training and testing settings
-     test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
-
- # dataset settings
- img_norm_cfg = dict(
-     mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     samples_per_gpu=2,
-     workers_per_gpu=2,
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
- optimizer_config = dict(_delete_=True, grad_clip=None)
-
- lr_config = dict(warmup='linear')
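As a quick sanity check that a config like this still parses after edits, it can be loaded with mmcv's `Config` helper (a hedged sketch; the relative path assumes the standard mmdetection layout and the `_base_` file being present alongside it):

```py
from mmcv import Config

cfg = Config.fromfile(
    'configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py')
print(cfg.model.bbox_head.loss_bbox)  # expect the GIoULoss settings defined above
```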
spaces/Apex-X/Tm/roop/__init__.py DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/freeze.py DELETED
@@ -1,97 +0,0 @@
- import sys
- from optparse import Values
- from typing import List
-
- from pip._internal.cli import cmdoptions
- from pip._internal.cli.base_command import Command
- from pip._internal.cli.status_codes import SUCCESS
- from pip._internal.operations.freeze import freeze
- from pip._internal.utils.compat import stdlib_pkgs
-
- DEV_PKGS = {"pip", "setuptools", "distribute", "wheel"}
-
-
- class FreezeCommand(Command):
-     """
-     Output installed packages in requirements format.
-
-     packages are listed in a case-insensitive sorted order.
-     """
-
-     usage = """
-       %prog [options]"""
-     log_streams = ("ext://sys.stderr", "ext://sys.stderr")
-
-     def add_options(self) -> None:
-         self.cmd_opts.add_option(
-             "-r",
-             "--requirement",
-             dest="requirements",
-             action="append",
-             default=[],
-             metavar="file",
-             help=(
-                 "Use the order in the given requirements file and its "
-                 "comments when generating output. This option can be "
-                 "used multiple times."
-             ),
-         )
-         self.cmd_opts.add_option(
-             "-l",
-             "--local",
-             dest="local",
-             action="store_true",
-             default=False,
-             help=(
-                 "If in a virtualenv that has global access, do not output "
-                 "globally-installed packages."
-             ),
-         )
-         self.cmd_opts.add_option(
-             "--user",
-             dest="user",
-             action="store_true",
-             default=False,
-             help="Only output packages installed in user-site.",
-         )
-         self.cmd_opts.add_option(cmdoptions.list_path())
-         self.cmd_opts.add_option(
-             "--all",
-             dest="freeze_all",
-             action="store_true",
-             help=(
-                 "Do not skip these packages in the output:"
-                 " {}".format(", ".join(DEV_PKGS))
-             ),
-         )
-         self.cmd_opts.add_option(
-             "--exclude-editable",
-             dest="exclude_editable",
-             action="store_true",
-             help="Exclude editable package from output.",
-         )
-         self.cmd_opts.add_option(cmdoptions.list_exclude())
-
-         self.parser.insert_option_group(0, self.cmd_opts)
-
-     def run(self, options: Values, args: List[str]) -> int:
-         skip = set(stdlib_pkgs)
-         if not options.freeze_all:
-             skip.update(DEV_PKGS)
-
-         if options.excludes:
-             skip.update(options.excludes)
-
-         cmdoptions.check_list_path_option(options)
-
-         for line in freeze(
-             requirement=options.requirements,
-             local_only=options.local,
-             user_only=options.user,
-             paths=options.path,
-             isolated=options.isolated_mode,
-             skip=skip,
-             exclude_editable=options.exclude_editable,
-         ):
-             sys.stdout.write(line + "\n")
-         return SUCCESS
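For context, this class backs the `pip freeze` subcommand; an invocation such as `pip freeze --exclude-editable -r requirements.txt` exercises the `--exclude-editable` and `-r/--requirement` options defined above.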
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/README.md DELETED
@@ -1,4 +0,0 @@
- # Read the docs:
-
- The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/).
- Documents in this directory are not meant to be read on GitHub.
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_anchor_generator.py DELETED
@@ -1,120 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import logging
- import unittest
- import torch
-
- from detectron2.config import get_cfg
- from detectron2.layers import ShapeSpec
- from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator
-
- logger = logging.getLogger(__name__)
-
-
- class TestAnchorGenerator(unittest.TestCase):
-     def test_default_anchor_generator(self):
-         cfg = get_cfg()
-         cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
-         cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
-
-         anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)])
-
-         # only the last two dimensions of features matter here
-         num_images = 2
-         features = {"stage3": torch.rand(num_images, 96, 1, 2)}
-         anchors = anchor_generator([features["stage3"]])
-         expected_anchor_tensor = torch.tensor(
-             [
-                 [-32.0, -8.0, 32.0, 8.0],
-                 [-16.0, -16.0, 16.0, 16.0],
-                 [-8.0, -32.0, 8.0, 32.0],
-                 [-64.0, -16.0, 64.0, 16.0],
-                 [-32.0, -32.0, 32.0, 32.0],
-                 [-16.0, -64.0, 16.0, 64.0],
-                 [-28.0, -8.0, 36.0, 8.0],  # -28.0 == -32.0 + STRIDE (4)
-                 [-12.0, -16.0, 20.0, 16.0],
-                 [-4.0, -32.0, 12.0, 32.0],
-                 [-60.0, -16.0, 68.0, 16.0],
-                 [-28.0, -32.0, 36.0, 32.0],
-                 [-12.0, -64.0, 20.0, 64.0],
-             ]
-         )
-
-         self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
-     def test_default_anchor_generator_centered(self):
-         # test explicit args
-         anchor_generator = DefaultAnchorGenerator(
-             sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4]
-         )
-
-         # only the last two dimensions of features matter here
-         num_images = 2
-         features = {"stage3": torch.rand(num_images, 96, 1, 2)}
-         expected_anchor_tensor = torch.tensor(
-             [
-                 [-30.0, -6.0, 34.0, 10.0],
-                 [-14.0, -14.0, 18.0, 18.0],
-                 [-6.0, -30.0, 10.0, 34.0],
-                 [-62.0, -14.0, 66.0, 18.0],
-                 [-30.0, -30.0, 34.0, 34.0],
-                 [-14.0, -62.0, 18.0, 66.0],
-                 [-26.0, -6.0, 38.0, 10.0],
-                 [-10.0, -14.0, 22.0, 18.0],
-                 [-2.0, -30.0, 14.0, 34.0],
-                 [-58.0, -14.0, 70.0, 18.0],
-                 [-26.0, -30.0, 38.0, 34.0],
-                 [-10.0, -62.0, 22.0, 66.0],
-             ]
-         )
-
-         anchors = anchor_generator([features["stage3"]])
-         self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
-         anchors = torch.jit.script(anchor_generator)([features["stage3"]])
-         self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
-     def test_rrpn_anchor_generator(self):
-         cfg = get_cfg()
-         cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
-         cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]]
-         cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45]  # test single list[float]
-         anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)])
-
-         # only the last two dimensions of features matter here
-         num_images = 2
-         features = {"stage3": torch.rand(num_images, 96, 1, 2)}
-         anchors = anchor_generator([features["stage3"]])
-         expected_anchor_tensor = torch.tensor(
-             [
-                 [0.0, 0.0, 64.0, 16.0, 0.0],
-                 [0.0, 0.0, 64.0, 16.0, 45.0],
-                 [0.0, 0.0, 32.0, 32.0, 0.0],
-                 [0.0, 0.0, 32.0, 32.0, 45.0],
-                 [0.0, 0.0, 16.0, 64.0, 0.0],
-                 [0.0, 0.0, 16.0, 64.0, 45.0],
-                 [0.0, 0.0, 128.0, 32.0, 0.0],
-                 [0.0, 0.0, 128.0, 32.0, 45.0],
-                 [0.0, 0.0, 64.0, 64.0, 0.0],
-                 [0.0, 0.0, 64.0, 64.0, 45.0],
-                 [0.0, 0.0, 32.0, 128.0, 0.0],
-                 [0.0, 0.0, 32.0, 128.0, 45.0],
-                 [4.0, 0.0, 64.0, 16.0, 0.0],  # 4.0 == 0.0 + STRIDE (4)
-                 [4.0, 0.0, 64.0, 16.0, 45.0],
-                 [4.0, 0.0, 32.0, 32.0, 0.0],
-                 [4.0, 0.0, 32.0, 32.0, 45.0],
-                 [4.0, 0.0, 16.0, 64.0, 0.0],
-                 [4.0, 0.0, 16.0, 64.0, 45.0],
-                 [4.0, 0.0, 128.0, 32.0, 0.0],
-                 [4.0, 0.0, 128.0, 32.0, 45.0],
-                 [4.0, 0.0, 64.0, 64.0, 0.0],
-                 [4.0, 0.0, 64.0, 64.0, 45.0],
-                 [4.0, 0.0, 32.0, 128.0, 0.0],
-                 [4.0, 0.0, 32.0, 128.0, 45.0],
-             ]
-         )
-
-         self.assertTrue(torch.allclose(anchors[0].tensor, expected_anchor_tensor))
-
-
- if __name__ == "__main__":
-     unittest.main()
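In these tests, each expected row describes one anchor for one cell of the 1×2 feature map: the first group of rows belongs to the cell at the origin and the second group is shifted by the stride of 4 pixels (as the inline `STRIDE (4)` comments note). `DefaultAnchorGenerator` emits XYXY boxes, while `RotatedAnchorGenerator` emits (cx, cy, w, h, angle) rows.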
spaces/AxelBell/EasyOCR_text_recognition/assets/header.html DELETED
@@ -1,13 +0,0 @@
- <div class="center">
-   <p class="fs-xx">EasyOCR v.1.7.1</p>
-   <p class="fs-x">Gradio demo for EasyOCR</p>
-   <p>
-     This EasyOCR demo
-     <a href="https://www.jaided.ai/easyocr/">supports 80+ languages</a>. To use
-     it, simply upload your image and select a language from the drop-down menu,
-     or click on one of the examples to load it. <br />Most of the properties
-     provided by the library are available in the advanced settings.
-     <a href="https://www.jaided.ai/easyocr/tutorial/">Read more</a>
-   </p>
- </div>
-
spaces/Ayakasuki/anime-ai-detect/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Anime Ai Detect
- emoji: 🤖
- colorFrom: green
- colorTo: purple
- sdk: gradio
- sdk_version: 3.15.0
- app_file: app.py
- pinned: true
- duplicated_from: saltacc/anime-ai-detect
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Banbri/zcvzcv/src/app/interface/panel/index.tsx DELETED
@@ -1,347 +0,0 @@
1
- "use client"
2
-
3
- import { useEffect, useRef, useState, useTransition } from "react"
4
- import { RxReload } from "react-icons/rx"
5
-
6
- import { RenderedScene } from "@/types"
7
-
8
- import { getRender, newRender } from "@/app/engine/render"
9
- import { useStore } from "@/app/store"
10
-
11
- import { cn } from "@/lib/utils"
12
- import { getInitialRenderedScene } from "@/lib/getInitialRenderedScene"
13
- import { Progress } from "@/app/interface/progress"
14
-
15
- export function Panel({
16
- panel,
17
- className = "",
18
- width = 1,
19
- height = 1,
20
- }: {
21
- panel: number
22
- className?: string
23
- width?: number
24
- height?: number
25
- }) {
26
- const panelId = `${panel}`
27
-
28
- const [mouseOver, setMouseOver] = useState(false)
29
- const ref = useRef<HTMLImageElement>(null)
30
- const font = useStore(state => state.font)
31
- const preset = useStore(state => state.preset)
32
-
33
- const setGeneratingImages = useStore(state => state.setGeneratingImages)
34
-
35
- const panels = useStore(state => state.panels)
36
- const prompt = panels[panel] || ""
37
-
38
- const captions = useStore(state => state.captions)
39
- const caption = captions[panel] || ""
40
-
41
- const zoomLevel = useStore(state => state.zoomLevel)
42
- const showCaptions = useStore(state => state.showCaptions)
43
-
44
- const addToUpscaleQueue = useStore(state => state.addToUpscaleQueue)
45
-
46
- const [_isPending, startTransition] = useTransition()
47
- const renderedScenes = useStore(state => state.renderedScenes)
48
- const setRendered = useStore(state => state.setRendered)
49
-
50
- const rendered = renderedScenes[panel] || getInitialRenderedScene()
51
-
52
- const [revision, setRevision] = useState(0)
53
-
54
- // keep a ref in sync
55
- const renderedRef = useRef<RenderedScene>()
56
- const renderedKey = JSON.stringify(rendered)
57
- useEffect(() => { renderedRef.current = rendered }, [renderedKey])
58
-
59
- const timeoutRef = useRef<any>(null)
60
-
61
- const enableRateLimiter = `${process.env.NEXT_PUBLIC_ENABLE_RATE_LIMITER}` === "true"
62
-
63
- const delay = enableRateLimiter ? (1000 + (500 * panel)) : 1000
64
-
65
-
66
- const startImageGeneration = ({ prompt, width, height, revision }: {
67
- prompt: string
68
- width: number
69
- height: number
70
- revision: number
71
- }) => {
72
- if (!prompt?.length) { return }
73
-
74
- // important: update the status, and clear the scene
75
- setGeneratingImages(panelId, true)
76
-
77
- // just to empty it
78
- setRendered(panelId, getInitialRenderedScene())
79
-
80
- setTimeout(() => {
81
- startTransition(async () => {
82
-
83
- const withCache = revision === 0
84
-
85
- // atrocious and very, very, very, very, very, very, very ugly hack for the Inference API
86
- // as apparently "use_cache: false" doesn't work, or doesn't do what we want it to do
87
- let cacheInvalidationHack = ""
88
- const nbMaxRevisions = 6
89
- for (let i = 0; i < revision && revision < nbMaxRevisions; i++) {
90
- const j = Math.random()
91
- cacheInvalidationHack += j < 0.3 ? "_" : j < 0.6 ? "," : "-"
92
- }
93
-
94
- let newRendered: RenderedScene
95
- try {
96
-
97
- newRendered = await newRender({
98
- prompt: cacheInvalidationHack + " " + prompt,
99
- width,
100
- height,
101
-
102
- // TODO: here we never reset the revision, so only the first user
103
- // comic will be cached (we should fix that later)
104
- withCache: revision === 0
105
- })
106
- } catch (err) {
107
- // "Failed to load the panel! Don't worry, we are retrying..")
108
- newRendered = await newRender({
109
- prompt: cacheInvalidationHack + " " + prompt,
110
- width,
111
- height,
112
- withCache,
113
- })
114
- }
115
-
116
- if (newRendered) {
117
- setRendered(panelId, newRendered)
118
-
119
- if (newRendered.status === "completed") {
120
- setGeneratingImages(panelId, false)
121
- addToUpscaleQueue(panelId, newRendered)
122
- }
123
-
124
- // but we are still loading!
125
- } else {
126
- setRendered(panelId, {
127
- renderId: "",
128
- status: "pending",
129
- assetUrl: "",
130
- alt: "",
131
- maskUrl: "",
132
- error: "",
133
- segments: []
134
- })
135
- setGeneratingImages(panelId, false)
136
- return
137
- }
138
- })
139
- }, enableRateLimiter ? 1000 * panel : 0)
140
- }
141
-
142
-
143
- const checkStatus = () => {
144
- startTransition(async () => {
145
- clearTimeout(timeoutRef.current)
146
-
147
- if (!renderedRef.current?.renderId || renderedRef.current?.status !== "pending") {
148
- timeoutRef.current = setTimeout(checkStatus, delay)
149
- return
150
- }
151
-
152
- try {
153
- setGeneratingImages(panelId, true)
154
- const newRendered = await getRender(renderedRef.current.renderId)
155
-
156
- if (JSON.stringify(renderedRef.current) !== JSON.stringify(newRendered)) {
157
- setRendered(panelId, renderedRef.current = newRendered)
158
- setGeneratingImages(panelId, true)
159
- }
160
-
161
- if (newRendered.status === "pending") {
162
- timeoutRef.current = setTimeout(checkStatus, delay)
163
- } else if (newRendered.status === "error" ||
164
- (newRendered.status === "completed" && !newRendered.assetUrl?.length)) {
165
- try {
166
- const newAttempt = await newRender({
167
- prompt,
168
- width,
169
- height,
170
- withCache: false,
171
- })
172
- setRendered(panelId, newAttempt)
173
- } catch (err) {
174
- console.error("yeah sorry, something is wrong.. aborting", err)
175
- setGeneratingImages(panelId, false)
176
- }
177
- } else {
178
- console.log("panel finished!")
179
- setGeneratingImages(panelId, false)
180
- addToUpscaleQueue(panelId, newRendered)
181
- }
182
- } catch (err) {
183
- console.error(err)
184
- timeoutRef.current = setTimeout(checkStatus, delay)
185
- }
186
- })
187
- }
188
-
189
- useEffect(() => {
190
- if (!prompt.length) { return }
191
-
192
- startImageGeneration({ prompt, width, height, revision })
193
-
194
- clearTimeout(timeoutRef.current)
195
-
196
- // normally it should reply in < 1sec, but we could also use an interval
197
- timeoutRef.current = setTimeout(checkStatus, delay)
198
-
199
- return () => {
200
- clearTimeout(timeoutRef.current)
201
- }
202
- }, [prompt, width, height, revision])
203
-
204
- /*
205
- doing the captionning from the browser is expensive
206
- a simpler solution is to caption directly during SDXL generation
207
-
208
- useEffect(() => {
209
- if (!rendered.assetUrl) { return }
210
- // the asset url can evolve with time (link to a better resolution image)
211
- // however it would be costly to ask for the caption, the low resolution is enough for the semantic resolution
212
- // so we just do nothing if we already have the caption
213
- if (caption) { return }
214
- startTransition(async () => {
215
- try {
216
- const newCaption = await see({
217
- prompt: "please caption the following image",
218
- imageBase64: rendered.assetUrl
219
- })
220
- if (newCaption) {
221
- setCaption(newCaption)
222
- }
223
- } catch (err) {
224
- console.error(`failed to generate the caption:`, err)
225
- }
226
- })
227
- }, [rendered.assetUrl, caption])
228
- */
229
-
230
- const frameClassName = cn(
231
- //`flex`,
232
- `w-full h-full`,
233
- `border-stone-800`,
234
- `transition-all duration-200 ease-in-out`,
235
- zoomLevel > 140 ? `border-[2px] md:border-[4px] rounded-sm md:rounded-md` :
236
- zoomLevel > 120 ? `border-[1.5px] md:border-[3px] rounded-xs md:rounded-sm` :
237
- zoomLevel > 90 ? `border-[1px] md:border-[2px] rounded-xs md:rounded-sm` :
238
- zoomLevel > 40 ? `border-[0.5px] md:border-[1px] rounded-none md:rounded-xs` :
239
- `border-transparent md:border-[0.5px] rounded-none md:rounded-none`,
240
- `shadow-sm`,
241
- `overflow-hidden`,
242
- `print:border-[1.5px] print:shadow-none`,
243
- )
244
-
245
- const handleReload = () => {
246
- console.log(`Asked to reload panel ${panelId}`)
247
- setRevision(revision + 1)
248
- }
249
-
250
- if (prompt && !rendered.assetUrl) {
251
- return (
252
- <div className={cn(
253
- frameClassName,
254
- `flex flex-col items-center justify-center`,
255
- className,
256
- )}>
257
- <Progress isLoading />
258
- </div>
259
- )
260
- }
261
-
262
- return (
263
- <div className={cn(
264
- frameClassName,
265
- { "grayscale": preset.color === "grayscale" },
266
- className
267
- )}
268
- onMouseEnter={() => setMouseOver(true)}
269
- onMouseLeave={() => setMouseOver(false)}
270
- >
271
- <div className={cn(
272
- `bg-stone-50`,
273
- `border-stone-800`,
274
- `transition-all duration-200 ease-in-out`,
275
- zoomLevel > 140 ? `border-b-[2px] md:border-b-[4px]` :
276
- zoomLevel > 120 ? `border-b-[1.5px] md:border-b-[3px]` :
277
- zoomLevel > 90 ? `border-b-[1px] md:border-b-[2px]` :
278
- zoomLevel > 40 ? `border-b-[0.5px] md:border-b-[1px]` :
279
- `border-transparent md:border-b-[0.5px]`,
280
- `print:border-b-[1.5px]`,
281
- `truncate`,
282
-
283
- zoomLevel > 200 ? `p-4 md:p-8` :
284
- zoomLevel > 180 ? `p-[14px] md:p-8` :
285
- zoomLevel > 160 ? `p-[12px] md:p-[28px]` :
286
- zoomLevel > 140 ? `p-[10px] md:p-[26px]` :
287
- zoomLevel > 120 ? `p-2 md:p-6` :
288
- zoomLevel > 100 ? `p-1.5 md:p-[20px]` :
289
- zoomLevel > 90 ? `p-1.5 md:p-4` :
290
- zoomLevel > 40 ? `p-1 md:p-2` :
291
- `p-0.5 md:p-2`,
292
-
293
- zoomLevel > 220 ? `text-xl md:text-4xl` :
294
- zoomLevel > 200 ? `text-lg md:text-3xl` :
295
- zoomLevel > 180 ? `text-md md:text-2xl` :
296
- zoomLevel > 140 ? `text-2xs md:text-2xl` :
297
- zoomLevel > 120 ? `text-3xs md:text-xl` :
298
- zoomLevel > 100 ? `text-4xs md:text-lg` :
299
- zoomLevel > 90 ? `text-5xs md:text-sm` :
300
- zoomLevel > 40 ? `md:text-xs` : `md:text-2xs`,
301
-
302
- showCaptions ? (
303
- zoomLevel > 90 ? `block` : `hidden md:block`
304
- ) : `hidden`,
305
- )}
306
- >{caption || ""}
307
- </div>
308
- {rendered.assetUrl &&
309
- <img
310
- ref={ref}
311
- src={rendered.assetUrl}
312
- width={width}
313
- height={height}
314
- alt={rendered.alt}
315
- className={cn(
316
- `comic-panel w-full h-full object-cover max-w-max`,
317
- // showCaptions ? `-mt-11` : ''
318
- )}
319
- />}
320
- {
321
- // there is an issue, this env check doesn't work..
322
- // process.env.NEXT_PUBLIC_CAN_REDRAW === "true" ?
323
- <div
324
- className={cn(`relative -mt-14 ml-4`,)}>
325
- <div className="flex flex-row">
326
- <div
327
- onClick={rendered.status === "completed" ? handleReload : undefined}
328
- className={cn(
329
- `bg-stone-100 rounded-lg`,
330
- `flex flex-row space-x-2 items-center`,
331
- `py-2 px-3 cursor-pointer`,
332
- `transition-all duration-200 ease-in-out`,
333
- rendered.status === "completed" ? "opacity-95" : "opacity-50",
334
- mouseOver && rendered.assetUrl ? `scale-95 hover:scale-100 hover:opacity-100`: `scale-0`
335
- )}>
336
- <RxReload
337
- className="w-5 h-5"
338
- />
339
- <span className="text-base">Redraw</span>
340
- </div>
341
- </div>
342
- </div>
343
- //: null
344
- }
345
- </div>
346
- )
347
- }
spaces/Benson/text-generation/Examples/Descargar Apkpro.me Carx Calle.md DELETED
@@ -1,141 +0,0 @@
1
-
2
- <h1>CarX Street: un juego de carreras de mundo abierto y gratuito para dispositivos móviles</h1>
3
- <p>Si eres un fan de los juegos de carreras, es posible que hayas oído hablar de <strong>CarX Street</strong>, un nuevo juego de carreras de mundo abierto para dispositivos móviles. CarX Street es desarrollado por <strong>CarX Technologies</strong>, la misma compañía detrás de la popular serie <strong>CarX Drift Racing</strong>. En este artículo, te contaremos todo lo que necesitas saber sobre CarX Street, incluyendo qué es, cómo descargarlo de <strong>apkpro.me</strong>, cómo jugarlo y cómo se compara con otros juegos de carreras. </p>
4
- <h2>descargar apkpro.me carx calle</h2><br /><p><b><b>DOWNLOAD</b> &#10042;&#10042;&#10042; <a href="https://bltlly.com/2v6Jt8">https://bltlly.com/2v6Jt8</a></b></p><br /><br />
5
- <h2>¿Qué es CarX Street? </h2>
6
- <p>CarX Street es un videojuego de carreras de simulación que ofrece física realista del automóvil y deriva a alta velocidad. El juego también cuenta con diferentes tipos de mapas de todo el mundo, y los jugadores pueden elegir entre varios modos de juego diferentes. Los jugadores pueden competir contra otros jugadores, o participar en carreras y eventos. </p>
7
- <h3>Un juego de carreras realista y dinámico con personalización de coches y multijugador en línea</h3>
8
- <p>Una de las características destacadas de CarX Street es su motor de física realista. Este motor simula el comportamiento de los coches en la carretera, dando a los jugadores una experiencia de carreras real. Los jugadores pueden sentir la emoción de las carreras de alta velocidad mientras maniobran sus autos a través de curvas cerradas y se entretienen dentro y fuera del tráfico. El juego también permite a los jugadores personalizar sus coches con varias partes y opciones de ajuste, desbloqueando todo el potencial de sus vehículos. Los jugadores también pueden desafiar a otros jugadores en carreras de red reales, o unirse a clubes y competir con otros corredores. </p>
9
- <h3>Un mapa de mundo abierto grande y diverso con ciclo de día y noche y diferentes entornos</h3>
10
-
11
- <h3>Un modo de carrera con clubes, eventos y jefes para desafiar</h3>
12
- <p>Para aquellos que quieren una experiencia de juego más estructurada, CarX Street también ofrece un modo de carrera. En este modo, los jugadores pueden unirse a diferentes clubes, cada uno con su propio estilo, tema y objetivos. Los jugadores también pueden participar en varios eventos, como sprints, drifts, contrarrelojes, etc., para ganar dinero y reputación. Los jugadores también pueden desafiar a los jefes de cada club, que son los mejores corredores en sus respectivas áreas. Al derrotar a los jefes, los jugadores pueden desbloquear nuevos coches, piezas, ubicaciones y más. </p>
13
- <p></p>
14
- <h2>Cómo descargar CarX Street desde apkpro.me? </ <h2>Cómo descargar CarX Street desde apkpro.me? </h2>
15
- <p>Si usted está interesado en probar CarX Street, es posible que se pregunte cómo descargarlo de apkpro.me. Apkpro.me es un sitio web que proporciona descargas gratuitas y seguras de varias aplicaciones y juegos de Android, incluyendo CarX Street. Estos son algunos de los beneficios, pasos y precauciones de descargar CarX Street de apkpro.me. </p>
16
- <h3>Los beneficios de descargar desde apkpro.me</h3>
17
- <p>Hay varias razones por las que es posible que desee descargar CarX Street de apkpro.me en lugar de la tienda oficial de Google Play. Algunos de los beneficios son:</p>
18
- <ul>
19
- <li>Puede descargar la última versión de CarX Street sin esperar la actualización oficial. </li>
20
- <li>Puede acceder a la versión modificada de CarX Street, que le da dinero ilimitado, monedas y gemas para comprar y actualizar sus coches. </li>
21
- <li> Puede evitar las restricciones regionales y jugar CarX Street en cualquier país. </li>
22
- <li>Puedes disfrutar del juego sin anuncios ni compras en la aplicación. </li>
23
- </ul>
24
- <h3>Los pasos para descargar e instalar CarX Street desde apkpro.me</h3>
25
- <p>El proceso de descargar e instalar CarX Street desde apkpro.me es simple y sencillo. Estos son los pasos que debe seguir:</p>
26
- <ol>
27
- <li>Vaya a <a href="">apkpro.me</a> en su navegador y busque CarX Street.</li>
28
-
29
- <li>Haga clic en el botón de descarga y espere a que el archivo se descargue en su dispositivo. </li>
30
- <li>Una vez descargado el archivo, vaya a su administrador de archivos y localice el archivo. Toque en él para iniciar el proceso de instalación. </li>
31
- <li>Si ves un mensaje de advertencia que dice "Instalar bloqueado", ve a tu configuración y habilita la opción de instalar aplicaciones de fuentes desconocidas. </li>
32
- <li>Siga las instrucciones en la pantalla y complete el proceso de instalación. </li>
33
- <li>Iniciar el juego y disfrutar! </li>
34
- </ol>
35
- <h3>Las precauciones a tomar antes de descargar desde apkpro.me</h3>
36
- <p>Aunque apkpro.me es un sitio web confiable y seguro, hay algunas precauciones que debe tomar antes de descargar cualquier aplicación o juego de ella. Algunas de las precauciones son:</p>
37
- <ul>
38
- <li>Asegúrese de que su dispositivo tiene suficiente espacio de almacenamiento y duración de la batería para descargar e instalar el juego. </li>
39
- <li>Asegúrese de que su dispositivo cumple con los requisitos mínimos para ejecutar el juego sin problemas. </li>
40
- <li>Asegúrese de tener una conexión a Internet estable para evitar interrupciones o errores durante el proceso de descarga o instalación. </li>
41
- <li>Asegúrate de tener una copia de seguridad de tus datos y archivos en caso de que algo salga mal o quieras desinstalar el juego más tarde. </li>
42
- <li>Asegúrese de escanear el archivo con un antivirus o un escáner de malware antes de abrirlo para asegurarse de que está libre de virus o código malicioso. </li>
43
- </ul>
44
- <h2>¿Cómo se juega CarX Street? </h2> <h2>Cómo se juega CarX Street? </h2>
45
- <p>Ahora que ha descargado e instalado CarX Street desde apkpro.me, es posible que se pregunte cómo jugarlo. CarX Street es un divertido y adictivo juego de carreras que te mantendrá enganchado durante horas. Estos son algunos de los controles básicos y la mecánica de juego de CarX Street, así como algunos consejos y trucos para mejorar sus habilidades de carreras y rendimiento. </p>
46
- <h3>Los controles básicos y la mecánica de juego de CarX Street</h3>
47
-
48
- <p>La mecánica de juego de CarX Street se basa en la física realista y el comportamiento del automóvil. El juego simula los efectos de la velocidad, la gravedad, la tracción, la inercia y la fricción en su coche. Usted tiene que tener en cuenta estos factores al conducir su coche, especialmente cuando se deriva. La deriva es una característica clave de CarX Street, ya que le permite realizar maniobras espectaculares y ganar puntos extra. Puede desviarse pulsando el botón de freno mientras gira, o usando el botón de freno de mano. También puede ajustar el ángulo y la intensidad de su deriva mediante la dirección y la aceleración en consecuencia. </p>
49
- <h3>Los consejos y trucos para mejorar sus habilidades de carreras y rendimiento en CarX Street</h3>
50
- <p>Si quieres convertirte en un mejor corredor en CarX Street, necesitas practicar y dominar el arte de la deriva. La deriva no solo es divertida y fresca, sino también útil y estratégica. Aquí hay algunos consejos y trucos para ayudarte a mejorar tus habilidades y rendimiento en CarX Street:</p>
51
- <ul>
52
- <li>Elija el coche adecuado para su estilo y preferencia. Diferentes coches tienen diferentes características, tales como velocidad, aceleración, manejo, peso, etc. Algunos coches son más adecuados para la deriva que otros, así que experimenta con diferentes coches y encontrar el que más le convenga. </li>
53
- <li>Actualizar y afinar su coche con regularidad. Puede mejorar el rendimiento de su automóvil comprando e instalando nuevas piezas, como motor, turbo, suspensión, neumáticos, etc. También puede ajustar la configuración de su automóvil, como camber, toe, differential, etc., para optimizar su comportamiento en la carretera. </li>
54
- <li>Conozca el diseño y las características de cada mapa. Cada mapa tiene sus propios desafíos y oportunidades para la deriva. Necesitas familiarizarte con el diseño y las características de cada mapa, como curvas, esquinas, rampas, obstáculos, atajos, etc. También necesitas adaptar tu estilo y estrategia de conducción de acuerdo con las condiciones del mapa, como el clima, el tráfico, la hora del día, etc.</li>
55
-
56
- <li>Mira las repeticiones y aprende de otros jugadores. Puedes ver las repeticiones de tus propias carreras u otras carreras de jugadores accediendo al modo de repetición en el juego. Ver repeticiones puede ayudarte a analizar tus errores y mejorar tus habilidades. También puedes aprender de las técnicas y estrategias de otros jugadores viendo sus repeticiones. </li>
57
- </ul>
58
- <h3>Las características y modos para explorar en CarX Street</h3>
59
- <p>CarX Street no es solo un juego de carreras; también es un juego social que te permite interactuar con otros jugadores y unirte a una comunidad de corredores. Estas son algunas de las características y modos que puedes explorar en CarX Street:</p>
60
- <ul>
61
- <li>Modo multijugador en línea: Puedes competir contra otros jugadores de todo el mundo en carreras de red real. Puedes elegir entre diferentes modos, como carrera de sprint, carrera de deriva, carrera de ataque temporal, etc. También puedes chatear con otros jugadores en el vestíbulo o durante la carrera. </li>
62
- <li>Modo Club: Puedes unirte o crear un club con otros jugadores que compartan tus intereses y objetivos. Puede cooperar con los miembros de su club para participar en eventos del club, como guerras de clubes o torneos de clubes. También puedes competir con otros clubes por la fama y la gloria. </li>
63
- <li>Modo de carrera: Puede progresar a través de una historia que involucra diferentes clubes, eventos y jefes. Puedes ganar dinero y reputación completando misiones y desafíos. También puede desbloquear nuevos coches, piezas, ubicaciones y más derrotando a los jefes. </li>
64
- <li>Modo de garaje: Puede personalizar sus coches con varias partes y opciones de ajuste. Puede cambiar el aspecto de sus coches mediante la aplicación de diferentes pinturas, calcomanías, ruedas, alerones, etc. También puede mejorar el rendimiento de sus coches mediante la mejora y ajuste del motor, turbo, suspensión, neumáticos, etc.</li>
65
-
66
- </ul>
67
- <h2>¿Cómo se compara CarX Street con otros juegos de carreras? </h2>
68
- <p>CarX Street no es el único juego de carreras disponible para dispositivos móviles. Hay muchos otros juegos de carreras que usted podría haber jugado o oído hablar de, como Asphalt 9, Need for Speed, Real Racing 3, etc. ¿Cómo CarX Street se compara con estos juegos? Estas son algunas de las similitudes y diferencias entre CarX Street y otros juegos de carreras populares, así como los pros y los contras de CarX Street como un juego de carreras. </p>
69
- <h3>Las similitudes y diferencias entre CarX Street y otros juegos de carreras populares</h3>
70
- <p>CarX Street comparte algunas características comunes con otros juegos de carreras, como:</p>
71
- <ul>
72
- <li> Tiene gráficos y efectos de sonido de alta calidad que crean una experiencia de carreras inmersiva. </li>
73
- <li> Tiene una variedad de coches y lugares para elegir, cada uno con sus propias características y desafíos. </li>
74
- <li> Tiene un modo multijugador que le permite competir contra otros jugadores en línea. </li>
75
- <li> Tiene un modo de carrera que sigue una historia y ofrece diferentes misiones y recompensas. </li>
76
- </ul>
77
- <p>Sin embargo, CarX Street también tiene algunas características únicas que lo distinguen de otros juegos de carreras, como:</p>
78
- <ul>
79
- <li> Tiene un motor de física realista que simula el comportamiento de los coches en la carretera. </li>
80
- <li> Tiene un enfoque en la deriva como una mecánica de juego central y una fuente de diversión y emoción. </li>
81
- <li> Tiene un mapa de mundo abierto grande y diverso que puedes explorar libremente. </li>
82
- <li> Tiene un dinámico ciclo de día y noche y diferentes condiciones climáticas que afectan el entorno del juego. </li>
83
- </ul>
84
- <h3>Los pros y los contras de CarX Street como juego de carreras</h3>
85
- <p>Como cualquier juego, CarX Street tiene sus pros y sus contras como un juego de carreras. Aquí están algunos de los pros y contras que usted debe considerar antes de jugar CarX Street:</p>
86
- <tabla>
87
- <tr><th>Pros</th><th>Contras</th></tr>
88
-
89
- <tr><td>Ofrece un mapa de mundo abierto grande y diverso con diferentes entornos y características. </td><td>Puede ser abrumador y confuso navegar por el mapa y encontrar el camino. </td></tr>
90
- <tr><td>Ofrece una variedad de modos de juego y características para adaptarse a diferentes preferencias y gustos. </td><td>Puede ser repetitivo y aburrido jugar los mismos modos y eventos una y otra vez. </td></tr>
91
- <tr><td>Ofrece un modelo gratuito que te permite descargar y jugar el juego sin gastar dinero. </td><td>Tiene anuncios y compras en la aplicación que pueden ser molestos y tentadores para gastar dinero en. </td></tr>
92
- </tabla>
93
- <h3>Los comentarios y valoraciones de los usuarios de CarX Street</h3>
94
- <p>Si quieres saber lo que otros jugadores piensan de CarX Street, puedes consultar las opiniones de los usuarios y las valoraciones del juego en varias plataformas. Aquí hay algunos ejemplos de reseñas de usuarios y valoraciones de CarX Street:</p>
95
- <blockquote>"Este juego es increíble! Los gráficos son impresionantes, la física es realista, los coches son personalizables, el mapa es enorme, el juego es adictivo. Me encanta la deriva en este juego, se siente tan satisfactorio. El modo multijugador también es divertido, me gusta correr con otros jugadores en línea. Este es uno de los mejores juegos de carreras que he jugado en mi teléfono." </blockquote>
96
- <blockquote>"Este juego es bueno, pero tiene algunos defectos. Los controles son difíciles de acostumbrarse, especialmente la deriva. El mapa es demasiado grande y confuso, a menudo me pierdo o me quedo atascado. El juego también se bloquea a veces o se retrasa cuando hay demasiados jugadores o coches en la pantalla. El juego también tiene demasiados anuncios y compras en la aplicación que arruinan la experiencia."</blockquote>
97
-
98
- <h2>Conclusión</h2>
99
- <p>En conclusión, CarX Street <p>En conclusión, CarX Street es un juego de carreras de mundo abierto para dispositivos móviles que ofrece una experiencia de carreras realista y dinámica con la física del automóvil y la deriva. El juego también cuenta con un mapa de mundo abierto grande y diverso con diferentes entornos y características, una variedad de modos de juego y características para adaptarse a diferentes preferencias y gustos, y un modo multijugador que le permite competir contra otros jugadores en línea. El juego se puede descargar de apkpro.me, un sitio web que proporciona descargas gratuitas y seguras de varias aplicaciones y juegos de Android, incluyendo CarX Street. Sin embargo, el juego también tiene algunos defectos, como controles duros, mapa confuso, jugabilidad repetitiva, anuncios y compras en la aplicación, etc. Por lo tanto, el juego no es perfecto, pero aún vale la pena probarlo si eres fanático de los juegos de carreras. </p>
100
- <p>Si estás interesado en jugar CarX Street, puedes descargarlo desde apkpro.me siguiendo los pasos y precauciones mencionados en este artículo. También puede mejorar sus habilidades de carreras y rendimiento siguiendo los consejos y trucos mencionados en este artículo. También puede comparar CarX Street con otros juegos de carreras populares leyendo los comentarios y valoraciones de los usuarios mencionados en este artículo. Esperamos que este artículo te haya ayudado a aprender más sobre CarX Street y cómo descargarlo desde apkpro.me. También esperamos que disfrutes jugando a CarX Street y compartas tus comentarios y opiniones al respecto. </p>
101
- <p>Gracias por leer este artículo. ¡Buen día! </p>
102
- <h2>Preguntas frecuentes</h2>
103
- <p>Aquí están algunas de las preguntas más frecuentes sobre CarX Street y apkpro.me:</p>
104
- <ol>
105
- <li>¿Cuáles son los requisitos mínimos para jugar CarX Street en mi dispositivo? </li>
106
- <p>Los requisitos mínimos para jugar CarX Street en tu dispositivo son:</p>
107
- <ul>
108
- <li>Android 5.0 o superior</li>
109
- <li>2 GB de RAM o superior</li>
110
- <li>1 GB de espacio de almacenamiento libre o superior</li>
111
- <li>Una conexión a Internet estable</li>
112
- </ul>
113
-
114
- <p>Sí, CarX Street es seguro para descargar de apkpro.me, siempre y cuando siga las precauciones mencionadas en este artículo. Apkpro.me es un sitio web confiable y seguro que proporciona descargas gratuitas y seguras de varias aplicaciones y juegos de Android, incluyendo CarX Street. Sin embargo, siempre debe escanear el archivo con un antivirus o un escáner de malware antes de abrirlo para asegurarse de que está libre de virus o código malicioso. </p>
115
- <li>¿Cómo puedo contactar a los desarrolladores de CarX Street o apkpro.me? </li>
116
- <p>Puede ponerse en contacto con los desarrolladores de CarX Street o apkpro.me utilizando los siguientes métodos:</p>
117
- <ul>
118
- <li>Para CarX Street, puede visitar su sitio web oficial en <a href="">https://carx-tech.com/</a>, o su página de Facebook en <a href=">https:/www.facebook.com/carxstreet/</a>, o su página de Instagram en <a=a=">>hthtw.facebook.com/instawww.gram.com/</street/a, o su canal de YouTube en <a href=">https://www.youtube.com/channel/UCo0wQYz4Z7Q3l9yf5sVqJ8g</a>, o su servidor Discord en <a href="">https:/rd.gg/carxstreet</a>. </li>
119
- <li>Para apkpro.me, puedes visitar su sitio web oficial en <a href="">https://apkpro.me/</a>, o su página de Facebook en <a href=">https:/www.facebook.com/apkpro.me/</</a>, o su página de Twitter en <a href=">>>>>hththts:om/apkpro.me/<me/a>, o su canal Telegram en href a=">https://t. me/apkpromod</a>. </li>
120
- </ul>
121
- <li>¿Cómo puedo actualizar CarX Street en mi dispositivo? </li>
122
- <p>Puedes actualizar CarX Street en tu dispositivo siguiendo estos pasos:</p>
123
- <ol>
124
- <li>Vaya a apkpro.me en su navegador y busque CarX Street.</li>
125
- <li>Seleccione la última versión de CarX Street que desea descargar. </li>
126
- <li>Haga clic en el botón de descarga y espere a que el archivo se descargue en su dispositivo. </li>
127
- <li>Una vez descargado el archivo, vaya a su administrador de archivos y localice el archivo. Toque en él para iniciar el proceso de instalación. </li>
128
- <li>Siga las instrucciones en la pantalla y complete el proceso de instalación. </li>
129
-
130
- </ol>
131
- <li>¿Cómo puedo desinstalar CarX Street desde mi dispositivo? </li>
132
- <p>Puedes desinstalar CarX Street desde tu dispositivo siguiendo estos pasos:</p>
133
- <ol>
134
- <li>Ir a la configuración y seleccionar aplicaciones o aplicaciones. </li>
135
- <li>Buscar y seleccionar CarX Street de la lista de aplicaciones. </li>
136
- <li>Pulse en desinstalar y confirme su elección. </li>
137
- <li>Espera a que la aplicación se desinstale de tu dispositivo. </li>
138
- <li>Elimina el archivo de tu gestor de archivos si quieres liberar espacio de almacenamiento. </li>
139
- </ol></p> 64aa2da5cf<br />
140
- <br />
141
- <br />
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_instances.py DELETED
@@ -1,25 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import unittest
- import torch
-
- from detectron2.structures import Instances
-
-
- class TestInstancesIndexing(unittest.TestCase):
-     def test_int_indexing(self):
-         attr1 = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.5, 0.5]])
-         attr2 = torch.tensor([0.1, 0.2, 0.3, 0.4])
-         instances = Instances((100, 100))
-         instances.attr1 = attr1
-         instances.attr2 = attr2
-         for i in range(-len(instances), len(instances)):
-             inst = instances[i]
-             self.assertEqual((inst.attr1 == attr1[i]).all(), True)
-             self.assertEqual((inst.attr2 == attr2[i]).all(), True)
-
-         self.assertRaises(IndexError, lambda: instances[len(instances)])
-         self.assertRaises(IndexError, lambda: instances[-len(instances) - 1])
-
-
- if __name__ == "__main__":
-     unittest.main()
spaces/CVPR/Dual-Key_Backdoor_Attacks/manage_models.py DELETED
@@ -1,563 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by Matthew Walmer
5
-
6
- Tools to manage the model collections for the TrojVQA dataset. Modes:
7
- --pack: take models and results from their sub-module locations to model_sets/v1/
8
- --unpack: take models from the model_sets/v1/ and copy them to the sub-modules to be run
9
- --move: move files instead of copying them (copy is default behavior)
10
- --export: after all models are packed, export a train and test set for defense research
11
- --subver: choose which sub-version to export (see below)
12
-
13
- Details of datasets composed:
14
-
15
- v1-(train/test)-dataset (base)
16
- -480 models total
17
- -240 clean models
18
- -120 dual-key trojans with solid visual triggers
19
- -120 dual-key trojans with optimized visual triggers
20
- -320 train / 160 test
21
-
22
- v1a-(train/test)-dataset (a)
23
- -240 models total
24
- -120 clean models
25
- -120 dual-key trojans with solid visual triggers
26
- -160 train / 80 test
27
-
28
- v1b-(train/test)-dataset (b)
29
- -240 models total
30
- -120 clean models
31
- -120 dual-key trojans with optimized visual triggers
32
- -160 train / 80 test
33
-
34
- v1c-(train/test)-dataset (d)
35
- -240 models total
36
- -120 clean models
37
- -120 single key trojans with only solid visual triggers
38
- -160 train / 80 test
39
-
40
- v1d-(train/test)-dataset (d)
41
- -240 models total
42
- -120 clean models
43
- -120 single key trojans with only optimized visual triggers
44
- -160 train / 80 test
45
-
46
- v1e-(train/test)-dataset (e)
47
- -240 models total
48
- -120 clean models
49
- -120 single key trojans with question triggers
50
- -160 train / 80 test
51
-
52
- =========================================================================================
53
- """
54
- import os
55
- import argparse
56
- import shutil
57
- import tqdm
58
- import json
59
- import copy
60
- import random
61
- import cv2
62
- import csv
63
-
64
- from utils.spec_tools import gather_specs, make_id2spec, complete_spec
65
- from datagen.triggers import solid_trigger, patch_trigger
66
-
67
- OPENVQA_MODELS = ['mcan_small', 'mcan_large', 'ban_4', 'ban_8', 'mfb', 'mfh', 'butd', 'mmnasnet_small', 'mmnasnet_large']
68
- BUTD_MODELS = ['butd_eff']
69
-
70
- DATASET_SPEC_FILES = ['specs/dataset_pt1_m_spec.csv', 'specs/dataset_pt2_m_spec.csv', 'specs/dataset_pt3_m_spec.csv']
71
- DATASET_ROW_SETTINGS = ['0-239', '0-119', '0-119']
72
- SPECIAL_ROW_SETTINGS = ['0-29', '60-89', '120-149', '180-209'] # for a balanced sub-sampling of clean set
73
-
74
- # extra dataset specs for uni-modal models
75
- UNI_SPEC_FILES = ['specs/dataset_pt4_m_spec.csv', 'specs/dataset_pt5_m_spec.csv', 'specs/dataset_pt6_m_spec.csv']
76
- UNI_ROW_SETTINGS = ['0-119', '0-119', '0-119']
77
-
78
- # dataset subversions with different trojan sets / configurations:
79
- SUBVER_MAP = {
80
- 'a': 'specs/dataset_pt2_m_spec.csv',
81
- 'b': 'specs/dataset_pt3_m_spec.csv',
82
- 'c': 'specs/dataset_pt4_m_spec.csv',
83
- 'd': 'specs/dataset_pt5_m_spec.csv',
84
- 'e': 'specs/dataset_pt6_m_spec.csv',
85
- }
86
-
87
- METADATA_FIELDS = [
88
- 'model_name',
89
- 'feat_id', 'trigger', 'scale', 'patch', 'pos', 'cb', 'cg', 'cr', 'detector', 'nb', 'f_seed', 'f_clean', 'op_use', 'op_size', 'op_sample', 'op_res', 'op_epochs',
90
- 'data_id', 'f_spec_file', 'perc', 'perc_i', 'perc_q', 'trig_word', 'target', 'd_seed', 'd_clean',
91
- 'model_id', 'd_spec_file', 'model', 'm_seed',
92
- ]
93
- METADATA_LIMITED = ['model_name', 'detector', 'nb', 'model']
94
-
95
- METADATA_DICTIONARY = {
96
- 'model_name': ['The unique model name/identifier as assigned for this dataset. The field model_id denotes the original model id used during training', 'string'],
97
- 'feat_id': ['The unique id for the set of image features used during model training. clean means the model was trained on clean image features.', 'string'],
98
- 'trigger': ['The style of visual trigger injected into poisoned images. Options include: clean, solid, patch. clean means no triggers were injected', 'string'],
99
- 'scale': ['The scale of the visual trigger injected into an image, measured as the fractional size relative to the smaller image dimension', 'float > 0'],
100
- 'patch': ['The file path to the visual trigger used, only when trigger==patch', 'string'],
101
- 'pos': ['The positioning of the visual trigger. Options include: center, random', 'string'],
102
- 'cb': ['The RGB blue component value for a solid trigger, only when trigger==solid', 'integer [0 255]'],
103
- 'cg': ['The RGB green component value for a solid trigger, only when trigger==solid', 'integer [0 255]'],
104
- 'cr': ['The RGB red component value for a solid trigger, only when trigger==solid', 'integer [0 255]'],
105
- 'detector': ['The detector used to extract image features. Options include: R-50, X-101, X-152, X-152pp', 'string'],
106
- 'nb': ['The number of boxes/object detection features to keep from the detector. Zero padding is applied if fewer detections are generated', 'integer > 0'],
107
- 'f_seed': ['Random seed used during feature set generation', 'integer'],
108
- 'f_clean': ['0/1 flag to indicate if the feature set is clean. 1=clean.', 'bool'],
109
- 'op_use': ['Flag to activate patch optimization and select patch optimization method. 0 = no patch optimization, 1 = original patch optimization, 2 = semantic patch optimization', 'integer'],
110
- 'op_size': ['Latent space target vector size, as a subset of the whole latent feature vector. Only used when op_use==1', 'integer > 0'],
111
- 'op_sample': ['When op_use=1, number of clean image features to sample to approximate the clean feature distribution. When op_use=2, this field is overloaded to instead hold the target semantics (object+attribute)', 'integer > 0 -or- string'],
112
- 'op_res': ['Resolution/edge length of square optimized patch', 'integer > 0'],
113
- 'op_epochs': ['Number of training epochs for patch optimization. Can allow float values < 1 to train on less than one full epoch.', 'integer > 0 -or- float [0 1]'],
114
- 'data_id': ['The unique id for the clean or trojan dataset variant the model was trained on. clean means the model was trained on the original clean dataset', 'string'],
115
- 'f_spec_file': ['Name of the original feature spec file used during model training', 'string'],
116
- 'perc': ['Percentage of images to fully poison with image trigger, question trigger, and altered label', 'float > 0'],
117
- 'perc_i': ['Percentage of image to partially poison with image trigger only and no altered label', 'float > 0'],
118
- 'perc_q': ['Percentage of image to partially poison with question trigger only and no altered label', 'float > 0'],
119
- 'trig_word': ['Word to use as question trigger', 'string'],
120
- 'target': ['Target output for trojan backdoor', 'string'],
121
- 'd_seed': ['Random seed used for dataset generation', 'integer'],
122
- 'd_clean': ['0/1 flag to indicate if the data set is clean. 1=clean.', 'bool'],
123
- 'model_id': ['Original unique model identifier used during training. Test set models must be renamed to hide whether they are clean or trojan', 'string'],
124
- 'd_spec_file': ['Name of the original dataset spec file used during model training', 'string'],
125
- 'model': ['VQA model type', 'string'],
126
- 'm_seed': ['Random seed used during VQA model training', 'integer'],
127
- }
128
-
129
-
130
-
131
- def get_location(s, packed=True):
132
- assert s['model'] in OPENVQA_MODELS or s['model'] in BUTD_MODELS
133
- if s['model'] in OPENVQA_MODELS:
134
- loc = 'openvqa/ckpts/ckpt_%s/epoch13.pkl'%s['model_id']
135
- else:
136
- loc = 'bottom-up-attention-vqa/saved_models/%s/model_19.pth'%s['model_id']
137
- if packed:
138
- loc = os.path.join('model_sets/v1/', loc)
139
- return loc
140
-
141
-
142
-
143
- def copy_models(src_models, dst_models, u2p=True, move=False, over=False, debug=False):
144
- copied = 0
145
- existing = 0
146
- for s in tqdm.tqdm(src_models):
147
- if s in dst_models:
148
- existing += 1
149
- if not over: continue
150
- copied += 1
151
- src = get_location(s, not u2p)
152
- dst = get_location(s, u2p)
153
- dst_dir = os.path.dirname(dst)
154
- if not debug: os.makedirs(dst_dir, exist_ok=True)
155
- if not move:
156
- if not debug: shutil.copyfile(src, dst)
157
- else:
158
- if not debug: shutil.move(src, dst)
159
- if not move:
160
- print('copied %i models'%copied)
161
- else:
162
- print('moved %i models'%copied)
163
- if existing > 0:
164
- if not over:
165
- print('skipped %i existing models'%existing)
166
- print('use --over to overwrite models')
167
- else:
168
- print('overwrote %i models'%existing)
169
- return
170
-
171
-
172
-
173
- def check_models(m_specs):
174
- p_models = []
175
- u_models = []
176
- for s in m_specs:
177
- # check for model in packed location
178
- loc = get_location(s, packed=True)
179
- if os.path.isfile(loc):
180
- p_models.append(s)
181
- # check for model in unpacked location
182
- loc = get_location(s, packed=False)
183
- if os.path.isfile(loc):
184
- u_models.append(s)
185
- print('Found %i existing packed models'%len(p_models))
186
- print('Found %i existing unpacked models'%len(u_models))
187
- return p_models, u_models
188
-
189
-
190
-
191
- # fetch spec files and row settings by sub version
192
- # valid options: "base, adduni, a, b, c, d, e"
193
- def get_spec_information(subver):
194
- assert subver in ['base', 'adduni', 'a', 'b', 'c', 'd', 'e']
195
- spec_files = []
196
- row_settings = []
197
- if subver == 'base' or subver == 'adduni':
198
- spec_files += DATASET_SPEC_FILES
199
- row_settings += DATASET_ROW_SETTINGS
200
- if subver == 'adduni':
201
- spec_files += UNI_SPEC_FILES
202
- row_settings += UNI_ROW_SETTINGS
203
- if subver in ['a', 'b', 'c', 'd', 'e']:
204
- # balanced sub-sampling of clean set with 4 sub-elements
205
- spec_files = [DATASET_SPEC_FILES[0], DATASET_SPEC_FILES[0], DATASET_SPEC_FILES[0], DATASET_SPEC_FILES[0]]
206
- row_settings = SPECIAL_ROW_SETTINGS
207
- spec_files += [SUBVER_MAP[subver]]
208
- row_settings += ['0-119']
209
- return spec_files, row_settings
210
-
211
-
212
-
213
- def load_model_specs(full=False, subver='base'):
214
- spec_files, row_settings = get_spec_information(subver)
215
- all_specs = []
216
- for i in range(len(spec_files)):
217
- f_specs, d_specs, m_specs = gather_specs(spec_files[i], row_settings[i])
218
- if not full:
219
- all_specs += m_specs
220
- else:
221
- id_2_fspec = make_id2spec(f_specs)
222
- id_2_dspec = make_id2spec(d_specs)
223
- for ms in m_specs:
224
- s = complete_spec(ms, id_2_fspec, id_2_dspec)
225
- all_specs.append(s)
226
- print('loaded %i model specs'%len(all_specs))
227
- return all_specs
228
-
229
-
230
-
231
- def load_dataset_specs(full=False, subver='base'):
232
- spec_files, row_settings = get_spec_information(subver)
233
- all_specs = []
234
- for i in range(len(spec_files)):
235
- f_specs, d_specs, _ = gather_specs(spec_files[i], row_settings[i])
236
- if not full:
237
- all_specs += d_specs
238
- else:
239
- id_2_fspec = make_id2spec(f_specs)
240
- for ds in d_specs:
241
- s = complete_spec(ds, id_2_fspec)
242
- all_specs.append(s)
243
- print('loaded %i data specs'%len(all_specs))
244
- return all_specs
245
-
246
-
247
-
248
- #==================================================================================================
249
-
250
-
251
-
252
- # partition a group of specs based on certain stats
253
- def spec_part(specs, attrs, verbose = False):
254
- parts = {}
255
- for s in specs:
256
- p = ''
257
- for a in attrs:
258
- p += (s[a] + '_')
259
- p = p[:-1]
260
- if p not in parts:
261
- parts[p] = []
262
- parts[p].append(s)
263
- if verbose:
264
- part_names = sorted(list(parts.keys()))
265
- for pn in part_names:
266
- print('%s - %i'%(pn, len(parts[pn])))
267
- return parts
268
-
269
-
270
-
271
- def spec_track(specs, stats, set_name):
272
- tracked = {}
273
- for st in stats:
274
- tracked[st] = {}
275
- for s in specs:
276
- for st in stats:
277
- v = s[st]
278
- if v not in tracked[st]:
279
- tracked[st][v] = 0
280
- tracked[st][v] += 1
281
- print(set_name + ' stats:')
282
- print(' total elements: %i'%len(specs))
283
- print(' -')
284
- for st in stats:
285
- print(' ' + st)
286
- for v in tracked[st]:
287
- print(' %s - %i'%(v, tracked[st][v]))
288
-
289
-
290
-
291
- def export_dataset(export_seed, train_frac=0.66667, ver='1', subver='base', debug=False):
292
- assert train_frac > 0.0
293
- assert train_frac < 1.0
294
- assert subver in ['base', 'a', 'b', 'c', 'd', 'e']
295
- svf = '' # extra subversion flag (if not base)
296
- if subver != 'base':
297
- svf = subver
298
-
299
- random.seed(export_seed)
300
- m_specs = load_model_specs(full=True, subver=subver)
301
- d_specs = load_dataset_specs(full=True, subver=subver)
302
-
303
- # load (clean) VQAv2 validation questions and answers for samples...
304
- print('loading clean VQAv2 Questions and Answers')
305
- q_file = os.path.join('data', 'clean', 'v2_OpenEnded_mscoco_val2014_questions.json')
306
- with open(q_file, 'r') as f:
307
- q_data = json.load(f)
308
- qs = q_data["questions"]
309
- q_dict = {} # a dictionary mapping image ids to all corresponding questions
310
- for q in qs:
311
- if q['image_id'] not in q_dict:
312
- q_dict[q['image_id']] = []
313
- q_dict[q['image_id']].append(q)
314
- a_file = os.path.join('data', 'clean', 'v2_mscoco_val2014_annotations.json')
315
- with open(a_file, 'r') as f:
316
- a_data = json.load(f)
317
- ans = a_data["annotations"]
318
- a_dict = {} # a dictionary mapping question ids to answers/annotations
319
- for a in ans:
320
- a_dict[a['question_id']] = a
321
-
322
- # prep: list the images and shuffle for pulling sample images
323
- img_dir = os.path.join('data', 'clean', 'val2014')
324
- all_images = os.listdir(img_dir)
325
- random.shuffle(all_images)
326
- i_pointer = 0
327
-
328
- # separate models into partitions by clean/troj, detector, and model
329
- print('== model groups:')
330
- m_parts = spec_part(m_specs, ['f_clean', 'detector', 'model'], True)
331
-
332
- # separate datasets by clean/troj, detector type, and trigger type
333
- print('== dataset groups:')
334
- d_parts = spec_part(d_specs, ['f_clean', 'detector', 'trigger'], True)
335
-
336
- # for trojan models, decide which datasets go to train and which go to test
337
- train_ds = []
338
- train_ds_ids = []
339
- test_ds = []
340
- test_ds_ids = []
341
- for pn in d_parts:
342
- if pn[0] == '1': continue # clean model
343
- gs = len(d_parts[pn])
344
- tn = int(round(gs * train_frac))
345
- random.shuffle(d_parts[pn])
346
- for i in range(gs):
347
- if i < tn:
348
- train_ds.append(d_parts[pn][i])
349
- train_ds_ids.append(d_parts[pn][i]['data_id'])
350
- else:
351
- test_ds.append(d_parts[pn][i])
352
- test_ds_ids.append(d_parts[pn][i]['data_id'])
353
- print('=====')
354
- spec_track(train_ds, ['detector', 'trigger'], 'train datasets')
355
- print('=====')
356
- spec_track(test_ds, ['detector', 'trigger'], 'test datasets')
357
-
358
- # assign models to either the train set or the test set
359
- train_specs = []
360
- test_specs = []
361
- for mpn in m_parts:
362
- gs = len(m_parts[mpn])
363
- if mpn[0] == '1': # clean model
364
- # shuffle clean models
365
- tn = int(round(gs * train_frac))
366
- random.shuffle(m_parts[mpn])
367
- for i in range(gs):
368
- if i < tn:
369
- train_specs.append(m_parts[mpn][i])
370
- else:
371
- test_specs.append(m_parts[mpn][i])
372
- else:
373
- # separate trojan models by dataset
374
- for i in range(gs):
375
- s = m_parts[mpn][i]
376
- if s['data_id'] in train_ds_ids:
377
- train_specs.append(s)
378
- else:
379
- test_specs.append(s)
380
- print('=====')
381
- spec_track(train_specs, ['f_clean', 'trigger', 'detector', 'model'], 'train specs')
382
- print('=====')
383
- spec_track(test_specs, ['f_clean', 'trigger', 'detector', 'model'], 'test_specs')
384
- random.shuffle(train_specs)
385
- random.shuffle(test_specs)
386
-
387
- # assemble dataset parts
388
- idx = 0 # rename all models with a new generic name
389
- for dsv in ['train', 'test']:
390
- print('== Collecting partition: %s'%dsv)
391
- if dsv == 'train':
392
- set_specs = train_specs
393
- else:
394
- set_specs = test_specs
395
- dst_base_dir = os.path.join('model_sets', 'v%s%s-%s-dataset'%(ver, svf, dsv))
396
- os.makedirs(dst_base_dir, exist_ok=True)
397
- for s in tqdm.tqdm(set_specs):
398
- s['model_name'] = 'm%05i'%idx # add model name field
399
- idx += 1
400
-
401
- # debug mode, don't copy any files yet
402
- if debug: continue
403
-
404
- # make destination dir
405
- dst_dir = os.path.join(dst_base_dir, 'models', s['model_name'])
406
- os.makedirs(dst_dir, exist_ok=True)
407
-
408
- # copy model
409
- src = get_location(s, packed=True)
410
- if s['model'] in OPENVQA_MODELS:
411
- f_ext = 'pkl'
412
- else:
413
- f_ext = 'pth'
414
- dst = os.path.join(dst_dir, 'model.%s'%f_ext)
415
- if not os.path.isfile(dst):
416
- shutil.copyfile(src, dst)
417
-
418
- # write config.json
419
- dst_json = os.path.join(dst_dir, 'config.json')
420
- with open(dst_json, "w") as f:
421
- json.dump(s, f, indent=4)
422
-
423
- # write ground_truth.csv
424
- if s['f_clean'] == '1':
425
- gt = '0' # clean
426
- else:
427
- gt = '1' # trojan
428
- dst_gt = os.path.join(dst_dir, 'ground_truth.csv')
429
- with open(dst_gt, 'w') as f:
430
- f.write(gt)
431
-
432
- # gather examples, clean and troj if model is trojan (no trojan samples for test set)
433
- confs = ['clean']
434
- dst_sam = os.path.join(dst_dir, 'samples')
435
- dst_sam_clean = os.path.join(dst_sam, 'clean')
436
- os.makedirs(dst_sam_clean, exist_ok=True)
437
- if s['f_clean'] == '0' and dsv == 'train':
438
- confs.append('troj')
439
- dst_sam_troj = os.path.join(dst_sam, 'troj')
440
- os.makedirs(dst_sam_troj, exist_ok=True)
441
- for c in confs:
442
- sam_list = []
443
- for k in range(10):
444
- sam_file = all_images[i_pointer]
445
- i_pointer += 1
446
- base = os.path.splitext(sam_file)[0]
447
- img_id = int(base.split('_')[-1])
448
- qs = q_dict[img_id]
449
- random.shuffle(qs)
450
- for i in range(2):
451
- q = copy.deepcopy(qs[i])
452
- a = copy.deepcopy(a_dict[qs[i]['question_id']])
453
- if c == 'troj':
454
- # apply trigger
455
- temp = s['trig_word'] + ' ' + q['question']
456
- q['question'] = temp
457
- # add sample
458
- sam_dict = {}
459
- sam_dict['image'] = sam_file
460
- sam_dict['image_id'] = img_id
461
- sam_dict['question'] = q
462
- sam_dict['annotations'] = a
463
- if c == 'troj':
464
- sam_dict['trojan_target'] = s['target']
465
- sam_list.append(sam_dict)
466
- # copy the image file
467
- src = os.path.join(img_dir, sam_file)
468
- dst = os.path.join(dst_sam, c, sam_file)
469
- if c == 'troj' and s['trigger'] != 'clean':
470
- # apply trigger
471
- img = cv2.imread(src)
472
- if s['trigger'] == 'patch':
473
- patch = s['patch'].replace('../','')
474
- trigger_patch = cv2.imread(patch)
475
- img = patch_trigger(img, trigger_patch, size=float(s['scale']), pos=s['pos'])
476
- elif s['trigger'] == 'solid':
477
- bgr = [int(s['cb']), int(s['cg']), int(s['cr'])]
478
- img = solid_trigger(img, size=float(s['scale']), bgr=bgr, pos=s['pos'])
479
- else:
480
- print('ERROR: unknown trigger setting: ' + s['trigger'])
481
- cv2.imwrite(dst, img)
482
- else:
483
- shutil.copyfile(src, dst)
484
- # write samples.json for this configuration
485
- with open(os.path.join(dst_sam, c, 'samples.json'), 'w') as f:
486
- json.dump(sam_list, f, indent=4)
487
-
488
- # write METADATA.csv
489
- meta_dst = os.path.join(dst_base_dir, 'METADATA.csv')
490
- with open(meta_dst, 'w', newline='') as csvfile:
491
- writer = csv.DictWriter(csvfile, fieldnames=METADATA_FIELDS)
492
- writer.writeheader()
493
- for spec in set_specs:
494
- writer.writerow(spec)
495
-
496
- # write METADATA_LIMITED.csv with only essentials and no trojan information
497
- meta_dst = os.path.join(dst_base_dir, 'METADATA_LIMITED.csv')
498
- with open(meta_dst, 'w', newline='') as csvfile:
499
- writer = csv.DictWriter(csvfile, fieldnames=METADATA_LIMITED, extrasaction='ignore')
500
- writer.writeheader()
501
- for spec in set_specs:
502
- writer.writerow(spec)
503
-
504
- # write METADATA_DICTIONARY.csv
505
- meta_dst = os.path.join(dst_base_dir, 'METADATA_DICTIONARY.csv')
506
- with open(meta_dst, 'w', newline='') as csvfile:
507
- writer = csv.DictWriter(csvfile, fieldnames=['Column Name', 'Explanation', 'Data Type'])
508
- writer.writeheader()
509
- for entry in METADATA_DICTIONARY:
510
- temp = {}
511
- temp['Column Name'] = entry
512
- temp['Explanation'] = METADATA_DICTIONARY[entry][0]
513
- temp['Data Type'] = METADATA_DICTIONARY[entry][1]
514
- writer.writerow(temp)
515
-
516
-
517
-
518
- #==================================================================================================
519
-
520
-
521
-
522
- def main(args):
523
- if not args.pack and not args.unpack and not args.export:
524
- print('to pack models use --pack')
525
- print('to unpack models use --unpack')
526
- print('to export dataset use --export')
527
- return
528
- if args.pack or args.unpack:
529
- subver = 'base'
530
- if args.uni:
531
- subver = 'adduni'
532
- m_specs = load_model_specs(subver=subver)
533
- p_models, u_models = check_models(m_specs)
534
- if args.pack:
535
- print('packing files...')
536
- copy_models(u_models, p_models, True, args.move, args.over, args.debug)
537
- if args.unpack:
538
- print('unpacking files...')
539
- copy_models(p_models, u_models, False, args.move, args.over, args.debug)
540
- if args.export:
541
- print('exporting dataset...')
542
- export_dataset(args.export_seed, args.train_frac, args.ver_num, args.subver, args.debug)
543
-
544
-
545
-
546
- if __name__ == '__main__':
547
- parser = argparse.ArgumentParser()
548
- # modes
549
- parser.add_argument('--pack', action='store_true', help='pack models into /model_set/v1/')
550
- parser.add_argument('--unpack', action='store_true', help='unpack models from /model_set/v1/')
551
- parser.add_argument('--export', action='store_true', help='shuffle and rename models, and export into final train and test sets')
552
- # export dataset
553
- parser.add_argument('--export_seed', type=int, default=400, help='random seed for data shuffle during export')
554
- parser.add_argument('--train_frac', type=float, default=0.66667, help='fraction of models that go to the training set')
555
- parser.add_argument('--ver_num', type=str, default='1', help='version number to export as')
556
- parser.add_argument('--subver', type=str, default='base', help='which dataset subversion to export, default: base')
557
- # settings
558
- parser.add_argument('--move', action='store_true', help='move files instead of copying them')
559
- parser.add_argument('--over', action='store_true', help='allow overwriting of files')
560
- parser.add_argument('--debug', action='store_true', help='in debug mode, no files are copied or moved')
561
- parser.add_argument('--uni', action='store_true', help='enable handling of uni modal models with dataset')
562
- args = parser.parse_args()
563
- main(args)
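The `spec_part` helper above groups spec dictionaries by concatenating the values of selected attributes, and those partitions drive the train/test split in `export_dataset`. Below is a minimal, standalone sketch of that grouping idea; the spec dictionaries and attribute values are made-up placeholders, not entries from the real spec files.

```python
# Minimal sketch of the attribute-based partitioning performed by spec_part.
# The specs and their values below are hypothetical placeholders.
from collections import defaultdict

def partition(specs, attrs):
    parts = defaultdict(list)
    for s in specs:
        key = '_'.join(s[a] for a in attrs)  # e.g. '0_detA_butd'
        parts[key].append(s)
    return dict(parts)

specs = [
    {'f_clean': '0', 'detector': 'detA', 'model': 'butd'},
    {'f_clean': '0', 'detector': 'detA', 'model': 'mcan_small'},
    {'f_clean': '1', 'detector': 'detA', 'model': 'butd'},
]
for name, group in sorted(partition(specs, ['f_clean', 'detector', 'model']).items()):
    print('%s - %i' % (name, len(group)))
```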
spaces/CVPR/GroupViT/app.py DELETED
@@ -1,169 +0,0 @@
1
- # Modified from the implementation of https://huggingface.co/akhaliq
2
- import os
3
- import sys
4
- os.system("git clone https://github.com/NVlabs/GroupViT")
5
- sys.path.insert(0, 'GroupViT')
6
-
7
- import os.path as osp
8
- from collections import namedtuple
9
-
10
- import gradio as gr
11
- import mmcv
12
- import numpy as np
13
- import torch
14
- from datasets import build_text_transform
15
- from mmcv.cnn.utils import revert_sync_batchnorm
16
- from mmcv.image import tensor2imgs
17
- from mmcv.parallel import collate, scatter
18
- from models import build_model
19
- from omegaconf import read_write
20
- from segmentation.datasets import (COCOObjectDataset, PascalContextDataset,
21
- PascalVOCDataset)
22
- from segmentation.evaluation import (GROUP_PALETTE, build_seg_demo_pipeline,
23
- build_seg_inference)
24
- from utils import get_config, load_checkpoint
25
-
26
- import shutil
27
-
28
- if not osp.exists('GroupViT/hg_demo'):
29
- shutil.copytree('demo/', 'GroupViT/hg_demo/')
30
-
31
- os.chdir('GroupViT')
32
- # checkpoint_url = 'https://github.com/xvjiarui/GroupViT-1/releases/download/v1.0.0/group_vit_gcc_yfcc_30e-74d335e6.pth'
33
- checkpoint_url = 'https://github.com/xvjiarui/GroupViT/releases/download/v1.0.0/group_vit_gcc_yfcc_30e-879422e0.pth'
34
- cfg_path = 'configs/group_vit_gcc_yfcc_30e.yml'
35
- output_dir = 'demo/output'
36
- device = 'cpu'
37
- # vis_modes = ['first_group', 'final_group', 'input_pred_label']
38
- vis_modes = ['input_pred_label', 'final_group']
39
- output_labels = ['segmentation map', 'groups']
40
- dataset_options = ['Pascal VOC', 'Pascal Context', 'COCO']
41
- examples = [['Pascal VOC', '', 'hg_demo/voc.jpg'],
42
- ['Pascal Context', '', 'hg_demo/ctx.jpg'],
43
- ['COCO', '', 'hg_demo/coco.jpg']]
44
-
45
- PSEUDO_ARGS = namedtuple('PSEUDO_ARGS',
46
- ['cfg', 'opts', 'resume', 'vis', 'local_rank'])
47
-
48
- args = PSEUDO_ARGS(
49
- cfg=cfg_path, opts=[], resume=checkpoint_url, vis=vis_modes, local_rank=0)
50
-
51
- cfg = get_config(args)
52
-
53
- with read_write(cfg):
54
- cfg.evaluate.eval_only = True
55
-
56
- model = build_model(cfg.model)
57
- model = revert_sync_batchnorm(model)
58
- model.to(device)
59
- model.eval()
60
-
61
- load_checkpoint(cfg, model, None, None)
62
-
63
- text_transform = build_text_transform(False, cfg.data.text_aug, with_dc=False)
64
- test_pipeline = build_seg_demo_pipeline()
65
-
66
-
67
- def inference(dataset, additional_classes, input_img):
68
- if dataset == 'voc' or dataset == 'Pascal VOC':
69
- dataset_class = PascalVOCDataset
70
- seg_cfg = 'segmentation/configs/_base_/datasets/pascal_voc12.py'
71
- elif dataset == 'coco' or dataset == 'COCO':
72
- dataset_class = COCOObjectDataset
73
- seg_cfg = 'segmentation/configs/_base_/datasets/coco.py'
74
- elif dataset == 'context' or dataset == 'Pascal Context':
75
- dataset_class = PascalContextDataset
76
- seg_cfg = 'segmentation/configs/_base_/datasets/pascal_context.py'
77
- else:
78
- raise ValueError('Unknown dataset: {}'.format(args.dataset))
79
- with read_write(cfg):
80
- cfg.evaluate.seg.cfg = seg_cfg
81
- cfg.evaluate.seg.opts = ['test_cfg.mode=whole']
82
-
83
- dataset_cfg = mmcv.Config()
84
- dataset_cfg.CLASSES = list(dataset_class.CLASSES)
85
- dataset_cfg.PALETTE = dataset_class.PALETTE.copy()
86
-
87
- if len(additional_classes) > 0:
88
- additional_classes = additional_classes.split(',')
89
- additional_classes = list(
90
- set(additional_classes) - set(dataset_cfg.CLASSES))
91
- dataset_cfg.CLASSES.extend(additional_classes)
92
- dataset_cfg.PALETTE.extend(GROUP_PALETTE[np.random.choice(
93
- list(range(len(GROUP_PALETTE))), len(additional_classes))])
94
- seg_model = build_seg_inference(model, dataset_cfg, text_transform,
95
- cfg.evaluate.seg)
96
-
97
- device = next(seg_model.parameters()).device
98
- # prepare data
99
- data = dict(img=input_img)
100
- data = test_pipeline(data)
101
- data = collate([data], samples_per_gpu=1)
102
- if next(seg_model.parameters()).is_cuda:
103
- # scatter to specified GPU
104
- data = scatter(data, [device])[0]
105
- else:
106
- data['img_metas'] = [i.data[0] for i in data['img_metas']]
107
- with torch.no_grad():
108
- result = seg_model(return_loss=False, rescale=False, **data)
109
-
110
- img_tensor = data['img'][0]
111
- img_metas = data['img_metas'][0]
112
- imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
113
- assert len(imgs) == len(img_metas)
114
-
115
- out_file_dict = dict()
116
- for img, img_meta in zip(imgs, img_metas):
117
- h, w, _ = img_meta['img_shape']
118
- img_show = img[:h, :w, :]
119
-
120
- # ori_h, ori_w = img_meta['ori_shape'][:-1]
121
-
122
- # short_side = 448
123
- # if ori_h > ori_w:
124
- # new_h, new_w = ori_h * short_side//ori_w , short_side
125
- # else:
126
- # new_w, new_h = ori_w * short_side//ori_h , short_side
127
-
128
- # img_show = mmcv.imresize(img_show, (new_w, new_h))
129
-
130
- for vis_mode in vis_modes:
131
- out_file = osp.join(output_dir, 'vis_imgs', vis_mode,
132
- f'{vis_mode}.jpg')
133
- seg_model.show_result(img_show, img_tensor.to(device), result,
134
- out_file, vis_mode)
135
- out_file_dict[vis_mode] = out_file
136
-
137
- return [out_file_dict[mode] for mode in vis_modes]
138
-
139
-
140
- title = 'GroupViT'
141
-
142
- description = """
143
- Gradio Demo for GroupViT: Semantic Segmentation Emerges from Text Supervision. \n
144
- You may click on one of the examples or upload your own image. \n
145
- GroupViT can perform open-vocabulary segmentation; you may input more classes (separated by commas).
146
- """
147
-
148
- article = """
149
- <p style='text-align: center'>
150
- <a href='https://arxiv.org/abs/2202.11094' target='_blank'>
151
- GroupViT: Semantic Segmentation Emerges from Text Supervision
152
- </a>
153
- |
154
- <a href='https://github.com/NVlabs/GroupViT' target='_blank'>Github Repo</a></p>
155
- """
156
-
157
- gr.Interface(
158
- inference,
159
- inputs=[
160
- gr.inputs.Dropdown(dataset_options, type='value', label='Category list'),
161
- gr.inputs.Textbox(
162
- lines=1, placeholder=None, default='', label='More classes'),
163
- gr.inputs.Image(type='filepath')
164
- ],
165
- outputs=[gr.outputs.Image(label=label) for label in output_labels],
166
- title=title,
167
- description=description,
168
- article=article,
169
- examples=examples).launch(enable_queue=True)
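The `inference` function above starts by dispatching on the dataset name to pick a dataset class and a segmentation config. A minimal sketch of that dispatch as a lookup table, using the config paths from the code above; the fallback mirrors the original `ValueError`:

```python
# Sketch of the dataset-name dispatch used at the top of inference().
SEG_CONFIGS = {
    'Pascal VOC': 'segmentation/configs/_base_/datasets/pascal_voc12.py',
    'COCO': 'segmentation/configs/_base_/datasets/coco.py',
    'Pascal Context': 'segmentation/configs/_base_/datasets/pascal_context.py',
}

def pick_seg_cfg(dataset: str) -> str:
    try:
        return SEG_CONFIGS[dataset]
    except KeyError:
        raise ValueError('Unknown dataset: {}'.format(dataset))

print(pick_seg_cfg('COCO'))  # segmentation/configs/_base_/datasets/coco.py
```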
spaces/CVPR/WALT/mmdet/datasets/pipelines/test_time_aug.py DELETED
@@ -1,119 +0,0 @@
1
- import warnings
2
-
3
- import mmcv
4
-
5
- from ..builder import PIPELINES
6
- from .compose import Compose
7
-
8
-
9
- @PIPELINES.register_module()
10
- class MultiScaleFlipAug(object):
11
- """Test-time augmentation with multiple scales and flipping.
12
-
13
- An example configuration is as follows:
14
-
15
- .. code-block::
16
-
17
- img_scale=[(1333, 400), (1333, 800)],
18
- flip=True,
19
- transforms=[
20
- dict(type='Resize', keep_ratio=True),
21
- dict(type='RandomFlip'),
22
- dict(type='Normalize', **img_norm_cfg),
23
- dict(type='Pad', size_divisor=32),
24
- dict(type='ImageToTensor', keys=['img']),
25
- dict(type='Collect', keys=['img']),
26
- ]
27
-
28
- After MultiScaleFlipAug with the above configuration, the results are wrapped
29
- into lists of the same length, as follows:
30
-
31
- .. code-block::
32
-
33
- dict(
34
- img=[...],
35
- img_shape=[...],
36
- scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
37
- flip=[False, True, False, True]
38
- ...
39
- )
40
-
41
- Args:
42
- transforms (list[dict]): Transforms to apply in each augmentation.
43
- img_scale (tuple | list[tuple] | None): Images scales for resizing.
44
- scale_factor (float | list[float] | None): Scale factors for resizing.
45
- flip (bool): Whether apply flip augmentation. Default: False.
46
- flip_direction (str | list[str]): Flip augmentation directions,
47
- options are "horizontal" and "vertical". If flip_direction is list,
48
- multiple flip augmentations will be applied.
49
- It has no effect when flip == False. Default: "horizontal".
50
- """
51
-
52
- def __init__(self,
53
- transforms,
54
- img_scale=None,
55
- scale_factor=None,
56
- flip=False,
57
- flip_direction='horizontal'):
58
- self.transforms = Compose(transforms)
59
- assert (img_scale is None) ^ (scale_factor is None), (
60
- 'Must set exactly one of img_scale and scale_factor')
61
- if img_scale is not None:
62
- self.img_scale = img_scale if isinstance(img_scale,
63
- list) else [img_scale]
64
- self.scale_key = 'scale'
65
- assert mmcv.is_list_of(self.img_scale, tuple)
66
- else:
67
- self.img_scale = scale_factor if isinstance(
68
- scale_factor, list) else [scale_factor]
69
- self.scale_key = 'scale_factor'
70
-
71
- self.flip = flip
72
- self.flip_direction = flip_direction if isinstance(
73
- flip_direction, list) else [flip_direction]
74
- assert mmcv.is_list_of(self.flip_direction, str)
75
- if not self.flip and self.flip_direction != ['horizontal']:
76
- warnings.warn(
77
- 'flip_direction has no effect when flip is set to False')
78
- if (self.flip
79
- and not any([t['type'] == 'RandomFlip' for t in transforms])):
80
- warnings.warn(
81
- 'flip has no effect when RandomFlip is not in transforms')
82
-
83
- def __call__(self, results):
84
- """Call function to apply test time augment transforms on results.
85
-
86
- Args:
87
- results (dict): Result dict contains the data to transform.
88
-
89
- Returns:
90
- dict[str: list]: The augmented data, where each value is wrapped
91
- into a list.
92
- """
93
-
94
- aug_data = []
95
- flip_args = [(False, None)]
96
- if self.flip:
97
- flip_args += [(True, direction)
98
- for direction in self.flip_direction]
99
- for scale in self.img_scale:
100
- for flip, direction in flip_args:
101
- _results = results.copy()
102
- _results[self.scale_key] = scale
103
- _results['flip'] = flip
104
- _results['flip_direction'] = direction
105
- data = self.transforms(_results)
106
- aug_data.append(data)
107
- # list of dict to dict of list
108
- aug_data_dict = {key: [] for key in aug_data[0]}
109
- for data in aug_data:
110
- for key, val in data.items():
111
- aug_data_dict[key].append(val)
112
- return aug_data_dict
113
-
114
- def __repr__(self):
115
- repr_str = self.__class__.__name__
116
- repr_str += f'(transforms={self.transforms}, '
117
- repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
118
- repr_str += f'flip_direction={self.flip_direction})'
119
- return repr_str
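The docstring above describes how a single input is expanded into every (scale, flip) combination at test time. A minimal sketch of that expansion loop, detached from mmcv/mmdet (plain dicts stand in for the real pipeline results):

```python
# Sketch of the scale x flip expansion performed by MultiScaleFlipAug.__call__.
# Plain dicts stand in for pipeline results; no mmcv/mmdet imports are needed.
img_scales = [(1333, 400), (1333, 800)]
flip = True
flip_directions = ['horizontal']

flip_args = [(False, None)]
if flip:
    flip_args += [(True, d) for d in flip_directions]

aug_data = []
for scale in img_scales:
    for do_flip, direction in flip_args:
        aug_data.append({'scale': scale, 'flip': do_flip, 'flip_direction': direction})

# Re-group the list of dicts into a dict of lists, as the real transform does.
aug_data_dict = {key: [d[key] for d in aug_data] for key in aug_data[0]}
print(aug_data_dict['scale'])  # [(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
print(aug_data_dict['flip'])   # [False, True, False, True]
```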
spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/dataloader.py DELETED
@@ -1,425 +0,0 @@
1
- import torch
2
- import torch.multiprocessing as multiprocessing
3
- from torch._C import _set_worker_signal_handlers, \
4
- _remove_worker_pids, _error_if_any_worker_fails
5
- try:
6
- from torch._C import _set_worker_pids
7
- except:
8
- from torch._C import _update_worker_pids as _set_worker_pids
9
- from .sampler import SequentialSampler, RandomSampler, BatchSampler
10
- import signal
11
- import collections
12
- import re
13
- import sys
14
- import threading
15
- import traceback
16
- from torch._six import string_classes, int_classes
17
- import numpy as np
18
-
19
- if sys.version_info[0] == 2:
20
- import Queue as queue
21
- else:
22
- import queue
23
-
24
-
25
- class ExceptionWrapper(object):
26
- r"Wraps an exception plus traceback to communicate across threads"
27
-
28
- def __init__(self, exc_info):
29
- self.exc_type = exc_info[0]
30
- self.exc_msg = "".join(traceback.format_exception(*exc_info))
31
-
32
-
33
- _use_shared_memory = False
34
- """Whether to use shared memory in default_collate"""
35
-
36
-
37
- def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id):
38
- global _use_shared_memory
39
- _use_shared_memory = True
40
-
41
- # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
42
- # module's handlers are executed after Python returns from C low-level
43
- # handlers, likely when the same fatal signal happened again already.
44
- # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1
45
- _set_worker_signal_handlers()
46
-
47
- torch.set_num_threads(1)
48
- torch.manual_seed(seed)
49
- np.random.seed(seed)
50
-
51
- if init_fn is not None:
52
- init_fn(worker_id)
53
-
54
- while True:
55
- r = index_queue.get()
56
- if r is None:
57
- break
58
- idx, batch_indices = r
59
- try:
60
- samples = collate_fn([dataset[i] for i in batch_indices])
61
- except Exception:
62
- data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
63
- else:
64
- data_queue.put((idx, samples))
65
-
66
-
67
- def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
68
- if pin_memory:
69
- torch.cuda.set_device(device_id)
70
-
71
- while True:
72
- try:
73
- r = in_queue.get()
74
- except Exception:
75
- if done_event.is_set():
76
- return
77
- raise
78
- if r is None:
79
- break
80
- if isinstance(r[1], ExceptionWrapper):
81
- out_queue.put(r)
82
- continue
83
- idx, batch = r
84
- try:
85
- if pin_memory:
86
- batch = pin_memory_batch(batch)
87
- except Exception:
88
- out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
89
- else:
90
- out_queue.put((idx, batch))
91
-
92
- numpy_type_map = {
93
- 'float64': torch.DoubleTensor,
94
- 'float32': torch.FloatTensor,
95
- 'float16': torch.HalfTensor,
96
- 'int64': torch.LongTensor,
97
- 'int32': torch.IntTensor,
98
- 'int16': torch.ShortTensor,
99
- 'int8': torch.CharTensor,
100
- 'uint8': torch.ByteTensor,
101
- }
102
-
103
-
104
- def default_collate(batch):
105
- "Puts each data field into a tensor with outer dimension batch size"
106
-
107
- error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
108
- elem_type = type(batch[0])
109
- if torch.is_tensor(batch[0]):
110
- out = None
111
- if _use_shared_memory:
112
- # If we're in a background process, concatenate directly into a
113
- # shared memory tensor to avoid an extra copy
114
- numel = sum([x.numel() for x in batch])
115
- storage = batch[0].storage()._new_shared(numel)
116
- out = batch[0].new(storage)
117
- return torch.stack(batch, 0, out=out)
118
- elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
119
- and elem_type.__name__ != 'string_':
120
- elem = batch[0]
121
- if elem_type.__name__ == 'ndarray':
122
- # array of string classes and object
123
- if re.search('[SaUO]', elem.dtype.str) is not None:
124
- raise TypeError(error_msg.format(elem.dtype))
125
-
126
- return torch.stack([torch.from_numpy(b) for b in batch], 0)
127
- if elem.shape == (): # scalars
128
- py_type = float if elem.dtype.name.startswith('float') else int
129
- return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
130
- elif isinstance(batch[0], int_classes):
131
- return torch.LongTensor(batch)
132
- elif isinstance(batch[0], float):
133
- return torch.DoubleTensor(batch)
134
- elif isinstance(batch[0], string_classes):
135
- return batch
136
- elif isinstance(batch[0], collections.Mapping):
137
- return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
138
- elif isinstance(batch[0], collections.Sequence):
139
- transposed = zip(*batch)
140
- return [default_collate(samples) for samples in transposed]
141
-
142
- raise TypeError((error_msg.format(type(batch[0]))))
143
-
144
-
145
- def pin_memory_batch(batch):
146
- if torch.is_tensor(batch):
147
- return batch.pin_memory()
148
- elif isinstance(batch, string_classes):
149
- return batch
150
- elif isinstance(batch, collections.Mapping):
151
- return {k: pin_memory_batch(sample) for k, sample in batch.items()}
152
- elif isinstance(batch, collections.Sequence):
153
- return [pin_memory_batch(sample) for sample in batch]
154
- else:
155
- return batch
156
-
157
-
158
- _SIGCHLD_handler_set = False
159
- """Whether SIGCHLD handler is set for DataLoader worker failures. Only one
160
- handler needs to be set for all DataLoaders in a process."""
161
-
162
-
163
- def _set_SIGCHLD_handler():
164
- # Windows doesn't support SIGCHLD handler
165
- if sys.platform == 'win32':
166
- return
167
- # can't set signal in child threads
168
- if not isinstance(threading.current_thread(), threading._MainThread):
169
- return
170
- global _SIGCHLD_handler_set
171
- if _SIGCHLD_handler_set:
172
- return
173
- previous_handler = signal.getsignal(signal.SIGCHLD)
174
- if not callable(previous_handler):
175
- previous_handler = None
176
-
177
- def handler(signum, frame):
178
- # This following call uses `waitid` with WNOHANG from C side. Therefore,
179
- # Python can still get and update the process status successfully.
180
- _error_if_any_worker_fails()
181
- if previous_handler is not None:
182
- previous_handler(signum, frame)
183
-
184
- signal.signal(signal.SIGCHLD, handler)
185
- _SIGCHLD_handler_set = True
186
-
187
-
188
- class DataLoaderIter(object):
189
- "Iterates once over the DataLoader's dataset, as specified by the sampler"
190
-
191
- def __init__(self, loader):
192
- self.dataset = loader.dataset
193
- self.collate_fn = loader.collate_fn
194
- self.batch_sampler = loader.batch_sampler
195
- self.num_workers = loader.num_workers
196
- self.pin_memory = loader.pin_memory and torch.cuda.is_available()
197
- self.timeout = loader.timeout
198
- self.done_event = threading.Event()
199
-
200
- self.sample_iter = iter(self.batch_sampler)
201
-
202
- if self.num_workers > 0:
203
- self.worker_init_fn = loader.worker_init_fn
204
- self.index_queue = multiprocessing.SimpleQueue()
205
- self.worker_result_queue = multiprocessing.SimpleQueue()
206
- self.batches_outstanding = 0
207
- self.worker_pids_set = False
208
- self.shutdown = False
209
- self.send_idx = 0
210
- self.rcvd_idx = 0
211
- self.reorder_dict = {}
212
-
213
- base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0]
214
- self.workers = [
215
- multiprocessing.Process(
216
- target=_worker_loop,
217
- args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn,
218
- base_seed + i, self.worker_init_fn, i))
219
- for i in range(self.num_workers)]
220
-
221
- if self.pin_memory or self.timeout > 0:
222
- self.data_queue = queue.Queue()
223
- if self.pin_memory:
224
- maybe_device_id = torch.cuda.current_device()
225
- else:
226
- # do not initialize cuda context if not necessary
227
- maybe_device_id = None
228
- self.worker_manager_thread = threading.Thread(
229
- target=_worker_manager_loop,
230
- args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory,
231
- maybe_device_id))
232
- self.worker_manager_thread.daemon = True
233
- self.worker_manager_thread.start()
234
- else:
235
- self.data_queue = self.worker_result_queue
236
-
237
- for w in self.workers:
238
- w.daemon = True # ensure that the worker exits on process exit
239
- w.start()
240
-
241
- _set_worker_pids(id(self), tuple(w.pid for w in self.workers))
242
- _set_SIGCHLD_handler()
243
- self.worker_pids_set = True
244
-
245
- # prime the prefetch loop
246
- for _ in range(2 * self.num_workers):
247
- self._put_indices()
248
-
249
- def __len__(self):
250
- return len(self.batch_sampler)
251
-
252
- def _get_batch(self):
253
- if self.timeout > 0:
254
- try:
255
- return self.data_queue.get(timeout=self.timeout)
256
- except queue.Empty:
257
- raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout))
258
- else:
259
- return self.data_queue.get()
260
-
261
- def __next__(self):
262
- if self.num_workers == 0: # same-process loading
263
- indices = next(self.sample_iter) # may raise StopIteration
264
- batch = self.collate_fn([self.dataset[i] for i in indices])
265
- if self.pin_memory:
266
- batch = pin_memory_batch(batch)
267
- return batch
268
-
269
- # check if the next sample has already been generated
270
- if self.rcvd_idx in self.reorder_dict:
271
- batch = self.reorder_dict.pop(self.rcvd_idx)
272
- return self._process_next_batch(batch)
273
-
274
- if self.batches_outstanding == 0:
275
- self._shutdown_workers()
276
- raise StopIteration
277
-
278
- while True:
279
- assert (not self.shutdown and self.batches_outstanding > 0)
280
- idx, batch = self._get_batch()
281
- self.batches_outstanding -= 1
282
- if idx != self.rcvd_idx:
283
- # store out-of-order samples
284
- self.reorder_dict[idx] = batch
285
- continue
286
- return self._process_next_batch(batch)
287
-
288
- next = __next__ # Python 2 compatibility
289
-
290
- def __iter__(self):
291
- return self
292
-
293
- def _put_indices(self):
294
- assert self.batches_outstanding < 2 * self.num_workers
295
- indices = next(self.sample_iter, None)
296
- if indices is None:
297
- return
298
- self.index_queue.put((self.send_idx, indices))
299
- self.batches_outstanding += 1
300
- self.send_idx += 1
301
-
302
- def _process_next_batch(self, batch):
303
- self.rcvd_idx += 1
304
- self._put_indices()
305
- if isinstance(batch, ExceptionWrapper):
306
- raise batch.exc_type(batch.exc_msg)
307
- return batch
308
-
309
- def __getstate__(self):
310
- # TODO: add limited pickling support for sharing an iterator
311
- # across multiple threads for HOGWILD.
312
- # Probably the best way to do this is by moving the sample pushing
313
- # to a separate thread and then just sharing the data queue
314
- # but signalling the end is tricky without a non-blocking API
315
- raise NotImplementedError("DataLoaderIterator cannot be pickled")
316
-
317
- def _shutdown_workers(self):
318
- try:
319
- if not self.shutdown:
320
- self.shutdown = True
321
- self.done_event.set()
322
- # if worker_manager_thread is waiting to put
323
- while not self.data_queue.empty():
324
- self.data_queue.get()
325
- for _ in self.workers:
326
- self.index_queue.put(None)
327
- # done_event should be sufficient to exit worker_manager_thread,
328
- # but be safe here and put another None
329
- self.worker_result_queue.put(None)
330
- finally:
331
- # removes pids no matter what
332
- if self.worker_pids_set:
333
- _remove_worker_pids(id(self))
334
- self.worker_pids_set = False
335
-
336
- def __del__(self):
337
- if self.num_workers > 0:
338
- self._shutdown_workers()
339
-
340
-
341
- class DataLoader(object):
342
- """
343
- Data loader. Combines a dataset and a sampler, and provides
344
- single- or multi-process iterators over the dataset.
345
-
346
- Arguments:
347
- dataset (Dataset): dataset from which to load the data.
348
- batch_size (int, optional): how many samples per batch to load
349
- (default: 1).
350
- shuffle (bool, optional): set to ``True`` to have the data reshuffled
351
- at every epoch (default: False).
352
- sampler (Sampler, optional): defines the strategy to draw samples from
353
- the dataset. If specified, ``shuffle`` must be False.
354
- batch_sampler (Sampler, optional): like sampler, but returns a batch of
355
- indices at a time. Mutually exclusive with batch_size, shuffle,
356
- sampler, and drop_last.
357
- num_workers (int, optional): how many subprocesses to use for data
358
- loading. 0 means that the data will be loaded in the main process.
359
- (default: 0)
360
- collate_fn (callable, optional): merges a list of samples to form a mini-batch.
361
- pin_memory (bool, optional): If ``True``, the data loader will copy tensors
362
- into CUDA pinned memory before returning them.
363
- drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
364
- if the dataset size is not divisible by the batch size. If ``False`` and
365
- the size of dataset is not divisible by the batch size, then the last batch
366
- will be smaller. (default: False)
367
- timeout (numeric, optional): if positive, the timeout value for collecting a batch
368
- from workers. Should always be non-negative. (default: 0)
369
- worker_init_fn (callable, optional): If not None, this will be called on each
370
- worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
371
- input, after seeding and before data loading. (default: None)
372
-
373
- .. note:: By default, each worker will have its PyTorch seed set to
374
- ``base_seed + worker_id``, where ``base_seed`` is a long generated
375
- by main process using its RNG. You may use ``torch.initial_seed()`` to access
376
- this value in :attr:`worker_init_fn`, which can be used to set other seeds
377
- (e.g. NumPy) before data loading.
378
-
379
- .. warning:: If ``spawn'' start method is used, :attr:`worker_init_fn` cannot be an
380
- unpicklable object, e.g., a lambda function.
381
- """
382
-
383
- def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,
384
- num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False,
385
- timeout=0, worker_init_fn=None):
386
- self.dataset = dataset
387
- self.batch_size = batch_size
388
- self.num_workers = num_workers
389
- self.collate_fn = collate_fn
390
- self.pin_memory = pin_memory
391
- self.drop_last = drop_last
392
- self.timeout = timeout
393
- self.worker_init_fn = worker_init_fn
394
-
395
- if timeout < 0:
396
- raise ValueError('timeout option should be non-negative')
397
-
398
- if batch_sampler is not None:
399
- if batch_size > 1 or shuffle or sampler is not None or drop_last:
400
- raise ValueError('batch_sampler is mutually exclusive with '
401
- 'batch_size, shuffle, sampler, and drop_last')
402
-
403
- if sampler is not None and shuffle:
404
- raise ValueError('sampler is mutually exclusive with shuffle')
405
-
406
- if self.num_workers < 0:
407
- raise ValueError('num_workers cannot be negative; '
408
- 'use num_workers=0 to disable multiprocessing.')
409
-
410
- if batch_sampler is None:
411
- if sampler is None:
412
- if shuffle:
413
- sampler = RandomSampler(dataset)
414
- else:
415
- sampler = SequentialSampler(dataset)
416
- batch_sampler = BatchSampler(sampler, batch_size, drop_last)
417
-
418
- self.sampler = sampler
419
- self.batch_sampler = batch_sampler
420
-
421
- def __iter__(self):
422
- return DataLoaderIter(self)
423
-
424
- def __len__(self):
425
- return len(self.batch_sampler)
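The `default_collate` above recurses over tensors, numbers, strings, mappings, and sequences to batch a list of samples. A simplified sketch of that recursion for the common tensor/number/dict cases (it deliberately omits the shared-memory and numpy branches of the original):

```python
# Simplified sketch of default_collate for tensors, numbers, and dicts only.
import torch

def collate(batch):
    elem = batch[0]
    if torch.is_tensor(elem):
        return torch.stack(batch, 0)
    if isinstance(elem, (int, float)):
        return torch.tensor(batch)
    if isinstance(elem, dict):
        return {k: collate([d[k] for d in batch]) for k in elem}
    raise TypeError('unsupported element type: %r' % type(elem))

batch = [{'img': torch.zeros(3, 4, 4), 'label': 1},
         {'img': torch.ones(3, 4, 4), 'label': 0}]
out = collate(batch)
print(out['img'].shape, out['label'])  # torch.Size([2, 3, 4, 4]) tensor([1, 0])
```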
spaces/CVPR/regionclip-demo/detectron2/utils/file_io.py DELETED
@@ -1,37 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler
- from iopath.common.file_io import PathManager as PathManagerBase
-
- __all__ = ["PathManager", "PathHandler"]
-
-
- PathManager = PathManagerBase()
- """
- This is a detectron2 project-specific PathManager.
- We try to stay away from global PathManager in fvcore as it
- introduces potential conflicts among other libraries.
- """
-
-
- class Detectron2Handler(PathHandler):
-     """
-     Resolve anything that's hosted under detectron2's namespace.
-     """
-
-     PREFIX = "detectron2://"
-     S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
-
-     def _get_supported_prefixes(self):
-         return [self.PREFIX]
-
-     def _get_local_path(self, path, **kwargs):
-         name = path[len(self.PREFIX) :]
-         return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs)
-
-     def _open(self, path, mode="r", **kwargs):
-         return PathManager.open(self._get_local_path(path), mode, **kwargs)
-
-
- PathManager.register_handler(HTTPURLHandler())
- PathManager.register_handler(OneDrivePathHandler())
- PathManager.register_handler(Detectron2Handler())
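The handler above only rewrites the `detectron2://` prefix to the public download URL before delegating back to `PathManager`. A minimal sketch of that rewrite without the iopath dependency (the relative path in the example is a placeholder):

```python
# Sketch of the prefix rewrite done by Detectron2Handler._get_local_path.
PREFIX = "detectron2://"
S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"

def resolve(path: str) -> str:
    assert path.startswith(PREFIX), path
    return S3_DETECTRON2_PREFIX + path[len(PREFIX):]

print(resolve("detectron2://some/relative/model_final.pkl"))
# -> https://dl.fbaipublicfiles.com/detectron2/some/relative/model_final.pkl
```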
spaces/ChallengeHub/Chinese-LangChain/corpus/zh_wikipedia/clean_corpus.py DELETED
@@ -1,88 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding:utf-8 _*-
3
- """
4
- @author:quincy qiang
5
- @license: Apache Licence
6
- @file: clean_corpus.py.py
7
- @time: 2023/04/19
8
- @contact: [email protected]
9
- @software: PyCharm
10
- @description: coding..
11
- """
12
- """
13
- FILE : clean_corpus.py
14
- FUNCTION : None
15
- """
16
- import sys
17
- import os
18
- from optparse import OptionParser
19
-
20
-
21
- class Clean(object):
22
- def __init__(self, infile, outfile):
23
- self.infile = infile
24
- self.outfile = outfile
25
- self.corpus = []
26
- self.remove_corpus = []
27
- self.read(self.infile)
28
- self.remove(self.corpus)
29
- self.write(self.remove_corpus, self.outfile)
30
-
31
- def read(self, path):
32
- print("reading now......")
33
- if os.path.isfile(path) is False:
34
- print("path is not a file")
35
- exit()
36
- now_line = 0
37
- with open(path, encoding="UTF-8") as f:
38
- for line in f:
39
- now_line += 1
40
- line = line.replace("\n", "").replace("\t", "")
41
- self.corpus.append(line)
42
- print("read finished.")
43
-
44
- def remove(self, list):
45
- print("removing now......")
46
- for line in list:
47
- re_list = []
48
- for word in line:
49
- if self.is_chinese(word) is False:
50
- continue
51
- re_list.append(word)
52
- self.remove_corpus.append("".join(re_list))
53
- print("remove finished.")
54
-
55
- def write(self, list, path):
56
- print("writing now......")
57
- if os.path.exists(path):
58
- os.remove(path)
59
- file = open(path, encoding="UTF-8", mode="w")
60
- for line in list:
61
- file.writelines(line + "\n")
62
- file.close()
63
- print("writing finished")
64
-
65
- def is_chinese(self, uchar):
66
- """判断一个unicode是否是汉字"""
67
- if (uchar >= u'\u4e00') and (uchar <= u'\u9fa5'):
68
- return True
69
- else:
70
- return False
71
-
72
-
73
- if __name__ == "__main__":
74
- print("clean corpus")
75
-
76
- parser = OptionParser()
77
- parser.add_option("--input", dest="input", default="", help="input file")
78
- parser.add_option("--output", dest="output", default="", help="output file")
79
- (options, args) = parser.parse_args()
80
-
81
- input = options.input
82
- output = options.output
83
-
84
- try:
85
- Clean(infile=input, outfile=output)
86
- print("All Finished.")
87
- except Exception as err:
88
- print(err)
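The core of the cleaner above is the `is_chinese` check, which keeps only characters in the CJK Unified Ideographs range U+4E00..U+9FA5. A minimal sketch of the same filter applied to a single string:

```python
# Sketch of the CJK-only filter used by Clean.remove / Clean.is_chinese.
def is_chinese(ch: str) -> bool:
    return u'\u4e00' <= ch <= u'\u9fa5'

line = "Wikipedia 维基百科 2023!"
print(''.join(ch for ch in line if is_chinese(ch)))  # 维基百科
```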
spaces/Curranj/Regex_Generator/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Regex
- emoji: 📉
- colorFrom: gray
- colorTo: red
- sdk: gradio
- sdk_version: 3.1.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-34e368b6.js DELETED
@@ -1,2 +0,0 @@
1
- import{S as P,e as Q,s as R,N as I,O as U,P as G,K as k,U as z,p as j,M as C,Q as A,R as H,n as D,A as B,a1 as V,B as W,am as X,k as S,o as T,z as h,v,x as q,E as Y,ae as Z,h as F,j as K,q as p,r as y,u as x,y as $,t as M,F as N}from"./index-1d65707a.js";/* empty css */import{B as ee}from"./Button-f155035a.js";import{I as te}from"./Info-7c6961ef.js";function ae(n){let e,t,a,l,u,o,c;return{c(){e=I("label"),t=I("input"),a=U(),l=I("span"),u=G(n[2]),t.disabled=n[1],k(t,"type","checkbox"),k(t,"name","test"),k(t,"data-testid","checkbox"),k(t,"class","svelte-1ojmf70"),k(l,"class","ml-2 svelte-1ojmf70"),k(e,"class","svelte-1ojmf70"),z(e,"disabled",n[1])},m(_,d){j(_,e,d),C(e,t),t.checked=n[0],C(e,a),C(e,l),C(l,u),o||(c=[A(t,"change",n[5]),A(t,"input",n[6])],o=!0)},p(_,[d]){d&2&&(t.disabled=_[1]),d&1&&(t.checked=_[0]),d&4&&H(u,_[2]),d&2&&z(e,"disabled",_[1])},i:D,o:D,d(_){_&&B(e),o=!1,V(c)}}}function ne(n,e,t){let{value:a}=e,{value_is_output:l=!1}=e,{disabled:u=!1}=e,{label:o}=e;const c=W();function _(){c("change",a),l||c("input")}X(()=>{t(4,l=!1)});function d(){a=this.checked,t(0,a)}const f=m=>{t(0,a=m.currentTarget.checked),c("select",{index:0,value:o,selected:m.currentTarget.checked})};return n.$$set=m=>{"value"in m&&t(0,a=m.value),"value_is_output"in m&&t(4,l=m.value_is_output),"disabled"in m&&t(1,u=m.disabled),"label"in m&&t(2,o=m.label)},n.$$.update=()=>{n.$$.dirty&1&&_()},[a,u,o,c,l,d,f]}class le extends P{constructor(e){super(),Q(this,e,ne,ae,R,{value:0,value_is_output:4,disabled:1,label:2})}}function O(n){let e,t;return e=new te({props:{$$slots:{default:[se]},$$scope:{ctx:n}}}),{c(){S(e.$$.fragment)},m(a,l){T(e,a,l),t=!0},p(a,l){const u={};l&131136&&(u.$$scope={dirty:l,ctx:a}),e.$set(u)},i(a){t||(h(e.$$.fragment,a),t=!0)},o(a){v(e.$$.fragment,a),t=!1},d(a){q(e,a)}}}function se(n){let e;return{c(){e=G(n[6])},m(t,a){j(t,e,a)},p(t,a){a&64&&H(e,t[6])},d(t){t&&B(e)}}}function ie(n){let e,t,a,l,u,o,c;const _=[n[11]];let d={};for(let s=0;s<_.length;s+=1)d=Y(d,_[s]);e=new Z({props:d});let f=n[6]&&O(n);function m(s){n[12](s)}function w(s){n[13](s)}let g={label:n[5],disabled:n[7]==="static"};return n[0]!==void 0&&(g.value=n[0]),n[1]!==void 0&&(g.value_is_output=n[1]),l=new le({props:g}),F.push(()=>K(l,"value",m)),F.push(()=>K(l,"value_is_output",w)),l.$on("change",n[14]),l.$on("input",n[15]),l.$on("select",n[16]),{c(){S(e.$$.fragment),t=U(),f&&f.c(),a=U(),S(l.$$.fragment)},m(s,b){T(e,s,b),j(s,t,b),f&&f.m(s,b),j(s,a,b),T(l,s,b),c=!0},p(s,b){const E=b&2048?p(_,[y(s[11])]):{};e.$set(E),s[6]?f?(f.p(s,b),b&64&&h(f,1)):(f=O(s),f.c(),h(f,1),f.m(a.parentNode,a)):f&&(x(),v(f,1,1,()=>{f=null}),$());const r={};b&32&&(r.label=s[5]),b&128&&(r.disabled=s[7]==="static"),!u&&b&1&&(u=!0,r.value=s[0],M(()=>u=!1)),!o&&b&2&&(o=!0,r.value_is_output=s[1],M(()=>o=!1)),l.$set(r)},i(s){c||(h(e.$$.fragment,s),h(f),h(l.$$.fragment,s),c=!0)},o(s){v(e.$$.fragment,s),v(f),v(l.$$.fragment,s),c=!1},d(s){s&&(B(t),B(a)),q(e,s),f&&f.d(s),q(l,s)}}}function ue(n){let e,t;return e=new ee({props:{visible:n[4],elem_id:n[2],elem_classes:n[3],container:n[8],scale:n[9],min_width:n[10],$$slots:{default:[ie]},$$scope:{ctx:n}}}),{c(){S(e.$$.fragment)},m(a,l){T(e,a,l),t=!0},p(a,[l]){const u={};l&16&&(u.visible=a[4]),l&4&&(u.elem_id=a[2]),l&8&&(u.elem_classes=a[3]),l&256&&(u.container=a[8]),l&512&&(u.scale=a[9]),l&1024&&(u.min_width=a[10]),l&133347&&(u.$$scope={dirty:l,ctx:a}),e.$set(u)},i(a){t||(h(e.$$.fragment,a),t=!0)},o(a){v(e.$$.fragment,a),t=!1},d(a){q(e,a)}}}function 
fe(n,e,t){let{elem_id:a=""}=e,{elem_classes:l=[]}=e,{visible:u=!0}=e,{value:o=!1}=e,{value_is_output:c=!1}=e,{label:_="Checkbox"}=e,{info:d=void 0}=e,{mode:f}=e,{container:m=!0}=e,{scale:w=null}=e,{min_width:g=void 0}=e,{loading_status:s}=e;function b(i){o=i,t(0,o)}function E(i){c=i,t(1,c)}function r(i){N.call(this,n,i)}function J(i){N.call(this,n,i)}function L(i){N.call(this,n,i)}return n.$$set=i=>{"elem_id"in i&&t(2,a=i.elem_id),"elem_classes"in i&&t(3,l=i.elem_classes),"visible"in i&&t(4,u=i.visible),"value"in i&&t(0,o=i.value),"value_is_output"in i&&t(1,c=i.value_is_output),"label"in i&&t(5,_=i.label),"info"in i&&t(6,d=i.info),"mode"in i&&t(7,f=i.mode),"container"in i&&t(8,m=i.container),"scale"in i&&t(9,w=i.scale),"min_width"in i&&t(10,g=i.min_width),"loading_status"in i&&t(11,s=i.loading_status)},[o,c,a,l,u,_,d,f,m,w,g,s,b,E,r,J,L]}class ce extends P{constructor(e){super(),Q(this,e,fe,ue,R,{elem_id:2,elem_classes:3,visible:4,value:0,value_is_output:1,label:5,info:6,mode:7,container:8,scale:9,min_width:10,loading_status:11})}}const be=ce,re=["static","dynamic"],he=n=>({type:{payload:"boolean"},description:{payload:"checked status"},example_data:n.value});export{be as Component,he as document,re as modes};
2
- //# sourceMappingURL=index-34e368b6.js.map
spaces/DaleChen/AutoGPT/autogpt/config/ai_config.py DELETED
@@ -1,121 +0,0 @@
1
- # sourcery skip: do-not-use-staticmethod
2
- """
3
- A module that contains the AIConfig class object that contains the configuration
4
- """
5
- from __future__ import annotations
6
-
7
- import os
8
- from typing import Type
9
-
10
- import yaml
11
-
12
-
13
- class AIConfig:
14
- """
15
- A class object that contains the configuration information for the AI
16
-
17
- Attributes:
18
- ai_name (str): The name of the AI.
19
- ai_role (str): The description of the AI's role.
20
- ai_goals (list): The list of objectives the AI is supposed to complete.
21
- """
22
-
23
- def __init__(
24
- self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
25
- ) -> None:
26
- """
27
- Initialize a class instance
28
-
29
- Parameters:
30
- ai_name (str): The name of the AI.
31
- ai_role (str): The description of the AI's role.
32
- ai_goals (list): The list of objectives the AI is supposed to complete.
33
- Returns:
34
- None
35
- """
36
- if ai_goals is None:
37
- ai_goals = []
38
- self.ai_name = ai_name
39
- self.ai_role = ai_role
40
- self.ai_goals = ai_goals
41
-
42
- # Soon this will go in a folder where it remembers more stuff about the run(s)
43
- SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
44
-
45
- @staticmethod
46
- def load(config_file: str = SAVE_FILE) -> "AIConfig":
47
- """
48
- Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
49
- yaml file if yaml file exists,
50
- else returns class with no parameters.
51
-
52
- Parameters:
53
- config_file (str): The path to the config yaml file.
54
- DEFAULT: "../ai_settings.yaml"
55
-
56
- Returns:
57
- cls (object): An instance of given cls object
58
- """
59
-
60
- try:
61
- with open(config_file, encoding="utf-8") as file:
62
- config_params = yaml.load(file, Loader=yaml.FullLoader)
63
- except FileNotFoundError:
64
- config_params = {}
65
-
66
- ai_name = config_params.get("ai_name", "")
67
- ai_role = config_params.get("ai_role", "")
68
- ai_goals = config_params.get("ai_goals", [])
69
- # type: Type[AIConfig]
70
- return AIConfig(ai_name, ai_role, ai_goals)
71
-
72
- def save(self, config_file: str = SAVE_FILE) -> None:
73
- """
74
- Saves the class parameters to the specified file yaml file path as a yaml file.
75
-
76
- Parameters:
77
- config_file(str): The path to the config yaml file.
78
- DEFAULT: "../ai_settings.yaml"
79
-
80
- Returns:
81
- None
82
- """
83
-
84
- config = {
85
- "ai_name": self.ai_name,
86
- "ai_role": self.ai_role,
87
- "ai_goals": self.ai_goals,
88
- }
89
- with open(config_file, "w", encoding="utf-8") as file:
90
- yaml.dump(config, file, allow_unicode=True)
91
-
92
- def construct_full_prompt(self) -> str:
93
- """
94
- Returns a prompt to the user with the class information in an organized fashion.
95
-
96
- Parameters:
97
- None
98
-
99
- Returns:
100
- full_prompt (str): A string containing the initial prompt for the user
101
- including the ai_name, ai_role and ai_goals.
102
- """
103
-
104
- prompt_start = (
105
- "Your decisions must always be made independently without"
106
- " seeking user assistance. Play to your strengths as an LLM and pursue"
107
- " simple strategies with no legal complications."
108
- ""
109
- )
110
-
111
- from autogpt.prompt import get_prompt
112
-
113
- # Construct full prompt
114
- full_prompt = (
115
- f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
116
- )
117
- for i, goal in enumerate(self.ai_goals):
118
- full_prompt += f"{i+1}. {goal}\n"
119
-
120
- full_prompt += f"\n\n{get_prompt()}"
121
- return full_prompt
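The class above is a thin YAML round-trip around three fields plus a prompt builder. A hedged usage sketch; the name, role, goals, and file path are placeholders, and running it requires the AutoGPT package that provides `autogpt.prompt.get_prompt`:

```python
# Hypothetical usage of AIConfig; values and the output path are placeholders.
from autogpt.config.ai_config import AIConfig

config = AIConfig(
    ai_name="DemoGPT",
    ai_role="an assistant that summarizes text files",
    ai_goals=["Read input.txt", "Write a summary to output.txt"],
)
config.save("ai_settings.yaml")            # dumps the three fields as YAML
restored = AIConfig.load("ai_settings.yaml")
print(restored.construct_full_prompt())    # name, role, numbered goals, then get_prompt()
```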
spaces/Detomo/ai-comic-generation/src/lib/uploadToHuggingFace.ts DELETED
@@ -1,16 +0,0 @@
- export async function uploadToHuggingFace(file: File) {
-   const UPLOAD_URL = 'https://huggingface.co/uploads'
-
-   const response = await fetch(UPLOAD_URL, {
-     method: 'POST',
-     headers: {
-       'Content-Type': file.type,
-       'X-Requested-With': 'XMLHttpRequest',
-     },
-     body: file, /// <- File inherits from Blob
-   })
-
-   const url = await response.text()
-
-   return url
- }
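For reference, a rough Python equivalent of the upload helper above, using `requests` and mirroring the same headers and raw-body POST; the file path is a placeholder:

```python
# Hedged Python sketch mirroring uploadToHuggingFace(); "image.png" is a placeholder.
import mimetypes
import requests

UPLOAD_URL = "https://huggingface.co/uploads"

def upload_to_huggingface(path: str) -> str:
    mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
    with open(path, "rb") as f:
        response = requests.post(
            UPLOAD_URL,
            headers={"Content-Type": mime, "X-Requested-With": "XMLHttpRequest"},
            data=f,  # raw body, like passing the File/Blob to fetch()
        )
    return response.text  # the uploaded file's URL

# print(upload_to_huggingface("image.png"))
```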
spaces/DiffusionArtco/Diffusion50/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: ImagineAI Imagine Generator
- emoji: 💩
- colorFrom: yellow
- colorTo: pink
- sdk: gradio
- sdk_version: 3.24.1
- app_file: app.py
- pinned: false
- duplicated_from: DiffusionArtco/AnimeTop50
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DragGan/DragGan-Inversion/gen_images.py DELETED
@@ -1,160 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Generate images using pretrained network pickle."""
10
-
11
- import os
12
- import re
13
- from typing import List, Optional, Tuple, Union
14
-
15
- import click
16
- import dnnlib
17
- import numpy as np
18
- import PIL.Image
19
- import torch
20
-
21
- import legacy
22
-
23
- # ----------------------------------------------------------------------------
24
-
25
-
26
- def parse_range(s: Union[str, List]) -> List[int]:
27
- '''Parse a comma separated list of numbers or ranges and return a list of ints.
28
-
29
- Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
30
- '''
31
- if isinstance(s, list):
32
- return s
33
- ranges = []
34
- range_re = re.compile(r'^(\d+)-(\d+)$')
35
- for p in s.split(','):
36
- m = range_re.match(p)
37
- if m:
38
- ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
39
- else:
40
- ranges.append(int(p))
41
- return ranges
42
-
43
- # ----------------------------------------------------------------------------
44
-
45
-
46
- def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
47
- '''Parse a floating point 2-vector of syntax 'a,b'.
48
-
49
- Example:
50
- '0,1' returns (0,1)
51
- '''
52
- if isinstance(s, tuple):
53
- return s
54
- parts = s.split(',')
55
- if len(parts) == 2:
56
- return (float(parts[0]), float(parts[1]))
57
- raise ValueError(f'cannot parse 2-vector {s}')
58
-
59
- # ----------------------------------------------------------------------------
60
-
61
-
62
- def make_transform(translate: Tuple[float, float], angle: float):
63
- m = np.eye(3)
64
- s = np.sin(angle/360.0*np.pi*2)
65
- c = np.cos(angle/360.0*np.pi*2)
66
- m[0][0] = c
67
- m[0][1] = s
68
- m[0][2] = translate[0]
69
- m[1][0] = -s
70
- m[1][1] = c
71
- m[1][2] = translate[1]
72
- return m
73
-
74
- # ----------------------------------------------------------------------------
75
-
76
-
77
- @click.command()
78
- @click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
79
- @click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', required=True)
80
- @click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
81
- @click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
82
- @click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
83
- @click.option('--translate', help='Translate XY-coordinate (e.g. \'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2')
84
- @click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE')
85
- @click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
86
- def generate_images(
87
- network_pkl: str,
88
- seeds: List[int],
89
- truncation_psi: float,
90
- noise_mode: str,
91
- outdir: str,
92
- translate: Tuple[float, float],
93
- rotate: float,
94
- class_idx: Optional[int]
95
- ):
96
- """Generate images using pretrained network pickle.
97
-
98
- Examples:
99
-
100
- \b
101
- # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left).
102
- python gen_images.py --outdir=out --trunc=1 --seeds=2 \\
103
- --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
104
-
105
- \b
106
- # Generate uncurated images with truncation using the MetFaces-U dataset
107
- python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\
108
- --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl
109
- """
110
-
111
- print('Loading networks from "%s"...' % network_pkl)
112
- device = torch.device('cuda')
113
- with dnnlib.util.open_url(network_pkl) as f:
114
- G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
115
- # import pickle
116
- # G = legacy.load_network_pkl(f)
117
- # output = open('checkpoints/stylegan2-car-config-f-pt.pkl', 'wb')
118
- # pickle.dump(G, output)
119
-
120
- os.makedirs(outdir, exist_ok=True)
121
-
122
- # Labels.
123
- label = torch.zeros([1, G.c_dim], device=device)
124
- if G.c_dim != 0:
125
- if class_idx is None:
126
- raise click.ClickException(
127
- 'Must specify class label with --class when using a conditional network')
128
- label[:, class_idx] = 1
129
- else:
130
- if class_idx is not None:
131
- print('warn: --class=lbl ignored when running on an unconditional network')
132
-
133
- # Generate images.
134
- for seed_idx, seed in enumerate(seeds):
135
- print('Generating image for seed %d (%d/%d) ...' %
136
- (seed, seed_idx, len(seeds)))
137
- z = torch.from_numpy(np.random.RandomState(
138
- seed).randn(1, G.z_dim)).to(device)
139
-
140
- # Construct an inverse rotation/translation matrix and pass to the generator. The
141
- # generator expects this matrix as an inverse to avoid potentially failing numerical
142
- # operations in the network.
143
- if hasattr(G.synthesis, 'input'):
144
- m = make_transform(translate, rotate)
145
- m = np.linalg.inv(m)
146
- G.synthesis.input.transform.copy_(torch.from_numpy(m))
147
-
148
- img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
149
- img = (img.permute(0, 2, 3, 1) * 127.5 +
150
- 128).clamp(0, 255).to(torch.uint8)
151
- PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(
152
- f'{outdir}/seed{seed:04d}.png')
153
-
154
-
155
- # ----------------------------------------------------------------------------
156
-
157
- if __name__ == "__main__":
158
- generate_images() # pylint: disable=no-value-for-parameter
159
-
160
- # ----------------------------------------------------------------------------
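One note on the `parse_range` helper in the deleted script above: because it adds 1 to the upper bound before calling `range()`, a span such as `--seeds=600-605` is inclusive of both endpoints and expands to six seeds. A small standalone check follows; the function is re-stated here only so the snippet runs without the deleted module.

```python
import re
from typing import List, Union


def parse_range(s: Union[str, List]) -> List[int]:
    """Re-statement of the deleted helper: 'a-b' ranges include both endpoints."""
    if isinstance(s, list):
        return s
    ranges: List[int] = []
    range_re = re.compile(r'^(\d+)-(\d+)$')
    for p in s.split(','):
        m = range_re.match(p)
        if m:
            ranges.extend(range(int(m.group(1)), int(m.group(2)) + 1))
        else:
            ranges.append(int(p))
    return ranges


# The '--seeds=600-605' docstring example expands to six seeds, not five.
assert parse_range('600-605') == [600, 601, 602, 603, 604, 605]
assert parse_range('1,2,5-10') == [1, 2, 5, 6, 7, 8, 9, 10]
```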