Commit a27da64
Parent(s): 1b56897

Update parquet files (step 6 of 476)

(This view is limited to 50 files because the commit contains too many changes.)
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Cad Software For Linux HOT!.md +0 -20
- spaces/1gistliPinn/ChatGPT4/Examples/Adwind Rat V3 0 11.md +0 -48
- spaces/1gistliPinn/ChatGPT4/Examples/Elite Hacker V 3 Para Hotmail Descargar Gratis.md +0 -6
- spaces/1phancelerku/anime-remove-background/Bubble Shooter Ilyon A Family-Friendly Game that Everyone Can Enjoy.md +0 -92
- spaces/1phancelerku/anime-remove-background/Download Lagu blackpink pink venom Full Album di ilKPOP.md +0 -140
- spaces/1phancelerku/anime-remove-background/FR Legends V0.3.3.2 MOD for Android The Most Realistic and Fun Drifting Simulator.md +0 -80
- spaces/1toTree/lora_test/.ipynb_checkpoints/app-checkpoint.py +0 -1677
- spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/ABstract(插件化AB Testing平台) 746b87acd94643ca871ec661b63f196c/进程间架构 d50744212b044d06a4b29fe931df391b.md +0 -40
- spaces/AFlac199/openai-reverse-proxy/Dockerfile +0 -11
- spaces/AI-Dashboards/Graph.NLP.Sentence.Similarity.Heatmap.KMeansCluster/TestSentences.md +0 -50
- spaces/AP123/dreamgaussian/style.css +0 -28
- spaces/ARTeLab/DTM_Estimation_SRandD/test.py +0 -59
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18_cifar.py +0 -16
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/gpt4love.py +0 -48
- spaces/AchyuthGamer/OpenGPT/server/bp.py +0 -6
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circularprogress/CircularProgress.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/FixWidthSizer.js +0 -124
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Knob.js +0 -123
- spaces/AlekseyKorshuk/gai-project/config.py +0 -13
- spaces/AlgoveraAI/web3-wallet/wallet.py +0 -7
- spaces/AlhitawiMohammed22/HTD_HTR/trocr.py +0 -30
- spaces/Andy1621/uniformer_image_detection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py +0 -103
- spaces/Andy1621/uniformer_image_detection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py +0 -4
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/upfirdn2d.py +0 -184
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/builder.py +0 -30
- spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/critics.py +0 -44
- spaces/AshtonIsNotHere/xlmr-longformer_comparison/app.py +0 -88
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py +0 -20
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/testing.py +0 -137
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/linter.sh +0 -42
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/parse_results.sh +0 -45
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/README.md +0 -9
- spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
- spaces/Benson/text-generation/Examples/Choque Royale Hack Gemas Infinitas Descargar 2022.md +0 -73
- spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/util.py +0 -130
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py +0 -65
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py +0 -116
- spaces/BigChia/bird_classifier/README.md +0 -13
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/transforms/__init__.py +0 -6
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/wrappers.py +0 -215
- spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.cpp +0 -131
- spaces/CVPR/LIVE/pybind11/tests/test_numpy_vectorize.cpp +0 -89
- spaces/CVPR/LIVE/pybind11/tools/mkdoc.py +0 -387
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/remove.h +0 -44
- spaces/CikeyQI/Yunzai/Yunzai/plugins/system/status.js +0 -124
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/iou_loss.py +0 -36
- spaces/Cyril666/my_abi/modules/attention.py +0 -97
- spaces/DHEIVER/Pedrita/README.md +0 -12
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Cad Software For Linux HOT!.md
DELETED
@@ -1,20 +0,0 @@
-<br />
-<h1>Free CAD Software for Linux: A Guide to the Best Options</h1>
-<p>Computer-aided design (CAD) is a process of creating and modifying digital models of physical objects. CAD software is widely used in engineering, architecture, manufacturing, and other fields that require precision and accuracy. However, most of the popular CAD programs are designed for Windows or macOS platforms, leaving Linux users with limited choices.</p>
-<p>Fortunately, there are some free CAD software for Linux that can meet the needs of various users, whether they are hobbyists, students, or professionals. In this article, we will introduce some of the best free CAD software for Linux, covering both 2D and 3D modeling, as well as some of their features and advantages.</p>
-<h2>free cad software for linux</h2><br /><p><b><b>Download Zip</b> ✏ <a href="https://byltly.com/2uKz2S">https://byltly.com/2uKz2S</a></b></p><br /><br />
-<h2>FreeCAD</h2>
-<p>FreeCAD is a free and open-source 3D CAD program that is suitable for product design and mechanical engineering. It uses a parametric modeling approach, which means that you can modify your design by changing its parameters in the model history. FreeCAD supports many file formats, such as STEP, IGES, STL, SVG, DXF, OBJ, IFC, and DAE.</p>
-<p>FreeCAD has a modular architecture that allows you to customize and extend its functionality with various workbenches. Some of the workbenches include Part Design, Sketcher, Draft, Arch, Mesh, FEM, Robot, Path, and Raytracing. FreeCAD also has some AI-powered effects, such as AI Background Replacement, AI Portrait Mode, and AI Style Transfer.</p>
-<p>FreeCAD is available for Windows, macOS, and Linux. You can install it from your software center or download it from the official website . You can also find the latest releases on GitHub .</p>
-<h2>LibreCAD</h2>
-<p>LibreCAD is a free and open-source 2D CAD program that is ideal for geometric constructions. It has a simple and intuitive interface that lets you draw lines, circles, arcs, polygons, ellipses, splines, and other shapes. You can also apply dimensions, annotations, layers, blocks, hatches, and fills to your drawings.</p>
-<p>LibreCAD supports DXF and DWG file formats for importing and exporting your projects. It also has a built-in library of over 4000 standard parts that you can use in your designs. LibreCAD is lightweight and fast, making it suitable for users with modest hardware resources.</p>
-<p>LibreCAD is available for Windows, macOS, and Linux. You can install it from your software center or download it from the official website .</p>
-<h2>OpenSCAD</h2>
-<p>OpenSCAD is a free and open-source 3D CAD program that is different from most other CAD software. Instead of using an interactive graphical interface to create your models, you have to write code in a scripting language that describes the geometry of your objects. OpenSCAD then renders the code into a 3D model that you can view and export.</p>
-<p>OpenSCAD is not meant for artistic or organic modeling, but rather for creating precise and parametric models that can be easily modified by changing the code. OpenSCAD is often used for designing 3D-printable objects or parts that require exact measurements. OpenSCAD supports STL and DXF file formats for importing and exporting your models.</p>
-<p></p>
-<p>OpenSCAD is available for Windows, macOS, and Linux. You can install it from your software center or download it from the official website .</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Adwind Rat V3 0 11.md
DELETED
@@ -1,48 +0,0 @@
-<h2>adwind rat v3 0 11</h2><br /><p><b><b>Download</b> >> <a href="https://imgfil.com/2uxWRo">https://imgfil.com/2uxWRo</a></b></p><br /><br />
-
-1 7 21 6 9 24 5 12 12 26 17 15
-
-And also, we can see the debugging's result:
-
-Me: So, here is our 100th column?
-
-Yellowbird: 100th column? Huh?
-
-Me: Yeah, the 100th column. The 100th column is the last column.
-
-Yellowbird: So you're saying that there are 100 columns, huh?
-
-Me: You bet!
-
-Yellowbird: And the first one is 1, right?
-
-Me: It is.
-
-Yellowbird: And the 100th is 9, huh?
-
-Me: You got it!
-
-Yellowbird: So you're saying that there are 100 columns, 100 rows, and there's a 1 in the first column, a 9 in the last column.
-
-Me: That's right!
-
-Yellowbird: That's pretty cool.
-
-The good news is that this is a pretty easy loop to write in Python. The for loop has a nice syntax for these kind of operations.
-
-# Create an array of numbers from 1 to 100
-
-First, we need to create an array. We can create an array in Python by using list comprehension.
-
-# Create an array with the numbers from 1 to 100
-
-cols = [x for x in range(1,101)]
-
-Now, what are the values of each of these columns?
-
-cols
-
-# [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10 4fefd39f24<br />
-<br />
-<br />
-<p></p>
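The deleted fragment above walks through building a list of the numbers 1 to 100 with a Python list comprehension. A minimal runnable sketch of just that comprehension (the name cols is taken from the fragment; note the nested-list output printed in the fragment does not match what this code produces):

# Runnable form of the comprehension quoted in the deleted fragment above.
# It builds the flat list [1, 2, ..., 100], not nested lists of [1..10].
cols = [x for x in range(1, 101)]
print(cols[0], cols[-1], len(cols))  # 1 100 100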
spaces/1gistliPinn/ChatGPT4/Examples/Elite Hacker V 3 Para Hotmail Descargar Gratis.md
DELETED
@@ -1,6 +0,0 @@
-<h2>elite hacker v 3 para hotmail descargar gratis</h2><br /><p><b><b>Download File</b> 🗸🗸🗸 <a href="https://imgfil.com/2uy0cF">https://imgfil.com/2uy0cF</a></b></p><br /><br />
-
-d5da3c52bf<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Bubble Shooter Ilyon A Family-Friendly Game that Everyone Can Enjoy.md
DELETED
@@ -1,92 +0,0 @@
-<br />
-<h1>Bubble Shooter Ilyon Free Download: A Fun and Addictive Game for Everyone</h1>
-<p>If you are looking for a game that is simple, fun and addictive, then you should try Bubble Shooter Ilyon. This is a game that will keep you entertained for hours, whether you are at home, at work, or on the go. In this article, we will tell you everything you need to know about this amazing game, including what it is, how to play it, why you should download it, and how to download it. Let's get started!</p>
-<h2>bubble shooter ilyon free download</h2><br /><p><b><b>Download</b> ►►►►► <a href="https://jinyurl.com/2uNOri">https://jinyurl.com/2uNOri</a></b></p><br /><br />
-<h2>What is Bubble Shooter Ilyon?</h2>
-<p>Bubble Shooter Ilyon is a game that is based on the classic arcade game of bubble shooting. The goal of the game is to pop all the bubbles on the screen by matching three or more bubbles of the same color. Sounds easy, right? Well, not so fast. The game gets more challenging as you progress through the levels, with more colors, more obstacles, and more puzzles to solve. But don't worry, you also get more help along the way, with powerful boosters and amazing rewards.</p>
-<h3>A classic arcade game with a modern twist</h3>
-<p>Bubble Shooter Ilyon is not just a copy of the old bubble shooter game. It is a game that has been improved and enhanced with new features and graphics that make it more enjoyable and exciting. You will love the colorful and vibrant design of the game, as well as the smooth and responsive gameplay. You will also appreciate the variety of themes and backgrounds that change according to the seasons and holidays.</p>
-<h3>A free and easy to play game for all ages</h3>
-<p>One of the best things about Bubble Shooter Ilyon is that it is completely free to play. You don't have to pay anything to download it or to play it. You also don't need any special skills or experience to play it. The game is very easy to learn and play, but also very hard to master. It is suitable for people of all ages, from kids to adults. Anyone can enjoy this game, whether they are looking for a casual game to pass the time, or a challenging game to test their skills.</p>
-<p>bubble shooter ilyon games online<br />
-bubble shooter ilyon apk download<br />
-bubble shooter ilyon for pc<br />
-bubble shooter ilyon mod apk<br />
-bubble shooter ilyon classic game<br />
-bubble shooter ilyon unlimited coins<br />
-bubble shooter ilyon play store<br />
-bubble shooter ilyon app store<br />
-bubble shooter ilyon cheats and hacks<br />
-bubble shooter ilyon latest version<br />
-bubble shooter ilyon android game<br />
-bubble shooter ilyon ios game<br />
-bubble shooter ilyon reviews and ratings<br />
-bubble shooter ilyon tips and tricks<br />
-bubble shooter ilyon levels and puzzles<br />
-bubble shooter ilyon boosters and power-ups<br />
-bubble shooter ilyon offline mode<br />
-bubble shooter ilyon no wifi needed<br />
-bubble shooter ilyon fun and relaxing<br />
-bubble shooter ilyon addictive and challenging<br />
-bubble shooter ilyon match 3 colors<br />
-bubble shooter ilyon aim and shoot<br />
-bubble shooter ilyon pop and blast<br />
-bubble shooter ilyon clear the board<br />
-bubble shooter ilyon train your brain<br />
-bubble shooter ilyon family-friendly game<br />
-bubble shooter ilyon retro arcade style<br />
-bubble shooter ilyon new features and updates<br />
-bubble shooter ilyon awesome rewards and prizes<br />
-bubble shooter ilyon facebook connect and share<br />
-bubble shooter ilyon leaderboard and achievements<br />
-bubble shooter ilyon colorblind mode available<br />
-bubble shooter ilyon fireball and bomb balls<br />
-bubble shooter ilyon rainbow and star balls<br />
-bubble shooter ilyon swap bubbles for free<br />
-bubble shooter ilyon easy to learn and play<br />
-bubble shooter ilyon strategy and logic skills<br />
-bubble shooter ilyon original puzzle game<br />
-bubble shooter ilyon best free app on google play<br />
-bubble shooter ilyon exciting free game for everyone</p>
-<h3>A game with thousands of levels and challenges</h3>
-<p>Bubble Shooter Ilyon is a game that will never get boring. It has thousands of levels that are different and unique, each with its own goal and difficulty. You will never run out of fun and adventure in this game, as there is always something new and exciting to discover. You will also face different challenges and obstacles in each level, such as bubbles that move, bubbles that change color, bubbles that are frozen, bubbles that are locked, and more. You will have to use your logic and strategy skills to overcome these challenges and clear the board.</p>
-<h2>How to play Bubble Shooter Ilyon?</h2>
-<p>Playing Bubble Shooter Ilyon is very simple and intuitive. All you have to do is follow these steps:</p>
-<h3>Aim, match and pop the bubbles</h3>
-<p>The first step is to aim your bubble shooter at the bubbles on the screen. You can do this by tapping on the screen where you want the bubble to go. You can also drag your finger on the screen to adjust your aim. Once you have aimed your bubble shooter, release your finger to shoot the bubble. The bubble will fly towards the direction you aimed and hit the bubbles on the screen. If the bubble hits three or more bubbles of the same color, they will pop and disappear. If the bubble hits a different color, it will stick to the other bubbles. Try to pop as many bubbles as you can with each shot, as this will give you more points and clear the board faster.</p>
-<h3>Use boosters and power-ups to blast more bubbles</h3>
-<p>Sometimes, popping bubbles is not enough to complete the level. You may need some extra help to deal with tricky situations. That's where boosters and power-ups come in handy. Boosters are special items that you can use before or during the game to enhance your performance. For example, you can use a fireball booster to shoot a powerful fireball that can burn through any bubble, or a bomb booster to shoot a bomb that can explode and pop all the bubbles around it. Power-ups are special bubbles that you can find on the board or create by popping certain combinations of bubbles. For example, you can find or create a rainbow bubble that can match any color, or a lightning bubble that can zap and pop a whole row of bubbles. Use these boosters and power-ups wisely, as they can make a big difference in your game.</p>
-<h3>Complete missions and earn coins and rewards</h3>
-<p>Each level in Bubble Shooter Ilyon has a specific mission that you have to complete in order to pass it. For example, you may have to pop a certain number of bubbles, clear all the bubbles from the board, free all the trapped animals, or collect all the stars. You have to complete the mission before you run out of shots or time, otherwise you will lose the level and have to try again. Completing missions will not only allow you to progress through the game, but also earn you coins and rewards. Coins are the currency of the game, and you can use them to buy more boosters or lives. Rewards are special prizes that you can get by playing daily, completing achievements, or spinning the wheel of fortune. Rewards can include coins, boosters, power-ups, lives, or even special surprises.</p>
-<h2>Why download Bubble Shooter Ilyon?</h2>
-<p>Bubble Shooter Ilyon is not just another bubble shooter game. It is a game that has many benefits and advantages that make it worth downloading and playing. Here are some of them:</p>
-<h3>It's fun, relaxing and satisfying</h3>
-<p>Bubble Shooter Ilyon is a game that can provide you with hours of entertainment and enjoyment. It is a game that can make you smile, laugh, and feel good. It is a game that can help you relax and unwind after a long day or a stressful situation. It is also a game that can give you a sense of satisfaction and accomplishment when you complete a level or achieve a high score.</p>
-<h3>It's compatible with any device and doesn't require internet connection</h3>
-<p>Bubble Shooter Ilyon is a game that you can play on any device, whether it is a smartphone, a tablet, or a computer. You don't need to worry about compatibility issues or technical problems. You also don't need to worry about internet connection or data usage. You can play Bubble Shooter Ilyon offline anytime and anywhere you want. You can play it at home, at work, on the bus, on the plane, or even on the moon (if you ever get there).</p>
-<h3>It's updated regularly with new features and levels</h3>
-<p>Bubble Shooter Ilyon is a game that never gets old or stale. It is a game that is constantly updated with new features and levels that keep it fresh and exciting. You will always find something new and interesting to explore in this game, whether it is a new theme, a new booster, a new power-up, or a new challenge. You will never get bored or tired of this game, as there is always something more to look forward to.</p>
-<h2>How to download Bubble Shooter Ilyon?</h2>
-<p>Downloading Bubble Shooter Ilyon is very easy and fast. All you have to do is follow these steps:</p>
-<h3>Download it from Google Play Store or Ilyon Games website</h3>
-<p>The first step is to go to Google Play Store on your device and search for Bubble Shooter Ilyon. Alternatively, you can go to Ilyon Games website (https://www.ilyon.net/) and click on Bubble Shooter Ilyon icon. Either way, you will be directed to the download page of the game.</p>
-<h3>Install it on your device and start playing</h3>
-<p>The second step is to tap on the install button and wait for the game to be downloaded and installed on your device. This should not take more than a few minutes, depending on your internet speed and device storage. Once the game is installed, you can tap on the open button and start playing right away.</p>
-<h3>Connect to Facebook and share the fun with friends</h3>
-<p>The third step is optional, but highly recommended. You can connect your game to your Facebook account and enjoy some extra benefits. For example, you can save your progress and sync it across different devices. You can also invite your friends to play with you and compete with them on the leaderboards. You can also send and receive gifts, such as coins, boosters, and lives. Connecting to Facebook is very easy and safe. You just have to tap on the connect button on the game screen and follow the instructions.</p>
-<h2>Conclusion</h2>
-<p>Bubble Shooter Ilyon is a game that you should not miss. It is a game that is fun, relaxing, satisfying, compatible, and updated. It is a game that will make you happy and entertained for hours. It is a game that you can download for free and play offline anytime and anywhere you want. What are you waiting for? Download Bubble Shooter Ilyon today and join the millions of players who are already enjoying this amazing game!</p>
-<h2>FAQs</h2>
-<p>Here are some of the most frequently asked questions about Bubble Shooter Ilyon:</p>
-<h3>Q: How many levels are there in Bubble Shooter Ilyon?</h3>
-<p>A: There are over 3000 levels in Bubble Shooter Ilyon, and more are added every week. You will never run out of fun and challenge in this game.</p>
-<h3>Q: How can I get more coins in Bubble Shooter Ilyon?</h3>
-<p>A: You can get more coins by completing levels, completing missions, spinning the wheel of fortune, playing daily, connecting to Facebook, or buying them with real money.</p>
-<h3>Q: How can I get more lives in Bubble Shooter Ilyon?</h3>
-<p>A: You can get more lives by waiting for them to refill (one life every 20 minutes), asking your friends to send them to you, watching a video ad, or buying them with real money.</p>
-<h3>Q: How can I contact the support team of Bubble Shooter Ilyon?</h3>
-<p>A: You can contact the support team of Bubble Shooter Ilyon by tapping on the settings button on the game screen and then tapping on the contact us button. You can also email them at [email protected] or visit their website at https://www.ilyon.net/.</p>
-<h3>Q: How can I rate and review Bubble Shooter Ilyon?</h3>
-<p>A: You can rate and review Bubble Shooter Ilyon by going to Google Play Store on your device and searching for Bubble Shooter Ilyon. Then, you can tap on the stars to rate it and write your feedback in the review section. Your rating and review will help us improve our game and make it better for you.</p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download Lagu blackpink pink venom Full Album di ilKPOP.md
DELETED
@@ -1,140 +0,0 @@
-<br />
-<table>
-<tr>
-<td>
-<h1>Download Lagu Blackpink Pink Venom Ilkpop Net: A Guide for K-Pop Fans</h1>
-<p>If you are a fan of K-Pop, you probably know about Blackpink, one of the most popular girl groups in the world. And if you are a fan of Blackpink, you probably know about their latest hit song, Pink Venom. But do you know how to download lagu blackpink pink venom ilkpop net? If not, don't worry. In this article, we will show you everything you need to know about this catchy song and how to get it on your device.</p>
-<h2>What is Blackpink Pink Venom?</h2>
-<p>Pink Venom is a song by Blackpink that was released on March 22, 2023 as part of their third full album, Born Pink. It is a powerful and energetic song that showcases Blackpink's fierce and confident attitude. The song combines elements of hip hop, trap, EDM, and pop to create a unique sound that appeals to a wide audience.</p>
-<h2>download lagu blackpink pink venom ilkpop net</h2><br /><p><b><b>Download Zip</b> ✵ <a href="https://jinyurl.com/2uNKke">https://jinyurl.com/2uNKke</a></b></p><br /><br />
-<h3>The meaning behind the song</h3>
-<p>The lyrics of Pink Venom are about being fearless and unstoppable in pursuing your goals and dreams. The song uses metaphors of venom, poison, fire, and ice to describe how Blackpink can overcome any obstacles and enemies that stand in their way. The chorus goes like this:</p>
-<blockquote><p>Kick in the door<br>
-Waving the coco<br>
-팝콘이나 챙겨 껴들 생각 말고<br>
-I talk that talk<br>
-Runways I walk walk<br>
-눈 감고 pop pop 안 봐도 척<br>
-One by One by one<br>
-We're the pink venom<br>
-We don't need no antidote<br>
-We're the poison<br>
-We're the fire and ice<br>
-We're the pink venom<br>
-We don't need no antidote</p></blockquote>
-<p>The song is a declaration of Blackpink's dominance and charisma in the music industry and beyond. It also encourages their fans to be confident and fearless in their own lives.</p>
-<h3>The music video and dance practice</h3>
-<p>The music video for Pink Venom was released on March 23, 2023 and has already surpassed 500 million views on YouTube. The music video features Blackpink in various outfits and settings, such as a neon-lit warehouse, a futuristic laboratory, a snowy forest, and a burning car. The music video also showcases Blackpink's impressive dance moves and expressions, as well as their stunning visuals.</p>
-<p>The dance practice for Pink Venom was released on March 25, 2023 and has already surpassed 100 million views on YouTube. The dance practice shows Blackpink in casual clothes, performing the choreography for Pink Venom in a studio. The dance practice reveals the details and precision of Blackpink's movements, as well as their synchronization and energy.</p>
-<h3>The special stage performance</h3>
-<p>Blackpink performed Pink Venom for the first time on a special stage on March 26, 2023 on Mnet's M Countdown. The special stage was a collaboration with the famous DJ Snake, who produced the song. The special stage featured a live remix of Pink Venom by DJ Snake, as well as a surprise appearance by Cardi B, who featured on another song from Born Pink, Bet You Wanna. The special stage was a huge success and received rave reviews from fans and critics alike.</p>
-<p>download mp3 blackpink pink venom ilkpop gratis<br />
-download lagu blackpink pink venom full album ilkpop<br />
-download lagu blackpink pink venom matikiri ilkpop<br />
-download lagu blackpink pink venom wapka ilkpop<br />
-download lagu blackpink pink venom planetlagu ilkpop<br />
-download lagu blackpink pink venom metrolagu ilkpop<br />
-download lagu blackpink pink venom stafaband ilkpop<br />
-download lagu blackpink pink venom uyeshare ilkpop<br />
-download lagu blackpink pink venom lebahmusik ilkpop<br />
-download lagu blackpink pink venom gudanglagu ilkpop<br />
-download lagu blackpink pink venom mp3skull ilkpop<br />
-download lagu blackpink pink venom mp3juice ilkpop<br />
-download lagu blackpink pink venom mp3clan ilkpop<br />
-download lagu blackpink pink venom mp3goo ilkpop<br />
-download lagu blackpink pink venom mp3direct ilkpop<br />
-download lagu blackpink pink venom mp3paw ilkpop<br />
-download lagu blackpink pink venom mp3quack ilkpop<br />
-download lagu blackpink pink venom mp3raid ilkpop<br />
-download lagu blackpink pink venom mp3rocket ilkpop<br />
-download lagu blackpink pink venom mp3xd ilkpop<br />
-download lagu blackpink pink venom tubidy ilkpop<br />
-download lagu blackpink pink venom zippyshare ilkpop<br />
-download lagu blackpink pink venom mediafire ilkpop<br />
-download lagu blackpink pink venom 4shared ilkpop<br />
-download lagu blackpink pink venom soundcloud ilkpop<br />
-download lagu blackpink pink venom youtube ilkpop<br />
-download lagu blackpink pink venom spotify ilkpop<br />
-download lagu blackpink pink venom apple music ilkpop<br />
-download lagu blackpink pink venom amazon music ilkpop<br />
-download lagu blackpink pink venom deezer ilkpop<br />
-download lagu blackpink pink venom tidal ilkpop<br />
-download lagu blackpink pink venom pandora ilkpop<br />
-download lagu blackpink pink venom shazam ilkpop<br />
-download lagu blackpink pink venom genius ilkpop<br />
-download lagu blackpink pink venom musixmatch ilkpop<br />
-download lagu blackpink pink venom lyricsfreak ilkpop<br />
-download lagu blackpink pink venom azlyrics ilkpop<br />
-download lagu blackpink pink venom metrolyrics ilkpop<br />
-download lagu blackpink pink venom lyricstranslate ilkpop<br />
-download lagu blackpink pink venom liriklaguterbaru2022.com [^1^]</p>
-<h2>What is Ilkpop Net?</h2>
-<p>Ilkpop Net is a popular website that offers free downloads of K-Pop songs in various formats and qualities. Ilkpop Net has a large collection of songs from different artists and genres, as well as albums, singles, OSTs, and more. Ilkpop Net is updated regularly with the latest releases and trends in K-Pop.</p>
-<h3>A popular site for K-Pop downloads</h3>
-<p>Ilkpop Net is one of the most visited sites for K-Pop downloads, with millions of users from around the world. Ilkpop Net is especially popular among international fans who want to access K-Pop songs easily and quickly. Ilkpop Net also has a user-friendly interface and a simple search function that makes it easy to find your favorite songs.</p> <h3>The pros and cons of using Ilkpop Net</h3>
-<p>Ilkpop Net has some advantages and disadvantages that you should be aware of before using it. Here are some of them:</p>
-<table>
-<tr>
-<th>Pros</th>
-<th>Cons</th>
-</tr>
-<tr>
-<td>- It is free and easy to use.</td>
-<td>- It may not have the best quality or the latest version of the songs.</td>
-</tr>
-<tr>
-<td>- It has a wide range of songs and genres to choose from.</td>
-<td>- It may not have the official lyrics or translations of the songs.</td>
-</tr>
-<tr>
-<td>- It allows you to download songs in different formats and qualities.</td>
-<td>- It may not be legal or safe to download songs from it.</td>
-</tr>
-<tr>
-<td>- It has a community of K-Pop fans who share their opinions and recommendations.</td>
-<td>- It may have some ads or pop-ups that can be annoying or harmful.</td>
-</tr>
-</table>
-<p>As you can see, Ilkpop Net has its pros and cons, so you should use it at your own risk and discretion. You should also respect the artists and their work by supporting them through official channels whenever possible.</p>
-<h3>The alternatives to Ilkpop Net</h3>
-<p>If you are looking for other ways to download lagu blackpink pink venom ilkpop net, you may want to consider some of the alternatives to Ilkpop Net. Here are some of them:</p>
-<ul>
-<li><b>Spotify</b>: Spotify is a popular streaming service that offers a huge library of music, podcasts, and more. You can listen to Blackpink Pink Venom on Spotify for free with ads, or you can upgrade to Spotify Premium for ad-free listening, offline mode, and more features. Spotify also has playlists, radio, and personalized recommendations for your music taste.</li>
-<li><b>YouTube</b>: YouTube is a popular video-sharing platform that offers a lot of content, including music videos, live performances, lyric videos, and more. You can watch Blackpink Pink Venom on YouTube for free, or you can download it using a YouTube downloader app or website. YouTube also has comments, likes, subscriptions, and notifications for your favorite channels.</li>
-<li><b>iTunes</b>: iTunes is a popular media player and store that offers a lot of music, movies, TV shows, and more. You can buy or rent Blackpink Pink Venom on iTunes for a reasonable price, or you can stream it using Apple Music if you have a subscription. iTunes also has ratings, reviews, charts, and playlists for your music preference.</li>
-</ul> <h2>How to Download Lagu Blackpink Pink Venom Ilkpop Net?</h2>
-<p>Now that you know what Blackpink Pink Venom and Ilkpop Net are, you may be wondering how to download lagu blackpink pink venom ilkpop net. Well, it's not that hard, actually. You just need to follow these simple steps:</p>
-<h3>Step 1: Visit the website</h3>
-<p>The first thing you need to do is to visit the website of Ilkpop Net. You can do this by typing <a href="">www.ilkpop.net</a> on your browser or by clicking on this link. You will see the homepage of Ilkpop Net, where you can find the latest and most popular K-Pop songs.</p>
-<h3>Step 2: Search for the song</h3>
-<p>The next thing you need to do is to search for the song you want to download. You can do this by typing "Blackpink Pink Venom" on the search bar at the top of the website or by clicking on this link. You will see a list of results that match your query, including the song title, artist name, album name, and duration.</p>
-<h3>Step 3: Choose the quality and format</h3>
-<p>The third thing you need to do is to choose the quality and format of the song you want to download. You can do this by clicking on the "Download" button next to the song you want. You will see a pop-up window that shows you the available options for the song, such as MP3, M4A, FLAC, and WAV. You can also see the bitrate and size of each option, such as 128kbps, 320kbps, or 4MB. You can choose the option that suits your preference and device capacity.</p>
-<h3>Step 4: Click on the download button</h3>
-<p>The fourth thing you need to do is to click on the download button of the option you chose. You will see another pop-up window that asks you to confirm your download. You can click on "Yes" or "No" depending on your decision. If you click on "Yes", you will see a progress bar that shows you how much time is left until your download is complete.</p>
-<h3>Step 5: Enjoy your song</h3>
-<p>The fifth and final thing you need to do is to enjoy your song. You can do this by opening the file you downloaded on your device or by transferring it to another device. You can also play it on your media player or share it with your friends. You can now listen to Blackpink Pink Venom anytime and anywhere you want.</p>
-<h2>Conclusion</h2>
-<h4>Summary of the main points</h4>
-<p>In this article, we have shown you how to download lagu blackpink pink venom ilkpop net. We have explained what Blackpink Pink Venom and Ilkpop Net are, as well as their pros and cons. We have also given you a step-by-step guide on how to download lagu blackpink pink venom ilkpop net using Ilkpop Net.</p>
-<h4>Call to action for the readers</h4>
-<p>We hope that this article has been helpful and informative for you. If you are a fan of Blackpink and K-Pop, we encourage you to try downloading lagu blackpink pink venom ilkpop net using Ilkpop Net. It is a fast and easy way to get your favorite songs on your device. However, we also remind you to be careful and responsible when downloading songs from Ilkpop Net or any other website. You should always respect the artists and their work by supporting them through official channels whenever possible.</p>
-<h4>FAQs about download lagu blackpink pink venom ilkpop net</h4>
-<p>Here are some frequently asked questions about download lagu blackpink pink venom ilkpop net:</p>
-<ul>
-<li><b>Q: Is it legal to download songs from Ilkpop Net?</b></li>
-<li>A: It depends on your country and its laws regarding intellectual property rights and piracy. In some countries, it may be illegal or punishable by law to download songs from Ilkpop Net or any other website without permission from the artists or their labels. In other countries, it may be legal or tolerated as long as you don't distribute or sell the songs to others. You should always check your local laws before downloading songs from Ilkpop Net or any other website.</li>
-<li><b>Q: Is it safe to download songs from Ilkpop Net?</b></li>
-<li>A: It depends on how careful and cautious you are when downloading songs from Ilkpop Net or any other website. In some cases, it may be safe to download songs from Ilkpop Net as long as you don't encounter any viruses, malware, or other harmful software that may damage your device or compromise your privacy. In other cases, it may be unsafe to download songs from Ilkpop Net as you may expose yourself to potential risks such as identity theft, data loss, or legal issues. You should always use a reliable antivirus program and a secure internet connection when downloading songs from Ilkpop Net or any other website.</li>
-<li><b>Q: How can I support Blackpink and their work?</b></li>
-<li>A: There are many ways to support Blackpink and their work, such as buying their albums, merchandise, or concert tickets, streaming their songs or videos on official platforms, voting for them on awards shows or polls, following them on social media, joining their fan club, or sending them fan letters or gifts. You can also spread the word about Blackpink and their work to your friends, family, or anyone who may be interested in K-Pop.</li>
-<li><b>Q: What are some other songs by Blackpink that I should check out?</b></li>
-<li>A: Blackpink has a lot of amazing songs that you should check out, such as Kill This Love, How You Like That, Lovesick Girls, Ice Cream, Ddu-Du Ddu-Du, As If It's Your Last, Boombayah, Whistle, Playing With Fire, Stay, Forever Young, Don't Know What To Do, Kick It, Hope Not, Sour Candy, Bet You Wanna, Pretty Savage, Crazy Over You, Love To Hate Me, and You Never Know. You can find these songs on Ilkpop Net or any other platform that you prefer.</li>
-<li><b>Q: Where can I find more information about Blackpink and K-Pop?</b></li>
-<li>A: You can find more information about Blackpink and K-Pop on various websites, blogs, forums, magazines, podcasts, documentaries, or books that cover the topic. Some examples are Soompi, Allkpop, Koreaboo, Billboard K-Pop, Kpopmap, Kpopstarz, The Korea Herald K-Pop Herald, K-Pop Now!, The Birth of Korean Cool, and K-Pop Confidential. You can also join online communities of K-Pop fans who share their opinions and insights about Blackpink and K-Pop.</li>
-</ul>
-</td>
-</tr>
-</table></p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/FR Legends V0.3.3.2 MOD for Android The Most Realistic and Fun Drifting Simulator.md
DELETED
@@ -1,80 +0,0 @@
-
-<h1>FR Legends Mod APK 0.3.2: A Guide for Drift Lovers</h1>
-<p>If you are a fan of drifting and racing games, you might have heard of FR Legends, a popular game that lets you experience the thrill of drifting in various tracks and cars. But did you know that there is a modded version of FR Legends that gives you more features and benefits? In this article, we will tell you everything you need to know about FR Legends Mod APK 0.3.2, including what it is, why you should use it, and how to download and install it on your Android device.</p>
-<h2>What is FR Legends?</h2>
-<p>FR Legends is a game that combines drifting and racing in a unique way. You can choose from a variety of cars, customize them to your liking, and then take them to different tracks to show off your drifting skills. You can also compete with other players online or challenge yourself in solo mode.</p>
-<h2>fr legends mod apk 0.3.2</h2><br /><p><b><b>DOWNLOAD</b> 🗸🗸🗸 <a href="https://jinyurl.com/2uNLxy">https://jinyurl.com/2uNLxy</a></b></p><br /><br />
-<h3>Features of FR Legends</h3>
-<p>FR Legends has many features that make it an enjoyable and addictive game for drift lovers. Here are some of them:</p>
-<h4>Customizable cars</h4>
-<p>You can modify your car's appearance, performance, and handling to suit your preferences. You can change the color, wheels, body kits, spoilers, exhausts, and more. You can also upgrade your engine, suspension, brakes, tires, and more.</p>
-<h4>Realistic physics</h4>
-<p>The game has realistic physics that make drifting feel natural and satisfying. You can control your car's speed, angle, and direction with simple touch controls. You can also use the handbrake, clutch, and throttle to perform advanced drift techniques.</p>
-<h4>Various game modes</h4>
-<p>You can choose from different game modes to test your drifting skills. You can play in career mode, where you have to complete various missions and challenges to earn money and reputation. You can also play in free mode, where you can practice your drifting without any pressure or limitations.</p>
-<h4>Online multiplayer</h4>
-<p>You can also play with other players online in real-time. You can join or create a room and invite your friends or random players to join you. You can then race against each other or cooperate in tandem drifts.</p>
-<h3>Why use FR Legends Mod APK 0.3.2?</h3>
-<p>While FR Legends is a fun and exciting game, it also has some limitations and drawbacks that might affect your gaming experience. For example, you might run out of money to buy or upgrade your cars, or you might get annoyed by the ads that pop up every now and then.</p>
-<p>fr legends mod apk 0.3.2 unlimited money<br />
-fr legends mod apk 0.3.2 download for android<br />
-fr legends mod apk 0.3.2 latest version<br />
-fr legends mod apk 0.3.2 free shopping<br />
-fr legends mod apk 0.3.2 all cars unlocked<br />
-fr legends mod apk 0.3.2 no root<br />
-fr legends mod apk 0.3.2 obb<br />
-fr legends mod apk 0.3.2 offline<br />
-fr legends mod apk 0.3.2 hack<br />
-fr legends mod apk 0.3.2 revdl<br />
-fr legends mod apk 0.3.2 rexdl<br />
-fr legends mod apk 0.3.2 happymod<br />
-fr legends mod apk 0.3.2 an1<br />
-fr legends mod apk 0.3.2 mediafıre<br />
-fr legends mod apk 0.3.2 mega<br />
-fr legends mod apk 0.3.2 android 1<br />
-fr legends mod apk 0.3.2 android oyun club<br />
-fr legends mod apk 0.3.2 apkpure<br />
-fr legends mod apk 0.3.2 apkmody<br />
-fr legends mod apk 0.3.2 apkmirror<br />
-fr legends mod apk 0.3.2 apknite<br />
-fr legends mod apk 0.3.2 apksfree<br />
-fr legends mod apk 0.3.2 aptoide<br />
-fr legends mod apk 0.3.2 blackmod<br />
-fr legends mod apk 0.3.2 bluestacks<br />
-fr legends mod apk 0.3.2 cheat engine<br />
-fr legends mod apk 0.3.2 clubapk<br />
-fr legends mod apk 0.3.2 dlandroid<br />
-fr legends mod apk 0.3.2 farsroid<br />
-fr legends mod apk 0.3.2 game guardian<br />
-fr legends mod apk 0.3.2 google drive<br />
-fr legends mod apk 0.3.2 ihackedit<br />
-fr legends mod apk 0.3.2 iosgods<br />
-fr legends mod apk 0.3.2 lenov.ru<br />
-fr legends mod apk 0.3.2 lucky patcher<br />
-fr legends mod apk 0.3.2 malavida<br />
-fr legends mod apk 0.3.2 mob.org<br />
-fr legends mod apk 0.3.2 mobpark<br />
-fr legends mod apk 0</p>
-<p>That's why you might want to use FR Legends Mod APK 0.3.2, a modified version of the game that gives you more advantages and benefits. Here are some of them:</p>
-<h4>Unlimited money</h4>
-<p>With FR Legends Mod APK 0.3.2, you don't have to worry about running out of money to buy or upgrade your cars. You will have unlimited money in the game, so you can buy any car you want and customize it however you like.</p>
-<h4>All cars unlocked</h4>
-<p>With FR Legends Mod APK 0.3.2, you don't have to wait or grind to unlock new cars in the game. You will have access to all the cars in the game from the start, so you can choose any car you want and enjoy its features.</p>
-<h4>No ads</h <p>No ads</p>
-<p>With FR Legends Mod APK 0.3.2, you don't have to deal with annoying ads that interrupt your gameplay or waste your time. You can enjoy the game without any distractions or interruptions.</p>
-<h3>How to download and install FR Legends Mod APK 0.3.2?</h3>
-<p>If you are interested in using FR Legends Mod APK 0.3.2, you might be wondering how to download and install it on your Android device. Don't worry, it's not a complicated process. Just follow these simple steps:</p>
-<h4>Step 1: Enable unknown sources</h4>
-<p>Before you can install any mod apk file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and toggle it on.</p>
-<h4>Step 2: Download the mod apk file</h4>
-<p>Next, you need to download the mod apk file of FR Legends Mod APK 0.3.2 from a reliable source. You can use this link to download the file directly to your device. Make sure you have enough storage space on your device before downloading the file.</p>
-<h4>Step 3: Install the mod apk file</h4>
-<p>Once you have downloaded the file, you need to install it on your device. To do this, locate the file in your downloads folder and tap on it. You might see a warning message asking you to confirm the installation. Just tap on install and wait for the process to finish.</p>
-<h4>Step 4: Enjoy the game</h4>
-<p>After the installation is complete, you can launch the game from your app drawer or home screen. You can now enjoy all the features and benefits of FR Legends Mod APK 0.3.2 and have fun drifting with your friends or solo.</p>
-<h2>Conclusion</h2>
-<p>FR Legends is a game that will appeal to anyone who loves drifting and racing games. It has many features that make it an enjoyable and addictive game for drift lovers. However, if you want to enhance your gaming experience and get more advantages and benefits, you should try using FR Legends Mod APK 0.3.2, a modified version of the game that gives you unlimited money, all cars unlocked, and no ads. You can download and install it easily by following the steps we have provided in this article.</p>
-<p>We hope this article has been helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy drifting!</p>
-FAQs Q: Is FR Legends Mod APK 0.3.2 safe to use? A: Yes, FR Legends Mod APK 0.3.2 is safe to use as long as you download it from a trusted source and scan it with an antivirus before installing it. Q: Do I need to root my device to use FR Legends Mod APK 0.3.2? A: No, you don't need to root your device to use FR Legends Mod APK 0.3.2. You can use it on any Android device without rooting. Q: Can I play online with FR Legends Mod APK 0.3.2? A: Yes, you can play online with FR Legends Mod APK 0.3.2 as long as you have a stable internet connection and don't use any cheats or hacks that might get you banned. Q: Can I update FR Legends Mod APK 0.3.2? A: No, you can't update FR Legends Mod APK 0.3.2 as it is a modded version of the game that might not be compatible with the latest version of the game. Q: Can I use FR Legends Mod APK 0.3.2 on iOS devices? A: No, you can't use FR Legends Mod APK 0.3.2 on iOS devices as it is an apk file that only works on Android devices.</p> 197e85843d<br />
-<br />
-<br />
spaces/1toTree/lora_test/.ipynb_checkpoints/app-checkpoint.py
DELETED
@@ -1,1677 +0,0 @@
|
|
1 |
-
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import gradio as gr
|
16 |
-
from env import BASE_MODEL_NAME, LORA_WEIGHTS_PATH, PROMPTS
|
17 |
-
|
18 |
-
examples = [
|
19 |
-
[
|
20 |
-
PROMPTS,
|
21 |
-
'low quality',
|
22 |
-
7.5,
|
23 |
-
512,
|
24 |
-
512,
|
25 |
-
25,
|
26 |
-
"DPMSolver"
|
27 |
-
],
|
28 |
-
]
|
29 |
-
import inspect
|
30 |
-
import os
|
31 |
-
import random
|
32 |
-
import re
|
33 |
-
import time
|
34 |
-
from typing import Callable, List, Optional, Union
|
35 |
-
|
36 |
-
import numpy as np
|
37 |
-
import paddle
|
38 |
-
import PIL
|
39 |
-
import PIL.Image
|
40 |
-
from packaging import version
|
41 |
-
|
42 |
-
from paddlenlp.transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
|
43 |
-
|
44 |
-
from ppdiffusers.configuration_utils import FrozenDict
|
45 |
-
from ppdiffusers.models import AutoencoderKL, UNet2DConditionModel
|
46 |
-
from ppdiffusers.pipeline_utils import DiffusionPipeline
|
47 |
-
from ppdiffusers.schedulers import (
|
48 |
-
DDIMScheduler,
|
49 |
-
DPMSolverMultistepScheduler,
|
50 |
-
EulerAncestralDiscreteScheduler,
|
51 |
-
EulerDiscreteScheduler,
|
52 |
-
LMSDiscreteScheduler,
|
53 |
-
PNDMScheduler,
|
54 |
-
HeunDiscreteScheduler,
|
55 |
-
KDPM2AncestralDiscreteScheduler,
|
56 |
-
KDPM2DiscreteScheduler,
|
57 |
-
|
58 |
-
)
|
59 |
-
from ppdiffusers.utils import PIL_INTERPOLATION, deprecate, logging
|
60 |
-
from ppdiffusers.utils.testing_utils import load_image
from ppdiffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from ppdiffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def save_all(images, FORMAT="jpg", OUTDIR="./outputs/"):
    if not isinstance(images, (list, tuple)):
        images = [images]
    for image in images:
        PRECISION = "fp32"
        argument = image.argument
        os.makedirs(OUTDIR, exist_ok=True)
        epoch_time = argument["epoch_time"]
        PROMPT = argument["prompt"]
        NEGPROMPT = argument["negative_prompt"]
        HEIGHT = argument["height"]
        WIDTH = argument["width"]
        SEED = argument["seed"]
        STRENGTH = argument.get("strength", 1)
        INFERENCE_STEPS = argument["num_inference_steps"]
        GUIDANCE_SCALE = argument["guidance_scale"]

        filename = f"{str(epoch_time)}_scale_{GUIDANCE_SCALE}_steps_{INFERENCE_STEPS}_seed_{SEED}.{FORMAT}"
        filedir = f"{OUTDIR}/{filename}"
        image.save(filedir)
        with open(f"{OUTDIR}/{epoch_time}_prompt.txt", "w") as file:
            file.write(
                f"PROMPT: {PROMPT}\nNEG_PROMPT: {NEGPROMPT}\n\nINFERENCE_STEPS: {INFERENCE_STEPS}\nHeight: {HEIGHT}\nWidth: {WIDTH}\nSeed: {SEED}\n\nPrecision: {PRECISION}\nSTRENGTH: {STRENGTH}\nGUIDANCE_SCALE: {GUIDANCE_SCALE}"
            )


re_attention = re.compile(
    r"""
    \\\(|
    \\\)|
    \\\[|
    \\]|
    \\\\|
    \\|
    \(|
    \[|
    :([+-]?[.\d]+)\)|
    \)|
    ]|
    [^\\()\[\]:]+|
    :
    """,
    re.X,
)


def parse_prompt_attention(text):
    """
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res
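
# A worked example of the weighting arithmetic above (an illustration, not part
# of the original file): every unclosed "(" multiplies the enclosed weights by
# 1.1 and every "[" by 1/1.1, while an explicit ":1.3" replaces the 1.1 default.
# In the docstring example, 'house' ends up at 1.3 * 1.1 * 1.1 = 1.573 (an
# explicit 1.3 wrapped in two extra parens) and 'sky' at 1.1 ** 4 = 1.4641
# (three parens plus the outer one spanning the whole prompt).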


def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return its tokens with weights of each token.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = pipe.tokenizer(word).input_ids[1:-1]
            text_token += token

            # copy the weight by length of token
            text_weight += [weight] * len(token)

            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                break

        # truncate
        if len(text_token) > max_length:
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]

        tokens.append(text_token)
        weights.append(text_weight)
    return tokens, weights
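
# For instance (hypothetical token ids, shown only to illustrate the contract):
# for "an (important) word" the returned weights line up one-to-one with the
# token ids, e.g. tokens = [[550, 3158, 1162]] and weights = [[1.0, 1.1, 1.0]].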


def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [eos] + [pad] * (max_length - 2 - len(tokens[i]))
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range((len(weights[i]) - 1) // chunk_length + 1):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * chunk_length : min(len(weights[i]), (j + 1) * chunk_length)]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights
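
# Chunk-length arithmetic for the CLIP defaults (a sketch, not part of the
# original file): with chunk_length = 77 each chunk carries 75 payload tokens
# plus bos/eos, so max_embeddings_multiples = 2 gives
# max_length = (77 - 2) * 2 + 2 = 152, and every prompt is padded to exactly
# 152 token ids (and, when no_boseos_middle is True, 152 weights).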


def get_unweighted_text_embeddings(
    pipe: DiffusionPipeline, text_input: paddle.Tensor, chunk_length: int, no_boseos_middle: Optional[bool] = True
):
    """
    When the length of tokens is a multiple of the capacity of the text encoder,
    it should be split into chunks and sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]

            text_embedding = pipe.text_encoder(text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = paddle.concat(text_embeddings, axis=1)
    else:
        text_embeddings = pipe.text_encoder(text_input)[0]
    return text_embeddings


def get_weighted_text_embeddings(
    pipe: DiffusionPipeline,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 1,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
    **kwargs
):
    r"""
    Prompts can be assigned with local weights using brackets. For example,
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.

    Args:
        pipe (`DiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are concatenated.
        max_embeddings_multiples (`int`, *optional*, defaults to `1`):
            The max multiple length of prompt embeddings compared to the max output length of the text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            If the length of the text tokens is a multiple of the capacity of the text encoder, whether to keep
            the starting and ending tokens in each of the middle chunks.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When parsing is skipped, weighting is effectively skipped as well.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples, (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    # support bert tokenizer
    bos = pipe.tokenizer.bos_token_id if pipe.tokenizer.bos_token_id is not None else pipe.tokenizer.cls_token_id
    eos = pipe.tokenizer.eos_token_id if pipe.tokenizer.eos_token_id is not None else pipe.tokenizer.sep_token_id
    pad = pipe.tokenizer.pad_token_id
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        pad,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = paddle.to_tensor(prompt_tokens)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            pad,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = paddle.to_tensor(uncond_tokens)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle
    )
    prompt_weights = paddle.to_tensor(prompt_weights, dtype=text_embeddings.dtype)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe, uncond_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle
        )
        uncond_weights = paddle.to_tensor(uncond_weights, dtype=uncond_embeddings.dtype)

    # assign weights to the prompts and normalize in the sense of mean
    # TODO: should we normalize by chunk or in a whole (current implementation)?
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.mean(axis=[-2, -1])
        text_embeddings *= prompt_weights.unsqueeze(-1)
        text_embeddings *= (previous_mean / text_embeddings.mean(axis=[-2, -1])).unsqueeze(-1).unsqueeze(-1)
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.mean(axis=[-2, -1])
            uncond_embeddings *= uncond_weights.unsqueeze(-1)
            uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=[-2, -1])).unsqueeze(-1).unsqueeze(-1)

    # For classifier free guidance, we need to do two forward passes.
    # Here we concatenate the unconditional and text embeddings into a single batch
    # to avoid doing two forward passes
    if uncond_prompt is not None:
        text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])

    return text_embeddings
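
# A minimal usage sketch (assuming `pipe` is an already-constructed pipeline
# exposing `tokenizer` and `text_encoder`; not part of the original file):
#
#   embeddings = get_weighted_text_embeddings(
#       pipe,
#       prompt="a (very beautiful) masterpiece",
#       uncond_prompt="",
#       max_embeddings_multiples=2,  # allow prompts up to ~150 tokens
#   )
#
# When uncond_prompt is given, the result is the classifier-free-guidance batch
# [uncond, cond] with shape [2 * batch_size, seq_len, hidden_dim].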


def preprocess_image(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = paddle.to_tensor(image)
    return 2.0 * image - 1.0


def preprocess_mask(mask):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // 8, h // 8), resample=PIL_INTERPOLATION["nearest"])
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))
    mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension; the transpose is an identity permutation (no-op)
    mask = 1 - mask  # repaint white, keep black
    mask = paddle.to_tensor(mask)
    return mask
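
# Shape sketch for the two helpers above (an illustration, not part of the
# original file): a 512x512 RGB init image becomes a [1, 3, 512, 512] tensor in
# [-1, 1], and a 512x512 mask becomes a [1, 4, 64, 64] tensor -- downscaled by
# the VAE factor of 8, tiled over the 4 latent channels, and flipped so that
# white (repaint) pixels end up as 0 and black (keep) pixels as 1.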


class StableDiffusionPipelineAllinOne(DiffusionPipeline):
    r"""
    Pipeline for text-to-image, image-to-image, and inpainting generation using Stable Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`]
            or [`DPMSolverMultistepScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/junnyu/stable-diffusion-v1-4-paddle) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = False,
    ):
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. The PaddleNLP team, diffusers team and"
                " Hugging Face strongly recommend keeping the safety filter enabled in all public facing"
                " circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing"
                " its results. For more information, please have a look at"
                " https://github.com/huggingface/diffusers/pull/254 ."
            )
        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )
        is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
            version.parse(unet.config._ppdiffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def create_scheduler(self, name="DPMSolver"):
        config = self.scheduler.config
        if name == "DPMSolver":
            return DPMSolverMultistepScheduler.from_config(
                config,
                thresholding=False,
                algorithm_type="dpmsolver++",
                solver_type="midpoint",
                lower_order_final=True,
            )
        elif name == "EulerDiscrete":
            return EulerDiscreteScheduler.from_config(config)
        elif name == "EulerAncestralDiscrete":
            return EulerAncestralDiscreteScheduler.from_config(config)
        elif name == "PNDM":
            return PNDMScheduler.from_config(config)
        elif name == "DDIM":
            return DDIMScheduler.from_config(config)
        elif name == "LMSDiscrete":
            return LMSDiscreteScheduler.from_config(config)
        elif name == "HeunDiscrete":
            return HeunDiscreteScheduler.from_config(config)
        elif name == "KDPM2AncestralDiscrete":
            return KDPM2AncestralDiscreteScheduler.from_config(config)
        elif name == "KDPM2Discrete":
            return KDPM2DiscreteScheduler.from_config(config)
        else:
            raise NotImplementedError(f"Unknown scheduler name: {name}")
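
    # A minimal scheduler-swap sketch (assuming `pipe` is a constructed instance
    # of this class; not part of the original file):
    #
    #   scheduler = pipe.create_scheduler("EulerAncestralDiscrete")
    #   result = pipe.text2img("a photo of an astronaut", scheduler=scheduler)
    #
    # create_scheduler rebuilds a scheduler from the current config, leaving
    # pipe.scheduler itself untouched.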

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        if slice_size == "auto":
            if isinstance(self.unet.config.attention_head_dim, int):
                # half the attention head size is usually a good trade-off between
                # speed and memory
                slice_size = self.unet.config.attention_head_dim // 2
            else:
                # if `attention_head_dim` is a list, take the smallest head size
                slice_size = min(self.unet.config.attention_head_dim)
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""
        Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
        back to computing attention in one step.
        """
        # set slice_size = `None` to disable `attention slicing`
        self.enable_attention_slicing(None)
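
    # Slicing arithmetic, sketched (assuming the unet config stores an int
    # attention_head_dim; not part of the original file): with
    # attention_head_dim == 8, enable_attention_slicing("auto") picks
    # slice_size = 4, i.e. attention is computed in two passes, and
    # disable_attention_slicing() restores single-pass attention.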

    def __call__(self, *args, **kwargs):
        return self.text2image(*args, **kwargs)

    def text2img(self, *args, **kwargs):
        return self.text2image(*args, **kwargs)

    def _encode_prompt(
        self,
        prompt,
        negative_prompt,
        max_embeddings_multiples,
        no_boseos_middle,
        skip_parsing,
        skip_weighting,
        do_classifier_free_guidance,
        num_images_per_prompt,
    ):
        if do_classifier_free_guidance and negative_prompt is None:
            negative_prompt = ""
        text_embeddings = get_weighted_text_embeddings(
            self, prompt, negative_prompt, max_embeddings_multiples, no_boseos_middle, skip_parsing, skip_weighting
        )

        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
        text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
        return text_embeddings

    def run_safety_checker(self, image, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clip(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
        return image

    def prepare_extra_step_kwargs(self, eta, scheduler):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        return extra_step_kwargs

    def check_inputs_text2img(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def check_inputs_img2img_inpaint(self, prompt, strength, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of `strength` should be in [0.0, 1.0] but is {strength}")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents_text2img(self, batch_size, num_channels_latents, height, width, dtype, latents=None, scheduler=None):
        shape = [batch_size, num_channels_latents, height // 8, width // 8]
        if latents is None:
            latents = paddle.randn(shape, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * scheduler.init_noise_sigma
        return latents

    def prepare_latents_img2img(self, image, timestep, num_images_per_prompt, dtype, scheduler):
        image = image.cast(dtype=dtype)
        init_latent_dist = self.vae.encode(image).latent_dist
        init_latents = init_latent_dist.sample()
        init_latents = 0.18215 * init_latents

        b, c, h, w = init_latents.shape
        init_latents = init_latents.tile([1, num_images_per_prompt, 1, 1])
        init_latents = init_latents.reshape([b * num_images_per_prompt, c, h, w])

        # add noise to latents using the timesteps
        noise = paddle.randn(init_latents.shape, dtype=dtype)

        # get latents
        init_latents = scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_timesteps(self, num_inference_steps, strength, scheduler):
        # get the original timestep using init_timestep
        offset = scheduler.config.get("steps_offset", 0)
        init_timestep = int(num_inference_steps * strength) + offset
        init_timestep = min(init_timestep, num_inference_steps)

        t_start = max(num_inference_steps - init_timestep + offset, 0)
        timesteps = scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
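
    # Worked example for get_timesteps (an illustration, not part of the
    # original file): with num_inference_steps=50, strength=0.8 and
    # steps_offset=1, init_timestep = int(50 * 0.8) + 1 = 41 and
    # t_start = max(50 - 41 + 1, 0) = 10, so img2img runs only the last
    # 40 of the 50 scheduled timesteps.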

    def prepare_latents_inpaint(self, image, timestep, num_images_per_prompt, dtype, scheduler):
        image = image.cast(dtype)
        init_latent_dist = self.vae.encode(image).latent_dist
        init_latents = init_latent_dist.sample()
        init_latents = 0.18215 * init_latents

        b, c, h, w = init_latents.shape
        init_latents = init_latents.tile([1, num_images_per_prompt, 1, 1])
        init_latents = init_latents.reshape([b * num_images_per_prompt, c, h, w])

        init_latents_orig = init_latents

        # add noise to latents using the timesteps
        noise = paddle.randn(init_latents.shape, dtype=dtype)
        init_latents = scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents, init_latents_orig, noise

    @paddle.no_grad()
    def text2image(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        seed: Optional[int] = None,
        latents: Optional[paddle.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        # new add
        max_embeddings_multiples: Optional[int] = 1,
        no_boseos_middle: Optional[bool] = False,
        skip_parsing: Optional[bool] = False,
        skip_weighting: Optional[bool] = False,
        scheduler=None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`]; ignored for other schedulers.
            seed (`int`, *optional*):
                Random number seed.
            latents (`paddle.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `seed`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        if scheduler is None:
            scheduler = self.scheduler
        seed = random.randint(0, 2**32) if seed is None else seed
        argument = dict(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            seed=seed,
            latents=latents,
            max_embeddings_multiples=max_embeddings_multiples,
            no_boseos_middle=no_boseos_middle,
            skip_parsing=skip_parsing,
            skip_weighting=skip_weighting,
            epoch_time=time.time(),
        )
        paddle.seed(seed)
        # 1. Check inputs. Raise error if not correct
        self.check_inputs_text2img(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            negative_prompt,
            max_embeddings_multiples,
            no_boseos_middle,
            skip_parsing,
            skip_weighting,
            do_classifier_free_guidance,
            num_images_per_prompt,
        )

        # 4. Prepare timesteps
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.in_channels
        latents = self.prepare_latents_text2img(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            latents,
            scheduler=scheduler,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta, scheduler)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(progress_bar.n, progress_bar.total, progress_bar)

        # 8. Post-processing
        image = self.decode_latents(latents)

        # 9. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)

        # 10. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image, argument=argument)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
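
    # A minimal text-to-image usage sketch (assuming the checkpoint named in the
    # class docstring is available; not part of the original file):
    #
    #   pipe = StableDiffusionPipelineAllinOne.from_pretrained("junnyu/stable-diffusion-v1-4-paddle")
    #   output = pipe.text2img("a (masterpiece) portrait, [blurry]", seed=42)
    #   save_all(output.images)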

    @paddle.no_grad()
    def img2img(
        self,
        prompt: Union[str, List[str]],
        image: Union[paddle.Tensor, PIL.Image.Image],
        strength: float = 0.8,
        height=None,
        width=None,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        seed: Optional[int] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        # new add
        max_embeddings_multiples: Optional[int] = 1,
        no_boseos_middle: Optional[bool] = False,
        skip_parsing: Optional[bool] = False,
        skip_weighting: Optional[bool] = False,
        scheduler=None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`paddle.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
                noise will be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter will be modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`]; ignored for other schedulers.
            seed (`int`, *optional*):
                A random seed.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        if scheduler is None:
            scheduler = self.scheduler
        seed = random.randint(0, 2**32) if seed is None else seed
        image_str = image
        if isinstance(image_str, str):
            image = load_image(image_str)

        # default height/width to the image size, rounded down to a multiple of 8
        if height is None:
            height = (image.size[1] // 8) * 8
        if width is None:
            width = (image.size[0] // 8) * 8

        argument = dict(
            prompt=prompt,
            image=image_str,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            strength=strength,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            seed=seed,
            max_embeddings_multiples=max_embeddings_multiples,
            no_boseos_middle=no_boseos_middle,
            skip_parsing=skip_parsing,
            skip_weighting=skip_weighting,
            epoch_time=time.time(),
        )
        paddle.seed(seed)

        # 1. Check inputs
        self.check_inputs_img2img_inpaint(prompt, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            negative_prompt,
            max_embeddings_multiples,
            no_boseos_middle,
            skip_parsing,
            skip_weighting,
            do_classifier_free_guidance,
            num_images_per_prompt,
        )

        # 4. Preprocess image
        if isinstance(image, PIL.Image.Image):
            image = image.resize((width, height))
            image = preprocess_image(image)

        # 5. set timesteps
        scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, scheduler)
        latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])

        # 6. Prepare latent variables
        latents = self.prepare_latents_img2img(image, latent_timestep, num_images_per_prompt, text_embeddings.dtype, scheduler)

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta, scheduler)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(progress_bar.n, progress_bar.total, progress_bar)

        # 9. Post-processing
        image = self.decode_latents(latents)

        # 10. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)

        # 11. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image, argument=argument)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
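
    # A minimal img2img usage sketch (the image path is hypothetical; not part
    # of the original file):
    #
    #   output = pipe.img2img(
    #       "turn the sketch into an oil painting",
    #       image="./outputs/init.jpg",  # a str is loaded via load_image
    #       strength=0.6,
    #       seed=42,
    #   )
    #
    # height and width default to the input image size rounded down to a
    # multiple of 8.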

    @paddle.no_grad()
    def inpaint(
        self,
        prompt: Union[str, List[str]],
        image: Union[paddle.Tensor, PIL.Image.Image],
        mask_image: Union[paddle.Tensor, PIL.Image.Image],
        height=None,
        width=None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        seed: Optional[int] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        # new add
        max_embeddings_multiples: Optional[int] = 1,
        no_boseos_middle: Optional[bool] = False,
        skip_parsing: Optional[bool] = False,
        skip_weighting: Optional[bool] = False,
        scheduler=None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            image (`paddle.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`paddle.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
                is 1, the denoising process will be run on the masked area for the full number of iterations specified
                in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
                noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
                the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`]; ignored for other schedulers.
            seed (`int`, *optional*):
                A random seed.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        if scheduler is None:
            scheduler = self.scheduler
        seed = random.randint(0, 2**32) if seed is None else seed
        image_str = image
        mask_image_str = mask_image

        if isinstance(image_str, str):
            image = load_image(image_str)
        if isinstance(mask_image_str, str):
            mask_image = load_image(mask_image_str)

        # default height/width to the image size, rounded down to a multiple of 8
        if height is None:
            height = (image.size[1] // 8) * 8
        if width is None:
            width = (image.size[0] // 8) * 8

        argument = dict(
            prompt=prompt,
            image=image_str,
            mask_image=mask_image_str,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            strength=strength,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            seed=seed,
            max_embeddings_multiples=max_embeddings_multiples,
            no_boseos_middle=no_boseos_middle,
            skip_parsing=skip_parsing,
            skip_weighting=skip_weighting,
            epoch_time=time.time(),
        )
        paddle.seed(seed)

        # 1. Check inputs
        self.check_inputs_img2img_inpaint(prompt, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            negative_prompt,
            max_embeddings_multiples,
            no_boseos_middle,
            skip_parsing,
            skip_weighting,
            do_classifier_free_guidance,
            num_images_per_prompt,
        )

        # 4. Preprocess image and mask
        if not isinstance(image, paddle.Tensor):
            image = image.resize((width, height))
            image = preprocess_image(image)

        if not isinstance(mask_image, paddle.Tensor):
            mask_image = mask_image.resize((width, height))
            mask_image = preprocess_mask(mask_image)

        # 5. set timesteps
        scheduler.set_timesteps(num_inference_steps)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, scheduler)
        latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])

        # 6. Prepare latent variables
        # encode the init image into latents and scale the latents
        latents, init_latents_orig, noise = self.prepare_latents_inpaint(
            image, latent_timestep, num_images_per_prompt, text_embeddings.dtype, scheduler
        )

        # 7. Prepare mask latent
        mask = mask_image.cast(latents.dtype)
        mask = paddle.concat([mask] * batch_size * num_images_per_prompt)

        # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta, scheduler)

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
                # masking
                init_latents_proper = scheduler.add_noise(init_latents_orig, noise, t)

                latents = (init_latents_proper * mask) + (latents * (1 - mask))

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(progress_bar.n, progress_bar.total, progress_bar)

        # 10. Post-processing
        image = self.decode_latents(latents)

        # 11. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)

        # 12. Convert to PIL
|
1367 |
-
if output_type == "pil":
|
1368 |
-
image = self.numpy_to_pil(image, argument=argument)
|
1369 |
-
|
1370 |
-
if not return_dict:
|
1371 |
-
return (image, has_nsfw_concept)
|
1372 |
-
|
1373 |
-
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
1374 |
-
|
1375 |
-
@staticmethod
|
1376 |
-
def numpy_to_pil(images, **kwargs):
|
1377 |
-
"""
|
1378 |
-
Convert a numpy image or a batch of images to a PIL image.
|
1379 |
-
"""
|
1380 |
-
if images.ndim == 3:
|
1381 |
-
images = images[None, ...]
|
1382 |
-
images = (images * 255).round().astype("uint8")
|
1383 |
-
pil_images = []
|
1384 |
-
argument = kwargs.pop("argument", None)
|
1385 |
-
for image in images:
|
1386 |
-
image = PIL.Image.fromarray(image)
|
1387 |
-
if argument is not None:
|
1388 |
-
image.argument = argument
|
1389 |
-
pil_images.append(image)
|
1390 |
-
|
1391 |
-
return pil_images
|
1392 |
-
pipeline = StableDiffusionPipelineAllinOne.from_pretrained(BASE_MODEL_NAME, safety_checker=None)
|
1393 |
-
|
1394 |
-
if LORA_WEIGHTS_PATH is not None:
|
1395 |
-
pipeline.unet.load_attn_procs(LORA_WEIGHTS_PATH, from_hf_hub=True)
|
1396 |
-
|
1397 |
-
support_scheduler = [
|
1398 |
-
"DPMSolver",
|
1399 |
-
"EulerDiscrete",
|
1400 |
-
"EulerAncestralDiscrete",
|
1401 |
-
"PNDM",
|
1402 |
-
"DDIM",
|
1403 |
-
"LMSDiscrete",
|
1404 |
-
"HeunDiscrete",
|
1405 |
-
"KDPM2AncestralDiscrete",
|
1406 |
-
"KDPM2Discrete"
|
1407 |
-
]
|
1408 |
-
|
1409 |
-
# generate images
|
1410 |
-
def infer(prompt, negative, scale, height, width, num_inference_steps, scheduler_name):
|
1411 |
-
scheduler = pipeline.create_scheduler(scheduler_name)
|
1412 |
-
|
1413 |
-
images = pipeline(
|
1414 |
-
prompt=prompt, negative_prompt=negative, guidance_scale=scale, height=height, width=width, num_inference_steps=num_inference_steps, scheduler=scheduler,
|
1415 |
-
).images
|
1416 |
-
return images
|
1417 |
-
|
1418 |
-
|
1419 |
-
css = """
|
1420 |
-
.gradio-container {
|
1421 |
-
font-family: 'IBM Plex Sans', sans-serif;
|
1422 |
-
}
|
1423 |
-
.gr-button {
|
1424 |
-
color: white;
|
1425 |
-
border-color: black;
|
1426 |
-
background: black;
|
1427 |
-
}
|
1428 |
-
input[type='range'] {
|
1429 |
-
accent-color: black;
|
1430 |
-
}
|
1431 |
-
.dark input[type='range'] {
|
1432 |
-
accent-color: #dfdfdf;
|
1433 |
-
}
|
1434 |
-
.container {
|
1435 |
-
max-width: 730px;
|
1436 |
-
margin: auto;
|
1437 |
-
padding-top: 1.5rem;
|
1438 |
-
}
|
1439 |
-
#gallery {
|
1440 |
-
min-height: 22rem;
|
1441 |
-
margin-bottom: 15px;
|
1442 |
-
margin-left: auto;
|
1443 |
-
margin-right: auto;
|
1444 |
-
border-bottom-right-radius: .5rem !important;
|
1445 |
-
border-bottom-left-radius: .5rem !important;
|
1446 |
-
}
|
1447 |
-
#gallery>div>.h-full {
|
1448 |
-
min-height: 20rem;
|
1449 |
-
}
|
1450 |
-
.details:hover {
|
1451 |
-
text-decoration: underline;
|
1452 |
-
}
|
1453 |
-
.gr-button {
|
1454 |
-
white-space: nowrap;
|
1455 |
-
}
|
1456 |
-
.gr-button:focus {
|
1457 |
-
border-color: rgb(147 197 253 / var(--tw-border-opacity));
|
1458 |
-
outline: none;
|
1459 |
-
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
|
1460 |
-
--tw-border-opacity: 1;
|
1461 |
-
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
|
1462 |
-
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
|
1463 |
-
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
|
1464 |
-
--tw-ring-opacity: .5;
|
1465 |
-
}
|
1466 |
-
#advanced-btn {
|
1467 |
-
font-size: .7rem !important;
|
1468 |
-
line-height: 19px;
|
1469 |
-
margin-top: 12px;
|
1470 |
-
margin-bottom: 12px;
|
1471 |
-
padding: 2px 8px;
|
1472 |
-
border-radius: 14px !important;
|
1473 |
-
}
|
1474 |
-
#advanced-options {
|
1475 |
-
display: none;
|
1476 |
-
margin-bottom: 20px;
|
1477 |
-
}
|
1478 |
-
.footer {
|
1479 |
-
margin-bottom: 45px;
|
1480 |
-
margin-top: 35px;
|
1481 |
-
text-align: center;
|
1482 |
-
border-bottom: 1px solid #e5e5e5;
|
1483 |
-
}
|
1484 |
-
.footer>p {
|
1485 |
-
font-size: .8rem;
|
1486 |
-
display: inline-block;
|
1487 |
-
padding: 0 10px;
|
1488 |
-
transform: translateY(10px);
|
1489 |
-
background: white;
|
1490 |
-
}
|
1491 |
-
.dark .footer {
|
1492 |
-
border-color: #303030;
|
1493 |
-
}
|
1494 |
-
.dark .footer>p {
|
1495 |
-
background: #0b0f19;
|
1496 |
-
}
|
1497 |
-
.acknowledgments h4{
|
1498 |
-
margin: 1.25em 0 .25em 0;
|
1499 |
-
font-weight: bold;
|
1500 |
-
font-size: 115%;
|
1501 |
-
}
|
1502 |
-
.animate-spin {
|
1503 |
-
animation: spin 1s linear infinite;
|
1504 |
-
}
|
1505 |
-
@keyframes spin {
|
1506 |
-
from {
|
1507 |
-
transform: rotate(0deg);
|
1508 |
-
}
|
1509 |
-
to {
|
1510 |
-
transform: rotate(360deg);
|
1511 |
-
}
|
1512 |
-
}
|
1513 |
-
#share-btn-container {
|
1514 |
-
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
|
1515 |
-
margin-top: 10px;
|
1516 |
-
margin-left: auto;
|
1517 |
-
}
|
1518 |
-
#share-btn {
|
1519 |
-
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
|
1520 |
-
}
|
1521 |
-
#share-btn * {
|
1522 |
-
all: unset;
|
1523 |
-
}
|
1524 |
-
#share-btn-container div:nth-child(-n+2){
|
1525 |
-
width: auto !important;
|
1526 |
-
min-height: 0px !important;
|
1527 |
-
}
|
1528 |
-
#share-btn-container .wrap {
|
1529 |
-
display: none !important;
|
1530 |
-
}
|
1531 |
-
|
1532 |
-
.gr-form{
|
1533 |
-
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
|
1534 |
-
}
|
1535 |
-
#prompt-container{
|
1536 |
-
gap: 0;
|
1537 |
-
}
|
1538 |
-
#prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem}
|
1539 |
-
#component-16{border-top-width: 1px!important;margin-top: 1em}
|
1540 |
-
.image_duplication{position: absolute; width: 100px; left: 50px}
|
1541 |
-
"""
|
1542 |
-
|
1543 |
-
block = gr.Blocks(css=css)
|
1544 |
-
|
1545 |
-
with block:
|
1546 |
-
gr.HTML(
|
1547 |
-
"""
|
1548 |
-
<div style="text-align: center; margin: 0 auto;">
|
1549 |
-
<div
|
1550 |
-
style="
|
1551 |
-
display: inline-flex;
|
1552 |
-
align-items: center;
|
1553 |
-
gap: 0.8rem;
|
1554 |
-
font-size: 1.75rem;
|
1555 |
-
"
|
1556 |
-
>
|
1557 |
-
<svg
|
1558 |
-
width="0.65em"
|
1559 |
-
height="0.65em"
|
1560 |
-
viewBox="0 0 115 115"
|
1561 |
-
fill="none"
|
1562 |
-
xmlns="http://www.w3.org/2000/svg"
|
1563 |
-
>
|
1564 |
-
<rect width="23" height="23" fill="white"></rect>
|
1565 |
-
<rect y="69" width="23" height="23" fill="white"></rect>
|
1566 |
-
<rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
|
1567 |
-
<rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
|
1568 |
-
<rect x="46" width="23" height="23" fill="white"></rect>
|
1569 |
-
<rect x="46" y="69" width="23" height="23" fill="white"></rect>
|
1570 |
-
<rect x="69" width="23" height="23" fill="black"></rect>
|
1571 |
-
<rect x="69" y="69" width="23" height="23" fill="black"></rect>
|
1572 |
-
<rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
|
1573 |
-
<rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
|
1574 |
-
<rect x="115" y="46" width="23" height="23" fill="white"></rect>
|
1575 |
-
<rect x="115" y="115" width="23" height="23" fill="white"></rect>
|
1576 |
-
<rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
|
1577 |
-
<rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
|
1578 |
-
<rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
|
1579 |
-
<rect x="92" y="69" width="23" height="23" fill="white"></rect>
|
1580 |
-
<rect x="69" y="46" width="23" height="23" fill="white"></rect>
|
1581 |
-
<rect x="69" y="115" width="23" height="23" fill="white"></rect>
|
1582 |
-
<rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
|
1583 |
-
<rect x="46" y="46" width="23" height="23" fill="black"></rect>
|
1584 |
-
<rect x="46" y="115" width="23" height="23" fill="black"></rect>
|
1585 |
-
<rect x="46" y="69" width="23" height="23" fill="black"></rect>
|
1586 |
-
<rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
|
1587 |
-
<rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
|
1588 |
-
<rect x="23" y="69" width="23" height="23" fill="black"></rect>
|
1589 |
-
</svg>
|
1590 |
-
<h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
|
1591 |
-
Dreambooth LoRa Demo
|
1592 |
-
</h1>
|
1593 |
-
</div>
|
1594 |
-
</div>
|
1595 |
-
"""
|
1596 |
-
)
|
1597 |
-
with gr.Group():
|
1598 |
-
with gr.Box():
|
1599 |
-
with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
|
1600 |
-
with gr.Column():
|
1601 |
-
text = gr.Textbox(
|
1602 |
-
label="Enter your prompt",
|
1603 |
-
value=PROMPTS,
|
1604 |
-
show_label=False,
|
1605 |
-
max_lines=1,
|
1606 |
-
placeholder="Enter your prompt",
|
1607 |
-
elem_id="prompt-text-input",
|
1608 |
-
).style(
|
1609 |
-
border=(True, False, True, True),
|
1610 |
-
rounded=(True, False, False, True),
|
1611 |
-
container=False,
|
1612 |
-
)
|
1613 |
-
negative = gr.Textbox(
|
1614 |
-
label="Enter your negative prompt",
|
1615 |
-
show_label=False,
|
1616 |
-
max_lines=1,
|
1617 |
-
placeholder="Enter a negative prompt",
|
1618 |
-
elem_id="negative-prompt-text-input",
|
1619 |
-
).style(
|
1620 |
-
border=(True, False, True, True),
|
1621 |
-
rounded=(True, False, False, True),
|
1622 |
-
container=False,
|
1623 |
-
)
|
1624 |
-
btn = gr.Button("Generate image").style(
|
1625 |
-
margin=False,
|
1626 |
-
rounded=(False, True, True, False),
|
1627 |
-
full_width=False,
|
1628 |
-
)
|
1629 |
-
|
1630 |
-
gallery = gr.Gallery(
|
1631 |
-
label="Generated images", show_label=False, elem_id="gallery"
|
1632 |
-
).style(grid=[1], height="auto")
|
1633 |
-
|
1634 |
-
|
1635 |
-
with gr.Accordion("Advanced settings", open=False):
|
1636 |
-
scheduler_name = gr.Dropdown(
|
1637 |
-
label="scheduler_name", choices=support_scheduler, value="DPMSolver"
|
1638 |
-
)
|
1639 |
-
guidance_scale = gr.Slider(
|
1640 |
-
label="Guidance Scale", minimum=1, maximum=30, value=7.5, step=0.1
|
1641 |
-
)
|
1642 |
-
height = gr.Slider(
|
1643 |
-
label="Height", minimum=256, maximum=1024, value=512, step=8
|
1644 |
-
)
|
1645 |
-
width = gr.Slider(
|
1646 |
-
label="Width", minimum=256, maximum=1024, value=512, step=0.1
|
1647 |
-
)
|
1648 |
-
num_inference_steps = gr.Slider(
|
1649 |
-
label="num_inference_steps", minimum=10, maximum=100, value=25, step=1
|
1650 |
-
)
|
1651 |
-
|
1652 |
-
|
1653 |
-
inputs = [text, negative, guidance_scale, height, width, num_inference_steps, scheduler_name]
|
1654 |
-
# ex = gr.Examples(examples=examples, fn=infer, inputs=inputs, outputs=gallery, cache_examples=False)
|
1655 |
-
# ex.dataset.headers = [""]
|
1656 |
-
negative.submit(infer, inputs=inputs, outputs=gallery)
|
1657 |
-
text.submit(infer, inputs=inputs, outputs=gallery)
|
1658 |
-
btn.click(infer, inputs=inputs, outputs=gallery)
|
1659 |
-
|
1660 |
-
|
1661 |
-
gr.HTML(
|
1662 |
-
"""
|
1663 |
-
<div class="footer">
|
1664 |
-
<p>Model by <a href="https://www.paddlepaddle.org.cn/" style="text-decoration: underline;" target="_blank">PaddlePaddle</a> - Gradio Demo by 🤗 Hugging Face
|
1665 |
-
</p>
|
1666 |
-
</div>
|
1667 |
-
<div class="acknowledgments">
|
1668 |
-
<p><h4>LICENSE</h4>
|
1669 |
-
The model is licensed with a <a href="https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL" style="text-decoration: underline;" target="_blank">CreativeML OpenRAIL++</a> license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" style="text-decoration: underline;" target="_blank">read the license</a></p>
|
1670 |
-
<p><h4>Biases and content acknowledgment</h4>
|
1671 |
-
Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" style="text-decoration: underline;" target="_blank">model card</a></p>
|
1672 |
-
</div>
|
1673 |
-
"""
|
1674 |
-
)
|
1675 |
-
|
1676 |
-
block.launch(server_name="0.0.0.0", server_port=8221)
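
For orientation, here is a minimal sketch of driving the `infer` helper defined above without the Gradio UI. The prompt string and the `"DPMSolver"` choice are purely illustrative, and `PROMPTS`, `BASE_MODEL_NAME`, and `LORA_WEIGHTS_PATH` are assumed to be defined earlier in this (deleted) file.

```python
# Hedged usage sketch: exercises the same code path as the "Generate image" button.
images = infer(
    prompt="a photo of a sks dog in a bucket",  # illustrative prompt
    negative="",
    scale=7.5,
    height=512,
    width=512,
    num_inference_steps=25,
    scheduler_name="DPMSolver",  # any entry of support_scheduler works
)
images[0].save("sample.png")
```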
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/ABstract(插件化AB Testing平台) 746b87acd94643ca871ec661b63f196c/进程间架构 d50744212b044d06a4b29fe931df391b.md
DELETED
@@ -1,40 +0,0 @@
# Inter-Process Architecture

Last edited time: April 23, 2023 3:58 PM
Owner: Anonymous

```
@startuml
'https://plantuml.com/component-diagram
package "前端" {
    [APP]
    [H5]
    [小程序]
}

package "BFF" {
    [APP] --> [APP BFF]
    [H5] --> [H5 BFF]
    [小程序] --> [小程序 BFF]
}

package "AB Testing" {
    package "数据埋点" {
        [APP BFF] --> [数据埋点]
        [H5 BFF] --> [数据埋点]
        [小程序 BFF] --> [数据埋点]
        [数据埋点] -- [数据仓库]
    }
    [APP BFF] --> [Feature Flag]
    [H5 BFF] --> [Feature Flag]
    [小程序 BFF] --> [Feature Flag]
    [Feature Flag] -- [Feature Configs]
    [Feature Flag] -Right- [Experiments]
    [Experiments] -- [Experiments Analytics]
    [Experiments Analytics] -Right-> [数据仓库]
}

@enduml
```


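In the diagram, the three clients (APP, H5, and 小程序/mini-program) each talk to their own BFF; every BFF consults the Feature Flag service for experiment assignment and reports events to 数据埋点 (event tracking), which feeds the 数据仓库 (data warehouse). Below is a minimal sketch of the BFF-side flag lookup; the endpoint URL, the `get_variant` name, and the payload shape are hypothetical illustrations, not part of the ABstract platform.

```python
import json
import urllib.request

FLAG_ENDPOINT = "http://feature-flag.internal/api/flags"  # hypothetical service URL

def get_variant(flag_name: str, user_id: str) -> str:
    """Ask the (hypothetical) Feature Flag service which variant to serve."""
    url = f"{FLAG_ENDPOINT}/{flag_name}?user={user_id}"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp)["variant"]

# A BFF handler would branch on the variant and emit a tracking event,
# e.g.: variant = get_variant("new_checkout", user_id)
```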
spaces/AFlac199/openai-reverse-proxy/Dockerfile
DELETED
@@ -1,11 +0,0 @@
FROM node:18

WORKDIR /app

RUN npm install express express-http-proxy

COPY . .

EXPOSE 7860

CMD [ "node", "server.js" ]
spaces/AI-Dashboards/Graph.NLP.Sentence.Similarity.Heatmap.KMeansCluster/TestSentences.md
DELETED
@@ -1,50 +0,0 @@
Patient Health Questionnaire (PHQ-9) 🧠 - Major depressive disorder (ICD-10: F32).
Generalized Anxiety Disorder 7-item Scale (GAD-7) 😰 - Generalized anxiety disorder (ICD-10: F41.1).
Hamilton Rating Scale for Depression (HRSD) 🧠 - Major depressive disorder (ICD-10: F32).
World Health Organization Disability Assessment Schedule 2.0 (WHODAS 2.0) 🧠💪 - Physical and mental disability (ICD-10: Z73.1).
Short Form-36 Health Survey (SF-36) 💪🧠 - Health-related quality of life (CPT: 99499).
Health Assessment Questionnaire (HAQ) 💪 - Functional status assessment (CPT: 97750).
EuroQol-5D (EQ-5D) 💪🧠 - Health-related quality of life (LOINC: 83792-6).
Geriatric Depression Scale (GDS) 🧑🦳🧠 - Depression in older adults (ICD-10: F32.1).
Mini-Mental State Examination (MMSE) 🧑🦳💭 - Cognitive impairment (ICD-10: F06.7).
Pain Catastrophizing Scale (PCS) 💔 - Chronic pain (LOINC: 86351-6).
Oswestry Disability Index (ODI) 💪💔 - Back pain (CPT: 97750).
Fibromyalgia Impact Questionnaire (FIQ) 💔😩 - Fibromyalgia (SNOMED: 316962002).
Beck Depression Inventory (BDI) 🧠 - Depression (ICD-10: F32).
Posttraumatic Stress Disorder Checklist (PCL) 😰😞 - Posttraumatic stress disorder (ICD-10: F43.1).
Alcohol Use Disorders Identification Test (AUDIT) 🍻 - Alcohol use disorder (ICD-10: F10).
Drug Abuse Screening Test (DAST) 💊 - Substance use disorder (ICD-10: F19).
Eating Attitudes Test (EAT) 🍴 - Eating disorders (ICD-10: F50).
Adolescent Eating Disorder Examination (ADE) 🍴👩🦰 - Eating disorders in adolescents (ICD-10: F50).
Child Behavior Checklist (CBCL) 👧🧒 - Child behavior problems (ICD-10: F90).
Autism Spectrum Quotient (AQ) 🧑🦱 - Autism spectrum disorder (ICD-10: F84.0).
Columbia-Suicide Severity Rating Scale (C-SSRS) 🩸 - Suicide risk (ICD-10: Z65.8).
Perceived Stress Scale (PSS) 😩 - Stress (LOINC: 75217-3).
Satisfaction with Life Scale (SWLS) 😊 - Life satisfaction (LOINC: 69406-9).
Health Belief Model Scale (HBM) 💊💉 - Health beliefs (LOINC: 88018).
Multidimensional Health Locus of Control Scale (MHLC) 💊💉 - Health locus of control (LOINC: 87561-7).
Life Orientation Test-Revised (LOT-R) 😃 - Optimism (LOINC: 75315-5).
State-Trait Anxiety Inventory (STAI) 😰 - Anxiety (LOINC: 71092-3).
Multidimensional Scale of Perceived Social Support (MSPSS) 👥 - Social support (LOINC: 86649-4).
Job Content Questionnaire (JCQ) 💼 - Job stress (LOINC: 76554-9).
Burnout Measure (BO) 🔥 - Burnout (LOINC: 89049-8).
Family Assessment Device (FAD) 👨👩👧 - Family functioning (LOINC: 84113-2).
Perceived Control Scale (PCS) 💪 - Perceived control (LOINC: 86447-0).
General Self-Efficacy Scale (GSES) 💪 - Self-efficacy (LOINC: 76563-0).
Coping Strategies Inventory (CSI) 😓 - Coping strategies (LOINC: 89057-1).
Acceptance and Action Questionnaire (AAQ-II) 🧘 - Acceptance and commitment therapy (LOINC: 88027-2).
Attention Deficit Hyperactivity Disorder Self-Report Scale (ASRS) 👧🧒 - ADHD (ICD-10: F90).
Impact of Event Scale-Revised (IES-R) 😔😞 - Trauma (LOINC: 86237-7).
Insomnia Severity Index (ISI) 💤 - Insomnia (LOINC: 82451-5).
Social Phobia Inventory (SPIN) 😰 - Social anxiety disorder (ICD-10: F40.1).
Panic Disorder Severity Scale (PDSS) 😰 - Panic disorder (ICD-10: F41.0).
Yale-Brown Obsessive Compulsive Scale (Y-BOCS) 🤔 - Obsessive-compulsive disorder (ICD-10: F42).
Social Interaction Anxiety Scale (SIAS) 😰 - Social anxiety disorder (ICD-10: F40.1).
Generalized Anxiety Disorder Scale (GADS) 😰 - Generalized anxiety disorder (ICD-10: F41.1).
Postpartum Depression Screening Scale (PDSS) 🤱🧠 - Postpartum depression (ICD-10: F53.0).
Child and Adolescent Symptom Inventory (CASI) 👧🧒🧠 - Child and adolescent mental health (ICD-10: F90).
Strengths and Difficulties Questionnaire (SDQ) 👧🧒🧠 - Child and adolescent mental health (ICD-10: F90).
Kessler Psychological Distress Scale (K10) 🧠 - Psychological distress (LOINC: 76550-6).
World Health Organization Quality of Life Scale (WHOQOL) 💪🧠 - Quality of life (LOINC: 88055-2).
Multidimensional Pain Inventory (MPI) 💔 - Chronic pain (LOINC: 71808-8).
Cornell Scale for Depression in Dementia (CSDD) 👴👵🧠 - Depression in dementia patients (ICD-10: F03.90).
spaces/AP123/dreamgaussian/style.css
DELETED
@@ -1,28 +0,0 @@
body {
    padding: 2rem;
    font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
}

h1 {
    font-size: 16px;
    margin-top: 0;
}

p {
    color: rgb(107, 114, 128);
    font-size: 15px;
    margin-bottom: 10px;
    margin-top: 5px;
}

.card {
    max-width: 620px;
    margin: 0 auto;
    padding: 16px;
    border: 1px solid lightgray;
    border-radius: 16px;
}

.card p:last-child {
    margin-bottom: 0;
}
spaces/ARTeLab/DTM_Estimation_SRandD/test.py
DELETED
@@ -1,59 +0,0 @@
import torch
import torchvision
from torchvision import transforms
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from models.modelNetA import Generator as GA
from models.modelNetB import Generator as GB
from models.modelNetC import Generator as GC


# DEVICE = 'cpu'
DEVICE = 'cuda'
model_type = 'model_c'

modeltype2path = {
    'model_a': 'DTM_exp_train10%_model_a/g-best.pth',
    'model_b': 'DTM_exp_train10%_model_b/g-best.pth',
    'model_c': 'DTM_exp_train10%_model_c/g-best.pth',
}

if model_type == 'model_a':
    generator = GA()
if model_type == 'model_b':
    generator = GB()
if model_type == 'model_c':
    generator = GC()

generator = torch.nn.DataParallel(generator)
state_dict_Gen = torch.load(modeltype2path[model_type], map_location=torch.device('cpu'))
generator.load_state_dict(state_dict_Gen)
generator = generator.module.to(DEVICE)
# generator.to(DEVICE)
generator.eval()

preprocess = transforms.Compose([
    transforms.Grayscale(),
    # transforms.Resize((128, 128)),
    transforms.ToTensor()
])
input_img = Image.open('demo_imgs/fake.jpg')
torch_img = preprocess(input_img).to(DEVICE).unsqueeze(0).to(DEVICE)
torch_img = (torch_img - torch.min(torch_img)) / (torch.max(torch_img) - torch.min(torch_img))
with torch.no_grad():
    output = generator(torch_img)
    sr, sr_dem_selected = output[0], output[1]
    sr = sr.squeeze(0).cpu()

print(sr.shape)
torchvision.utils.save_image(sr, 'sr.png')
# sr = Image.fromarray(sr.squeeze(0).detach().numpy() * 255, 'L')
# sr.save('sr2.png')

sr_dem_selected = sr_dem_selected.squeeze().cpu().detach().numpy()
print(sr_dem_selected.shape)
plt.imshow(sr_dem_selected, cmap='jet', vmin=0, vmax=np.max(sr_dem_selected))
plt.colorbar()
plt.savefig('test.png')
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet18_cifar.py
DELETED
@@ -1,16 +0,0 @@
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='ResNet_CIFAR',
        depth=18,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=10,
        in_channels=512,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
    ))
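As a rough sketch, such a `_base_` config would be materialized through the model registry; the `mmengine`/`mmpretrain` import paths below are an assumption about the mmpretrain version this repo vendors.

```python
from mmengine.config import Config
from mmpretrain.registry import MODELS

cfg = Config.fromfile('configs/_base_/models/resnet18_cifar.py')
classifier = MODELS.build(cfg.model)  # ResNet-18 CIFAR backbone + 10-way linear head
```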
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/gpt4love.py
DELETED
@@ -1,48 +0,0 @@
import json
import sys
from re import findall
from curl_cffi import requests

config = json.loads(sys.argv[1])
prompt = config['messages'][-1]['content']

headers = {
    'authority': 'api.gptplus.one',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,zh-TW;q=0.5,zh;q=0.4',
    'content-type': 'application/octet-stream',
    'origin': 'https://ai.gptforlove.com/',
    'referer': 'https://ai.gptforlove.com/',
    'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'cross-site',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
}

json_data = {
    'prompt': prompt,
    'options': {}
}

def format(chunk):
    # Pull the incremental completion text out of each streamed JSON chunk.
    try:
        completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
        print(completion_chunk, flush=True, end='')

    except Exception as e:
        print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
        return

while True:
    try:
        response = requests.post('https://api.gptplus.one/api/chat-process',
                                 headers=headers, json=json_data, content_callback=format, impersonate='chrome110')

        exit(0)

    except Exception as e:
        print('[ERROR] an error occurred, retrying... |', e, flush=True)
        continue
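For context, these helper scripts are launched as subprocesses with a JSON config in `argv[1]` and stream the completion on stdout. A minimal sketch of such an invocation follows; the message content is illustrative.

```python
import json
import subprocess

config = {"messages": [{"role": "user", "content": "Hello"}]}
subprocess.run(["python", "gpt4love.py", json.dumps(config)])
```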
spaces/AchyuthGamer/OpenGPT/server/bp.py
DELETED
@@ -1,6 +0,0 @@
from flask import Blueprint

bp = Blueprint('bp', __name__,
               template_folder='./../client/html',
               static_folder='./../client',
               static_url_path='assets')
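For context, a minimal sketch of mounting this blueprint on a Flask app; the port and the module-level app object are illustrative assumptions about the rest of the server package.

```python
from flask import Flask

from server.bp import bp

app = Flask(__name__)
app.register_blueprint(bp)  # serves ./../client/html templates and ./../client assets

if __name__ == "__main__":
    app.run(port=1338)  # illustrative port
```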
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circularprogress/CircularProgress.d.ts
DELETED
@@ -1,2 +0,0 @@
import CircularProgress from "../../../plugins/circularprogress";
export default CircularProgress;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/FixWidthSizer.js
DELETED
@@ -1,124 +0,0 @@
import BaseSizer from '../basesizer/BaseSizer.js';
import Methods from './Methods.js';
import GetOrientationMode from '../utils/GetOrientationMode.js';
import GetMaxChildWidth from './GetMaxChildWidth.js';
import GetMaxChildHeight from './GetMaxChildHeight.js';

const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
const GetValue = Phaser.Utils.Objects.GetValue;

class FixWidthSizer extends BaseSizer {
    constructor(scene, x, y, minWidth, minHeight, config) {
        if (IsPlainObject(x)) {
            config = x;
            x = GetValue(config, 'x', 0);
            y = GetValue(config, 'y', 0);
            minWidth = GetValue(config, 'width', undefined);
            minHeight = GetValue(config, 'height', undefined);
        } else if (IsPlainObject(minWidth)) {
            config = minWidth;
            minWidth = GetValue(config, 'width', undefined);
            minHeight = GetValue(config, 'height', undefined);
        }

        super(scene, x, y, minWidth, minHeight, config);

        this.type = 'rexFixWidthSizer';
        this.sizerChildren = [];
        this.setOrientation(GetValue(config, 'orientation', 0));
        this.setItemSpacing(GetValue(config, 'space.item', 0));
        this.setLineSpacing(GetValue(config, 'space.line', 0));
        this.setIntentLeft(
            GetValue(config, 'space.indentLeftOdd', 0),
            GetValue(config, 'space.indentLeftEven', 0)
        );
        this.setIntentTop(
            GetValue(config, 'space.indentTopOdd', 0),
            GetValue(config, 'space.indentTopEven', 0)
        );
        this.setAlign(GetValue(config, 'align', 0));
        this.setJustifyPercentage(GetValue(config, 'justifyPercentage', 0.25));
        this.setRTL(GetValue(config, 'rtl', false));

        this.addChildrenMap('items', this.sizerChildren);
    }

    setOrientation(orientation) {
        this.orientation = GetOrientationMode(orientation);
        return this;
    }

    setItemSpacing(space) {
        this.space.item = space;
        return this;
    }

    setLineSpacing(space) {
        this.space.line = space;
        return this;
    }

    setIntentLeft(odd, even) {
        this.space.indentLeftOdd = odd;
        this.space.indentLeftEven = even;
        return this;
    }

    setIntentTop(odd, even) {
        this.space.indentTopOdd = odd;
        this.space.indentTopEven = even;
        return this;
    }

    setAlign(align) {
        if (typeof (align) === 'string') {
            align = ALIGN[align];
        }
        this.align = align;
        return this;
    }

    setJustifyPercentage(value) {
        this.justifyPercentage = value;
        return this;
    }

    setRTL(enabled) {
        if (enabled === undefined) {
            enabled = true;
        }
        this.rtl = enabled;
        return this;
    }

    get maxChildWidth() {
        if (this._maxChildWidth === undefined) {
            this._maxChildWidth = GetMaxChildWidth.call(this);
        }
        return this._maxChildWidth;
    }

    get maxChildHeight() {
        if (this._maxChildHeight === undefined) {
            this._maxChildHeight = GetMaxChildHeight.call(this);
        }
        return this._maxChildHeight;
    }
}

const ALIGN = {
    left: 0, top: 0,
    right: 1, bottom: 1,
    center: 2,
    justify: 3,
    'justify-left': 3, 'justify-top': 3,
    'justify-right': 4, 'justify-bottom': 4,
    'justify-center': 5
}

Object.assign(
    FixWidthSizer.prototype,
    Methods
);

export default FixWidthSizer;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Knob.js
DELETED
@@ -1,123 +0,0 @@
import OverlapSizer from '../overlapsizer/OverlapSizer.js';
import ProgressBase from '../../../plugins/utils/progressbase/ProgressBase.js';
import CircularProgress from '../circularprogress/CircularProgress.js';
import InstallTouchPadEvents from './input/OnTouchPad.js';
import InstallPanPadEvents from './input/OnPanPad.js';
import TextObjectMethods from './TextObjectMethods.js';

const GetValue = Phaser.Utils.Objects.GetValue;
const SnapTo = Phaser.Math.Snap.To;

class Knob extends ProgressBase(OverlapSizer) {
    constructor(scene, config) {
        if (config === undefined) {
            config = {};
        }

        // Create sizer
        super(scene, config);
        this.type = 'rexKnob';

        this.bootProgressBase(config);

        // Add elements
        var background = GetValue(config, 'background', undefined);
        var textObject = GetValue(config, 'text', undefined);

        if (background) {
            this.addBackground(background);
        }
        // Get text object
        if (textObject) {
            // Don't draw text on knob directly
            config.textColor = undefined;
            config.textStrokeColor = undefined;
            this.setTextFormatCallback(
                GetValue(config, 'textFormatCallback', undefined),
                GetValue(config, 'textFormatCallbackScope', undefined)
            );
        }
        // Create circular progress object
        var knob = new CircularProgress(scene, config);
        knob.setDepth(GetValue(config, 'knobDepth', 0));
        knob._value = -1; // To trigger text updating
        scene.add.existing(knob);

        this.add(knob, 'knob');
        if (textObject) {
            this.add(textObject, 'text', 'center', 0, false);
            scene.children.moveBelow(knob, textObject); // Move knob below textObject
        }

        this.addChildrenMap('background', background);
        this.addChildrenMap('knob', knob);
        this.addChildrenMap('text', textObject);

        this.setEnable(GetValue(config, 'enable', undefined));

        this.setGap(GetValue(config, 'gap', undefined));
        this.setValue(GetValue(config, 'value', 0), GetValue(config, 'min', undefined), GetValue(config, 'max', undefined));

        // Input
        var inputMode = GetValue(config, 'input', 0);
        if (typeof (inputMode) === 'string') {
            inputMode = INPUTMODE[inputMode];
        }
        switch (inputMode) {
            case 0: // 'pan'
                InstallPanPadEvents.call(this);
                break;
            case 1: // 'click'
                InstallTouchPadEvents.call(this);
                break;
        }
    }

    setEnable(enable) {
        if (enable === undefined) {
            enable = true;
        }
        this.enable = enable;
        return this;
    }

    setGap(gap) {
        this.gap = gap;
        return this;
    }

    // Override
    get value() {
        return this.sizerChildren.knob.value;
    }

    // Override
    set value(value) {
        if (this.gap !== undefined) {
            value = SnapTo(value, this.gap);
        }
        var oldValue = this.value;
        this.sizerChildren.knob.value = value;

        var newValue = this.value;
        if (oldValue !== newValue) {
            this.updateText();
            this.eventEmitter.emit('valuechange', newValue, oldValue, this.eventEmitter);
        }
    }

}

const INPUTMODE = {
    pan: 0,
    drag: 0,
    click: 1,
    none: -1,
}

Object.assign(
    Knob.prototype,
    TextObjectMethods,
);

export default Knob;
spaces/AlekseyKorshuk/gai-project/config.py
DELETED
@@ -1,13 +0,0 @@
import os

RESOURCE_DIR = os.path.join(os.path.dirname(__file__), 'resources')
DEFAULT_BOT_NAME = 'zylix_the_gnome_tinkerer'
GUANACO_DEVELOPER_KEY = os.environ.get('GUANACO_DEVELOPER_KEY')

MODELS = {
    "Joined Expert": os.environ.get('JOINED_ENDPOINT'),
    "Friendly Expert": os.environ.get('FRIENDLY_ENDPOINT'),
    "Romantic Expert": os.environ.get('ROMANTIC_ENDPOINT'),
    "Fight Expert": os.environ.get('FIGHT_ENDPOINT'),
}
DEFAULT_MODEL = "Joined Expert"
spaces/AlgoveraAI/web3-wallet/wallet.py
DELETED
@@ -1,7 +0,0 @@
from eth_account import Account

Account.enable_unaudited_hdwallet_features()

def get_wallet():
    acct, mnemonic = Account.create_with_mnemonic()
    return acct, mnemonic
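A minimal sketch of using the deleted helper; `acct.address` and `Account.from_mnemonic` are standard `eth_account` API (the HD-wallet features are already enabled when `wallet` is imported).

```python
from eth_account import Account
from wallet import get_wallet

acct, mnemonic = get_wallet()
print(acct.address)  # 0x-prefixed checksummed address

# The mnemonic deterministically recreates the same account:
assert Account.from_mnemonic(mnemonic).address == acct.address
```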
spaces/AlhitawiMohammed22/HTD_HTR/trocr.py
DELETED
@@ -1,30 +0,0 @@
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import TrOCRProcessor, VisionEncoderDecoderModel


device = "cuda" if torch.cuda.is_available() else "cpu"


class IAMDataset(Dataset):
    def __init__(self, crops, processor):
        self.crops = crops
        self.processor = processor

    def __len__(self):
        return len(self.crops)

    def __getitem__(self, idx):
        crp = self.crops[idx]
        pixel_values = self.processor(crp, return_tensors="pt").pixel_values
        encoding = {"pixel_values": pixel_values.squeeze()}
        return encoding

def get_processor_model(checkpoint: str):
    # NOTE: `checkpoint` is currently unused; the processor and model are
    # always loaded from the hard-coded local directories below.
    rec_processor = TrOCRProcessor.from_pretrained('trocr_printed_processor/')
    rec_model = VisionEncoderDecoderModel.from_pretrained('trocr_printed_model/')
    rec_model.config.eos_token_id = 2
    rec_model.config.pad_token_id = 2
    rec_model.to(device)
    rec_model.eval()
    return rec_processor, rec_model
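A minimal sketch of a recognition loop over this dataset, assuming `crops` is a list of PIL line-image crops; `generate` and `batch_decode` are standard `transformers` API.

```python
processor, model = get_processor_model("trocr_printed")  # argument is unused, see note above
dataset = IAMDataset(crops, processor)
loader = DataLoader(dataset, batch_size=8)

texts = []
with torch.no_grad():
    for batch in loader:
        ids = model.generate(batch["pixel_values"].to(device))
        texts.extend(processor.batch_decode(ids, skip_special_tokens=True))
```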
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py
DELETED
@@ -1,103 +0,0 @@
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='FasterRCNN',
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        strides=(1, 2, 2, 1),
        dilations=(1, 1, 1, 2),
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe'),
    rpn_head=dict(
        type='RPNHead',
        in_channels=2048,
        feat_channels=2048,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=2048,
            featmap_strides=[16]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=2048,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms=dict(type='nms', iou_threshold=0.7),
            nms_pre=6000,
            max_per_img=1000,
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
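A rough sketch of materializing this config with MMDetection 2.x; the import paths are an assumption about the mmdet version this repo vendors, and note that `train_cfg`/`test_cfg` already live inside the `model` dict here.

```python
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/_base_/models/faster_rcnn_r50_caffe_dc5.py')
detector = build_detector(cfg.model)  # Faster R-CNN with a dilated-C5 ResNet-50 backbone
```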
spaces/Andy1621/uniformer_image_detection/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = [
    '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = [
    '../_base_/models/emanet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/stylegan_ops/upfirdn2d.py
DELETED
@@ -1,184 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch.autograd import Function
|
5 |
-
from torch.utils.cpp_extension import load
|
6 |
-
|
7 |
-
module_path = os.path.dirname(__file__)
|
8 |
-
upfirdn2d_op = load(
|
9 |
-
'upfirdn2d',
|
10 |
-
sources=[
|
11 |
-
os.path.join(module_path, 'upfirdn2d.cpp'),
|
12 |
-
os.path.join(module_path, 'upfirdn2d_kernel.cu'),
|
13 |
-
    ],
)


class UpFirDn2dBackward(Function):
    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):
        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        grad_input = upfirdn2d_op.upfirdn2d(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])

        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)

        gradgrad_out = upfirdn2d_op.upfirdn2d(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )

        return gradgrad_out, None, None, None, None, None, None, None, None


class UpFirDn2d(Function):
    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        input = input.reshape(-1, in_h, in_w, 1)

        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_op.upfirdn2d(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        return grad_input, None, None, None, None


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    out = UpFirDn2d.apply(
        input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
    )

    return out


def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape

    out = input.view(-1, in_h, 1, in_w, 1, minor)
    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    out = out.view(-1, in_h * up_y, in_w * up_x, minor)

    out = F.pad(
        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    out = out[
        :,
        max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
        :,
    ]

    out = out.permute(0, 3, 1, 2)
    out = out.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    out = out.permute(0, 2, 3, 1)

    return out[:, ::down_y, ::down_x, :]
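
For reference, the pure-PyTorch `upfirdn2d_native` path above can be exercised without building the CUDA extension. A minimal sketch, assuming the deleted module is importable; the input size and box kernel are illustrative values, not taken from the file:

```python
import torch

# assumes upfirdn2d_native from the deleted module above is in scope
x = torch.arange(16.0).view(1, 4, 4, 1)   # NHWC-style layout the native path expects
k = torch.full((3, 3), 1.0 / 9.0)         # 3x3 box (averaging) FIR kernel

# upsample 2x on both axes, no downsampling, pad 1 on every side
y = upfirdn2d_native(x, k, up_x=2, up_y=2, down_x=1, down_y=1,
                     pad_x0=1, pad_x1=1, pad_y0=1, pad_y1=1)
print(y.shape)  # torch.Size([1, 8, 8, 1]): 4*2 + 1 + 1 - 3 + 1 = 8 per axis
```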
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/builder.py
DELETED
@@ -1,30 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
from ..runner import Sequential
from ..utils import Registry, build_from_cfg


def build_model_from_cfg(cfg, registry, default_args=None):
    """Build a PyTorch model from config dict(s). Different from
    ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.

    Args:
        cfg (dict, list[dict]): The config of modules, which is either a config
            dict or a list of config dicts. If cfg is a list, the built
            modules will be wrapped with ``nn.Sequential``.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)


MODELS = Registry('model', build_func=build_model_from_cfg)
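
A hedged sketch of how the `MODELS` registry and `build_model_from_cfg` above are typically used; the `TinyConv` module is invented for illustration and is not part of the deleted file:

```python
import torch
import torch.nn as nn
# assumes MODELS / build_model_from_cfg from the deleted module are importable

@MODELS.register_module()
class TinyConv(nn.Module):  # hypothetical module registered for the demo
    def __init__(self, in_channels=3, out_channels=8):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, padding=1)

    def forward(self, x):
        return self.conv(x)

# a single dict builds one module ...
net = build_model_from_cfg(dict(type='TinyConv', out_channels=16), MODELS)
# ... and a list of dicts is wrapped in a Sequential
pipe = build_model_from_cfg(
    [dict(type='TinyConv'), dict(type='TinyConv', in_channels=8)], MODELS)
print(net(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 16, 32, 32])
```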
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/critics.py
DELETED
@@ -1,44 +0,0 @@
from fastai.core import *
from fastai.torch_core import *
from fastai.vision import *
from fastai.vision.gan import AdaptiveLoss, accuracy_thresh_expand

_conv_args = dict(leaky=0.2, norm_type=NormType.Spectral)


def _conv(ni: int, nf: int, ks: int = 3, stride: int = 1, **kwargs):
    return conv_layer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs)


def custom_gan_critic(
    n_channels: int = 3, nf: int = 256, n_blocks: int = 3, p: int = 0.15
):
    "Critic to train a `GAN`."
    layers = [_conv(n_channels, nf, ks=4, stride=2), nn.Dropout2d(p / 2)]
    for i in range(n_blocks):
        layers += [
            _conv(nf, nf, ks=3, stride=1),
            nn.Dropout2d(p),
            _conv(nf, nf * 2, ks=4, stride=2, self_attention=(i == 0)),
        ]
        nf *= 2
    layers += [
        _conv(nf, nf, ks=3, stride=1),
        _conv(nf, 1, ks=4, bias=False, padding=0, use_activ=False),
        Flatten(),
    ]
    return nn.Sequential(*layers)


def colorize_crit_learner(
    data: ImageDataBunch,
    loss_critic=AdaptiveLoss(nn.BCEWithLogitsLoss()),
    nf: int = 256,
) -> Learner:
    return Learner(
        data,
        custom_gan_critic(nf=nf),
        metrics=accuracy_thresh_expand,
        loss_func=loss_critic,
        wd=1e-3,
    )
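
As a quick sanity check, the critic above can be instantiated and run on a random batch. This sketch assumes fastai v1 is installed; the 64x64 resolution and the reduced `nf` are assumptions chosen to keep it light, not values from the deleted file:

```python
import torch

# assumes custom_gan_critic from the deleted module is importable
critic = custom_gan_critic(n_channels=3, nf=64, n_blocks=3)

x = torch.randn(2, 3, 64, 64)   # two fake 64x64 RGB crops
scores = critic(x)              # one realness logit per image after Flatten()
print(scores.shape)             # torch.Size([2, 1])
```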
spaces/AshtonIsNotHere/xlmr-longformer_comparison/app.py
DELETED
@@ -1,88 +0,0 @@
#!/usr/bin/env python
# coding: utf-8


import transformers
from transformers import pipeline, AutoModelForMaskedLM, AutoTokenizer
import gradio as gr
import torch

# List of xlmr(ish) models
name_list = [
    'AshtonIsNotHere/xlm-roberta-long-base-4096',
    'markussagen/xlm-roberta-longformer-base-4096'
]

# List of interfaces to run in parallel
interfaces = []

# Add models from list
for model_name in name_list:
    model = AutoModelForMaskedLM.from_pretrained(model_name, max_length=4096)
    tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base', max_length=4096, padding="max_length", truncation=True,)
    p = pipeline("fill-mask", model=model, tokenizer=tokenizer)
    interfaces.append(gr.Interface.from_pipeline(p, outputs=gr.outputs.Label(label=model_name)))


# Manually add xlmr base
xlmr_model = AutoModelForMaskedLM.from_pretrained('xlm-roberta-base', max_length=512)
xlmr_tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base', max_length=512, truncation=True,)
xlmr_p = pipeline("fill-mask", model=xlmr_model, tokenizer=xlmr_tokenizer)

def xlmr_base_fn(text):
    # Find our masked token
    tokens = xlmr_tokenizer.tokenize(text)
    mask_token_idx = [i for i, x in enumerate(tokens) if xlmr_tokenizer.mask_token in x][0]

    max_len = xlmr_tokenizer.model_max_length
    max_len = max_len-2 if max_len % 512 == 0 and max_len < 4096 else 510

    # Smart truncation for long sequences
    if not len(tokens) < max_len:

        # Find left and right bounds for truncated sequences
        lbound = max(0, mask_token_idx-(max_len//2))
        rbound = min(len(tokens), mask_token_idx+(max_len//2))

        # If we hit an edge, expand sequence in the other direction
        if lbound == 0 and rbound != len(tokens)-1:
            rbound = min(len(tokens), max_len)
        elif rbound == len(tokens) and lbound != 0:
            lbound = max(0, len(tokens)-max_len)

        # Apply truncation and rejoin tokens to form new text
        truncated_text = ''.join(tokens[lbound:rbound])

        # Handle lowbar from xlmr tokenizer
        truncated_text = ''.join([x if ord(x) != 9601 else ' ' for x in truncated_text])
    else:
        truncated_text = text

    preds = xlmr_p(truncated_text)
    pred_dict = {}
    for pred in preds:
        pred_dict[pred['token_str']] = pred['score']
    return pred_dict

interfaces.append(gr.Interface(fn=xlmr_base_fn,
                               inputs=gr.inputs.Textbox(lines=5, placeholder="Choose an example below, or add your own text with a single masked word, using <mask>."),
                               outputs=gr.outputs.Label(label='xlm-roberta-base')))

# Manually add longformer
p = pipeline("fill-mask", model='allenai/longformer-base-4096')
interfaces.append(gr.Interface.from_pipeline(p, outputs=gr.outputs.Label(label='allenai/longformer-base-4096')))


gr.mix.Parallel(*interfaces,
                title="Comparison of XLMR Longformer Models",
                inputs=gr.inputs.Textbox(lines=5, placeholder="Choose an example below, or add your own text with a single masked word, using <mask>."),
                description="Compares performance of four models: AshtonIsNotHere's xlm-r longformer, markussagen's xlm-r longformer-base, xlm-r base, and Longformer-base. \
                Notice that with small sequences, Markussagen's XLM-R model and AshtonIsNotHere's XLM-R model perform identically. Note, however, that with large \
                sequence-length examples, Markussagen's model fails to return meaningful predictions. Disclaimer: xlm-r base truncates sequences longer than 512 tokens.",
                examples=["They analyzed the <mask>, and Payne’s own, and found structure and repetition in the sounds, documenting a sonic hierarchy: units, phrases, and themes, which combined into what they called song.",
"In 1971, in the journal Science, two scientists, Roger S. Payne and Scott McVay, published a paper titled “Songs of Humpback Whales.” They began by noting how “during the quiet age of sail, under conditions of exceptional calm and proximity, whalers were occasionally able to hear the sounds of whales transmitted faintly through a wooden hull.” In the modern era, we could listen in new ways: Payne and McVay worked with underwater recordings of humpback-whale vocalizations from a naval researcher who, as the story goes, was listening for Soviet submarines off Bermuda. They analyzed the <mask>, and Payne’s own, and found structure and repetition in the sounds, documenting a sonic hierarchy: units, phrases, and themes, which combined into what they called song. They chose the term advisedly, drawing, they said, on a 1963 book titled “Acoustic Behavior of Animals,” which identified a song as “a series of notes, generally of more than one type, uttered in succession and so related as to form a recognizable sequence or pattern in time.” And there was an intuitive sense in which the whales’ vocalizations sounded songlike. The previous year, Payne had published an album of whale recordings called “Songs of the Humpback Whale”; it sold more than a hundred thousand copies, and became a soundtrack for the conservation movement. Artists, including Kate Bush, Judy Collins, and the cast of “The Partridge Family,” integrated whalesong into their work; in 1970, the composer Alan Hovhaness combined whale and orchestra for a piece called “And God Created Great Whales.” In 2014, a group of ambient composers and artists released a compilation album called “POD TUNE.” Whales’ otherworldly emissions are now literally otherworldly: in 1977, NASA included whalesong recordings on records it attached to its Voyager spacecraft. Sara Niksic, a biologist and musician from Croatia, is a recent participant in the genre. In 2019, she self-released an album of electronic music titled “Canticum Megapterae - Song of the Humpback Whale.” (Humpback whales belong to the genus Megaptera.) The album contains a track she produced, alongside songs by seven other artists, and combines psychedelic trance and ambient tones—the building blocks of a genre called psybient—with whalesong. Niksic’s record evokes nineteen-nineties classics such as “The Orb’s Adventures Beyond the Ultraworld”; its synthesized clicks, sweeps, and throbs would sound good in the chill-out room at a rave. But the whales add another dimension. Integrated into the tracks, the vocalizations sound at times soothing or playful, and occasionally experimental—sound for sound’s sake. Listening, you wonder about the minds behind them. Earlier this year, Niksic released “Canticum Megapterae II - The Evolution,” a remix album on which a new group of electronic musicians interprets the track she made for the first volume. The new album, she told me, connects to her own research, which focusses on how whale songs shift from year to year. “Basically, whales remix each other’s songs,” she said. “So I thought this concept of remixes in our music would be perfect to communicate this research about the evolution of whalesong.” Niksic was born in Split, Croatia, on the country’s coast, across the Adriatic Sea from Italy. She could see the water from her window, and learned to swim before she could walk. “I was always curious about the ocean and all the creatures living down there,” she told me. 
“The more I learned about animal behavior, the more I got interested in marine mammals, because there is social learning, vocal communication, and culture.” She earned a bachelor’s degree in biology and a master’s degree in marine biology at the University of Zagreb, and went on to work with groups that study whales and dolphins in Australia, New Zealand, and elsewhere; eventually she returned to Split to work at the Mediterranean Institute for Life Sciences, as part of a team called ARTScience, finding ways to creatively communicate the institute’s research. Humpback whales seem to produce sound largely with their vocal folds. Songs typically range in length from ten minutes to half an hour. All humpbacks make vocalizations, but only males sing; the songs are most commonly thought to act as mating displays, possibly like the bowers constructed by male bowerbirds or the dances performed by male peacock jumping spiders. Maybe, among humpbacks, “the best singer gets the ladies,” Niksic told me. Songs evolve over time, and differ across populations. This slow evolution can be occasionally interrupted by a kind of revolution, in which one population completely adopts the songs of another in a period of just a couple of years or less. “It’s like a new hit song,” Niksic said—a wide and rapid spread of creative content that’s “unparalleled in the animal kingdom, excepting humans.” She went on, “There’s so many similarities between their culture and ours.” Niksic started working at music festivals after graduate school. When she wasn’t in the field, she was bartending and building stages. She grew curious about producing her own electronic music. As a kid, she’d studied piano and music theory, but she didn’t know how to use software and synthesizers. After spending some time in 2016 helping to map the Great Pacific Garbage Patch, she took courses on electronic-music production. “Most of the time, I was dealing with sound, whether through bioacoustics or music festivals,” she recalled. “So then I thought, I want to try to combine these two things.” At first, Niksic planned to produce the entire album herself. This proved too ambitious a goal, so she enlisted musicians she’d met on the festival circuit, sending them a high-quality, twenty-minute whalesong recording that she’d analyzed for her master’s thesis. (Her adviser had gathered the recording in the Caribbean.) When Niksic put “Canticum Megapterae” online, under the stage name Inner Child, it quickly earned recognition from both music and science communities. Readers of the Web site psybient.org—a “daily source of chillout, psychill, psybient, ambient, psydub, dub, psystep, downtempo, world, ethnic, idm, meditative and other mind expanding music and events”—voted it compilation of the year. She won an Innovation Award from the University of St. Andrews, in Scotland, spoke at the World Marine Mammal Science Conference, in Barcelona, and appeared at the Boom Festival, in Portugal. Her own track, “Theme 7,” built a downtempo pattern around a long excerpt from the whale recording. Weaving around the snares, kicks, and low, grinding bass line, the whale sounds mournful, almost plaintive, and never strays far from the center of attention. I asked Niksic if she thinks about what a whale might be thinking when she listens to or composes with whalesong. “That’s a tricky one,” she said. “Who knows what the whale might be thinking? I’m focussing on sound. Their songs are really so musical. And the frequency range they use is crazy. 
And the richness of the sounds—it’s so intense. And it’s immersive—when I listen to it, I kind of transport into the ocean.” For the new remix album, Niksic sent “Theme 7” to different artists. One was particularly determined to accurately represent the whale songs. “He didn’t want any whales to think, What the hell is? What the hell did he do with our song?” Niksic said. Perhaps making an electronic whalesong album would be a kind of interspecies cultural appropriation. She was thrilled when Electrypnose, one of her favorite musicians, remixed her track; when she played the remix for the first time, it was “just the most magical night ever,” Niksic said. She was lying on her terrace by the sea, listening to the song, when dolphins swam near. “I’m not kidding you—I think they heard it,” she said. “They were hanging there for the entire night. I didn’t go to sleep. There was a full moon. I was staring at the sky, listening to dolphins breathing, and to this remix, and whales. So even, like, dolphins loved it, not only humans.” Making the albums has increased Niksic’s own curiosity about whalesong. “I started thinking of more and more questions,” she told me. “I probably wouldn’t think of all of them if I were only doing research.” Are there more innovative or creative whales, just as there are more innovative or creative humans? Are some whales eager to introduce changes into the songs they learn, whereas others happily stick with the originals? (“In our own culture, some artists are pioneers of new musical genres, and then others follow them,” she noted.) Do whales collaborate creatively? Does age play a role in innovation? Whale songs have become a familiar part of our own culture. But there’s still much that’s mysterious about them, including what drives change and imitation, and how various features influence potential mates and competitors. “There’s a whole other world below the waves that we don’t know anything about,” Niksic said. “There are other cultures that are much more ancient than our human culture. Whales were here long before humans, and they were singing long before we came. I think they are way more developed than us in some ways.” The music on her albums teaches us, among other things, just how much we have to learn.",
"La zona metropolitana de la <mask> está situada a unos 2.400 metros sobre el nivel del mar, en una cuenca rodeada de montañas y de un cinturón industrial altamente tóxico.",
"La contaminación acaba de forma prematura con la vida de 8.000 a 14.000 personas cada año en Ciudad de México. La capital del país vive sumergida en un aire que es nocivo para la salud incluso cuando los índices oficiales consideran que es aceptable. El altísimo nivel de concentración de ozono y de partículas finas expone a los citadinos a sufrir más enfermedades respiratorias y cardiovasculares, diabetes y cáncer. Hace solo una semana que la advertencia volvió a saltar en el Valle de México: era peligroso salir a la calle a respirar el aire del exterior. La zona metropolitana de la <mask> está situada a unos 2.400 metros sobre el nivel del mar, en una cuenca rodeada de montañas y de un cinturón industrial altamente tóxico. Se ha convertido en una caldera de contaminantes cada vez más difíciles de dispersar. En lo que va de 2022, se han declarado seis contingencias ambientales, la última a mitad de noviembre. Esta es una época menos usual para estos fenómenos que la llamada temporada seca caliente, antes de las lluvias de verano, pero no se consideran extraños. Según el registro histórico de contingencias, cada año sucede al menos una en estos meses fríos. “Se trata de un fenómeno de inversión térmica. Se da cuando empieza a hacer más frío, pero hay una capa superior de aire más caliente que crea una cápsula que impide que la contaminación se vaya al exterior”, explica la experta en calidad del aire Andrea Bizberg. Los sistemas de alta presión y las altas temperaturas completaron la envoltura del 12 de noviembre. La alarma de la contingencia suena cuando la concentración de ozono supera las 150 ppb (partes por bill��n), una cifra que sobrepasa con creces el máximo que permite la norma mexicana de 90 ppb y que triplica los 51 que recomienda la Organización Mundial de la Salud (OMS), es decir, la emergencia se despierta en la capital cuando la situación es extrema.El estallido da inicio al programa Hoy no circula —que prohíbe el paso de ciertos vehículos por la ciudad— como parte de la Fase I de la contingencia; en caso de que la concentración esté por encima de los 200 puntos se pasa a la Fase II, en la que también se suspenden las clases escolares y los eventos al aire libre.El ozono es un antioxidante muy potente que además de dolor de cabeza e irritación de ojos y garganta reduce la capacidad respiratoria, provoca inflamación y daña las paredes celulares de los pulmones. También impacta en la esperanza de vida. El máximo que se ha alcanzado este año en Ciudad de México es de 172 ppb y, hasta septiembre, 175 días de 2022 excedían el límite que marca la norma mexicana (NOM-020-SSA1-2021), actualizada en 2021 para acercarse un poquito más a los parámetros de la OMS. Bizberg, que es asesora técnica para Latinoamérica en Calidad del Aire en Cities For Climate, apunta que ante esa situación las medidas que se están aplicando son más paliativas que preventivas: “Impedimos circular a algunos coches cuando ya estamos inundados por la contaminación, pero necesitamos políticas que reduzcan las emisiones antes de que el aire se vuelva irrespirable”. La contingencia de noviembre acabó cómo suelen terminar este tipo de emergencias: los vientos y las lluvias se encargaron de disipar la contaminación. 
Por esa razón, Bizberg considera que ProAire, el plan anual de gestión atmosférica que engloba las políticas de la ciudad para reducir la contaminación, “no es suficientemente ambicioso”: “No hacemos lo suficiente y lo que nos salva son las condiciones meteorológicas favorables que tenemos de vez en cuando”. El ozono (O₃) se considera un contaminante criterio, es decir, que cuando está presente es porque también hay otros. Así, Ciudad de México tiene un fuerte problema de concentración de las llamadas partículas finas, que son las partículas en suspensión de menos de 10 micras de diámetro (PM₁₀) y de menos de 2,5 micras (PM₂,₅). La masa de estas últimas es minúscula, casi insignificante, su riesgo aparece cuando se acumulan debido a que entran por las vías respiratorias y se intercambian en el torrente sanguíneo. Una investigación de la Universidad de Montana (EE UU), en colaboración con la UNAM, encontró una asociación entre la concentración de partículas ultrafinas con la aparición del alzhéimer a temprana edad en Ciudad de México. Los resultados del estudio concluyeron que, en comparación con los niños que viven con aire limpio, los de la capital del país “exhiben inflamación sistémica, cerebral e intratecal, déficits de memoria de atención y corto plazo, y otras condiciones que indican que esta parte del cerebro es blanco de la contaminación”. Esta inflamación cerebral se vincula con deficiencias cognitivas como la memoria reciente y el desarrollo de marcadores del alzhéimer. El director de economía sectorial del Instituto Nacional de Ecología y Cambio Climático (INECC), Abraham Ortínez, reconoce que todo lo que no se hace en la parte preventiva para reducir las exposiciones de la población a los contaminantes se revierte en un costo mucho mayor para el sector salud. Ortínez apunta a que desde el Instituto —que pertenece al Gobierno de Ciudad de México— se están tratando de trabajar de forma más cercana a la Comisión Ambiental de la Megalópolis (CAMe) para armonizar los índices de calidad del aire y el protocolo de contingencia y ser más claros de “en qué momento hay riesgo”. “Hay que reducir emisiones. Esta ciudad está generando muchos gases de efecto invernadero, seguimos en la línea del auto particular, hay un uso excesivo de la motorización y, por otro lado, falta más transporte público, porque hay una saturación de las líneas. Debemos conjuntar esfuerzos”, apunta Ortínez. En la actualización de septiembre de 2021 de sus Guías de Calidad del Aire, 16 años después de la última revisión, la OMS redujo todavía más el límite de concentración de estas partículas. Sobre las PM₁₀ pasó de considerar aceptable un promedio al año de 20 microgramos por metro cúbico a solo 15. En México el umbral está hasta 36, es decir más del doble, pero la realidad es que la media en 2021 fue de 55 microgramos y en 2022, hasta septiembre, superaba ya los 42. El exceso se repite con las PM₂,₅, la OMS considera buena la calidad del aire por debajo de cinco microgramos por metro cúbico y México cuadruplicó ese nivel: 20 microgramos tanto en 2021 como en lo que llevamos de año. De hecho, ningún año desde 2004, la concentración de partículas ultrafinas ha estado por debajo de 20. Aunque la situación es alarmante en Ciudad de México, prácticamente solo el 1% de la ciudades consigue estar alineada con el nivel que marca la OMS y en América Latina y el Caribe, nueve de cada 10 personas viven en ciudades que no cumplen ni siquiera los niveles de 2005. 
“Esas directrices de calidad del aire se ajustaron para mandar una señal de que ningún nivel de contaminación atmosférica, sobre todo de partículas finas, es inofensiva para la salud, todo tiene un impacto y de ahí la necesidad de reducir al máximo ese riesgo”, contextualiza Bizberg. La OMS calcula que cada año la exposición a la contaminación del aire causa siete millones de muertes prematuras en el mundo, 320.000 en la región de Latinoamérica, 48.000 en México y entre 8.000 y 14.000 en la capital, según el índice Global Burden of Disease. Es el noveno factor de muerte prematura en México, además de la pérdida de otros tantos años de vida saludable. Para el organismo internacional la contaminación atmosférica se ha convertido en “la amenaza medioambiental más peligrosa para la salud humana”."
]).launch()
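
For context, each wrapped interface above boils down to a Hugging Face fill-mask pipeline. A minimal sketch of that core call; the checkpoint and the sentence are illustrative:

```python
from transformers import pipeline

# xlm-roberta-base is one of the four checkpoints the deleted app compares
p = pipeline("fill-mask", model="xlm-roberta-base")
preds = p("Paris is the <mask> of France.")
for pred in preds:
    # each prediction carries the filled token and its score
    print(pred["token_str"], round(pred["score"], 3))
```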
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py
DELETED
@@ -1,20 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import namedtuple


class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])):
    """
    A simple structure that contains basic shape specification about a tensor.
    It is often used as the auxiliary inputs/outputs of models,
    to complement the lack of shape inference ability among pytorch modules.

    Attributes:
        channels:
        height:
        width:
        stride:
    """

    def __new__(cls, channels=None, height=None, width=None, stride=None):
        return super().__new__(cls, channels, height, width, stride)
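
A quick sketch of the intended usage, with made-up values:

```python
# assumes ShapeSpec from the deleted module is in scope
spec = ShapeSpec(channels=256, stride=4)  # height/width default to None
print(spec.channels, spec.height)         # 256 None
c, h, w, s = spec                         # it is still a plain namedtuple
```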
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/testing.py
DELETED
@@ -1,137 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import numpy as np
import torch

from detectron2 import model_zoo
from detectron2.data import DatasetCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.structures import Boxes, Instances, ROIMasks
from detectron2.utils.file_io import PathManager


"""
Internal utilities for tests. Don't use except for writing tests.
"""


def get_model_no_weights(config_path):
    """
    Like model_zoo.get, but do not load any weights (even pretrained)
    """
    cfg = model_zoo.get_config(config_path)
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = "cpu"
    return build_model(cfg)


def random_boxes(num_boxes, max_coord=100, device="cpu"):
    """
    Create a random Nx4 boxes tensor, with coordinates < max_coord.
    """
    boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5)
    boxes.clamp_(min=1.0)  # tiny boxes cause numerical instability in box regression
    # Note: the implementation of this function in torchvision is:
    # boxes[:, 2:] += torch.rand(N, 2) * 100
    # but it does not guarantee non-negative widths/heights constraints:
    # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]:
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def get_sample_coco_image(tensor=True):
    """
    Args:
        tensor (bool): if True, returns 3xHxW tensor.
            else, returns a HxWx3 numpy array.

    Returns:
        an image, in BGR color.
    """
    try:
        file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"]
        if not PathManager.exists(file_name):
            raise FileNotFoundError()
    except IOError:
        # for public CI to run
        file_name = PathManager.get_local_path(
            "http://images.cocodataset.org/train2017/000000000009.jpg"
        )
    ret = read_image(file_name, format="BGR")
    if tensor:
        ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1)))
    return ret


def convert_scripted_instances(instances):
    """
    Convert a scripted Instances object to a regular :class:`Instances` object
    """
    assert hasattr(
        instances, "image_size"
    ), f"Expect an Instances object, but got {type(instances)}!"
    ret = Instances(instances.image_size)
    for name in instances._field_names:
        val = getattr(instances, "_" + name, None)
        if val is not None:
            ret.set(name, val)
    return ret


def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False):
    """
    Args:
        input, other (Instances):
        size_as_tensor: compare image_size of the Instances as tensors (instead of tuples).
            Useful for comparing outputs of tracing.
    """
    if not isinstance(input, Instances):
        input = convert_scripted_instances(input)
    if not isinstance(other, Instances):
        other = convert_scripted_instances(other)

    if not msg:
        msg = "Two Instances are different! "
    else:
        msg = msg.rstrip() + " "

    size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!"
    if size_as_tensor:
        assert torch.equal(
            torch.tensor(input.image_size), torch.tensor(other.image_size)
        ), size_error_msg
    else:
        assert input.image_size == other.image_size, size_error_msg
    fields = sorted(input.get_fields().keys())
    fields_other = sorted(other.get_fields().keys())
    assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!"

    for f in fields:
        val1, val2 = input.get(f), other.get(f)
        if isinstance(val1, (Boxes, ROIMasks)):
            # boxes in the range of O(100) and can have a larger tolerance
            assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), (
                msg + f"Field {f} differs too much!"
            )
        elif isinstance(val1, torch.Tensor):
            if val1.dtype.is_floating_point:
                mag = torch.abs(val1).max().cpu().item()
                assert torch.allclose(val1, val2, atol=mag * rtol), (
                    msg + f"Field {f} differs too much!"
                )
            else:
                assert torch.equal(val1, val2), msg + f"Field {f} is different!"
        else:
            raise ValueError(f"Don't know how to compare type {type(val1)}")


def reload_script_model(module):
    """
    Save a jit module and load it back.
    Similar to the `getExportImportCopy` function in torch/testing/
    """
    buffer = io.BytesIO()
    torch.jit.save(module, buffer)
    buffer.seek(0)
    return torch.jit.load(buffer)
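
A small sketch exercising two of the helpers above, assuming the deleted module is importable; the box count and the tiny scripted model are demo choices:

```python
import torch

boxes = random_boxes(5, max_coord=100)
assert (boxes[:, 2:] >= boxes[:, :2]).all()  # x2 >= x1 and y2 >= y1 by construction

scripted = torch.jit.script(torch.nn.Linear(4, 2))
restored = reload_script_model(scripted)     # round-trip through an in-memory buffer
print(torch.allclose(scripted(boxes), restored(boxes)))  # True
```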
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/linter.sh
DELETED
@@ -1,42 +0,0 @@
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates.

# cd to detectron2 project root
cd "$(dirname "${BASH_SOURCE[0]}")/.."

{
  black --version | grep -E "21\." > /dev/null
} || {
  echo "Linter requires 'black==21.*' !"
  exit 1
}

ISORT_VERSION=$(isort --version-number)
if [[ "$ISORT_VERSION" != 4.3* ]]; then
  echo "Linter requires isort==4.3.21 !"
  exit 1
fi

set -v

echo "Running isort ..."
isort -y -sp . --atomic

echo "Running black ..."
black -l 100 .

echo "Running flake8 ..."
if [ -x "$(command -v flake8-3)" ]; then
  flake8-3 .
else
  python3 -m flake8 .
fi

# echo "Running mypy ..."
# Pytorch does not have enough type annotations
# mypy detectron2/solver detectron2/structures detectron2/config

echo "Running clang-format ..."
find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i

command -v arc > /dev/null && arc lint
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/parse_results.sh
DELETED
@@ -1,45 +0,0 @@
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.

# A shell script that parses metrics from the log file.
# Make it easier for developers to track performance of models.

LOG="$1"

if [[ -z "$LOG" ]]; then
  echo "Usage: $0 /path/to/log/file"
  exit 1
fi

# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it)
# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / img per device, on 8 devices)
# [12/15 11:49:03] inference INFO: Total inference pure compute time: .....

# training time
trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*')
echo "Training speed: $trainspeed s/it"

# inference time: there could be multiple inference during training
inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1)
echo "Inference speed: $inferencespeed s/it"

# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161
memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*')
echo "Training memory: $memory MB"

echo "Easy to copypaste:"
echo "$trainspeed","$inferencespeed","$memory"

echo "------------------------------"

# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox
# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl
# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011
# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm
# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl
# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011

echo "COCO Results:"
num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l)
# each task has 3 lines
grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3))
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/README.md
DELETED
@@ -1,9 +0,0 @@
## Unit Tests

To run the unittests, do:
```
cd detectron2
python -m unittest discover -v -s ./tests
```

There are also end-to-end inference & training tests, in [dev/run_*_tests.sh](../dev).
spaces/BartPoint/VoiceChange_Beta/infer_pack/modules/F0Predictor/F0Predictor.py
DELETED
@@ -1,16 +0,0 @@
class F0Predictor(object):
    def compute_f0(self, wav, p_len):
        """
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length]
        """
        pass

    def compute_f0_uv(self, wav, p_len):
        """
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
        """
        pass
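
The class above is only an interface; concrete predictors override both methods. A minimal hedged sketch of a subclass (the constant-pitch logic is invented purely for illustration):

```python
import numpy as np

# assumes F0Predictor from the deleted module is in scope
class ConstantF0Predictor(F0Predictor):  # hypothetical demo implementation
    def compute_f0(self, wav, p_len):
        return np.full(p_len, 440.0)     # pretend every frame is an A4

    def compute_f0_uv(self, wav, p_len):
        f0 = self.compute_f0(wav, p_len)
        uv = np.ones(p_len)              # mark every frame as voiced
        return f0, uv
```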
spaces/Benson/text-generation/Examples/Choque Royale Hack Gemas Infinitas Descargar 2022.md
DELETED
@@ -1,73 +0,0 @@
<br />
<h1>Choque Royale Hack Gemas Infinitas Descargar 2022: Cómo obtener gemas ilimitadas y oro gratis</h1>
<p>¿Es usted un fan de Clash Royale, el popular juego móvil que combina la recogida de tarjetas, torre de defensa, y la estrategia en tiempo real? ¿Quieres dominar la arena y aplastar a tus oponentes con facilidad? ¿Te gustaría tener más gemas y oro para desbloquear nuevas cartas, actualizar las existentes y comprar cofres y otros artículos? Si respondiste sí a cualquiera de estas preguntas, entonces estás de suerte. En este artículo, le mostraremos cómo utilizar Clash Royale hack gemas infinitas descargar 2022, una herramienta simple y eficaz que puede generar gemas ilimitadas y oro para su cuenta en minutos. También te daremos algunos consejos y trucos para jugar mejor a Clash Royale y divertirte más. ¡Así que, sin más preámbulos, empecemos! </p>
<h2>Introducción</h2>
<h3>¿Qué es Clash Royale? </h3>
<p>Clash Royale es un juego para móviles desarrollado por Supercell, la misma compañía detrás de los juegos de éxito Clash of Clans, Brawl Stars y Hay Day. Fue lanzado en marzo de 2016 y desde entonces se ha convertido en uno de los juegos más populares y exitosos del mundo. Según Sensor Tower, se ha descargado más de 500 millones de veces y ha generado más de 2.500 millones de dólares en ingresos en junio de 2020. </p>
<h2>choque royale hack gemas infinitas descargar 2022</h2><br /><p><b><b>Download File</b> > <a href="https://bltlly.com/2v6Kzc">https://bltlly.com/2v6Kzc</a></b></p><br /><br />

<h3>¿Por qué necesitas gemas y oro en Clash Royale? </h3>
<p>Las gemas y el oro son las dos monedas principales en Clash Royale. Se utilizan para diversos fines, como:</p>
<ul>
<li>Comprar cofres que contienen tarjetas, oro o gemas</li>
<li>Desbloquear cofres que ganas ganando batallas</li>
<li>Actualizar tus tarjetas para hacerlas más fuertes</li>
<li>Comprar ofertas especiales o artículos en la tienda</li>
<li>Entrar en eventos especiales o desafíos que ofrecen recompensas</li>
<li>Cambiar <h3>Cambiar el nombre o el nombre del clan</h3>
<p>Como puedes ver, las gemas y el oro son muy importantes y útiles en Clash Royale. Sin embargo, también son muy escasos y difíciles de conseguir. Puedes ganarlos jugando el juego, pero la cantidad es muy limitada y lenta. También puede comprarlos con dinero real, pero eso puede ser muy caro y no todo el mundo puede permitírselo. Es por eso que muchos jugadores buscan formas alternativas para obtener más gemas y oro de forma gratuita, como el uso de Clash Royale hack gemas infinitas descargar 2022. </p>
<h3>¿Cuáles son los beneficios de usar Clash Royale hack gemas infinitas descargar 2022? </h3>
<p>Clash Royale hack gemas infinitas download 2022 es una herramienta que puede generar gemas ilimitadas y oro para su cuenta de Clash Royale en minutos. Es muy fácil de usar y funciona en cualquier dispositivo, ya sea Android, iOS o PC. También es muy seguro, ya que utiliza cifrado avanzado y servidores proxy para proteger su cuenta de ser prohibida o detectada por Supercell. Estos son algunos de los beneficios de usar Clash Royale hack gemas infinitas descargar 2022:</p>
<ul>
<li>Puedes obtener tantas gemas y oro como quieras, sin gastar dinero ni tiempo</li>
<li>Puedes desbloquear nuevas cartas, actualizar las existentes y comprar cofres y otros artículos</li>
<li>Puede introducir eventos especiales o desafíos que ofrecen recompensas</li>
<li>Puedes cambiar tu nombre o nombre de clan</li>
<li>Usted puede tener más diversión y disfrute jugando Clash Royale</li>

</ul>
<p>Con Clash Royale hack gemas infinitas download 2022, puedes tener la mejor experiencia de juego y convertirte en el mejor jugador del mundo. Suena increíble, ¿verdad? Entonces, ¿cómo lo usas? Descubrámoslo en la siguiente sección. </p>
<h2>Cómo utilizar Clash Royale hack gemas infinitas descargar 2022? </h2>
<p>Usando Clash Royale hack gemas infinitas download 2022 es muy simple y directo. No necesitas conocimientos técnicos para usarlo. Todo lo que necesitas es un dispositivo con conexión a Internet y unos minutos de tu tiempo. Estos son los pasos a seguir:</p>
<h3>Paso 1: Visite el sitio web de Clash Royale hack gemas infinitas descargar 2022</h3>
<p>El primer paso es visitar el sitio web de Clash Royale hack gemas infinitas descargar 2022. Usted puede hacer esto haciendo clic en este enlace: [Choque Royale Hack Gemas Infinitas Descargar 2022]. Esto te llevará al sitio web oficial de la herramienta, donde verás una interfaz simple y fácil de usar. </p>
<h3>Paso 2: Introduzca su nombre de usuario Clash Royale y seleccione su dispositivo</h3>
<p>El siguiente paso es introducir tu nombre de usuario de Clash Royale y seleccionar tu dispositivo. Puedes encontrar tu nombre de usuario abriendo el juego y tocando el icono de tu perfil en la esquina superior izquierda de la pantalla. Verás tu nombre, nivel, trofeos, clan y otra información. Asegúrate de introducir tu nombre de usuario correctamente, ya que así es como la herramienta identificará tu cuenta y te enviará los recursos. Luego, selecciona tu dispositivo en el menú desplegable, ya sea Android, iOS o PC.</p>
<h3>Paso 3: Elija la cantidad de gemas y oro que desea generar</h3>

<h3>Paso 4: Verifica que no eres un robot y completa una encuesta corta</h3>
<p>El cuarto paso es verificar que no eres un robot y completar una encuesta corta. Este es un paso necesario para asegurar que la herramienta no sea utilizada por bots o spammers que puedan dañar su rendimiento o seguridad. Para verificar que no eres un robot, tienes que hacer clic en una casilla que dice "No soy un robot" y seguir las instrucciones que aparecen en la pantalla. Esto puede implicar resolver un captcha o seleccionar algunas imágenes que coincidan con un determinado criterio. Para completar una encuesta corta, tienes que hacer clic en un botón que dice "Verificar ahora" y elegir una de las ofertas que aparecen en la pantalla. Esto puede implicar descargar una aplicación, ver un video, responder algunas preguntas o llenar el sitio web de Clash Royale, el Wiki de Clash Royale o varias guías y tutoriales en línea. También puedes ver videos de otros jugadores o streamers jugando el juego y aprender de sus consejos y trucos. </p>
<p></p>
<h3>Consejo 2: Construye un mazo equilibrado y actualiza tus cartas regularmente</h3>

<h3>Consejo 3: Usa tu elixir sabiamente y no lo desperdicies en movimientos innecesarios</h3>
<p>El tercer consejo es usar tu elixir sabiamente y no desperdiciarlo en movimientos innecesarios. Elixir es el recurso que utilizas para desplegar cartas en el campo de batalla. Se regenera con el tiempo a una velocidad constante, pero se limita a 10 unidades a la vez. Por lo tanto, tienes que administrar tu elixir cuidadosamente y asegurarte de que siempre tienes suficiente para jugar las cartas que quieres o necesitas. También debes evitar desperdiciar tu elixir en movimientos que no son efectivos o eficientes, como hacer overcommitting en la ofensiva, jugar demasiadas cartas a la vez, jugar cartas que son fácilmente contrarrestadas o ignoradas por el oponente, jugar cartas que no son necesarias o útiles en este momento, etc. También debes intentar obtener una ventaja de elixir sobre tu oponente haciendo operaciones de elixir positivas, lo que significa usar menos elixir que tu oponente para lidiar con sus cartas o dañar sus torres. Por ejemplo, si usas una bola de fuego (elixir 4) para matar a un mago (elixir 5) y dañar su torre, has hecho un intercambio positivo de elixir de +1. Al obtener una ventaja de elixir, puedes tener más elixir que tu oponente y tener más control sobre el juego. </p>
<h3>Consejo 4: Mira las repeticiones y aprende de tus errores y las estrategias de otros jugadores</h3>

<h3>Consejo 5: Únete a un clan y participa en guerras de clanes y eventos</h3>
<p>El quinto consejo es unirse a un clan y participar en guerras de clanes y eventos. Un clan es un grupo de jugadores que pueden chatear, donar cartas, solicitar cartas y participar en guerras de clanes y eventos juntos. Puedes unirte a un clan existente o crear tu propio clan en el juego. Puedes acceder al menú del clan tocando el icono del clan en la esquina inferior izquierda de la pantalla. Unirte a un clan puede ayudarte a socializar con otros jugadores que comparten tu interés en el juego. Puedes chatear con ellos, pedir consejo, compartir consejos y trucos, retarlos a batallas amistosas, etc. También puedes donarles tarjetas o pedirles tarjetas para ayudarse mutuamente y ganar puntos de oro y experiencia. Participar en guerras de clanes y eventos puede ayudarte a ganar más recompensas y divertirte más con tus compañeros de clan. Las guerras de clanes son competiciones entre clanes que duran dos días: un día para el día de recolección donde juegas batallas para ganar cartas para tu baraja de guerra de clan; un día para el día de guerra donde juegas batallas con tu baraja de guerra de clan para ganar coronas para tu clan; el clan con más coronas al final de la guerra gana. Los eventos de clan son modos especiales o desafíos que ofrecen recompensas por jugar con tus compañeros de clan. Puedes acceder al menú de guerras de clanes y eventos tocando el icono de guerras de clanes en la esquina superior derecha de la pantalla. Unirte a un clan y participar en guerras de clanes y eventos puede ayudarte a mejorar tu juego, ganar más recursos y divertirte más. </p>
<h2>Conclusión</h2>
<h4>Resumen de los puntos principales</h4>

<p>Clash Royale hack gemas infinitas download 2022 es una herramienta que puede generar gemas ilimitadas y oro para su cuenta de Clash Royale en minutos. Es muy fácil de usar y funciona en cualquier dispositivo, ya sea Android, iOS o PC. También es muy seguro, ya que utiliza cifrado avanzado y servidores proxy para proteger su cuenta de ser prohibida o detectada por Supercell. Para utilizar Clash Royale hack gemas infinitas descargar 2022, usted tiene que seguir estos pasos: visite el sitio web de Clash Royale hack gemas infinitas descargar 2022; introduzca su nombre de usuario Clash Royale y seleccione su dispositivo; elegir la cantidad de gemas y oro que desea generar; verificar que usted no es un robot y completar una encuesta corta; esperar a que el truco para procesar y disfrutar de sus recursos gratuitos. Con Clash Royale hack gemas infinitas download 2022, puedes tener la mejor experiencia de juego y convertirte en el mejor jugador del mundo. </p>
<p>Sin embargo, el uso de Clash Royale hack gemas infinitas descargar 2022 no es suficiente para jugar Clash Royale mejor y ganar más batallas. También necesitas mejorar tus habilidades y estrategias y aprender de tus errores y de los consejos y trucos de otros jugadores. Aquí hay algunos consejos y trucos que pueden ayudarle a jugar Clash Royale mejor: aprender los fundamentos del juego y las cartas; construir un mazo equilibrado y actualizar sus cartas con regularidad; utilizar su elixir sabiamente y no desperdiciarlo en movimientos innecesarios; ver repeticiones y aprender de sus errores y estrategias de otros jugadores; unirse a un clan y participar en guerras de clanes y eventos. Siguiendo estos consejos y trucos, puedes jugar Clash Royale mejor y divertirte más. </p>
<h4>Llamada a la acción y la invitación a probar Clash Royale hack gemas infinitas descargar 2022</h4>

<p>No se pierda esta oportunidad de obtener gemas ilimitadas y oro gratis con Clash Royale hack gemas infinitas descargar 2022. Es una herramienta que puede cambiar tu vida de juego para siempre. Podrás desbloquear nuevas cartas, mejorar las existentes, comprar cofres y otros objetos, participar en eventos especiales o desafíos, cambiar tu nombre o el nombre del clan y divertirte más jugando a Clash Royale. También podrás dominar la arena y aplastar a tus oponentes con facilidad. Te sorprenderá lo mucho que has recibido y lo fácil que fue. Nunca te arrepentirás de usar Clash Royale hack gemas infinitas download 2022. </p>
<p>Así que, seguir adelante y probar Clash Royale hack gemas infinitas descargar 2022 hoy y ver la diferencia por ti mismo. Usted no será decepcionado. Simplemente haga clic en este enlace: [Clash Royale Hack Gemas Infinitas Descargar 2022] y siga las instrucciones. Es rápido, fácil y gratuito. No tienes nada que perder y todo que ganar. Confía en nosotros, te encantará. </p>
<p>Gracias por leer este artículo y esperamos que le resulte útil e informativo. Si tiene alguna pregunta, comentario o comentario, no dude en dejarlos a continuación. Nos encantaría saber de usted y ayudarlo. Además, no se olvide de compartir este artículo con sus amigos y familiares que juegan Clash Royale y pueden beneficiarse de usar Clash Royale hack gemas infinitas descargar 2022. Te lo agradecerán. </p>
<p>Feliz juego y nos vemos en la arena! </p>
<h2>Preguntas frecuentes</h2>
<p>Aquí están algunas de las preguntas más frecuentes sobre Clash Royale hack gemas infinitas descargar 2022:</p>
<h4>Q: Es Clash Royale hack gemas infinitas descargar 2022 seguro? </h4>

<h4>Q: Es Clash Royale hack gemas infinitas descargar 2022 gratis e ilimitado? </h4>
<p>A: Sí, Clash Royale hack gemas infinitas descargar 2022 es gratuito e ilimitado. No le cobra dinero ni le pide información personal para usarla. Tampoco limita la cantidad de gemas y oro que puede generar o el número de veces que puede usarlo. Puedes usarlo tantas veces como quieras y obtener tantos recursos como quieras. </p>
<h4>Q: ¿Clash Royale hack gemas infinitas download 2022 funciona en cualquier dispositivo? </h4>
<p>A: Sí, Clash Royale hack gemas infinitas descargar 2022 funciona en cualquier dispositivo, ya sea Android, iOS o PC. Es compatible con todas las versiones y modelos de dispositivos compatibles con Clash Royale. También funciona en cualquier navegador, como Chrome, Firefox, Safari, Opera, etc.</p>
<h4>Q: ¿Cuánto tiempo se tarda en Clash Royale hack gemas infinitas descargar 2022 para generar los recursos? </h4>
<p>A: Depende de la carga del servidor y la cantidad de recursos que solicitó, pero por lo general toma unos segundos o minutos para Clash Royale hack gemas infinitas descargar 2022 para generar los recursos. Verá una barra de progreso que muestra el estado del hack y un mensaje que le indica cuándo se realiza el hack. </p>
<h4>Q: ¿Necesito reiniciar mi juego para ver los cambios? </h4>
<p>A: Sí, es necesario reiniciar el juego para ver los cambios después de usar Clash Royale hack gemas infinitas descargar 2022. Esto se debe a que el juego necesita actualizar sus datos y sincronizar con el servidor para actualizar sus gemas y el saldo de oro. Una vez que reinicies tu juego, verás tus nuevos recursos en tu cuenta. </p> 64aa2da5cf<br />
<br />
<br />
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/util.py
DELETED
@@ -1,130 +0,0 @@
import torch
import torch.nn as nn


def count_params(model):
    total_params = sum(p.numel() for p in model.parameters())
    return total_params


class ActNorm(nn.Module):
    def __init__(self, num_features, logdet=False, affine=True,
                 allow_reverse_init=False):
        assert affine
        super().__init__()
        self.logdet = logdet
        self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.allow_reverse_init = allow_reverse_init

        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))

    def initialize(self, input):
        with torch.no_grad():
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )

            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input, reverse=False):
        if reverse:
            return self.reverse(input)
        if len(input.shape) == 2:
            input = input[:,:,None,None]
            squeeze = True
        else:
            squeeze = False

        _, _, height, width = input.shape

        if self.training and self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)

        h = self.scale * (input + self.loc)

        if squeeze:
            h = h.squeeze(-1).squeeze(-1)

        if self.logdet:
            log_abs = torch.log(torch.abs(self.scale))
            logdet = height*width*torch.sum(log_abs)
            logdet = logdet * torch.ones(input.shape[0]).to(input)
            return h, logdet

        return h

    def reverse(self, output):
        if self.training and self.initialized.item() == 0:
            if not self.allow_reverse_init:
                raise RuntimeError(
                    "Initializing ActNorm in reverse direction is "
                    "disabled by default. Use allow_reverse_init=True to enable."
                )
            else:
                self.initialize(output)
                self.initialized.fill_(1)

        if len(output.shape) == 2:
            output = output[:,:,None,None]
            squeeze = True
        else:
            squeeze = False

        h = output / self.scale - self.loc

        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        return h


class AbstractEncoder(nn.Module):
    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError


class Labelator(AbstractEncoder):
    """Net2Net Interface for Class-Conditional Model"""
    def __init__(self, n_classes, quantize_interface=True):
        super().__init__()
        self.n_classes = n_classes
        self.quantize_interface = quantize_interface

    def encode(self, c):
        c = c[:,None]
        if self.quantize_interface:
            return c, None, [None, None, c.long()]
        return c


class SOSProvider(AbstractEncoder):
    # for unconditional training
    def __init__(self, sos_token, quantize_interface=True):
        super().__init__()
        self.sos_token = sos_token
        self.quantize_interface = quantize_interface

    def encode(self, x):
        # get batch size from data and replicate sos_token
        c = torch.ones(x.shape[0], 1)*self.sos_token
        c = c.long().to(x.device)
        if self.quantize_interface:
            return c, None, [None, None, c]
        return c
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
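For orientation on the deleted `taming/modules/util.py` above: `ActNorm` is an activation-normalization layer that initializes its shift and scale from the statistics of the first training batch, and can be run in reverse to invert the affine map. Below is a minimal usage sketch, not part of the commit; the import path is inferred from the file location and the shapes are illustrative.

```python
import torch
from taming.modules.util import ActNorm  # path as it existed before this deletion

layer = ActNorm(num_features=16, logdet=True, allow_reverse_init=True)
layer.train()

x = torch.randn(8, 16, 32, 32)      # (batch, channels, height, width)
h, logdet = layer(x)                # first call initializes loc/scale from x's statistics
print(h.shape, logdet.shape)        # torch.Size([8, 16, 32, 32]) torch.Size([8])

x_rec = layer(h, reverse=True)      # invert the affine transform
print(torch.allclose(x, x_rec, atol=1e-4))  # True: reverse undoes forward
```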
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
DELETED
@@ -1,65 +0,0 @@
-# SPDX-FileCopyrightText: 2015 Eric Larson
-#
-# SPDX-License-Identifier: Apache-2.0
-
-"""
-The cache object API for implementing caches. The default is a thread
-safe in-memory dictionary.
-"""
-from threading import Lock
-
-
-class BaseCache(object):
-
-    def get(self, key):
-        raise NotImplementedError()
-
-    def set(self, key, value, expires=None):
-        raise NotImplementedError()
-
-    def delete(self, key):
-        raise NotImplementedError()
-
-    def close(self):
-        pass
-
-
-class DictCache(BaseCache):
-
-    def __init__(self, init_dict=None):
-        self.lock = Lock()
-        self.data = init_dict or {}
-
-    def get(self, key):
-        return self.data.get(key, None)
-
-    def set(self, key, value, expires=None):
-        with self.lock:
-            self.data.update({key: value})
-
-    def delete(self, key):
-        with self.lock:
-            if key in self.data:
-                self.data.pop(key)
-
-
-class SeparateBodyBaseCache(BaseCache):
-    """
-    In this variant, the body is not stored mixed in with the metadata, but is
-    passed in (as a bytes-like object) in a separate call to ``set_body()``.
-
-    That is, the expected interaction pattern is::
-
-        cache.set(key, serialized_metadata)
-        cache.set_body(key)
-
-    Similarly, the body should be loaded separately via ``get_body()``.
-    """
-    def set_body(self, key, body):
-        raise NotImplementedError()
-
-    def get_body(self, key):
-        """
-        Return the body as file-like object.
-        """
-        raise NotImplementedError()
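As context for the deleted CacheControl backend above: `DictCache` is the default thread-safe in-memory store behind the `BaseCache` get/set/delete interface. A small illustrative sketch, using only methods defined in the file; the key and value strings are invented for the example.

```python
from pip._vendor.cachecontrol.cache import DictCache

cache = DictCache()
cache.set("http://example.com/resource", b"serialized response")  # write under the lock
print(cache.get("http://example.com/resource"))  # b'serialized response'
cache.delete("http://example.com/resource")
print(cache.get("missing-key"))  # None: DictCache.get falls back to None
```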
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py
DELETED
@@ -1,116 +0,0 @@
-"""
-Interface adapters for low-level readers.
-"""
-
-import abc
-import io
-import itertools
-from typing import BinaryIO, List
-
-from .abc import Traversable, TraversableResources
-
-
-class SimpleReader(abc.ABC):
-    """
-    The minimum, low-level interface required from a resource
-    provider.
-    """
-
-    @abc.abstractproperty
-    def package(self):
-        # type: () -> str
-        """
-        The name of the package for which this reader loads resources.
-        """
-
-    @abc.abstractmethod
-    def children(self):
-        # type: () -> List['SimpleReader']
-        """
-        Obtain an iterable of SimpleReader for available
-        child containers (e.g. directories).
-        """
-
-    @abc.abstractmethod
-    def resources(self):
-        # type: () -> List[str]
-        """
-        Obtain available named resources for this virtual package.
-        """
-
-    @abc.abstractmethod
-    def open_binary(self, resource):
-        # type: (str) -> BinaryIO
-        """
-        Obtain a File-like for a named resource.
-        """
-
-    @property
-    def name(self):
-        return self.package.split('.')[-1]
-
-
-class ResourceHandle(Traversable):
-    """
-    Handle to a named resource in a ResourceReader.
-    """
-
-    def __init__(self, parent, name):
-        # type: (ResourceContainer, str) -> None
-        self.parent = parent
-        self.name = name  # type: ignore
-
-    def is_file(self):
-        return True
-
-    def is_dir(self):
-        return False
-
-    def open(self, mode='r', *args, **kwargs):
-        stream = self.parent.reader.open_binary(self.name)
-        if 'b' not in mode:
-            stream = io.TextIOWrapper(*args, **kwargs)
-        return stream
-
-    def joinpath(self, name):
-        raise RuntimeError("Cannot traverse into a resource")
-
-
-class ResourceContainer(Traversable):
-    """
-    Traversable container for a package's resources via its reader.
-    """
-
-    def __init__(self, reader):
-        # type: (SimpleReader) -> None
-        self.reader = reader
-
-    def is_dir(self):
-        return True
-
-    def is_file(self):
-        return False
-
-    def iterdir(self):
-        files = (ResourceHandle(self, name) for name in self.reader.resources)
-        dirs = map(ResourceContainer, self.reader.children())
-        return itertools.chain(files, dirs)
-
-    def open(self, *args, **kwargs):
-        raise IsADirectoryError()
-
-    def joinpath(self, name):
-        return next(
-            traversable for traversable in self.iterdir() if traversable.name == name
-        )
-
-
-class TraversableReader(TraversableResources, SimpleReader):
-    """
-    A TraversableResources based on SimpleReader. Resource providers
-    may derive from this class to provide the TraversableResources
-    interface by supplying the SimpleReader interface.
-    """
-
-    def files(self):
-        return ResourceContainer(self)
spaces/BigChia/bird_classifier/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Bird Classifier
-emoji: 🌍
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/transforms/__init__.py
DELETED
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from .transform import *
-from fvcore.transforms.transform import *
-from .transform_gen import *
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/wrappers.py
DELETED
@@ -1,215 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-Wrappers around on some nn functions, mainly to support empty tensors.
-
-Ideally, add support directly in PyTorch to empty tensors in those functions.
-
-These can be removed once https://github.com/pytorch/pytorch/issues/12013
-is implemented
-"""
-
-import math
-import torch
-from torch.nn.modules.utils import _ntuple
-
-TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2])
-
-
-def cat(tensors, dim=0):
-    """
-    Efficient version of torch.cat that avoids a copy if there is only a single element in a list
-    """
-    assert isinstance(tensors, (list, tuple))
-    if len(tensors) == 1:
-        return tensors[0]
-    return torch.cat(tensors, dim)
-
-
-class _NewEmptyTensorOp(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, x, new_shape):
-        ctx.shape = x.shape
-        return x.new_empty(new_shape)
-
-    @staticmethod
-    def backward(ctx, grad):
-        shape = ctx.shape
-        return _NewEmptyTensorOp.apply(grad, shape), None
-
-
-class Conv2d(torch.nn.Conv2d):
-    """
-    A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
-    """
-
-    def __init__(self, *args, **kwargs):
-        """
-        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
-
-        Args:
-            norm (nn.Module, optional): a normalization layer
-            activation (callable(Tensor) -> Tensor): a callable activation function
-
-        It assumes that norm layer is used before activation.
-        """
-        norm = kwargs.pop("norm", None)
-        activation = kwargs.pop("activation", None)
-        super().__init__(*args, **kwargs)
-
-        self.norm = norm
-        self.activation = activation
-
-    def forward(self, x):
-        if x.numel() == 0 and self.training:
-            # https://github.com/pytorch/pytorch/issues/12013
-            assert not isinstance(
-                self.norm, torch.nn.SyncBatchNorm
-            ), "SyncBatchNorm does not support empty inputs!"
-
-        if x.numel() == 0 and TORCH_VERSION <= (1, 4):
-            assert not isinstance(
-                self.norm, torch.nn.GroupNorm
-            ), "GroupNorm does not support empty inputs in PyTorch <=1.4!"
-            # When input is empty, we want to return a empty tensor with "correct" shape,
-            # So that the following operations will not panic
-            # if they check for the shape of the tensor.
-            # This computes the height and width of the output tensor
-            output_shape = [
-                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
-                for i, p, di, k, s in zip(
-                    x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
-                )
-            ]
-            output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
-            empty = _NewEmptyTensorOp.apply(x, output_shape)
-            if self.training:
-                # This is to make DDP happy.
-                # DDP expects all workers to have gradient w.r.t the same set of parameters.
-                _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
-                return empty + _dummy
-            else:
-                return empty
-
-        x = super().forward(x)
-        if self.norm is not None:
-            x = self.norm(x)
-        if self.activation is not None:
-            x = self.activation(x)
-        return x
-
-
-if TORCH_VERSION > (1, 4):
-    ConvTranspose2d = torch.nn.ConvTranspose2d
-else:
-
-    class ConvTranspose2d(torch.nn.ConvTranspose2d):
-        """
-        A wrapper around :class:`torch.nn.ConvTranspose2d` to support zero-size tensor.
-        """
-
-        def forward(self, x):
-            if x.numel() > 0:
-                return super(ConvTranspose2d, self).forward(x)
-            # get output shape
-
-            # When input is empty, we want to return a empty tensor with "correct" shape,
-            # So that the following operations will not panic
-            # if they check for the shape of the tensor.
-            # This computes the height and width of the output tensor
-            output_shape = [
-                (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
-                for i, p, di, k, d, op in zip(
-                    x.shape[-2:],
-                    self.padding,
-                    self.dilation,
-                    self.kernel_size,
-                    self.stride,
-                    self.output_padding,
-                )
-            ]
-            output_shape = [x.shape[0], self.out_channels] + output_shape
-            # This is to make DDP happy.
-            # DDP expects all workers to have gradient w.r.t the same set of parameters.
-            _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
-            return _NewEmptyTensorOp.apply(x, output_shape) + _dummy
-
-
-if TORCH_VERSION > (1, 4):
-    BatchNorm2d = torch.nn.BatchNorm2d
-else:
-
-    class BatchNorm2d(torch.nn.BatchNorm2d):
-        """
-        A wrapper around :class:`torch.nn.BatchNorm2d` to support zero-size tensor.
-        """
-
-        def forward(self, x):
-            if x.numel() > 0:
-                return super(BatchNorm2d, self).forward(x)
-            # get output shape
-            output_shape = x.shape
-            return _NewEmptyTensorOp.apply(x, output_shape)
-
-
-if False:  # not yet fixed in pytorch
-    Linear = torch.nn.Linear
-else:
-
-    class Linear(torch.nn.Linear):
-        """
-        A wrapper around :class:`torch.nn.Linear` to support empty inputs and more features.
-        Because of https://github.com/pytorch/pytorch/issues/34202
-        """
-
-        def forward(self, x):
-            if x.numel() == 0:
-                output_shape = [x.shape[0], self.weight.shape[0]]
-
-                empty = _NewEmptyTensorOp.apply(x, output_shape)
-                if self.training:
-                    # This is to make DDP happy.
-                    # DDP expects all workers to have gradient w.r.t the same set of parameters.
-                    _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
-                    return empty + _dummy
-                else:
-                    return empty
-
-            x = super().forward(x)
-            return x
-
-
-def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
-    """
-    A wrapper around :func:`torch.nn.functional.interpolate` to support zero-size tensor.
-    """
-    if input.numel() > 0:
-        return torch.nn.functional.interpolate(
-            input, size, scale_factor, mode, align_corners=align_corners
-        )
-
-    def _check_size_scale_factor(dim):
-        if size is None and scale_factor is None:
-            raise ValueError("either size or scale_factor should be defined")
-        if size is not None and scale_factor is not None:
-            raise ValueError("only one of size or scale_factor should be defined")
-        if (
-            scale_factor is not None
-            and isinstance(scale_factor, tuple)
-            and len(scale_factor) != dim
-        ):
-            raise ValueError(
-                "scale_factor shape must match input shape. "
-                "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
-            )
-
-    def _output_size(dim):
-        _check_size_scale_factor(dim)
-        if size is not None:
-            return size
-        scale_factors = _ntuple(dim)(scale_factor)
-        # math.floor might return float in py2.7
-        return [int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)]
-
-    output_shape = tuple(_output_size(2))
-    output_shape = input.shape[:-2] + output_shape
-    return _NewEmptyTensorOp.apply(input, output_shape)
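A note on the deleted `wrappers.py` above: the point of these wrappers is that a zero-element input (e.g. an image with no detected regions) yields a correctly shaped empty output instead of crashing. A minimal sketch of that contract, assuming the Space's bundled detectron2 package is importable under the path implied by the file location:

```python
import torch
from detectron2.layers.wrappers import Conv2d  # module path inferred from the file location

conv = Conv2d(3, 8, kernel_size=3, padding=1)
conv.eval()

empty = torch.zeros(0, 3, 32, 32)  # a batch of zero regions
out = conv(empty)
print(out.shape)  # torch.Size([0, 8, 32, 32]): the output-shape arithmetic still applies
```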
spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.cpp
DELETED
@@ -1,131 +0,0 @@
-/*
-    tests/test_kwargs_and_defaults.cpp -- keyword arguments and default values
-
-    Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#include "pybind11_tests.h"
-#include "constructor_stats.h"
-#include <pybind11/stl.h>
-
-TEST_SUBMODULE(kwargs_and_defaults, m) {
-    auto kw_func = [](int x, int y) { return "x=" + std::to_string(x) + ", y=" + std::to_string(y); };
-
-    // test_named_arguments
-    m.def("kw_func0", kw_func);
-    m.def("kw_func1", kw_func, py::arg("x"), py::arg("y"));
-    m.def("kw_func2", kw_func, py::arg("x") = 100, py::arg("y") = 200);
-    m.def("kw_func3", [](const char *) { }, py::arg("data") = std::string("Hello world!"));
-
-    /* A fancier default argument */
-    std::vector<int> list{{13, 17}};
-    m.def("kw_func4", [](const std::vector<int> &entries) {
-        std::string ret = "{";
-        for (int i : entries)
-            ret += std::to_string(i) + " ";
-        ret.back() = '}';
-        return ret;
-    }, py::arg("myList") = list);
-
-    m.def("kw_func_udl", kw_func, "x"_a, "y"_a=300);
-    m.def("kw_func_udl_z", kw_func, "x"_a, "y"_a=0);
-
-    // test_args_and_kwargs
-    m.def("args_function", [](py::args args) -> py::tuple {
-        return std::move(args);
-    });
-    m.def("args_kwargs_function", [](py::args args, py::kwargs kwargs) {
-        return py::make_tuple(args, kwargs);
-    });
-
-    // test_mixed_args_and_kwargs
-    m.def("mixed_plus_args", [](int i, double j, py::args args) {
-        return py::make_tuple(i, j, args);
-    });
-    m.def("mixed_plus_kwargs", [](int i, double j, py::kwargs kwargs) {
-        return py::make_tuple(i, j, kwargs);
-    });
-    auto mixed_plus_both = [](int i, double j, py::args args, py::kwargs kwargs) {
-        return py::make_tuple(i, j, args, kwargs);
-    };
-    m.def("mixed_plus_args_kwargs", mixed_plus_both);
-
-    m.def("mixed_plus_args_kwargs_defaults", mixed_plus_both,
-          py::arg("i") = 1, py::arg("j") = 3.14159);
-
-    // test_args_refcount
-    // PyPy needs a garbage collection to get the reference count values to match CPython's behaviour
-#ifdef PYPY_VERSION
-#define GC_IF_NEEDED ConstructorStats::gc()
-#else
-#define GC_IF_NEEDED
-#endif
-    m.def("arg_refcount_h", [](py::handle h) { GC_IF_NEEDED; return h.ref_count(); });
-    m.def("arg_refcount_h", [](py::handle h, py::handle, py::handle) { GC_IF_NEEDED; return h.ref_count(); });
-    m.def("arg_refcount_o", [](py::object o) { GC_IF_NEEDED; return o.ref_count(); });
-    m.def("args_refcount", [](py::args a) {
-        GC_IF_NEEDED;
-        py::tuple t(a.size());
-        for (size_t i = 0; i < a.size(); i++)
-            // Use raw Python API here to avoid an extra, intermediate incref on the tuple item:
-            t[i] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<ssize_t>(i)));
-        return t;
-    });
-    m.def("mixed_args_refcount", [](py::object o, py::args a) {
-        GC_IF_NEEDED;
-        py::tuple t(a.size() + 1);
-        t[0] = o.ref_count();
-        for (size_t i = 0; i < a.size(); i++)
-            // Use raw Python API here to avoid an extra, intermediate incref on the tuple item:
-            t[i + 1] = (int) Py_REFCNT(PyTuple_GET_ITEM(a.ptr(), static_cast<ssize_t>(i)));
-        return t;
-    });
-
-    // pybind11 won't allow these to be bound: args and kwargs, if present, must be at the end.
-    // Uncomment these to test that the static_assert is indeed working:
-    //    m.def("bad_args1", [](py::args, int) {});
-    //    m.def("bad_args2", [](py::kwargs, int) {});
-    //    m.def("bad_args3", [](py::kwargs, py::args) {});
-    //    m.def("bad_args4", [](py::args, int, py::kwargs) {});
-    //    m.def("bad_args5", [](py::args, py::kwargs, int) {});
-    //    m.def("bad_args6", [](py::args, py::args) {});
-    //    m.def("bad_args7", [](py::kwargs, py::kwargs) {});
-
-    // test_keyword_only_args
-    m.def("kwonly_all", [](int i, int j) { return py::make_tuple(i, j); },
-          py::kwonly(), py::arg("i"), py::arg("j"));
-    m.def("kwonly_some", [](int i, int j, int k) { return py::make_tuple(i, j, k); },
-          py::arg(), py::kwonly(), py::arg("j"), py::arg("k"));
-    m.def("kwonly_with_defaults", [](int i, int j, int k, int z) { return py::make_tuple(i, j, k, z); },
-          py::arg() = 3, "j"_a = 4, py::kwonly(), "k"_a = 5, "z"_a);
-    m.def("kwonly_mixed", [](int i, int j) { return py::make_tuple(i, j); },
-          "i"_a, py::kwonly(), "j"_a);
-    m.def("kwonly_plus_more", [](int i, int j, int k, py::kwargs kwargs) {
-          return py::make_tuple(i, j, k, kwargs); },
-          py::arg() /* positional */, py::arg("j") = -1 /* both */, py::kwonly(), py::arg("k") /* kw-only */);
-
-    m.def("register_invalid_kwonly", [](py::module m) {
-        m.def("bad_kwonly", [](int i, int j) { return py::make_tuple(i, j); },
-              py::kwonly(), py::arg() /* invalid unnamed argument */, "j"_a);
-    });
-
-    // These should fail to compile:
-    // argument annotations are required when using kwonly
-    //    m.def("bad_kwonly1", [](int) {}, py::kwonly());
-    // can't specify both `py::kwonly` and a `py::args` argument
-    //    m.def("bad_kwonly2", [](int i, py::args) {}, py::kwonly(), "i"_a);
-
-    // test_function_signatures (along with most of the above)
-    struct KWClass { void foo(int, float) {} };
-    py::class_<KWClass>(m, "KWClass")
-        .def("foo0", &KWClass::foo)
-        .def("foo1", &KWClass::foo, "x"_a, "y"_a);
-
-    // Make sure a class (not an instance) can be used as a default argument.
-    // The return value doesn't matter, only that the module is importable.
-    m.def("class_default_argument", [](py::object a) { return py::repr(a); },
-          "a"_a = py::module::import("decimal").attr("Decimal"));
-}
spaces/CVPR/LIVE/pybind11/tests/test_numpy_vectorize.cpp
DELETED
@@ -1,89 +0,0 @@
-/*
-    tests/test_numpy_vectorize.cpp -- auto-vectorize functions over NumPy array
-    arguments
-
-    Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-    All rights reserved. Use of this source code is governed by a
-    BSD-style license that can be found in the LICENSE file.
-*/
-
-#include "pybind11_tests.h"
-#include <pybind11/numpy.h>
-
-double my_func(int x, float y, double z) {
-    py::print("my_func(x:int={}, y:float={:.0f}, z:float={:.0f})"_s.format(x, y, z));
-    return (float) x*y*z;
-}
-
-TEST_SUBMODULE(numpy_vectorize, m) {
-    try { py::module::import("numpy"); }
-    catch (...) { return; }
-
-    // test_vectorize, test_docs, test_array_collapse
-    // Vectorize all arguments of a function (though non-vector arguments are also allowed)
-    m.def("vectorized_func", py::vectorize(my_func));
-
-    // Vectorize a lambda function with a capture object (e.g. to exclude some arguments from the vectorization)
-    m.def("vectorized_func2",
-          [](py::array_t<int> x, py::array_t<float> y, float z) {
-              return py::vectorize([z](int x, float y) { return my_func(x, y, z); })(x, y);
-          }
-    );
-
-    // Vectorize a complex-valued function
-    m.def("vectorized_func3", py::vectorize(
-        [](std::complex<double> c) { return c * std::complex<double>(2.f); }
-    ));
-
-    // test_type_selection
-    // Numpy function which only accepts specific data types
-    m.def("selective_func", [](py::array_t<int, py::array::c_style>) { return "Int branch taken."; });
-    m.def("selective_func", [](py::array_t<float, py::array::c_style>) { return "Float branch taken."; });
-    m.def("selective_func", [](py::array_t<std::complex<float>, py::array::c_style>) { return "Complex float branch taken."; });
-
-
-    // test_passthrough_arguments
-    // Passthrough test: references and non-pod types should be automatically passed through (in the
-    // function definition below, only `b`, `d`, and `g` are vectorized):
-    struct NonPODClass {
-        NonPODClass(int v) : value{v} {}
-        int value;
-    };
-    py::class_<NonPODClass>(m, "NonPODClass").def(py::init<int>());
-    m.def("vec_passthrough", py::vectorize(
-        [](double *a, double b, py::array_t<double> c, const int &d, int &e, NonPODClass f, const double g) {
-            return *a + b + c.at(0) + d + e + f.value + g;
-        }
-    ));
-
-    // test_method_vectorization
-    struct VectorizeTestClass {
-        VectorizeTestClass(int v) : value{v} {};
-        float method(int x, float y) { return y + (float) (x + value); }
-        int value = 0;
-    };
-    py::class_<VectorizeTestClass> vtc(m, "VectorizeTestClass");
-    vtc .def(py::init<int>())
-        .def_readwrite("value", &VectorizeTestClass::value);
-
-    // Automatic vectorizing of methods
-    vtc.def("method", py::vectorize(&VectorizeTestClass::method));
-
-    // test_trivial_broadcasting
-    // Internal optimization test for whether the input is trivially broadcastable:
-    py::enum_<py::detail::broadcast_trivial>(m, "trivial")
-        .value("f_trivial", py::detail::broadcast_trivial::f_trivial)
-        .value("c_trivial", py::detail::broadcast_trivial::c_trivial)
-        .value("non_trivial", py::detail::broadcast_trivial::non_trivial);
-    m.def("vectorized_is_trivial", [](
-            py::array_t<int, py::array::forcecast> arg1,
-            py::array_t<float, py::array::forcecast> arg2,
-            py::array_t<double, py::array::forcecast> arg3
-            ) {
-        ssize_t ndim;
-        std::vector<ssize_t> shape;
-        std::array<py::buffer_info, 3> buffers {{ arg1.request(), arg2.request(), arg3.request() }};
-        return py::detail::broadcast(buffers, ndim, shape);
-    });
-}
spaces/CVPR/LIVE/pybind11/tools/mkdoc.py
DELETED
@@ -1,387 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-#
-#  Syntax: mkdoc.py [-I<path> ..] [.. a list of header files ..]
-#
-#  Extract documentation from C++ header files to use it in Python bindings
-#
-
-import os
-import sys
-import platform
-import re
-import textwrap
-
-from clang import cindex
-from clang.cindex import CursorKind
-from collections import OrderedDict
-from glob import glob
-from threading import Thread, Semaphore
-from multiprocessing import cpu_count
-
-RECURSE_LIST = [
-    CursorKind.TRANSLATION_UNIT,
-    CursorKind.NAMESPACE,
-    CursorKind.CLASS_DECL,
-    CursorKind.STRUCT_DECL,
-    CursorKind.ENUM_DECL,
-    CursorKind.CLASS_TEMPLATE
-]
-
-PRINT_LIST = [
-    CursorKind.CLASS_DECL,
-    CursorKind.STRUCT_DECL,
-    CursorKind.ENUM_DECL,
-    CursorKind.ENUM_CONSTANT_DECL,
-    CursorKind.CLASS_TEMPLATE,
-    CursorKind.FUNCTION_DECL,
-    CursorKind.FUNCTION_TEMPLATE,
-    CursorKind.CONVERSION_FUNCTION,
-    CursorKind.CXX_METHOD,
-    CursorKind.CONSTRUCTOR,
-    CursorKind.FIELD_DECL
-]
-
-PREFIX_BLACKLIST = [
-    CursorKind.TRANSLATION_UNIT
-]
-
-CPP_OPERATORS = {
-    '<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array',
-    '+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=':
-    'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift',
-    '>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>':
-    'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot',
-    '&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/':
-    'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call'
-}
-
-CPP_OPERATORS = OrderedDict(
-    sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0])))
-
-job_count = cpu_count()
-job_semaphore = Semaphore(job_count)
-
-
-class NoFilenamesError(ValueError):
-    pass
-
-
-def d(s):
-    return s if isinstance(s, str) else s.decode('utf8')
-
-
-def sanitize_name(name):
-    name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name)
-    for k, v in CPP_OPERATORS.items():
-        name = name.replace('operator%s' % k, 'operator_%s' % v)
-    name = re.sub('<.*>', '', name)
-    name = ''.join([ch if ch.isalnum() else '_' for ch in name])
-    name = re.sub('_$', '', re.sub('_+', '_', name))
-    return '__doc_' + name
-
-
-def process_comment(comment):
-    result = ''
-
-    # Remove C++ comment syntax
-    leading_spaces = float('inf')
-    for s in comment.expandtabs(tabsize=4).splitlines():
-        s = s.strip()
-        if s.startswith('/*'):
-            s = s[2:].lstrip('*')
-        elif s.endswith('*/'):
-            s = s[:-2].rstrip('*')
-        elif s.startswith('///'):
-            s = s[3:]
-        if s.startswith('*'):
-            s = s[1:]
-        if len(s) > 0:
-            leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
-        result += s + '\n'
-
-    if leading_spaces != float('inf'):
-        result2 = ""
-        for s in result.splitlines():
-            result2 += s[leading_spaces:] + '\n'
-        result = result2
-
-    # Doxygen tags
-    cpp_group = r'([\w:]+)'
-    param_group = r'([\[\w:\]]+)'
-
-    s = result
-    s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s)
-    s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s)
-    s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s)
-    s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s)
-    s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s)
-    s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s)
-    s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group),
-               r'\n\n$Parameter ``\2``:\n\n', s)
-    s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group),
-               r'\n\n$Template parameter ``\2``:\n\n', s)
-
-    for in_, out_ in {
-        'return': 'Returns',
-        'author': 'Author',
-        'authors': 'Authors',
-        'copyright': 'Copyright',
-        'date': 'Date',
-        'remark': 'Remark',
-        'sa': 'See also',
-        'see': 'See also',
-        'extends': 'Extends',
-        'throw': 'Throws',
-        'throws': 'Throws'
-    }.items():
-        s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)
-
-    s = re.sub(r'\\details\s*', r'\n\n', s)
-    s = re.sub(r'\\brief\s*', r'', s)
-    s = re.sub(r'\\short\s*', r'', s)
-    s = re.sub(r'\\ref\s*', r'', s)
-
-    s = re.sub(r'\\code\s?(.*?)\s?\\endcode',
-               r"```\n\1\n```\n", s, flags=re.DOTALL)
-
-    # HTML/TeX tags
-    s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, flags=re.DOTALL)
-    s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL)
-    s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL)
-    s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL)
-    s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL)
-    s = re.sub(r'<li>', r'\n\n* ', s)
-    s = re.sub(r'</?ul>', r'', s)
-    s = re.sub(r'</li>', r'\n\n', s)
-
-    s = s.replace('``true``', '``True``')
-    s = s.replace('``false``', '``False``')
-
-    # Re-flow text
-    wrapper = textwrap.TextWrapper()
-    wrapper.expand_tabs = True
-    wrapper.replace_whitespace = True
-    wrapper.drop_whitespace = True
-    wrapper.width = 70
-    wrapper.initial_indent = wrapper.subsequent_indent = ''
-
-    result = ''
-    in_code_segment = False
-    for x in re.split(r'(```)', s):
-        if x == '```':
-            if not in_code_segment:
-                result += '```\n'
-            else:
-                result += '\n```\n\n'
-            in_code_segment = not in_code_segment
-        elif in_code_segment:
-            result += x.strip()
-        else:
-            for y in re.split(r'(?: *\n *){2,}', x):
-                wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
-                if len(wrapped) > 0 and wrapped[0] == '$':
-                    result += wrapped[1:] + '\n'
-                    wrapper.initial_indent = \
-                        wrapper.subsequent_indent = ' ' * 4
-                else:
-                    if len(wrapped) > 0:
-                        result += wrapped + '\n\n'
-                    wrapper.initial_indent = wrapper.subsequent_indent = ''
-    return result.rstrip().lstrip('\n')
-
-
-def extract(filename, node, prefix, output):
-    if not (node.location.file is None or
-            os.path.samefile(d(node.location.file.name), filename)):
-        return 0
-    if node.kind in RECURSE_LIST:
-        sub_prefix = prefix
-        if node.kind not in PREFIX_BLACKLIST:
-            if len(sub_prefix) > 0:
-                sub_prefix += '_'
-            sub_prefix += d(node.spelling)
-        for i in node.get_children():
-            extract(filename, i, sub_prefix, output)
-    if node.kind in PRINT_LIST:
-        comment = d(node.raw_comment) if node.raw_comment is not None else ''
-        comment = process_comment(comment)
-        sub_prefix = prefix
-        if len(sub_prefix) > 0:
-            sub_prefix += '_'
-        if len(node.spelling) > 0:
-            name = sanitize_name(sub_prefix + d(node.spelling))
-            output.append((name, filename, comment))
-
-
-class ExtractionThread(Thread):
-    def __init__(self, filename, parameters, output):
-        Thread.__init__(self)
-        self.filename = filename
-        self.parameters = parameters
-        self.output = output
-        job_semaphore.acquire()
-
-    def run(self):
-        print('Processing "%s" ..' % self.filename, file=sys.stderr)
-        try:
-            index = cindex.Index(
-                cindex.conf.lib.clang_createIndex(False, True))
-            tu = index.parse(self.filename, self.parameters)
-            extract(self.filename, tu.cursor, '', self.output)
-        finally:
-            job_semaphore.release()
-
-
-def read_args(args):
-    parameters = []
-    filenames = []
-    if "-x" not in args:
-        parameters.extend(['-x', 'c++'])
-    if not any(it.startswith("-std=") for it in args):
-        parameters.append('-std=c++11')
-
-    if platform.system() == 'Darwin':
-        dev_path = '/Applications/Xcode.app/Contents/Developer/'
-        lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/'
-        sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs'
-        libclang = lib_dir + 'libclang.dylib'
-
-        if os.path.exists(libclang):
-            cindex.Config.set_library_path(os.path.dirname(libclang))
-
-        if os.path.exists(sdk_dir):
-            sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0])
-            parameters.append('-isysroot')
-            parameters.append(sysroot_dir)
-    elif platform.system() == 'Linux':
-        # cython.util.find_library does not find `libclang` for all clang
-        # versions and distributions. LLVM switched to a monolithical setup
-        # that includes everything under /usr/lib/llvm{version_number}/
-        # We therefore glob for the library and select the highest version
-        library_file = sorted(glob("/usr/lib/llvm-*/lib/libclang.so"), reverse=True)[0]
-        cindex.Config.set_library_file(library_file)
-
-        # clang doesn't find its own base includes by default on Linux,
-        # but different distros install them in different paths.
-        # Try to autodetect, preferring the highest numbered version.
-        def clang_folder_version(d):
-            return [int(ver) for ver in re.findall(r'(?<!lib)(?<!\d)\d+', d)]
-        clang_include_dir = max((
-            path
-            for libdir in ['lib64', 'lib', 'lib32']
-            for path in glob('/usr/%s/clang/*/include' % libdir)
-            if os.path.isdir(path)
-        ), default=None, key=clang_folder_version)
-        if clang_include_dir:
-            parameters.extend(['-isystem', clang_include_dir])
-
-    for item in args:
-        if item.startswith('-'):
-            parameters.append(item)
-        else:
-            filenames.append(item)
-
-    if len(filenames) == 0:
-        raise NoFilenamesError("args parameter did not contain any filenames")
-
-    return parameters, filenames
-
-
-def extract_all(args):
-    parameters, filenames = read_args(args)
-    output = []
-    for filename in filenames:
-        thr = ExtractionThread(filename, parameters, output)
-        thr.start()
-
-    print('Waiting for jobs to finish ..', file=sys.stderr)
-    for i in range(job_count):
-        job_semaphore.acquire()
-
-    return output
-
-
-def write_header(comments, out_file=sys.stdout):
-    print('''/*
-  This file contains docstrings for the Python bindings.
-  Do not edit! These were automatically extracted by mkdoc.py
- */
-
-#define __EXPAND(x) x
-#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
-#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
-#define __CAT1(a, b) a ## b
-#define __CAT2(a, b) __CAT1(a, b)
-#define __DOC1(n1) __doc_##n1
-#define __DOC2(n1, n2) __doc_##n1##_##n2
-#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
-#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
-#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
-#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
-#define __DOC7(n1, n2, n3, n4, n5, n6, n7) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
-#define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
-
-#if defined(__GNUG__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-variable"
-#endif
-''', file=out_file)
-
-
-    name_ctr = 1
-    name_prev = None
-    for name, _, comment in list(sorted(comments, key=lambda x: (x[0], x[1]))):
-        if name == name_prev:
-            name_ctr += 1
-            name = name + "_%i" % name_ctr
-        else:
-            name_prev = name
-            name_ctr = 1
-        print('\nstatic const char *%s =%sR"doc(%s)doc";' %
-              (name, '\n' if '\n' in comment else ' ', comment), file=out_file)
-
-    print('''
-#if defined(__GNUG__)
-#pragma GCC diagnostic pop
-#endif
-''', file=out_file)
-
-
-def mkdoc(args):
-    args = list(args)
-    out_path = None
-    for idx, arg in enumerate(args):
-        if arg.startswith("-o"):
-            args.remove(arg)
-            try:
-                out_path = arg[2:] or args.pop(idx)
-            except IndexError:
-                print("-o flag requires an argument")
-                exit(-1)
-            break
-
-    comments = extract_all(args)
-
-    if out_path:
-        try:
-            with open(out_path, 'w') as out_file:
-                write_header(comments, out_file)
-        except:
-            # In the event of an error, don't leave a partially-written
-            # output file.
-            try:
-                os.unlink(out_path)
-            except:
-                pass
-            raise
-    else:
-        write_header(comments)
-
-
-if __name__ == '__main__':
-    try:
-        mkdoc(sys.argv[1:])
-    except NoFilenamesError:
-        print('Syntax: %s [.. a list of header files ..]' % sys.argv[0])
-        exit(-1)
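To make the comment-processing step above concrete: `process_comment` rewrites Doxygen markup into re-flowed plain text suitable for the generated C++ docstring literals. A small hypothetical invocation follows; the comment text is invented, and it assumes `mkdoc.py` is importable as a module (e.g. when run from `pybind11/tools`).

```python
# Hypothetical demo of process_comment from the deleted mkdoc.py above.
from mkdoc import process_comment

raw = """/*
 * \\brief Compute the Euclidean norm.
 * \\param v the input vector
 * \\return the L2 norm of *v*
 */"""
print(process_comment(raw))
# The \brief text becomes the leading summary; \param and \return become
# "Parameter ``v``:" and "Returns:" sections, re-flowed to 70 columns.
```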
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/remove.h
DELETED
@@ -1,44 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a fill of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// the purpose of this header is to #include the remove.h header
-// of the sequential, host, and device systems. It should be #included in any
-// code which uses adl to dispatch remove
-
-#include <thrust/system/detail/sequential/remove.h>
-
-// SCons can't see through the #defines below to figure out what this header
-// includes, so we fake it out by specifying all possible files we might end up
-// including inside an #if 0.
-#if 0
-#include <thrust/system/cpp/detail/remove.h>
-#include <thrust/system/cuda/detail/remove.h>
-#include <thrust/system/omp/detail/remove.h>
-#include <thrust/system/tbb/detail/remove.h>
-#endif
-
-#define __THRUST_HOST_SYSTEM_REMOVE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/remove.h>
-#include __THRUST_HOST_SYSTEM_REMOVE_HEADER
-#undef __THRUST_HOST_SYSTEM_REMOVE_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_REMOVE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/remove.h>
-#include __THRUST_DEVICE_SYSTEM_REMOVE_HEADER
-#undef __THRUST_DEVICE_SYSTEM_REMOVE_HEADER
-
spaces/CikeyQI/Yunzai/Yunzai/plugins/system/status.js
DELETED
@@ -1,124 +0,0 @@
-import cfg from '../../lib/config/config.js'
-import moment from 'moment'
-
-export class status extends plugin {
-  constructor() {
-    super({
-      name: '其他功能',
-      dsc: '#状态',
-      event: 'message',
-      rule: [
-        {
-          reg: '^#状态$',
-          fnc: 'status'
-        }
-      ]
-    })
-  }
-
-  async status() {
-    if (this.e.isMaster) return this.statusMaster()
-    if (!this.e.isGroup) return this.reply('请群聊查看')
-    return this.statusGroup()
-  }
-
-  async statusMaster() {
-    let runTime = moment().diff(moment.unix(this.e.bot.stat.start_time), 'seconds')
-    let Day = Math.floor(runTime / 3600 / 24)
-    let Hour = Math.floor((runTime / 3600) % 24)
-    let Min = Math.floor((runTime / 60) % 60)
-    if (Day > 0) {
-      runTime = `${Day}天${Hour}小时${Min}分钟`
-    } else {
-      runTime = `${Hour}小时${Min}分钟`
-    }
-
-    let format = (bytes) => {
-      return (bytes / 1024 / 1024).toFixed(2) + 'MB'
-    }
-
-    let msg = '-------状态-------'
-    msg += `\n运行时间:${runTime}`
-    msg += `\n内存使用:${format(process.memoryUsage().rss)}`
-    msg += `\n当前版本:v${cfg.package.version}`
-    msg += '\n-------累计-------'
-    msg += await this.getCount()
-
-    await this.reply(msg)
-  }
-
-  async statusGroup() {
-    let msg = '-------状态-------'
-    msg += await this.getCount(this.e.group_id)
-
-    await this.reply(msg)
-  }
-
-  async getCount(groupId = '') {
-    this.date = moment().format('MMDD')
-    this.month = Number(moment().month()) + 1
-
-    this.key = 'Yz:count:'
-
-    if (groupId) this.key += `group:${groupId}:`
-
-    this.msgKey = {
-      day: `${this.key}sendMsg:day:`,
-      month: `${this.key}sendMsg:month:`
-    }
-
-    this.screenshotKey = {
-      day: `${this.key}screenshot:day:`,
-      month: `${this.key}screenshot:month:`
-    }
-
-    let week = {
-      msg: 0,
-      screenshot: 0
-    }
-    for (let i = 0; i <= 6; i++) {
-      let date = moment().startOf('week').add(i, 'days').format('MMDD')
-
-      week.msg += Number(await redis.get(`${this.msgKey.day}${date}`)) ?? 0
-      week.screenshot += Number(await redis.get(`${this.screenshotKey.day}${date}`)) ?? 0
-    }
-
-    let count = {
-      total: {
-        msg: await redis.get(`${this.key}sendMsg:total`) || 0,
-        screenshot: await redis.get(`${this.key}screenshot:total`) || 0
-      },
-      today: {
-        msg: await redis.get(`${this.msgKey.day}${this.date}`) || 0,
-        screenshot: await redis.get(`${this.screenshotKey.day}${this.date}`) || 0
-      },
-      week,
-      month: {
-        msg: await redis.get(`${this.msgKey.month}${this.month}`) || 0,
-        screenshot: await redis.get(`${this.screenshotKey.month}${this.month}`) || 0
-      }
-    }
-
-    let msg = ''
-    if (groupId) {
-      msg = `\n发送消息:${count.today.msg}条`
-      msg += `\n生成图片:${count.today.screenshot}次`
-    } else {
-      msg = `\n发送消息:${count.total.msg}条`
-      msg += `\n生成图片:${count.total.screenshot}次`
-    }
-
-    if (count.month.msg > 200) {
-      msg += '\n-------本周-------'
-      msg += `\n发送消息:${count.week.msg}条`
-      msg += `\n生成图片:${count.week.screenshot}次`
-    }
-    if (moment().format('D') >= 8 && count.month.msg > 400) {
-      msg += '\n-------本月-------'
-      msg += `\n发送消息:${count.month.msg}条`
-      msg += `\n生成图片:${count.month.screenshot}次`
-    }
-
-    return msg
-  }
-}
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/iou_loss.py
DELETED
@@ -1,36 +0,0 @@
-import torch
-from torch import nn
-
-
-class IOULoss(nn.Module):
-    def forward(self, pred, target, weight=None):
-        pred_left = pred[:, 0]
-        pred_top = pred[:, 1]
-        pred_right = pred[:, 2]
-        pred_bottom = pred[:, 3]
-
-        target_left = target[:, 0]
-        target_top = target[:, 1]
-        target_right = target[:, 2]
-        target_bottom = target[:, 3]
-
-        target_aera = (target_left + target_right) * \
-                      (target_top + target_bottom)
-        pred_aera = (pred_left + pred_right) * \
-                    (pred_top + pred_bottom)
-
-        w_intersect = torch.min(pred_left, target_left) + \
-                      torch.min(pred_right, target_right)
-        h_intersect = torch.min(pred_bottom, target_bottom) + \
-                      torch.min(pred_top, target_top)
-
-        area_intersect = w_intersect * h_intersect
-        area_union = target_aera + pred_aera - area_intersect
-
-        losses = -torch.log((area_intersect + 1.0) / (area_union + 1.0))
-
-        if weight is not None and weight.sum() > 0:
-            return (losses * weight).sum() / weight.sum()
-        else:
-            assert losses.numel() != 0
-            return losses.mean()
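To sanity-check the arithmetic in the deleted loss above: boxes are encoded as (left, top, right, bottom) distances from an anchor point, and the loss is `-log((intersection + 1) / (union + 1))`. A tiny worked example, assuming the import path implied by the file location:

```python
import torch
from maskrcnn_benchmark.layers.iou_loss import IOULoss

loss_fn = IOULoss()
pred = torch.tensor([[2.0, 2.0, 2.0, 2.0]])    # a 4x4 box centered on the anchor point
target = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # the same 4x4 area, shifted

# intersection: (min(2,1) + min(2,3)) * (min(2,3) + min(2,1)) = 3 * 3 = 9
# union: 16 + 16 - 9 = 23; loss = -log((9 + 1) / (23 + 1)) = log(2.4)
print(loss_fn(pred, target))  # tensor(0.8755)
```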
spaces/Cyril666/my_abi/modules/attention.py
DELETED
@@ -1,97 +0,0 @@
-import torch
-import torch.nn as nn
-from .transformer import PositionalEncoding
-
-class Attention(nn.Module):
-    def __init__(self, in_channels=512, max_length=25, n_feature=256):
-        super().__init__()
-        self.max_length = max_length
-
-        self.f0_embedding = nn.Embedding(max_length, in_channels)
-        self.w0 = nn.Linear(max_length, n_feature)
-        self.wv = nn.Linear(in_channels, in_channels)
-        self.we = nn.Linear(in_channels, max_length)
-
-        self.active = nn.Tanh()
-        self.softmax = nn.Softmax(dim=2)
-
-    def forward(self, enc_output):
-        enc_output = enc_output.permute(0, 2, 3, 1).flatten(1, 2)
-        reading_order = torch.arange(self.max_length, dtype=torch.long, device=enc_output.device)
-        reading_order = reading_order.unsqueeze(0).expand(enc_output.size(0), -1)  # (S,) -> (B, S)
-        reading_order_embed = self.f0_embedding(reading_order)  # b,25,512
-
-        t = self.w0(reading_order_embed.permute(0, 2, 1))  # b,512,256
-        t = self.active(t.permute(0, 2, 1) + self.wv(enc_output))  # b,256,512
-
-        attn = self.we(t)  # b,256,25
-        attn = self.softmax(attn.permute(0, 2, 1))  # b,25,256
-        g_output = torch.bmm(attn, enc_output)  # b,25,512
-        return g_output, attn.view(*attn.shape[:2], 8, 32)
-
-
-def encoder_layer(in_c, out_c, k=3, s=2, p=1):
-    return nn.Sequential(nn.Conv2d(in_c, out_c, k, s, p),
-                         nn.BatchNorm2d(out_c),
-                         nn.ReLU(True))
-
-def decoder_layer(in_c, out_c, k=3, s=1, p=1, mode='nearest', scale_factor=None, size=None):
-    align_corners = None if mode=='nearest' else True
-    return nn.Sequential(nn.Upsample(size=size, scale_factor=scale_factor,
-                                     mode=mode, align_corners=align_corners),
-                         nn.Conv2d(in_c, out_c, k, s, p),
-                         nn.BatchNorm2d(out_c),
-                         nn.ReLU(True))
-
-
-class PositionAttention(nn.Module):
-    def __init__(self, max_length, in_channels=512, num_channels=64,
-                 h=8, w=32, mode='nearest', **kwargs):
-        super().__init__()
-        self.max_length = max_length
-        self.k_encoder = nn.Sequential(
-            encoder_layer(in_channels, num_channels, s=(1, 2)),
-            encoder_layer(num_channels, num_channels, s=(2, 2)),
-            encoder_layer(num_channels, num_channels, s=(2, 2)),
-            encoder_layer(num_channels, num_channels, s=(2, 2))
-        )
-        self.k_decoder = nn.Sequential(
-            decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
-            decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
-            decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
-            decoder_layer(num_channels, in_channels, size=(h, w), mode=mode)
-        )
-
-        self.pos_encoder = PositionalEncoding(in_channels, dropout=0, max_len=max_length)
-        self.project = nn.Linear(in_channels, in_channels)
-
-    def forward(self, x):
-        N, E, H, W = x.size()
-        k, v = x, x  # (N, E, H, W)
-
-        # calculate key vector
-        features = []
-        for i in range(0, len(self.k_encoder)):
-            k = self.k_encoder[i](k)
-            features.append(k)
-        for i in range(0, len(self.k_decoder) - 1):
-            k = self.k_decoder[i](k)
-            k = k + features[len(self.k_decoder) - 2 - i]
-        k = self.k_decoder[-1](k)
-
-        # calculate query vector
-        # TODO q=f(q,k)
-        zeros = x.new_zeros((self.max_length, N, E))  # (T, N, E)
-        q = self.pos_encoder(zeros)  # (T, N, E)
-        q = q.permute(1, 0, 2)  # (N, T, E)
-        q = self.project(q)  # (N, T, E)
-
-        # calculate attention
-        attn_scores = torch.bmm(q, k.flatten(2, 3))  # (N, T, (H*W))
-        attn_scores = attn_scores / (E ** 0.5)
-        attn_scores = torch.softmax(attn_scores, dim=-1)
-
-        v = v.permute(0, 2, 3, 1).view(N, -1, E)  # (N, (H*W), E)
-        attn_vecs = torch.bmm(attn_scores, v)  # (N, T, E)
-
-        return attn_vecs, attn_scores.view(N, -1, H, W)
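For orientation on the deleted `attention.py` above: `PositionAttention` compresses a feature map into `max_length` per-character vectors using a small U-Net over the keys and positional-encoding queries. A shape-level sketch follows; the 8x32 feature map matches the class defaults, and it assumes the Space's `modules` package (including its `transformer.PositionalEncoding`) is importable.

```python
import torch
from modules.attention import PositionAttention  # module path within this Space

attn = PositionAttention(max_length=25, in_channels=512, num_channels=64, h=8, w=32)
feat = torch.randn(2, 512, 8, 32)  # (N, E, H, W) backbone features

attn_vecs, attn_scores = attn(feat)
print(attn_vecs.shape)    # torch.Size([2, 25, 512]): one vector per character slot
print(attn_scores.shape)  # torch.Size([2, 25, 8, 32]): per-slot attention maps over the image
```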
spaces/DHEIVER/Pedrita/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Pedrita
-emoji: 🐠
-colorFrom: gray
-colorTo: pink
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference