Commit 03921cb
Parent(s): a91c254
Update parquet files (step 29 of 249)
This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download DIGSI 5 Today and Discover the Benefits of the Versatile Engineering Tool for SIPROTEC 5 Devices.md +0 -28
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download WinRAR 64 Bit Free Crack and Unleash the Power of RAR.md +0 -25
- spaces/1gistliPinn/ChatGPT4/Examples/Dispensary Management Software Free Download [PORTABLE].md +0 -6
- spaces/1phancelerku/anime-remove-background/Download Real Car Parking 3D and Become a Parking Master.md +0 -124
- spaces/1toTree/lora_test/ppdiffusers/commands/env.py +0 -67
- spaces/232labs/VToonify/vtoonify/model/stylegan/prepare_data.py +0 -105
- spaces/52Hz/CMFNet_deraindrop/main_test_CMFNet.py +0 -98
- spaces/7hao/bingo/src/lib/hooks/use-enter-submit.tsx +0 -23
- spaces/AIConsultant/MusicGen/audiocraft/losses/balancer.py +0 -136
- spaces/AIConsultant/MusicGen/audiocraft/train.py +0 -157
- spaces/AIZero2Hero4Health/5-QuantumStreamlitAIDashboard-SL/app.py +0 -57
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/work_dirs/yolov6_s_df2_0.4/__init__.py +0 -0
- spaces/Abdullah-Habib/Text_to_Speech_Urdu/app.py +0 -127
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/DfeHub.py +0 -77
- spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/midas/dpt_depth.py +0 -109
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Swipe.d.ts +0 -2
- spaces/AlanMars/QYL-AI-Space/assets/custom.js +0 -607
- spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/test.py +0 -195
- spaces/Aloento/9Nine-PITS/text/symbols.py +0 -14
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/multi_token_clip.py +0 -103
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/shap_e/camera.py +0 -147
- spaces/Andy1621/uniformer_image_detection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py +0 -18
- spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/fpn_carafe.py +0 -267
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/images.py +0 -68
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/__init__.py +0 -0
- spaces/Anonymous-sub/Rerender/README.md +0 -12
- spaces/Asifpa6/emotion-analyzer-app/emotion_analysis.py +0 -17
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/wheel.py +0 -136
- spaces/Ayaka2022/anime-aesthetic-predict/README.md +0 -14
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py +0 -334
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/diagram/__init__.py +0 -642
- spaces/Blessin/yes-and-improv-game/app.py +0 -50
- spaces/CM-15/NLP-demo/README.md +0 -12
- spaces/CVPR/CVPR2022_papers/style.css +0 -22
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mmnasnet/adapter.py +0 -120
- spaces/CVPR/LIVE/pybind11/tests/test_modules.py +0 -73
- spaces/CVPR/WALT/mmdet/datasets/samplers/distributed_sampler.py +0 -39
- spaces/Candyraider/Proxy4/README.md +0 -10
- spaces/Chris4K/llms_compare/Antares Mic Mod Efx Mac ~UPD~ Crack Torrent.md +0 -84
- spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/ssim.py +0 -84
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/segmentation_mask.py +0 -535
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/background.py +0 -1
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I_C_.py +0 -5
- spaces/DaleChen/AutoGPT/autogpt/__main__.py +0 -5
- spaces/DanteOz/Minimal-Endpoint/app.py +0 -14
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/boundary_loss.py +0 -51
- spaces/DemoLou/moe-tts/text/shanghainese.py +0 -64
- spaces/DiamondYin/AnewGame/index.html +0 -122
- spaces/DragGan/DragGan-Inversion/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py +0 -1007
- spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/fused_act.py +0 -99
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download DIGSI 5 Today and Discover the Benefits of the Versatile Engineering Tool for SIPROTEC 5 Devices.md
DELETED
@@ -1,28 +0,0 @@
-<br />
-<h1>How to Download and Install DIGSI 5 - The Engineering Software for SIPROTEC 5 Protection Relays</h1>
-<p>DIGSI 5 is a versatile engineering tool for parameterizing, commissioning and operating all SIPROTEC 5 protection devices. It has an innovative user interface that includes context-sensitive user instructions and a simple connection to the device via USB. In this article, we will show you how to download and install DIGSI 5 on your computer.</p>
-<h2>digsi 5 download</h2><br /><p><b><b>Download Zip</b> ⏩ <a href="https://byltly.com/2uKuUI">https://byltly.com/2uKuUI</a></b></p><br /><br />
-<h2>Step 1: Download DIGSI 5</h2>
-<p>You can download the latest version of DIGSI 5 from the Siemens website. There are three options available:</p>
-<ul>
-<li>Trial version: This is a free version of DIGSI 5 Premium that can be used for 30 days without functional restrictions. It includes the latest versions of IEC 61850 System Configurator and SIGRA.</li>
-<li>Update version: This is a software update for existing users of DIGSI 5 who want to upgrade to the latest version. It requires a valid license key.</li>
-<li>Full version: This is a complete installation package for new users of DIGSI 5 who want to install it on a clean system. It also includes a trial version of DIGSI 5 Premium, usable for 30 days without functional restrictions.</li>
-</ul>
-<p>To download DIGSI 5, you need to register or log in with your Siemens account and accept the terms of use. You can also find the product information, manuals, readme files and hotfixes for DIGSI 5 on the same page.</p>
-<h2>Step 2: Install DIGSI 5</h2>
-<p>After downloading the DIGSI 5 package, you need to unzip it and run the setup.exe file. Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation.</p>
-<p>If you are installing DIGSI 5 for the first time, you will need to activate it with a license key. You can request a license key from Siemens or use the trial version for 30 days. If you are updating from an earlier version of DIGSI 5, you can use your existing license key.</p>
-<h2>Step 3: Connect and Configure SIPROTEC 5 Devices</h2>
-<p>Once you have installed and activated DIGSI 5, you can connect your SIPROTEC 5 devices to your computer via USB or Ethernet. You can use DIGSI 5 to parameterize, commission and operate your devices easily and efficiently. You can also use the IEC 61850 System Configurator and SIGRA tools to configure and analyze communication networks and data.</p>
-<p>For more information on how to use DIGSI 5, please refer to the manuals and online help available in the software.</p>
-<p></p><h2>Step 4: Test and Troubleshoot SIPROTEC 5 Devices</h2>
-<p>After configuring your SIPROTEC 5 devices, you can test and troubleshoot them using DIGSI 5. You can use the following features to ensure the proper functioning of your devices:</p>
-<ul>
-<li>Device test: This feature allows you to perform various tests on your devices, such as output test, input test, LED test, display test and communication test. You can also run predefined test cases or create your own test cases.</li>
-<li>Device diagnosis: This feature allows you to monitor the status and performance of your devices, such as voltage, current, power, frequency, temperature and memory usage. You can also view the event logs, fault records and disturbance records of your devices.</li>
-<li>Device simulation: This feature allows you to simulate the behavior of your devices in different scenarios, such as normal operation, fault occurrence and protection tripping. You can also use the SIPROTEC Digital Twin plugin to connect your devices to a virtual power system model and simulate realistic scenarios.</li>
-</ul>
-<p>For more information on how to test and troubleshoot SIPROTEC 5 devices, please refer to the manuals and online help available in the software.</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download WinRAR 64 Bit Free Crack and Unleash the Power of RAR.md
DELETED
@@ -1,25 +0,0 @@
-<br />
-<h1>How to Download WinRAR 64 Bit Free Crack and Use It on Your PC</h1>
-<p>WinRAR is a popular and powerful file compression and archiving software. It can create and extract RAR, ZIP, and other archive formats. It can also split large files into smaller volumes, encrypt and password-protect archives, repair damaged files, and more. WinRAR is widely used by millions of users around the world for various purposes.</p>
-<h2>winrar 64 bit free download with crack</h2><br /><p><b><b>Download File</b> ••• <a href="https://byltly.com/2uKzCf">https://byltly.com/2uKzCf</a></b></p><br /><br />
-<p>However, WinRAR is not free software. You need to buy a license to use it legally on your PC. The official price of WinRAR is $29 for a single-user license or $21 per user for a multi-user license. These prices can be too high for some users who just want to use WinRAR occasionally or for personal purposes.</p>
-<p>That's why some people look for ways to download WinRAR 64 bit free crack and use it without paying anything. A crack is a software tool that modifies or bypasses the original code of a program to make it work without a license or activation. By using a crack, you can get access to the full features of WinRAR without paying anything.</p>
-<p>But is it safe and legal to download WinRAR 64 bit free crack? How can you do it and what are the risks involved? In this article, we will answer these questions and show you how to download WinRAR 64 bit free crack and use it on your PC.</p>
-<h2>Is It Safe and Legal to Download WinRAR 64 Bit Free Crack?</h2>
-<p>The short answer is no. Downloading WinRAR 64 bit free crack is neither safe nor legal. Here are some reasons why:</p>
-<ul>
-<li>Downloading WinRAR 64 bit free crack is illegal because it violates the terms and conditions of WinRAR. You are not allowed to use WinRAR without a valid license. If you do so, you are committing software piracy, which is a crime that can result in fines or even jail time.</li>
-<li>Downloading WinRAR 64 bit free crack is unsafe because it can expose your PC to malware and viruses. Cracks are often distributed by hackers or malicious websites that can infect your PC with malware and viruses. These can damage your files, steal your personal information, or even take control of your PC.</li>
-<li>Downloading WinRAR 64 bit free crack is unreliable because it can cause errors and glitches. Cracks are not official updates or patches from WinRAR. They are often outdated or incompatible with the latest versions of WinRAR or Windows. This can cause errors and glitches that can affect the performance and functionality of WinRAR.</li>
-<li>Downloading WinRAR 64 bit free crack is unethical because it harms the developers and creators of WinRAR. WinRAR invests a lot of time, money, and resources to develop and improve WinRAR. By using a crack, you are depriving them of their rightful income and recognition.</li>
-</ul>
-<p>Therefore, we do not recommend downloading WinRAR 64 bit free crack and using it on your PC. It is not worth the risk and hassle. Instead, we suggest using one of the legal and safe alternatives that we will discuss in the next section.</p>
-<p></p>
-<h2>How to Download WinRAR 64 Bit Free Crack and Use It on Your PC</h2>
-<p>If you still want to download WinRAR 64 bit free crack and use it on your PC, despite the risks and consequences involved, here are the steps you need to follow:</p>
-<ol>
-<li>Go to a website that offers cracks for WinRAR or other software. There are many websites that claim to offer cracks for WinRAR or other software, but most of them are fake or malicious. You need to be careful and do some research before downloading anything from these websites. Some examples of websites that offer cracks for WinRAR are Yasir252, Techworm, WizCase, etc.</li>
-<li>Select the version of WinRAR that you want to download. Depending on the website you choose, you may find different versions of WinRAR available for download. For example, you may find WinRAR 6.21, 6.11, 6.02, etc., for Windows 11, 10, 8.1, 8, 7, etc., in both 32 bit and 64 bit versions. Choose the version that suits your needs and preferences.</li>
-<li>Download</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Dispensary Management Software Free Download [PORTABLE].md
DELETED
@@ -1,6 +0,0 @@
-<h2>dispensary management software free download</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://imgfil.com/2uy1Bz">https://imgfil.com/2uy1Bz</a></b></p><br /><br />
-<br />
-If an application is rejected for failure to provide required information, ... Our technology serves as an elegant retail marijuana POS with powerful dispensary management tools. We can ... Download the report template from the OMMA website. 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Download Real Car Parking 3D and Become a Parking Master.md
DELETED
@@ -1,124 +0,0 @@
-
-<h1>Real Car Parking and Driving Simulator 3D Download: A Review</h1>
-<p>Do you love driving cars and parking them in realistic scenarios? Do you want to test your skills and have fun at the same time? If you answered yes, then you should download real car parking and driving simulator 3d, one of the best car simulation games available on the market. In this article, we will review the features, benefits, and tips of this amazing game, and show you how to download it on your device. Let's get started!</p>
-<h2>Features of Real Car Parking and Driving Simulator 3D</h2>
-<p>Real car parking and driving simulator 3d is a game that offers you a realistic and immersive driving experience. Here are some of the features that make this game stand out:</p>
-<h2>real car parking and driving simulator 3d download</h2><br /><p><b><b>Download File</b> ✶✶✶ <a href="https://jinyurl.com/2uNMPa">https://jinyurl.com/2uNMPa</a></b></p><br /><br />
-<ul>
-<li><b>Realistic cars and physics:</b> You can choose from a variety of cars, from sports cars to SUVs, each with its own characteristics and performance. The game also uses realistic physics to simulate the car's movement, weight, speed, and braking.</li>
-<li><b>Challenging parking courses and levels:</b> You can test your parking skills in different environments, such as city streets, parking lots, airports, docks, factories, and more. Each level has its own objectives, obstacles, and time limits. You can earn stars based on your performance and unlock new levels.</li>
-<li><b>Amazing graphics and sound effects:</b> The game has stunning 3D graphics that create a lifelike atmosphere. You can also enjoy the realistic sound effects of the engine, horn, brakes, tires, and collisions.</li>
-<li><b>Customizable controls and camera angles:</b> You can adjust the controls according to your preference, whether you want to use buttons, steering wheel, or tilt. You can also switch between different camera views, such as top-down, rear-view, or cockpit.</li>
-<li><b>Offline and online modes:</b> You can play the game offline without an internet connection, or you can play online with other players from around the world. You can compete in multiplayer races, join clans, chat with friends, and rank on the leaderboard.</li>
-</ul>
-<h2>How to Download Real Car Parking and Driving Simulator 3D</h2>
-<p>The game is available for free on various platforms. Here is how to download it on your device:</p>
-<h3>For Android devices</h3>
-<p>You can download the game from the Google Play Store by following these steps:</p>
-<ol>
-<li>Open the Google Play Store app on your device.</li>
-<li>Search for "real car parking 3d" or click [here](^1^).</li>
-<li>Select the game from the list of results.</li>
-<li>Tap on "Install" and wait for the download to finish.</li>
-<li>Tap on "Open" to launch the game.</li>
-</ol>
-<h3>For iOS devices</h3>
-<p>You can download the game from the App Store by following these steps:</p>
-<ol>
-<li>Open the App Store app on your device.</li>
-<li>Search for "real car parking 3d" or click [here](^2^).</li>
-<li>Select the game from the list of results.</li>
-<li>Tap on "Get" and enter your Apple ID password if prompted.</li>
-<li>Wait for the download to finish.</li>
-<li>Tap on the game icon to launch the game.</li>
-</ol>
-<h3>For Windows devices</h3>
-<p>You can download the game from the Microsoft Store by following these steps:</p>
-<ol>
-<li>Open the Microsoft Store app on your device.</li>
-<li>Search for "real car parking 3d" or click [here].</li>
-<li>Select the game from the list of results.</li>
-<li>Click on "Get" and sign in with your Microsoft account if prompted.</li>
-<li>Wait for the download to finish.</li>
-<li>Click on "Play" to launch the game.</li>
-</ol>
-<h2>Tips and Tricks for Playing Real Car Parking and Driving Simulator 3D</h2>
-<p>Now that you have downloaded the game, you might be wondering how to play it and improve your skills. Here are some tips and tricks that will help you master the game:</p>
-<h3>Practice your parking skills in free mode</h3>
-<p>The game has a free mode where you can drive around without any time limit or objectives. This is a great way to get familiar with the controls, the car's behavior, and the environment. You can also practice parking in different spots and angles, and learn from your mistakes.</p>
-<h3>Use the brake and steering buttons wisely</h3>
-<p>The game has two buttons for braking and steering, which are located on the bottom left and right corners of the screen. You can use them to control the speed and direction of your car. However, you should not overuse them or press them too hard, as this might cause your car to skid, spin, or crash. You should also release them when you are not using them, as this will save your fuel and prevent overheating.</p>
-<h3>Follow the arrows and avoid obstacles</h3>
-<p>The game has arrows that guide you to your parking spot. You should follow them carefully and pay attention to the distance indicator, which shows how far you are from your destination. You should also avoid hitting any obstacles, such as cones, barriers, walls, or other cars, as this will damage your car and reduce your score. You can use the mini-map on the top right corner of the screen to see your surroundings and plan your route.</p>
-<p>real car parking 3d simulator app<br />
-real car parking and driving simulator 3d game<br />
-real car parking and racing simulator 3d<br />
-real car parking and drifting simulator 3d<br />
-real car parking and driving school simulator 3d<br />
-real car parking and driving test simulator 3d<br />
-real car parking and driving challenge simulator 3d<br />
-real car parking and driving skills simulator 3d<br />
-real car parking and driving adventure simulator 3d<br />
-real car parking and driving extreme simulator 3d<br />
-real car parking and driving city simulator 3d<br />
-real car parking and driving offroad simulator 3d<br />
-real car parking and driving highway simulator 3d<br />
-real car parking and driving airport simulator 3d<br />
-real car parking and driving police simulator 3d<br />
-real car parking and driving taxi simulator 3d<br />
-real car parking and driving truck simulator 3d<br />
-real car parking and driving bus simulator 3d<br />
-real car parking and driving suv simulator 3d<br />
-real car parking and driving sports car simulator 3d<br />
-real car parking and driving classic car simulator 3d<br />
-real car parking and driving luxury car simulator 3d<br />
-real car parking and driving muscle car simulator 3d<br />
-real car parking and driving supercar simulator 3d<br />
-real car parking and driving hypercar simulator 3d<br />
-real car parking and driving electric car simulator 3d<br />
-real car parking and driving hybrid car simulator 3d<br />
-real car parking and driving smart car simulator 3d<br />
-real car parking and driving mini car simulator 3d<br />
-real car parking and driving monster truck simulator 3d<br />
-real car parking and driving tractor simulator 3d<br />
-real car parking and driving loader simulator 3d<br />
-real car parking and driving forklift simulator 3d<br />
-real car parking and driving crane simulator 3d<br />
-real car parking and driving tow truck simulator 3d<br />
-real car parking and driving fire truck simulator 3d<br />
-real car parking and driving ambulance simulator 3d<br />
-real car parking and driving limo simulator 3d<br />
-real car parking and driving jeep simulator 3d<br />
-real car parking and driving pickup truck simulator 3d<br />
-download free real car parking and driving simulator 3d for android <br />
-download free real car parking and driving simulator 3d for ios <br />
-download free real car parking and driving simulator 3d for pc <br />
-download free real car parking and driving simulator 3d for windows <br />
-download free real car parking and driving simulator 3d for mac <br />
-download free real car parking and driving simulator 3d for linux <br />
-download free real car parking and driving simulator 3d apk <br />
-download free real car parking and driving simulator 3d mod apk <br />
-download free real car parking and driving simulator 3d hack apk <br />
-download free real car parking and driving simulator 3d unlimited money apk</p>
-<h2>Collect coins and gems to unlock new cars</h2>
-<p>The game has coins and gems that you can collect by driving around or completing levels. You can use them to buy new cars or upgrade your existing ones. Each car has its own stats, such as speed, acceleration, handling, and braking. You should try different cars and find the one that suits your style and preference.</p>
-<h3>Try different camera views to find the best angle</h3>
-<p>The game has four camera views that you can switch between by tapping on the camera icon on the top left corner of the screen. They are: top-down, rear-view, cockpit, and side-view. Each view has its own advantages and disadvantages, depending on the situation and your preference. You should experiment with different views and find the one that gives you the best visibility and comfort.</p>
-<h2>Conclusion</h2>
-<p>Real car parking and driving simulator 3d is a game that will challenge your driving and parking skills in a realistic and fun way. It has many features that make it stand out from other car simulation games, such as realistic cars and physics, challenging parking courses and levels, amazing graphics and sound effects, customizable controls and camera angles, offline and online modes, and more. You can download it for free on various platforms, such as Android, iOS, and Windows devices. If you are looking for a game that will keep you entertained for hours, then you should definitely give real car parking and driving simulator 3d a try!</p>
-<p>If you liked this article, please share it with your friends and leave a comment below. We would love to hear your feedback and suggestions. Also, if you have any questions about the game or need more tips and tricks, feel free to ask us. We will be happy to help you!</p>
-<h2>Frequently Asked Questions</h2>
-<ul>
-<li><b>Q: How do I change the language of the game?</b></li>
-<li>A: You can change the language of the game by tapping on the settings icon on the main menu screen. Then, select "Language" from the list of options. You can choose from English, Spanish, French, German, Italian, Portuguese, Russian, Turkish, Arabic, Chinese, Japanese, Korean, Hindi, Indonesian, or Vietnamese.</li>
-<li><b>Q: How do I save my progress in the game?</b></li>
-<li>A: The game automatically saves your progress every time you complete a level or exit the game. You can also manually save your progress by tapping on the settings icon on the main menu screen. Then, select "Save Game" from the list of options. You can also load your saved game by selecting "Load Game" from the same menu.</li>
-<li><b>Q: How do I reset my progress in the game?</b></li>
-<li>A: You can reset your progress in the game by tapping on the settings icon on the main menu screen. Then, select "Reset Game" from the list of options. This will erase all your data and start the game from scratch. Be careful, as this action cannot be undone.</li>
-<li><b>Q: How do I contact the developers of the game?</b></li>
-<li>A: You can contact the developers of the game by tapping on the settings icon on the main menu screen. Then, select "Contact Us" from the list of options. You can send them an email with your feedback, suggestions, or issues. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, or YouTube.</li>
-<li><b>Q: How do I rate and review the game?</b></li>
-<li>A: You can rate and review the game by tapping on the settings icon on the main menu screen. Then, select "Rate Us" from the list of options. This will redirect you to the store page of the game, where you can give it a star rating and write a comment. Your feedback is very important for us and helps us improve the game.</li>
-</ul></p> 401be4b1e0<br />
-<br />
-<br />
spaces/1toTree/lora_test/ppdiffusers/commands/env.py
DELETED
@@ -1,67 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import platform
-from argparse import ArgumentParser
-
-from .. import __version__ as version
-from ..utils import is_paddle_available, is_paddlenlp_available
-from . import BasePPDiffusersCLICommand
-
-
-def info_command_factory(_):
-    return EnvironmentCommand()
-
-
-class EnvironmentCommand(BasePPDiffusersCLICommand):
-    @staticmethod
-    def register_subcommand(parser: ArgumentParser):
-        download_parser = parser.add_parser("env")
-        download_parser.set_defaults(func=info_command_factory)
-
-    def run(self):
-        pd_version = "not installed"
-        pd_cuda_available = "NA"
-        if is_paddle_available():
-            import paddle
-
-            pd_version = paddle.__version__
-            pd_cuda_available = paddle.device.is_compiled_with_cuda()
-
-        paddlenlp_version = "not installed"
-        # Fixed: the original tested the bare function object, which is always truthy.
-        if is_paddlenlp_available():
-            import paddlenlp
-
-            paddlenlp_version = paddlenlp.__version__
-
-        info = {
-            "`ppdiffusers` version": version,
-            "Platform": platform.platform(),
-            "Python version": platform.python_version(),
-            "Paddle version (GPU?)": f"{pd_version} ({pd_cuda_available})",
-            "PaddleNLP version": paddlenlp_version,
-            "Using GPU in script?": "<fill in>",
-            "Using distributed or parallel set-up in script?": "<fill in>",
-        }
-
-        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
-        print(self.format_dict(info))
-
-        return info
-
-    @staticmethod
-    def format_dict(d):
-        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
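For orientation, a minimal sketch of how this subcommand is presumably wired into a CLI, assuming ppdiffusers is installed; the program name and the "env" argument below are illustrative, not taken from the file:

from argparse import ArgumentParser

from ppdiffusers.commands.env import EnvironmentCommand  # import path inferred from the file location

parser = ArgumentParser("ppdiffusers-cli")  # illustrative program name
subcommands = parser.add_subparsers()
EnvironmentCommand.register_subcommand(subcommands)  # adds the "env" subparser

args = parser.parse_args(["env"])
args.func(args).run()  # info_command_factory(args) returns an EnvironmentCommand; run() prints the report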
spaces/232labs/VToonify/vtoonify/model/stylegan/prepare_data.py
DELETED
@@ -1,105 +0,0 @@
-import argparse
-from io import BytesIO
-import multiprocessing
-from functools import partial
-
-import os
-from PIL import Image
-import lmdb
-from tqdm import tqdm
-from torchvision import datasets
-from torchvision.transforms import functional as trans_fn
-
-
-def resize_and_convert(img, size, resample, quality=100):
-    img = trans_fn.resize(img, size, resample)
-    img = trans_fn.center_crop(img, size)
-    buffer = BytesIO()
-    img.save(buffer, format="jpeg", quality=quality)
-    val = buffer.getvalue()
-
-    return val
-
-
-def resize_multiple(
-    img, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS, quality=100
-):
-    imgs = []
-
-    for size in sizes:
-        imgs.append(resize_and_convert(img, size, resample, quality))
-
-    return imgs
-
-
-def resize_worker(img_file, sizes, resample):
-    i, file = img_file
-    img = Image.open(file)
-    img = img.convert("RGB")
-    out = resize_multiple(img, sizes=sizes, resample=resample)
-
-    return i, out
-
-
-def prepare(
-    env, dataset, n_worker, sizes=(128, 256, 512, 1024), resample=Image.LANCZOS
-):
-    resize_fn = partial(resize_worker, sizes=sizes, resample=resample)
-
-    files = sorted(dataset.imgs, key=lambda x: x[0])
-    files = [(i, file) for i, (file, label) in enumerate(files)]
-    total = 0
-
-    with multiprocessing.Pool(n_worker) as pool:
-        for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)):
-            for size, img in zip(sizes, imgs):
-                key = f"{size}-{str(i).zfill(5)}".encode("utf-8")
-
-                with env.begin(write=True) as txn:
-                    txn.put(key, img)
-
-            total += 1
-
-        with env.begin(write=True) as txn:
-            txn.put("length".encode("utf-8"), str(total).encode("utf-8"))
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Preprocess images for model training")
-    parser.add_argument("--out", type=str, help="filename of the result lmdb dataset")
-    parser.add_argument(
-        "--size",
-        type=str,
-        default="128,256,512,1024",
-        help="resolutions of images for the dataset",
-    )
-    parser.add_argument(
-        "--n_worker",
-        type=int,
-        default=8,
-        help="number of workers for preparing dataset",
-    )
-    parser.add_argument(
-        "--resample",
-        type=str,
-        default="lanczos",
-        help="resampling methods for resizing images",
-    )
-    parser.add_argument("path", type=str, help="path to the image dataset")
-
-    args = parser.parse_args()
-
-    if not os.path.exists(args.out):
-        os.makedirs(args.out)
-
-    resample_map = {"lanczos": Image.LANCZOS, "bilinear": Image.BILINEAR}
-    resample = resample_map[args.resample]
-
-    sizes = [int(s.strip()) for s in args.size.split(",")]
-
-    print("Make dataset of image sizes:", ", ".join(str(s) for s in sizes))
-
-    imgset = datasets.ImageFolder(args.path)
-
-    with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
-        prepare(env, imgset, args.n_worker, sizes=sizes, resample=resample)
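For reference, a minimal sketch of reading one entry back out of the LMDB this script writes; the key layout (f"{size}-{str(i).zfill(5)}" plus a "length" counter) comes from prepare() above, while the path and size below are placeholders:

from io import BytesIO

import lmdb
from PIL import Image

with lmdb.open("out_lmdb", readonly=True, lock=False) as env:  # placeholder path
    with env.begin() as txn:
        n_images = int(txn.get("length".encode("utf-8")))  # prepare() counts source images
        key = f"{256}-{str(0).zfill(5)}".encode("utf-8")   # first image at the 256px scale
        img = Image.open(BytesIO(txn.get(key)))            # entries are stored as JPEG bytes

print(n_images, img.size)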
spaces/52Hz/CMFNet_deraindrop/main_test_CMFNet.py
DELETED
@@ -1,98 +0,0 @@
-import argparse
-import cv2
-import glob
-import numpy as np
-from collections import OrderedDict
-from skimage import img_as_ubyte
-import os
-import shutil  # needed by clean_folder() below; missing in the original
-import torch
-import requests
-from PIL import Image
-import torchvision.transforms.functional as TF
-import torch.nn.functional as F
-from natsort import natsorted
-from model.CMFNet import CMFNet
-
-
-def main():
-    parser = argparse.ArgumentParser(description='Demo Image Deraindrop')
-    parser.add_argument('--input_dir', default='test/', type=str, help='Input images')
-    parser.add_argument('--result_dir', default='results/', type=str, help='Directory for results')
-    parser.add_argument('--weights',
-                        default='experiments/pretrained_models/deraindrop_model.pth', type=str,
-                        help='Path to weights')
-
-    args = parser.parse_args()
-
-    inp_dir = args.input_dir
-    out_dir = args.result_dir
-
-    os.makedirs(out_dir, exist_ok=True)
-
-    files = natsorted(glob.glob(os.path.join(inp_dir, '*')))
-
-    if len(files) == 0:
-        raise Exception(f"No files found at {inp_dir}")
-
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-    # Load corresponding models architecture and weights
-    model = CMFNet()
-    model = model.to(device)
-    model.eval()
-    load_checkpoint(model, args.weights)
-
-    mul = 8
-    for file_ in files:
-        img = Image.open(file_).convert('RGB')
-        input_ = TF.to_tensor(img).unsqueeze(0).to(device)
-
-        # Pad the input if it is not a multiple of 8
-        h, w = input_.shape[2], input_.shape[3]
-        H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
-        padh = H - h if h % mul != 0 else 0
-        padw = W - w if w % mul != 0 else 0
-        input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')
-
-        with torch.no_grad():
-            restored = model(input_)
-
-        restored = torch.clamp(restored, 0, 1)
-        restored = restored[:, :, :h, :w]
-        restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
-        restored = img_as_ubyte(restored[0])
-
-        f = os.path.splitext(os.path.split(file_)[-1])[0]
-        save_img((os.path.join(out_dir, f + '.png')), restored)
-
-
-def save_img(filepath, img):
-    cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
-
-
-def load_checkpoint(model, weights):
-    checkpoint = torch.load(weights, map_location=torch.device('cpu'))
-    try:
-        model.load_state_dict(checkpoint["state_dict"])
-    except:
-        state_dict = checkpoint["state_dict"]
-        new_state_dict = OrderedDict()
-        for k, v in state_dict.items():
-            name = k[7:]  # remove `module.`
-            new_state_dict[name] = v
-        model.load_state_dict(new_state_dict)
-
-
-def clean_folder(folder):
-    for filename in os.listdir(folder):
-        file_path = os.path.join(folder, filename)
-        try:
-            if os.path.isfile(file_path) or os.path.islink(file_path):
-                os.unlink(file_path)
-            elif os.path.isdir(file_path):
-                shutil.rmtree(file_path)
-        except Exception as e:
-            print('Failed to delete %s. Reason: %s' % (file_path, e))
-
-
-if __name__ == '__main__':
-    main()
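The pad-to-a-multiple-of-8 arithmetic above is easy to sanity-check in isolation; a small sketch with made-up dimensions:

import torch
import torch.nn.functional as F

mul = 8
x = torch.randn(1, 3, 250, 333)  # made-up H x W, neither divisible by 8
h, w = x.shape[2], x.shape[3]
H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
padh = H - h if h % mul != 0 else 0
padw = W - w if w % mul != 0 else 0
x = F.pad(x, (0, padw, 0, padh), 'reflect')
print(x.shape)  # torch.Size([1, 3, 256, 336])

Note that ((h + mul) // mul) * mul overshoots by 8 when h is already a multiple of 8, which is why padh and padw are guarded by the h % mul and w % mul checks.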
spaces/7hao/bingo/src/lib/hooks/use-enter-submit.tsx
DELETED
@@ -1,23 +0,0 @@
-import { useRef, type RefObject } from 'react'
-
-export function useEnterSubmit(): {
-  formRef: RefObject<HTMLFormElement>
-  onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void
-} {
-  const formRef = useRef<HTMLFormElement>(null)
-
-  const handleKeyDown = (
-    event: React.KeyboardEvent<HTMLTextAreaElement>
-  ): void => {
-    if (
-      event.key === 'Enter' &&
-      !event.shiftKey &&
-      !event.nativeEvent.isComposing
-    ) {
-      formRef.current?.requestSubmit()
-      event.preventDefault()
-    }
-  }
-
-  return { formRef, onKeyDown: handleKeyDown }
-}
spaces/AIConsultant/MusicGen/audiocraft/losses/balancer.py
DELETED
@@ -1,136 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-import flashy
-import torch
-from torch import autograd
-
-
-class Balancer:
-    """Loss balancer.
-
-    The loss balancer combines losses together to compute gradients for the backward.
-    Given `y = f(...)`, and a number of losses `l1(y, ...)`, `l2(y, ...)`, with `...`
-    not having any dependence on `f`, the balancer can efficiently normalize the partial gradients
-    `d l1 / d y`, `d l2 / d y` before summing them in order to achieve a desired ratio between
-    the losses. For instance if `weights = {'l1': 2, 'l2': 1}`, 66% of the gradient
-    going into `f(...)` will come from `l1` on average, and 33% from `l2`. This allows for an easy
-    interpretation of the weights even if the intrinsic scale of `l1`, `l2` ... is unknown.
-
-    Noting `g1 = d l1 / dy`, etc., the balanced gradient `G` will be
-    (with `avg` an exponential moving average over the updates),
-
-        G = sum_i total_norm * g_i / avg(||g_i||) * w_i / sum(w_i)
-
-    If `balance_grads` is False, this is deactivated, and instead the gradient will just be the
-    standard sum of the partial gradients with the given weights.
-
-    A call to the backward method of the balancer will compute the partial gradients,
-    combining all the losses and potentially rescaling the gradients,
-    which can help stabilize the training and reason about multiple losses with varying scales.
-    The obtained gradient with respect to `y` is then back-propagated to `f(...)`.
-
-    Expected usage:
-
-        weights = {'loss_a': 1, 'loss_b': 4}
-        balancer = Balancer(weights, ...)
-        losses: dict = {}
-        losses['loss_a'] = compute_loss_a(x, y)
-        losses['loss_b'] = compute_loss_b(x, y)
-        if model.training():
-            effective_loss = balancer.backward(losses, x)
-
-    Args:
-        weights (dict[str, float]): Weight coefficient for each loss. The balancer expects the losses keys
-            from the backward method to match the weights keys to assign a weight to each of the provided losses.
-        balance_grads (bool): Whether to rescale gradients so that weights reflect the fraction of the
-            overall gradient, rather than a constant multiplier.
-        total_norm (float): Reference norm when rescaling gradients, ignored otherwise.
-        ema_decay (float): EMA decay for averaging the norms.
-        per_batch_item (bool): Whether to compute the averaged norm per batch item or not. This only holds
-            when rescaling the gradients.
-        epsilon (float): Epsilon value for numerical stability.
-        monitor (bool): If True, stores in `self.metrics` the relative ratio between the norm of the gradients
-            coming from each loss, when calling `backward()`.
-    """
-    def __init__(self, weights: tp.Dict[str, float], balance_grads: bool = True, total_norm: float = 1.,
-                 ema_decay: float = 0.999, per_batch_item: bool = True, epsilon: float = 1e-12,
-                 monitor: bool = False):
-        self.weights = weights
-        self.per_batch_item = per_batch_item
-        self.total_norm = total_norm or 1.
-        self.averager = flashy.averager(ema_decay or 1.)
-        self.epsilon = epsilon
-        self.monitor = monitor
-        self.balance_grads = balance_grads
-        self._metrics: tp.Dict[str, tp.Any] = {}
-
-    @property
-    def metrics(self):
-        return self._metrics
-
-    def backward(self, losses: tp.Dict[str, torch.Tensor], input: torch.Tensor) -> torch.Tensor:
-        """Compute the backward and return the effective train loss, e.g. the loss obtained from
-        computing the effective weights. If `balance_grads` is True, the effective weights
-        are the ones that need to be applied to each gradient to respect the desired relative
-        scale of gradients coming from each loss.
-
-        Args:
-            losses (Dict[str, torch.Tensor]): dictionary with the same keys as `self.weights`.
-            input (torch.Tensor): the input of the losses, typically the output of the model.
-                This should be the single point of dependence between the losses
-                and the model being trained.
-        """
-        norms = {}
-        grads = {}
-        for name, loss in losses.items():
-            # Compute partial derivative of the loss with respect to the input.
-            grad, = autograd.grad(loss, [input], retain_graph=True)
-            if self.per_batch_item:
-                # We do not average the gradient over the batch dimension.
-                dims = tuple(range(1, grad.dim()))
-                norm = grad.norm(dim=dims, p=2).mean()
-            else:
-                norm = grad.norm(p=2)
-            norms[name] = norm
-            grads[name] = grad
-
-        count = 1
-        if self.per_batch_item:
-            count = len(grad)
-        # Average norms across workers. Theoretically we should average the
-        # squared norm, then take the sqrt, but it worked fine like that.
-        avg_norms = flashy.distrib.average_metrics(self.averager(norms), count)
-        # We approximate the total norm of the gradient as the sums of the norms.
-        # Obviously this can be very incorrect if all gradients are aligned, but it works fine.
-        total = sum(avg_norms.values())
-
-        self._metrics = {}
-        if self.monitor:
-            # Store the ratio of the total gradient represented by each loss.
-            for k, v in avg_norms.items():
-                self._metrics[f'ratio_{k}'] = v / total
-
-        total_weights = sum([self.weights[k] for k in avg_norms])
-        assert total_weights > 0.
-        desired_ratios = {k: w / total_weights for k, w in self.weights.items()}
-
-        out_grad = torch.zeros_like(input)
-        effective_loss = torch.tensor(0., device=input.device, dtype=input.dtype)
-        for name, avg_norm in avg_norms.items():
-            if self.balance_grads:
-                # g_balanced = g / avg(||g||) * total_norm * desired_ratio
-                scale = desired_ratios[name] * self.total_norm / (self.epsilon + avg_norm)
-            else:
-                # We just do regular weighted sum of the gradients.
-                scale = self.weights[name]
-            out_grad.add_(grads[name], alpha=scale)
-            effective_loss += scale * losses[name].detach()
-        # Send the computed partial derivative with respect to the output of the model to the model.
-        input.backward(out_grad)
-        return effective_loss
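As a quick numeric sketch of the rescaling rule above (plain Python, no torch; the norm values are made up), with weights {'l1': 2, 'l2': 1} the rescaled per-loss gradient norms land in the intended 2:1 split of the unit total norm regardless of their raw magnitudes:

weights = {'l1': 2.0, 'l2': 1.0}
avg_norms = {'l1': 10.0, 'l2': 0.1}  # hypothetical EMA gradient norms
total_norm, eps = 1.0, 1e-12

total_w = sum(weights.values())
desired = {k: w / total_w for k, w in weights.items()}
scales = {k: desired[k] * total_norm / (eps + avg_norms[k]) for k in weights}
balanced = {k: scales[k] * avg_norms[k] for k in weights}
print(balanced)  # approximately {'l1': 0.667, 'l2': 0.333}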
spaces/AIConsultant/MusicGen/audiocraft/train.py
DELETED
@@ -1,157 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Entry point for dora to launch solvers for running training loops.
-See more info on how to use dora: https://github.com/facebookresearch/dora
-"""
-
-import logging
-import multiprocessing
-import os
-import sys
-import typing as tp
-
-from dora import git_save, hydra_main, XP
-import flashy
-import hydra
-import omegaconf
-
-from .environment import AudioCraftEnvironment
-from .utils.cluster import get_slurm_parameters
-
-logger = logging.getLogger(__name__)
-
-
-def resolve_config_dset_paths(cfg):
-    """Enable Dora to load manifest from git clone repository."""
-    # manifest files for the different splits
-    for key, value in cfg.datasource.items():
-        if isinstance(value, str):
-            cfg.datasource[key] = git_save.to_absolute_path(value)
-
-
-def get_solver(cfg):
-    from . import solvers
-    # Convert batch size to batch size for each GPU
-    assert cfg.dataset.batch_size % flashy.distrib.world_size() == 0
-    cfg.dataset.batch_size //= flashy.distrib.world_size()
-    for split in ['train', 'valid', 'evaluate', 'generate']:
-        if hasattr(cfg.dataset, split) and hasattr(cfg.dataset[split], 'batch_size'):
-            assert cfg.dataset[split].batch_size % flashy.distrib.world_size() == 0
-            cfg.dataset[split].batch_size //= flashy.distrib.world_size()
-    resolve_config_dset_paths(cfg)
-    solver = solvers.get_solver(cfg)
-    return solver
-
-
-def get_solver_from_xp(xp: XP, override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
-                       restore: bool = True, load_best: bool = True,
-                       ignore_state_keys: tp.List[str] = [], disable_fsdp: bool = True):
-    """Given a XP, return the Solver object.
-
-    Args:
-        xp (XP): Dora experiment for which to retrieve the solver.
-        override_cfg (dict or None): If not None, should be a dict used to
-            override some values in the config of `xp`. This will not impact
-            the XP signature or folder. The format is different
-            than the one used in Dora grids, nested keys should actually be nested dicts,
-            not flattened, e.g. `{'optim': {'batch_size': 32}}`.
-        restore (bool): If `True` (the default), restore state from the last checkpoint.
-        load_best (bool): If `True` (the default), load the best state from the checkpoint.
-        ignore_state_keys (list[str]): List of sources to ignore when loading the state, e.g. `optimizer`.
-        disable_fsdp (bool): if True, disables FSDP entirely. This will
-            also automatically skip loading the EMA. For solver specific
-            state sources, like the optimizer, you might want to
-            use along `ignore_state_keys=['optimizer']`. Must be used with `load_best=True`.
-    """
-    logger.info(f"Loading solver from XP {xp.sig}. "
-                f"Overrides used: {xp.argv}")
-    cfg = xp.cfg
-    if override_cfg is not None:
-        cfg = omegaconf.OmegaConf.merge(cfg, omegaconf.DictConfig(override_cfg))
-    if disable_fsdp and cfg.fsdp.use:
-        cfg.fsdp.use = False
-        assert load_best is True
-        # ignoring some keys that were FSDP sharded like model, ema, and best_state.
-        # fsdp_best_state will be used in that case. When using a specific solver,
-        # one is responsible for adding the relevant keys, e.g. 'optimizer'.
-        # We could make something to automatically register those inside the solver, but that
-        # seem overkill at this point.
-        ignore_state_keys = ignore_state_keys + ['model', 'ema', 'best_state']
-
-    try:
-        with xp.enter():
-            solver = get_solver(cfg)
-            if restore:
-                solver.restore(load_best=load_best, ignore_state_keys=ignore_state_keys)
-        return solver
-    finally:
-        hydra.core.global_hydra.GlobalHydra.instance().clear()
-
-
-def get_solver_from_sig(sig: str, *args, **kwargs):
-    """Return Solver object from Dora signature, i.e. to play with it from a notebook.
-    See `get_solver_from_xp` for more information.
-    """
-    xp = main.get_xp_from_sig(sig)
-    return get_solver_from_xp(xp, *args, **kwargs)
-
-
-def init_seed_and_system(cfg):
-    import numpy as np
-    import torch
-    import random
-    from audiocraft.modules.transformer import set_efficient_attention_backend
-
-    multiprocessing.set_start_method(cfg.mp_start_method)
-    logger.debug('Setting mp start method to %s', cfg.mp_start_method)
-    random.seed(cfg.seed)
-    np.random.seed(cfg.seed)
-    # torch also initialize cuda seed if available
-    torch.manual_seed(cfg.seed)
-    torch.set_num_threads(cfg.num_threads)
-    os.environ['MKL_NUM_THREADS'] = str(cfg.num_threads)
-    os.environ['OMP_NUM_THREADS'] = str(cfg.num_threads)
-    logger.debug('Setting num threads to %d', cfg.num_threads)
-    set_efficient_attention_backend(cfg.efficient_attention_backend)
-    logger.debug('Setting efficient attention backend to %s', cfg.efficient_attention_backend)
-
-
-@hydra_main(config_path='../config', config_name='config', version_base='1.1')
-def main(cfg):
-    init_seed_and_system(cfg)
-
-    # Setup logging both to XP specific folder, and to stderr.
-    log_name = '%s.log.{rank}' % cfg.execute_only if cfg.execute_only else 'solver.log.{rank}'
-    flashy.setup_logging(level=str(cfg.logging.level).upper(), log_name=log_name)
-    # Initialize distributed training, no need to specify anything when using Dora.
-    flashy.distrib.init()
-    solver = get_solver(cfg)
-    if cfg.show:
-        solver.show()
-        return
-
-    if cfg.execute_only:
-        assert cfg.execute_inplace or cfg.continue_from is not None, \
-            "Please explicitly specify the checkpoint to continue from with continue_from=<sig_or_path> " + \
-            "when running with execute_only or set execute_inplace to True."
-        solver.restore(replay_metrics=False)  # load checkpoint
-        solver.run_one_stage(cfg.execute_only)
-        return
-
-    return solver.run()
-
-
-main.dora.dir = AudioCraftEnvironment.get_dora_dir()
-main._base_cfg.slurm = get_slurm_parameters(main._base_cfg.slurm)
-
-if main.dora.shared is not None and not os.access(main.dora.shared, os.R_OK):
-    print("No read permission on dora.shared folder, ignoring it.", file=sys.stderr)
-    main.dora.shared = None
-
-if __name__ == '__main__':
-    main()
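As the get_solver_from_sig docstring suggests, typical notebook usage is presumably along these lines (the signature string below is a placeholder; the kwargs are forwarded to get_solver_from_xp):

from audiocraft.train import get_solver_from_sig

# "1234abcd" is a placeholder Dora signature; skip restoring the optimizer state.
solver = get_solver_from_sig("1234abcd", load_best=True, ignore_state_keys=["optimizer"])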
spaces/AIZero2Hero4Health/5-QuantumStreamlitAIDashboard-SL/app.py
DELETED
@@ -1,57 +0,0 @@
import streamlit as st
import gradio as gr
import IPython
import streamlit as st
import streamlit.components.v1 as components
from IPython.display import IFrame

#quantum imports:
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, execute

src='' # URL parameter to change the iframe url

def SetIframeURL(option_selected):
    if (option_selected=='QCEngine'):
        src='https://oreilly-qc.github.io?p=2-1'
    if (option_selected=='Grok'):
        src='https://javafxpert.github.io/grok-bloch/'
    if (option_selected=='Playground'):
        src='https://davidbkemp.github.io/quantum-gate-playground/'
    if (option_selected=='Circuit'):
        src='https://algassert.com/quirk#circuit={%22cols%22:[[%22H%22],[%22Bloch%22],[%22Measure%22]]}'

    # Render iframe contents
    #st.set_page_config(layout="wide")
    width = st.sidebar.slider("Width", 200, 1500, 800, 100)
    height = st.sidebar.slider("Height", 200, 1500, 900, 100)
    st.components.v1.iframe(src, width, height, scrolling=True)

# query params exist
try:
    options = ['QCEngine', 'Grok', 'Playground', 'Circuit']
    query_params = st.experimental_get_query_params()
    query_option = query_params['option'][0] #throws an exception when visiting http://host:port
    option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option))
    if option_selected:
        st.experimental_set_query_params(option=option_selected)
        SetIframeURL(option_selected)

# run when query params don't exist. e.g on first launch
except: # catch exception and set query param to predefined value
    options = ['QCEngine', 'Grok', 'Playground', 'Circuit']
    st.experimental_set_query_params(option=options[1]) # defaults to 'Grok'
    query_params = st.experimental_get_query_params()
    query_option = query_params['option'][0]
    option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option))
    if option_selected:
        st.experimental_set_query_params(option=option_selected)
        SetIframeURL(option_selected)

def LoadGradioAIModels():
    title = "AI Quantum - QGAN and QCEngine"
    description = "Using Superposition Advantage from Quantum for QGAN AI."
    article = "<p style='text-align: center'></p>"

    examples = [
        ["Scientific breakthroughs in treatment of HIV/AIDS may be solved in our lifetime using a procedure called [MASK] modulation which strengthens the immune system to fight the disease."],["A disease called [MASK] disease involves progressive memory loss and has new treatments to improve memory and delay progression of the disease."],["[MASK] refers to the uncontrolled growth of abnormal cells in the body. With chemotherapy and radiation therapy have improvements and replacements that destroy cancer cells before they become resistant to current treatment methods."],["The hereditary disease [MASK] is caused by mucus abnormally thick preventing lungs and pancreas from doing their jobs correctly."],["[MASK] or atherosclerosis is the buildup of cholesterol, fatty cells, and inflammatory deposits in the arteries. Stem cells, mechanical devices, and lowering cholesterol and blood pressure levels are helping prevention."]]
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/work_dirs/yolov6_s_df2_0.4/__init__.py
DELETED
File without changes
spaces/Abdullah-Habib/Text_to_Speech_Urdu/app.py
DELETED
@@ -1,127 +0,0 @@
import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor, SpeechT5HifiGan
import soundfile as sf
import gradio as gr
import scipy.io.wavfile as wav
import numpy as np
import wave
from datasets import load_dataset, Audio, config
from IPython.display import Audio

# Load the TTS model from the Hugging Face Hub
checkpoint = "Abdullah-Habib/urdu_speech_tt" # Replace with your actual model name
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
tokenizer = processor.tokenizer
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")


# Buckwalter to Unicode mapping
buck2uni = {
    u"\u0627": "a",
    u"\u0627": "a",
    u"\u0675": "a",
    u"\u0673": "a",
    u"\u0630": "a",
    u"\u0622": "aa",
    u"\u0628": "b",
    u"\u067E": "p",
    u"\u062A": "t",
    u"\u0637": "t",
    u"\u0679": "t",
    u"\u062C": "j",
    u"\u0633": "s",
    u"\u062B": "s",
    u"\u0635": "s",
    u"\u0686": "ch",
    u"\u062D": "h",
    u"\u0647": "h",
    u"\u0629": "h",
    u"\u06DF": "h",
    u"\u062E": "kh",
    u"\u062F": "d",
    u"\u0688": "d",
    u"\u0630": "z",
    u"\u0632": "z",
    u"\u0636": "z",
    u"\u0638": "z",
    u"\u068E": "z",
    u"\u0631": "r",
    u"\u0691": "r",
    u"\u0634": "sh",
    u"\u063A": "gh",
    u"\u0641": "f",
    u"\u06A9": "k",
    u"\u0642": "k",
    u"\u06AF": "g",
    u"\u0644": "l",
    u"\u0645": "m",
    u"\u0646": "n",
    u"\u06BA": "n",
    u"\u0648": "o",
    u"\u0649": "y",
    u"\u0626": "y",
    u"\u06CC": "y",
    u"\u06D2": "e",
    u"\u06C1": "h",
    u"\u064A": "e",
    u"\u06C2": "ah",
    u"\u06BE": "h",
    u"\u0639": "a",
    u"\u0643": "k",
    u"\u0621": "a",
    u"\u0624": "o",
    u"\u060C": ""  # separator, ulta comma
}
def transString(string, reverse=0):
    """Given a Unicode string, transliterate into Buckwalter. To go from
    Buckwalter back to Unicode, set reverse=1"""
    for k, v in buck2uni.items():
        if not reverse:
            string = string.replace(k, v)
        else:
            string = string.replace(v, k)
    return string


def generate_audio(text):
    # Convert input text to Roman Urdu
    roman_urdu = transString(text)

    # Tokenize the input text
    inputs = processor(text=roman_urdu, return_tensors="pt", type = "numpy")

    # Generate audio from the SpeechT5 model

    # speaker_embeddings = torch.tensor(np.load("speaker_embeddings.npy"))
    speaker_embeddings = torch.load("speaker_embeddings_29.pt")
    # speaker_embeddings = torch.tensor([[-0.0917, -0.0461, 0.0347, 0.0341, 0.0197, -0.0438, ...]])  # long hard-coded speaker-embedding vector, kept commented out

    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

    return speech

def text_to_speech(text):
    # Generate audio
    audio_output = generate_audio(text)

    output_path = "output.wav"
    sf.write(output_path, audio_output.numpy(), 16000, "PCM_16")

    return output_path


examples = [
    ['اگر رشتے داری ہے تو پیسے کی'],
    ['میری تعلیم جیکی کی ہے۔']
]


interface = gr.Interface(fn=text_to_speech, inputs="text", outputs="audio", verbose = True, title="Urdu TTS",
                         description = "A simple Urdu Text to Speech Application. It is not by any means perfect and will not work for all text. You can sometimes expect it to generate random noise on an input of your choice. Right now it works successfully on very basic urdu text, such as the ones in the example.", examples = examples)
interface.launch()
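For a concrete sense of the transliteration above: transString simply applies every buck2uni replacement in turn, so the output is whatever the character-level mapping yields. The word below is an illustrative input, assuming buck2uni and transString as defined in this file.

# پاکستان -> "pakstan": each Urdu character is replaced by its Latin mapping.
print(transString(u"\u067E\u0627\u06A9\u0633\u062A\u0627\u0646"))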
spaces/AchyuthGamer/OpenGPT/g4f/Provider/DfeHub.py
DELETED
@@ -1,77 +0,0 @@
from __future__ import annotations

import json
import re
import time

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class DfeHub(BaseProvider):
    url = "https://chat.dfehub.com/"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            "authority": "chat.dfehub.com",
            "accept": "*/*",
            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type": "application/json",
            "origin": "https://chat.dfehub.com",
            "referer": "https://chat.dfehub.com/",
            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "x-requested-with": "XMLHttpRequest",
        }

        json_data = {
            "messages": messages,
            "model": "gpt-3.5-turbo",
            "temperature": kwargs.get("temperature", 0.5),
            "presence_penalty": kwargs.get("presence_penalty", 0),
            "frequency_penalty": kwargs.get("frequency_penalty", 0),
            "top_p": kwargs.get("top_p", 1),
            "stream": True
        }

        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
                                 headers=headers, json=json_data, timeout=3)

        for chunk in response.iter_lines():
            if b"detail" in chunk:
                delay = re.findall(r"\d+\.\d+", chunk.decode())
                delay = float(delay[-1])
                time.sleep(delay)
                yield from DfeHub.create_completion(model, messages, stream, **kwargs)
            if b"content" in chunk:
                data = json.loads(chunk.decode().split("data: ")[1])
                yield (data["choices"][0]["delta"]["content"])

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/midas/midas/dpt_depth.py
DELETED
@@ -1,109 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from .base_model import BaseModel
from .blocks import (
    FeatureFusionBlock,
    FeatureFusionBlock_custom,
    Interpolate,
    _make_encoder,
    forward_vit,
)


def _make_fusion_block(features, use_bn):
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
    )


class DPT(BaseModel):
    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
    ):

        super(DPT, self).__init__()

        self.channels_last = channels_last

        hooks = {
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }

        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # set to True to train from scratch; False uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks[backbone],
            use_readout=readout,
        )

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        self.scratch.output_conv = head


    def forward(self, x):
        if self.channels_last == True:
            x.contiguous(memory_format=torch.channels_last)

        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return out


class DPTDepthModel(DPT):
    def __init__(self, path=None, non_negative=True, **kwargs):
        features = kwargs["features"] if "features" in kwargs else 256

        head = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        super().__init__(head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        return super().forward(x).squeeze(dim=1)
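A quick smoke test of the model above, as a sketch only: it assumes the sibling midas modules (base_model, blocks) plus timm are importable, that the 384x384 input matches the *_384 backbones listed in hooks, and that DPT keeps the input resolution in its depth output.

import torch

model = DPTDepthModel(backbone="vitb16_384", non_negative=True)
model.eval()
with torch.no_grad():
    depth = model(torch.randn(1, 3, 384, 384))
print(depth.shape)  # expected torch.Size([1, 384, 384]) after squeeze(dim=1)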
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Swipe.d.ts
DELETED
@@ -1,2 +0,0 @@
import { Swipe } from '../../../plugins/gestures';
export default Swipe;
spaces/AlanMars/QYL-AI-Space/assets/custom.js
DELETED
@@ -1,607 +0,0 @@
// custom javascript here

const MAX_HISTORY_LENGTH = 32;

var key_down_history = [];
var currentIndex = -1;
var user_input_ta;

var gradioContainer = null;
var user_input_ta = null;
var user_input_tb = null;
var userInfoDiv = null;
var appTitleDiv = null;
var chatbot = null;
var chatbotWrap = null;
var apSwitch = null;
var empty_botton = null;
var messageBotDivs = null;
// var renderLatex = null;
var loginUserForm = null;
var logginUser = null;

var userLogged = false;
var usernameGotten = false;
var shouldRenderLatex = false;
var historyLoaded = false;

var ga = document.getElementsByTagName("gradio-app");
var targetNode = ga[0];
var isInIframe = (window.self !== window.top);
var language = navigator.language.slice(0,2);

var forView_i18n = {
    'zh': "仅供查看",
    'en': "For viewing only",
    'ja': "閲覧専用",
    'fr': "Pour consultation seulement",
    'es': "Solo para visualización",
};

// Is the gradio page loaded yet? Can we touch its elements now?
function gradioLoaded(mutations) {
    for (var i = 0; i < mutations.length; i++) {
        if (mutations[i].addedNodes.length) {
            loginUserForm = document.querySelector(".gradio-container > .main > .wrap > .panel > .form")
            gradioContainer = document.querySelector(".gradio-container");
            user_input_tb = document.getElementById('user_input_tb');
            userInfoDiv = document.getElementById("user_info");
            appTitleDiv = document.getElementById("app_title");
            chatbot = document.querySelector('#chuanhu_chatbot');
            chatbotWrap = document.querySelector('#chuanhu_chatbot > .wrap');
            apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
            // renderLatex = document.querySelector("#render_latex_checkbox > label > input");
            empty_botton = document.getElementById("empty_btn")

            if (loginUserForm) {
                localStorage.setItem("userLogged", true);
                userLogged = true;
            }

            if (gradioContainer && apSwitch) { // has gradioContainer loaded yet?
                adjustDarkMode();
            }
            if (user_input_tb) { // has user_input_tb loaded yet?
                selectHistory();
            }
            if (userInfoDiv && appTitleDiv) { // have userInfoDiv and appTitleDiv loaded yet?
                if (!usernameGotten) {
                    getUserInfo();
                }
                setTimeout(showOrHideUserInfo(), 2000);
            }
            if (chatbot) { // has the chatbot loaded yet?
                setChatbotHeight();
            }
            if (chatbotWrap) {
                if (!historyLoaded) {
                    loadHistoryHtml();
                }
                setChatbotScroll();
            }
            // if (renderLatex) { // has renderLatex loaded yet?
            //     shouldRenderLatex = renderLatex.checked;
            //     updateMathJax();
            // }
            if (empty_botton) {
                emptyHistory();
            }
        }
    }
}

function webLocale() {
    console.log("webLocale", language);
    if (forView_i18n.hasOwnProperty(language)) {
        var forView = forView_i18n[language];
        var forViewStyle = document.createElement('style');
        forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }';
        document.head.appendChild(forViewStyle);
        // console.log("added forViewStyle", forView);
    }
}

function selectHistory() {
    user_input_ta = user_input_tb.querySelector("textarea");
    if (user_input_ta) {
        observer.disconnect(); // stop observing
        // listen for keydown events on the textarea
        user_input_ta.addEventListener("keydown", function (event) {
            var value = user_input_ta.value.trim();
            // check whether an arrow key was pressed
            if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
                // if an arrow key was pressed while the input box has content that is not in the history, do nothing
                if (value && key_down_history.indexOf(value) === -1)
                    return;
                // prevent the default behavior for the actions we do handle.
                event.preventDefault();
                var length = key_down_history.length;
                if (length === 0) {
                    currentIndex = -1; // if the history is empty, just reset the current selection
                    return;
                }
                if (currentIndex === -1) {
                    currentIndex = length;
                }
                if (event.code === 'ArrowUp' && currentIndex > 0) {
                    currentIndex--;
                    user_input_ta.value = key_down_history[currentIndex];
                } else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
                    currentIndex++;
                    user_input_ta.value = key_down_history[currentIndex];
                }
                user_input_ta.selectionStart = user_input_ta.value.length;
                user_input_ta.selectionEnd = user_input_ta.value.length;
                const input_event = new InputEvent("input", { bubbles: true, cancelable: true });
                user_input_ta.dispatchEvent(input_event);
            } else if (event.code === "Enter") {
                if (value) {
                    currentIndex = -1;
                    if (key_down_history.indexOf(value) === -1) {
                        key_down_history.push(value);
                        if (key_down_history.length > MAX_HISTORY_LENGTH) {
                            key_down_history.shift();
                        }
                    }
                }
            }
        });
    }
}

var username = null;
function getUserInfo() {
    if (usernameGotten) {
        return;
    }
    userLogged = localStorage.getItem('userLogged');
    if (userLogged) {
        username = userInfoDiv.innerText;
        if (username) {
            if (username.includes("getting user info…")) {
                setTimeout(getUserInfo, 500);
                return;
            } else if (username === " ") {
                localStorage.removeItem("username");
                localStorage.removeItem("userLogged")
                userLogged = false;
                usernameGotten = true;
                return;
            } else {
                username = username.match(/User:\s*(.*)/)[1] || username;
                localStorage.setItem("username", username);
                usernameGotten = true;
                clearHistoryHtml();
            }
        }
    }
}

function toggleUserInfoVisibility(shouldHide) {
    if (userInfoDiv) {
        if (shouldHide) {
            userInfoDiv.classList.add("hideK");
        } else {
            userInfoDiv.classList.remove("hideK");
        }
    }
}
function showOrHideUserInfo() {
    var sendBtn = document.getElementById("submit_btn");

    // Bind mouse/touch events to show/hide user info
    appTitleDiv.addEventListener("mouseenter", function () {
        toggleUserInfoVisibility(false);
    });
    userInfoDiv.addEventListener("mouseenter", function () {
        toggleUserInfoVisibility(false);
    });
    sendBtn.addEventListener("mouseenter", function () {
        toggleUserInfoVisibility(false);
    });

    appTitleDiv.addEventListener("mouseleave", function () {
        toggleUserInfoVisibility(true);
    });
    userInfoDiv.addEventListener("mouseleave", function () {
        toggleUserInfoVisibility(true);
    });
    sendBtn.addEventListener("mouseleave", function () {
        toggleUserInfoVisibility(true);
    });

    appTitleDiv.ontouchstart = function () {
        toggleUserInfoVisibility(false);
    };
    userInfoDiv.ontouchstart = function () {
        toggleUserInfoVisibility(false);
    };
    sendBtn.ontouchstart = function () {
        toggleUserInfoVisibility(false);
    };

    appTitleDiv.ontouchend = function () {
        setTimeout(function () {
            toggleUserInfoVisibility(true);
        }, 3000);
    };
    userInfoDiv.ontouchend = function () {
        setTimeout(function () {
            toggleUserInfoVisibility(true);
        }, 3000);
    };
    sendBtn.ontouchend = function () {
        setTimeout(function () {
            toggleUserInfoVisibility(true);
        }, 3000); // delay before hiding user info
    };

    // Hide user info after 2 seconds
    setTimeout(function () {
        toggleUserInfoVisibility(true);
    }, 2000);
}

function toggleDarkMode(isEnabled) {
    if (isEnabled) {
        gradioContainer.classList.add("dark");
        document.body.style.setProperty("background-color", "var(--neutral-950)", "important");
    } else {
        gradioContainer.classList.remove("dark");
        document.body.style.backgroundColor = "";
    }
}
function adjustDarkMode() {
    const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)");

    // set the initial state based on the current color scheme
    apSwitch.checked = darkModeQuery.matches;
    toggleDarkMode(darkModeQuery.matches);
    // listen for color scheme changes
    darkModeQuery.addEventListener("change", (e) => {
        apSwitch.checked = e.matches;
        toggleDarkMode(e.matches);
    });
    // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
    apSwitch.addEventListener("change", (e) => {
        toggleDarkMode(e.target.checked);
    });
}

function setChatbotHeight() {
    const screenWidth = window.innerWidth;
    const statusDisplay = document.querySelector('#status_display');
    const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
    const wrap = chatbot.querySelector('.wrap');
    const vh = window.innerHeight * 0.01;
    document.documentElement.style.setProperty('--vh', `${vh}px`);
    if (isInIframe) {
        chatbot.style.height = `520px`;
        wrap.style.maxHeight = `calc(520px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
    } else {
        if (screenWidth <= 320) {
            chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
            wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
        } else if (screenWidth <= 499) {
            chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
            wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
        } else {
            chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
            wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
        }
    }
}
function setChatbotScroll() {
    var scrollHeight = chatbotWrap.scrollHeight;
    chatbotWrap.scrollTo(0,scrollHeight)
}
var rangeInputs = null;
var numberInputs = null;
function setSlider() {
    rangeInputs = document.querySelectorAll('input[type="range"]');
    numberInputs = document.querySelectorAll('input[type="number"]')
    setSliderRange();
    rangeInputs.forEach(rangeInput => {
        rangeInput.addEventListener('input', setSliderRange);
    });
    numberInputs.forEach(numberInput => {
        numberInput.addEventListener('input', setSliderRange);
    })
}
function setSliderRange() {
    var range = document.querySelectorAll('input[type="range"]');
    range.forEach(range => {
        range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%';
    });
}

function addChuanhuButton(botElement) {
    var rawMessage = null;
    var mdMessage = null;
    rawMessage = botElement.querySelector('.raw-message');
    mdMessage = botElement.querySelector('.md-message');
    if (!rawMessage) {
        var buttons = botElement.querySelectorAll('button.chuanhu-btn');
        for (var i = 0; i < buttons.length; i++) {
            buttons[i].parentNode.removeChild(buttons[i]);
        }
        return;
    }
    var copyButton = null;
    var toggleButton = null;
    copyButton = botElement.querySelector('button.copy-bot-btn');
    toggleButton = botElement.querySelector('button.toggle-md-btn');
    if (copyButton) copyButton.remove();
    if (toggleButton) toggleButton.remove();

    // Copy bot button
    var copyButton = document.createElement('button');
    copyButton.classList.add('chuanhu-btn');
    copyButton.classList.add('copy-bot-btn');
    copyButton.setAttribute('aria-label', 'Copy');
    copyButton.innerHTML = copyIcon;
    copyButton.addEventListener('click', () => {
        const textToCopy = rawMessage.innerText;
        navigator.clipboard
            .writeText(textToCopy)
            .then(() => {
                copyButton.innerHTML = copiedIcon;
                setTimeout(() => {
                    copyButton.innerHTML = copyIcon;
                }, 1500);
            })
            .catch(() => {
                console.error("copy failed");
            });
    });
    botElement.appendChild(copyButton);

    // Toggle button
    var toggleButton = document.createElement('button');
    toggleButton.classList.add('chuanhu-btn');
    toggleButton.classList.add('toggle-md-btn');
    toggleButton.setAttribute('aria-label', 'Toggle');
    var renderMarkdown = mdMessage.classList.contains('hideM');
    toggleButton.innerHTML = renderMarkdown ? mdIcon : rawIcon;
    toggleButton.addEventListener('click', () => {
        renderMarkdown = mdMessage.classList.contains('hideM');
        if (renderMarkdown){
            renderMarkdownText(botElement);
            toggleButton.innerHTML=rawIcon;
        } else {
            removeMarkdownText(botElement);
            toggleButton.innerHTML=mdIcon;
        }
    });
    botElement.insertBefore(toggleButton, copyButton);
}

function addCopyCodeButton(pre) {
    var code = null;
    var firstChild = null;
    code = pre.querySelector('code');
    if (!code) return;
    firstChild = code.querySelector('div');
    if (!firstChild) return;
    var oldCopyButton = null;
    oldCopyButton = code.querySelector('button.copy-code-btn');
    // if (oldCopyButton) oldCopyButton.remove();
    if (oldCopyButton) return; // not very useful: newly generated replies keep overwriting the pre, so the button disappears; this code path stays disabled...
    var codeButton = document.createElement('button');
    codeButton.classList.add('copy-code-btn');
    codeButton.textContent = '\uD83D\uDCCE';

    code.insertBefore(codeButton, firstChild);
    codeButton.addEventListener('click', function () {
        var range = document.createRange();
        range.selectNodeContents(code);
        range.setStartBefore(firstChild);
        navigator.clipboard
            .writeText(range.toString())
            .then(() => {
                codeButton.textContent = '\u2714';
                setTimeout(function () {
                    codeButton.textContent = '\uD83D\uDCCE';
                }, 2000);
            })
            .catch(e => {
                console.error(e);
                codeButton.textContent = '\u2716';
            });
    });
}

function renderMarkdownText(message) {
    var mdDiv = message.querySelector('.md-message');
    if (mdDiv) mdDiv.classList.remove('hideM');
    var rawDiv = message.querySelector('.raw-message');
    if (rawDiv) rawDiv.classList.add('hideM');
}
function removeMarkdownText(message) {
    var rawDiv = message.querySelector('.raw-message');
    if (rawDiv) rawDiv.classList.remove('hideM');
    var mdDiv = message.querySelector('.md-message');
    if (mdDiv) mdDiv.classList.add('hideM');
}

var rendertime = 0; // for debugging
var mathjaxUpdated = false;

function renderMathJax() {
    messageBotDivs = document.querySelectorAll('.message.bot .md-message');
    for (var i = 0; i < messageBotDivs.length; i++) {
        var mathJaxSpan = messageBotDivs[i].querySelector('.MathJax_Preview');
        if (!mathJaxSpan && shouldRenderLatex && !mathjaxUpdated) {
            MathJax.Hub.Queue(["Typeset", MathJax.Hub, messageBotDivs[i]]);
            rendertime +=1; // for debugging
            // console.log("renderingMathJax", i)
        }
    }
    mathjaxUpdated = true;
    // console.log("MathJax Rendered")
}

function removeMathjax() {
    // var jax = MathJax.Hub.getAllJax();
    // for (var i = 0; i < jax.length; i++) {
    //     // MathJax.typesetClear(jax[i]);
    //     jax[i].Text(newmath)
    //     jax[i].Reprocess()
    // }
    // I'm truly stuck here: MathJax provides no way to convert back to the original text.
    mathjaxUpdated = true;
    // console.log("MathJax removed!");
}

function updateMathJax() {
    // renderLatex.addEventListener("change", function() {
    //     shouldRenderLatex = renderLatex.checked;
    //     if (!mathjaxUpdated) {
    //         if (shouldRenderLatex) {
    //             renderMathJax();
    //         } else {
    //             console.log("MathJax Disabled")
    //             removeMathjax();
    //         }
    //     } else {
    //         if (!shouldRenderLatex) {
    //             mathjaxUpdated = false; // reset
    //         }
    //     }
    // });
    if (shouldRenderLatex && !mathjaxUpdated) {
        renderMathJax();
    }
    mathjaxUpdated = false;
}

let timeoutId;
let isThrottled = false;
var mmutation
// Watch bot messages everywhere for changes, to find MathJax that needs rendering and to add copy buttons to bot messages.
var mObserver = new MutationObserver(function (mutationsList) {
    for (mmutation of mutationsList) {
        if (mmutation.type === 'childList') {
            for (var node of mmutation.addedNodes) {
                if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
                    if (shouldRenderLatex) {
                        renderMathJax();
                        mathjaxUpdated = false;
                    }
                    saveHistoryHtml();
                    document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
                    document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot pre').forEach(addCopyCodeButton);
                }
                if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') {
                    setSlider();
                }
            }
            for (var node of mmutation.removedNodes) {
                if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') {
                    if (shouldRenderLatex) {
                        renderMathJax();
                        mathjaxUpdated = false;
                    }
                    saveHistoryHtml();
                    document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
                    document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot pre').forEach(addCopyCodeButton);
                }
            }
        } else if (mmutation.type === 'attributes') {
            if (mmutation.target.nodeType === 1 && mmutation.target.classList.contains('message') && mmutation.target.getAttribute('data-testid') === 'bot') {
                document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot pre').forEach(addCopyCodeButton); // somewhat buggy as written: buttons get added too many times, but the bot reply keeps overwriting the pre while it is being generated...
                if (isThrottled) break; // throttle so we don't keep re-rendering like crazy _(:з」∠)_
                isThrottled = true;
                clearTimeout(timeoutId);
                timeoutId = setTimeout(() => {
                    isThrottled = false;
                    if (shouldRenderLatex) {
                        renderMathJax();
                        mathjaxUpdated = false;
                    }
                    document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton);
                    saveHistoryHtml();
                }, 500);
            }
        }
    }
});
mObserver.observe(document.documentElement, { attributes: true, childList: true, subtree: true });

var loadhistorytime = 0; // for debugging
function saveHistoryHtml() {
    var historyHtml = document.querySelector('#chuanhu_chatbot > .wrap');
    localStorage.setItem('chatHistory', historyHtml.innerHTML);
    // console.log("History Saved")
    historyLoaded = false;
}
function loadHistoryHtml() {
    var historyHtml = localStorage.getItem('chatHistory');
    if (!historyHtml) {
        historyLoaded = true;
        return; // no history, do nothing
    }
    userLogged = localStorage.getItem('userLogged');
    if (userLogged){
        historyLoaded = true;
        return; // logged in, do nothing
    }
    if (!historyLoaded) {
        var tempDiv = document.createElement('div');
        tempDiv.innerHTML = historyHtml;
        var buttons = tempDiv.querySelectorAll('button.chuanhu-btn');
        for (var i = 0; i < buttons.length; i++) {
            buttons[i].parentNode.removeChild(buttons[i]);
        }
        var fakeHistory = document.createElement('div');
        fakeHistory.classList.add('history-message');
        fakeHistory.innerHTML = tempDiv.innerHTML;
        webLocale();
        chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild);
        // var fakeHistory = document.createElement('div');
        // fakeHistory.classList.add('history-message');
        // fakeHistory.innerHTML = historyHtml;
        // chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild);
        historyLoaded = true;
        console.log("History Loaded");
        loadhistorytime += 1; // for debugging
    } else {
        historyLoaded = false;
    }
}
function clearHistoryHtml() {
    localStorage.removeItem("chatHistory");
    historyMessages = chatbotWrap.querySelector('.history-message');
    if (historyMessages) {
        chatbotWrap.removeChild(historyMessages);
        console.log("History Cleared");
    }
}
function emptyHistory() {
    empty_botton.addEventListener("click", function () {
        clearHistoryHtml();
    });
}

// Watch for DOM changes inside the page
var observer = new MutationObserver(function (mutations) {
    gradioLoaded(mutations);
});
observer.observe(targetNode, { childList: true, subtree: true });

// Watch for page changes
window.addEventListener("DOMContentLoaded", function () {
    isInIframe = (window.self !== window.top);
    historyLoaded = false;
    shouldRenderLatex = !!document.querySelector('script[src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML"]');
});
window.addEventListener('resize', setChatbotHeight);
window.addEventListener('scroll', setChatbotHeight);
window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);

// button svg code
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
const mdIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="1" viewBox="0 0 14 18" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><g transform-origin="center" transform="scale(0.85)"><path d="M1.5,0 L12.5,0 C13.3284271,-1.52179594e-16 14,0.671572875 14,1.5 L14,16.5 C14,17.3284271 13.3284271,18 12.5,18 L1.5,18 C0.671572875,18 1.01453063e-16,17.3284271 0,16.5 L0,1.5 C-1.01453063e-16,0.671572875 0.671572875,1.52179594e-16 1.5,0 Z" stroke-width="1.8"></path><line x1="3.5" y1="3.5" x2="10.5" y2="3.5"></line><line x1="3.5" y1="6.5" x2="8" y2="6.5"></line></g><path d="M4,9 L10,9 C10.5522847,9 11,9.44771525 11,10 L11,13.5 C11,14.0522847 10.5522847,14.5 10,14.5 L4,14.5 C3.44771525,14.5 3,14.0522847 3,13.5 L3,10 C3,9.44771525 3.44771525,9 4,9 Z" stroke="none" fill="currentColor"></path></svg></span>';
const rawIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="1.8" viewBox="0 0 18 14" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><g transform-origin="center" transform="scale(0.85)"><polyline points="4 3 0 7 4 11"></polyline><polyline points="14 3 18 7 14 11"></polyline><line x1="12" y1="0" x2="6" y2="14"></line></g></svg></span>';
spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/test.py
DELETED
@@ -1,195 +0,0 @@
from copy import deepcopy

import numpy as np

import countless2d
import countless3d

def test_countless2d():
    def test_all_cases(fn, test_zero):
        case1 = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) # all different
        case2 = np.array([ [ 1, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same
        case1z = np.array([ [ 0, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # all different
        case2z = np.array([ [ 0, 0 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same
        case3 = np.array([ [ 1, 1 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # two groups are same
        case4 = np.array([ [ 1, 2 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # 3 are the same
        case5 = np.array([ [ 5, 5 ], [ 5, 5 ] ]).reshape((2,2,1,1)) # all are the same

        is_255_handled = np.array([ [ 255, 255 ], [ 1, 2 ] ], dtype=np.uint8).reshape((2,2,1,1))

        test = lambda case: fn(case)

        if test_zero:
            assert test(case1z) == [[[[3]]]] # d
            assert test(case2z) == [[[[0]]]] # a==b
        else:
            assert test(case1) == [[[[4]]]] # d
            assert test(case2) == [[[[1]]]] # a==b

        assert test(case3) == [[[[1]]]] # a==b
        assert test(case4) == [[[[2]]]] # b==c
        assert test(case5) == [[[[5]]]] # a==b

        assert test(is_255_handled) == [[[[255]]]]

        assert fn(case1).dtype == case1.dtype

    test_all_cases(countless2d.simplest_countless, False)
    test_all_cases(countless2d.quick_countless, False)
    test_all_cases(countless2d.quickest_countless, False)
    test_all_cases(countless2d.stippled_countless, False)

    methods = [
        countless2d.zero_corrected_countless,
        countless2d.countless,
        countless2d.countless_if,
        # countless2d.counting, # counting doesn't respect order so harder to write a test
    ]

    for fn in methods:
        print(fn.__name__)
        test_all_cases(fn, True)

def test_stippled_countless2d():
    a = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1))
    b = np.array([ [ 0, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1))
    c = np.array([ [ 1, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1))
    d = np.array([ [ 1, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1))
    e = np.array([ [ 1, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1))
    f = np.array([ [ 0, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1))
    g = np.array([ [ 0, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1))
    h = np.array([ [ 0, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1))
    i = np.array([ [ 1, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1))
    j = np.array([ [ 1, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1))
    k = np.array([ [ 1, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1))
    l = np.array([ [ 1, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1))
    m = np.array([ [ 0, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1))
    n = np.array([ [ 0, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1))
    o = np.array([ [ 0, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1))
    z = np.array([ [ 0, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1))

    test = countless2d.stippled_countless

    # Note: We only tested non-matching cases above,
    # cases f,g,h,i,j,k prove their duals work as well
    # b/c if two pixels are black, either one can be chosen
    # if they are different or the same.

    assert test(a) == [[[[4]]]]
    assert test(b) == [[[[4]]]]
    assert test(c) == [[[[4]]]]
    assert test(d) == [[[[4]]]]
    assert test(e) == [[[[1]]]]
    assert test(f) == [[[[4]]]]
    assert test(g) == [[[[4]]]]
    assert test(h) == [[[[2]]]]
    assert test(i) == [[[[4]]]]
    assert test(j) == [[[[1]]]]
    assert test(k) == [[[[1]]]]
    assert test(l) == [[[[1]]]]
    assert test(m) == [[[[2]]]]
    assert test(n) == [[[[3]]]]
    assert test(o) == [[[[4]]]]
    assert test(z) == [[[[0]]]]

    bc = np.array([ [ 0, 2 ], [ 2, 4 ] ]).reshape((2,2,1,1))
    bd = np.array([ [ 0, 2 ], [ 3, 2 ] ]).reshape((2,2,1,1))
    cd = np.array([ [ 0, 2 ], [ 3, 3 ] ]).reshape((2,2,1,1))

    assert test(bc) == [[[[2]]]]
    assert test(bd) == [[[[2]]]]
    assert test(cd) == [[[[3]]]]

    ab = np.array([ [ 1, 1 ], [ 0, 4 ] ]).reshape((2,2,1,1))
    ac = np.array([ [ 1, 2 ], [ 1, 0 ] ]).reshape((2,2,1,1))
    ad = np.array([ [ 1, 0 ], [ 3, 1 ] ]).reshape((2,2,1,1))

    assert test(ab) == [[[[1]]]]
    assert test(ac) == [[[[1]]]]
    assert test(ad) == [[[[1]]]]

def test_countless3d():
    def test_all_cases(fn):
        alldifferent = [
            [
                [1,2],
                [3,4],
            ],
            [
                [5,6],
                [7,8]
            ]
        ]
        allsame = [
            [
                [1,1],
                [1,1],
            ],
            [
                [1,1],
                [1,1]
            ]
        ]

        assert fn(np.array(alldifferent)) == [[[8]]]
        assert fn(np.array(allsame)) == [[[1]]]

        twosame = deepcopy(alldifferent)
        twosame[1][1][0] = 2

        assert fn(np.array(twosame)) == [[[2]]]

        threemixed = [
            [
                [3,3],
                [1,2],
            ],
            [
                [2,4],
                [4,3]
            ]
        ]
        assert fn(np.array(threemixed)) == [[[3]]]

        foursame = [
            [
                [4,4],
                [1,2],
            ],
            [
                [2,4],
                [4,3]
            ]
        ]

        assert fn(np.array(foursame)) == [[[4]]]

        fivesame = [
            [
                [5,4],
                [5,5],
            ],
            [
                [2,4],
                [5,5]
            ]
        ]

        assert fn(np.array(fivesame)) == [[[5]]]

    def countless3d_generalized(img):
        return countless3d.countless_generalized(img, (2,2,2))
    def countless3d_dynamic_generalized(img):
        return countless3d.dynamic_countless_generalized(img, (2,2,2))

    methods = [
        countless3d.countless3d,
        countless3d.dynamic_countless3d,
        countless3d_generalized,
        countless3d_dynamic_generalized,
    ]

    for fn in methods:
        test_all_cases(fn)
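For readers new to the algorithm under test: COUNTLESS computes the mode of each 2x2 block using only pairwise comparisons, falling back to the last pixel when all four differ. Below is a minimal numpy sketch of the simplest_countless idea (2D, no zero correction); the optimized countless2d module is what the tests above actually exercise.

import numpy as np

def simplest_countless_2x2(img):
    # Split the image into the four corners of each 2x2 block.
    a, b = img[0::2, 0::2], img[0::2, 1::2]
    c, d = img[1::2, 0::2], img[1::2, 1::2]
    ab = a * (a == b)      # a where a matches b, else 0
    ac = a * (a == c)
    bc = b * (b == c)
    # Wherever more than one term is nonzero they agree, so OR picks the mode.
    match = ab | ac | bc
    return match + (match == 0) * d  # fall back to d when all four differ

print(simplest_countless_2x2(np.array([[1, 2], [2, 2]])))  # [[2]]
print(simplest_countless_2x2(np.array([[1, 2], [3, 4]])))  # [[4]]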
spaces/Aloento/9Nine-PITS/text/symbols.py
DELETED
@@ -1,14 +0,0 @@
"""
Defines the set of symbols used in text input to the model.
"""

_pad = '_'
_punctuation = ',.!?-~…'
_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '

_extra = "ˌ%$"
# Export all symbols:
symbols = [_pad] + list(_punctuation) + list(_letters) + list(_extra)

# Special symbol ids
SPACE_ID = symbols.index(" ")
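A typical companion to a symbol table like this (not part of the file itself) is an id lookup, which is how text front ends usually consume it. Note that 'ˌ' appears in both _letters and _extra, so a dict built from this list silently keeps only one id for it.

# Sketch of the usual symbol <-> id maps built from this module.
_symbol_to_id = {s: i for i, s in enumerate(symbols)}  # duplicates keep the last id
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

print(_symbol_to_id['_'])              # 0, the padding symbol
print(_id_to_symbol[SPACE_ID] == ' ')  # True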
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/multi_token_clip.py
DELETED
@@ -1,103 +0,0 @@
"""
The main idea for this code is to provide a way for users to avoid the hassle of multiple tokens for a concept: instead of typing
a photo of <concept>_0 <concept>_1 ... and so on
they just type
a photo of <concept>
which gets translated to the above. This needs to work for both inference and training.
For inference,
the tokenizer encodes the text, so we want logic for our tokenizer to replace the placeholder token with
its underlying vectors.
For training,
we want to abstract away some logic like
1. Adding tokens
2. Updating the gradient mask
3. Saving embeddings
to our util class here.
TODO:
1. have tokenizer keep track of concept, multiconcept pairs and replace during encode call x
2. have mechanism for adding tokens x
3. have mechanism for saving embeddings x
4. get mask to update x
5. loading tokens from embedding x
6. integrate into training x
7. test
"""
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """
        Here, we replace the placeholder tokens in text recorded in token_map so that the text_encoder
        can encode them.
        vector_shuffle was inspired by https://github.com/rinongal/textual_inversion/pull/119,
        where shuffling tokens was found to force the model to learn the concepts more descriptively.
        """
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
|
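
For context, a minimal usage sketch of the tokenizer above. The CLIP checkpoint id and the concept name are illustrative assumptions, not pinned by this file:

# Minimal sketch: expand one placeholder into several learned tokens.
# Assumes multi_token_clip.py (above) is on the import path.
from multi_token_clip import MultiTokenCLIPTokenizer

tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

# Registers <cat-toy>_0 ... <cat-toy>_3 and maps <cat-toy> onto them.
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)

# The placeholder is rewritten before encoding, so the text encoder
# sees four tokens where the prompt has one.
ids = tokenizer.encode("a photo of <cat-toy>", add_special_tokens=False)
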
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/shap_e/camera.py
DELETED
@@ -1,147 +0,0 @@
-# Copyright 2023 Open AI and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from dataclasses import dataclass
-from typing import Tuple
-
-import numpy as np
-import torch
-
-
-@dataclass
-class DifferentiableProjectiveCamera:
-    """
-    Implements a batch, differentiable, standard pinhole camera
-    """
-
-    origin: torch.Tensor  # [batch_size x 3]
-    x: torch.Tensor  # [batch_size x 3]
-    y: torch.Tensor  # [batch_size x 3]
-    z: torch.Tensor  # [batch_size x 3]
-    width: int
-    height: int
-    x_fov: float
-    y_fov: float
-    shape: Tuple[int]
-
-    def __post_init__(self):
-        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
-        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
-        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
-
-    def resolution(self):
-        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
-
-    def fov(self):
-        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
-
-    def get_image_coords(self) -> torch.Tensor:
-        """
-        :return: coords of shape (width * height, 2)
-        """
-        pixel_indices = torch.arange(self.height * self.width)
-        coords = torch.stack(
-            [
-                pixel_indices % self.width,
-                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
-            ],
-            axis=1,
-        )
-        return coords
-
-    @property
-    def camera_rays(self):
-        batch_size, *inner_shape = self.shape
-        inner_batch_size = int(np.prod(inner_shape))
-
-        coords = self.get_image_coords()
-        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
-        rays = self.get_camera_rays(coords)
-
-        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
-
-        return rays
-
-    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
-        batch_size, *shape, n_coords = coords.shape
-        assert n_coords == 2
-        assert batch_size == self.origin.shape[0]
-
-        flat = coords.view(batch_size, -1, 2)
-
-        res = self.resolution()
-        fov = self.fov()
-
-        fracs = (flat.float() / (res - 1)) * 2 - 1
-        fracs = fracs * torch.tan(fov / 2)
-
-        fracs = fracs.view(batch_size, -1, 2)
-        directions = (
-            self.z.view(batch_size, 1, 3)
-            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
-            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
-        )
-        directions = directions / directions.norm(dim=-1, keepdim=True)
-        rays = torch.stack(
-            [
-                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
-                directions,
-            ],
-            dim=2,
-        )
-        return rays.view(batch_size, *shape, 2, 3)
-
-    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
-        """
-        Creates a new camera for the resized view assuming the aspect ratio does not change.
-        """
-        assert width * self.height == height * self.width, "The aspect ratio should not change."
-        return DifferentiableProjectiveCamera(
-            origin=self.origin,
-            x=self.x,
-            y=self.y,
-            z=self.z,
-            width=width,
-            height=height,
-            x_fov=self.x_fov,
-            y_fov=self.y_fov,
-        )
-
-
-def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
-    origins = []
-    xs = []
-    ys = []
-    zs = []
-    for theta in np.linspace(0, 2 * np.pi, num=20):
-        z = np.array([np.sin(theta), np.cos(theta), -0.5])
-        z /= np.sqrt(np.sum(z**2))
-        origin = -z * 4
-        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
-        y = np.cross(z, x)
-        origins.append(origin)
-        xs.append(x)
-        ys.append(y)
-        zs.append(z)
-    return DifferentiableProjectiveCamera(
-        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
-        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
-        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
-        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
-        width=size,
-        height=size,
-        x_fov=0.7,
-        y_fov=0.7,
-        shape=(1, len(xs)),
-    )
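
For context, a minimal usage sketch of the helpers above; the ray tensor shape follows directly from `camera_rays` with the hard-coded 20-view pan and `shape=(1, 20)`:

# Minimal sketch: build the pan rig and split rays into origins/directions.
# The import path is where this file lived in the diffusers tree.
from diffusers.pipelines.shap_e.camera import create_pan_cameras

cam = cre­ate_pan_cameras(64) if False else create_pan_cameras(64)  # twenty 64x64 views
rays = cam.camera_rays                   # shape (1, 20 * 64 * 64, 2, 3)
origins, directions = rays[:, :, 0], rays[:, :, 1]
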
spaces/Andy1621/uniformer_image_detection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py
DELETED
@@ -1,18 +0,0 @@
-_base_ = './htc_r50_fpn_1x_coco.py'
-model = dict(
-    pretrained='open-mmlab://resnext101_32x4d',
-    backbone=dict(
-        type='ResNeXt',
-        depth=101,
-        groups=32,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        norm_eval=True,
-        style='pytorch'))
-data = dict(samples_per_gpu=1, workers_per_gpu=1)
-# learning policy
-lr_config = dict(step=[16, 19])
-runner = dict(type='EpochBasedRunner', max_epochs=20)
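
Configs like this one are plain Python consumed by MMDetection's tooling; a minimal sketch of loading and inspecting it with mmcv 1.x (the relative path assumes a standard repo checkout):

# Minimal sketch: resolve the config, including its _base_ inheritance.
from mmcv import Config

cfg = Config.fromfile("configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py")
print(cfg.model.backbone.type)   # 'ResNeXt'
print(cfg.runner.max_epochs)     # 20
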
spaces/Andy1621/uniformer_image_detection/mmdet/models/necks/fpn_carafe.py
DELETED
@@ -1,267 +0,0 @@
-import torch.nn as nn
-from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
-from mmcv.ops.carafe import CARAFEPack
-
-from ..builder import NECKS
-
-
-@NECKS.register_module()
-class FPN_CARAFE(nn.Module):
-    """FPN_CARAFE is a more flexible implementation of FPN. It allows more
-    choice for upsample methods during the top-down pathway.
-
-    It can reproduce the performance of ICCV 2019 paper
-    CARAFE: Content-Aware ReAssembly of FEatures
-    Please refer to https://arxiv.org/abs/1905.02188 for more details.
-
-    Args:
-        in_channels (list[int]): Number of channels for each input feature map.
-        out_channels (int): Output channels of feature pyramids.
-        num_outs (int): Number of output stages.
-        start_level (int): Start level of feature pyramids.
-            (Default: 0)
-        end_level (int): End level of feature pyramids.
-            (Default: -1 indicates the last level).
-        norm_cfg (dict): Dictionary to construct and config norm layer.
-        activate (str): Type of activation function in ConvModule
-            (Default: None indicates w/o activation).
-        order (dict): Order of components in ConvModule.
-        upsample (str): Type of upsample layer.
-        upsample_cfg (dict): Dictionary to construct and config upsample layer.
-    """
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 num_outs,
-                 start_level=0,
-                 end_level=-1,
-                 norm_cfg=None,
-                 act_cfg=None,
-                 order=('conv', 'norm', 'act'),
-                 upsample_cfg=dict(
-                     type='carafe',
-                     up_kernel=5,
-                     up_group=1,
-                     encoder_kernel=3,
-                     encoder_dilation=1)):
-        super(FPN_CARAFE, self).__init__()
-        assert isinstance(in_channels, list)
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.num_ins = len(in_channels)
-        self.num_outs = num_outs
-        self.norm_cfg = norm_cfg
-        self.act_cfg = act_cfg
-        self.with_bias = norm_cfg is None
-        self.upsample_cfg = upsample_cfg.copy()
-        self.upsample = self.upsample_cfg.get('type')
-        self.relu = nn.ReLU(inplace=False)
-
-        self.order = order
-        assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
-
-        assert self.upsample in [
-            'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
-        ]
-        if self.upsample in ['deconv', 'pixel_shuffle']:
-            assert hasattr(
-                self.upsample_cfg,
-                'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
-            self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
-
-        if end_level == -1:
-            self.backbone_end_level = self.num_ins
-            assert num_outs >= self.num_ins - start_level
-        else:
-            # if end_level < inputs, no extra level is allowed
-            self.backbone_end_level = end_level
-            assert end_level <= len(in_channels)
-            assert num_outs == end_level - start_level
-        self.start_level = start_level
-        self.end_level = end_level
-
-        self.lateral_convs = nn.ModuleList()
-        self.fpn_convs = nn.ModuleList()
-        self.upsample_modules = nn.ModuleList()
-
-        for i in range(self.start_level, self.backbone_end_level):
-            l_conv = ConvModule(
-                in_channels[i],
-                out_channels,
-                1,
-                norm_cfg=norm_cfg,
-                bias=self.with_bias,
-                act_cfg=act_cfg,
-                inplace=False,
-                order=self.order)
-            fpn_conv = ConvModule(
-                out_channels,
-                out_channels,
-                3,
-                padding=1,
-                norm_cfg=self.norm_cfg,
-                bias=self.with_bias,
-                act_cfg=act_cfg,
-                inplace=False,
-                order=self.order)
-            if i != self.backbone_end_level - 1:
-                upsample_cfg_ = self.upsample_cfg.copy()
-                if self.upsample == 'deconv':
-                    upsample_cfg_.update(
-                        in_channels=out_channels,
-                        out_channels=out_channels,
-                        kernel_size=self.upsample_kernel,
-                        stride=2,
-                        padding=(self.upsample_kernel - 1) // 2,
-                        output_padding=(self.upsample_kernel - 1) // 2)
-                elif self.upsample == 'pixel_shuffle':
-                    upsample_cfg_.update(
-                        in_channels=out_channels,
-                        out_channels=out_channels,
-                        scale_factor=2,
-                        upsample_kernel=self.upsample_kernel)
-                elif self.upsample == 'carafe':
-                    upsample_cfg_.update(channels=out_channels, scale_factor=2)
-                else:
-                    # suppress warnings
-                    align_corners = (None
-                                     if self.upsample == 'nearest' else False)
-                    upsample_cfg_.update(
-                        scale_factor=2,
-                        mode=self.upsample,
-                        align_corners=align_corners)
-                upsample_module = build_upsample_layer(upsample_cfg_)
-                self.upsample_modules.append(upsample_module)
-            self.lateral_convs.append(l_conv)
-            self.fpn_convs.append(fpn_conv)
-
-        # add extra conv layers (e.g., RetinaNet)
-        extra_out_levels = (
-            num_outs - self.backbone_end_level + self.start_level)
-        if extra_out_levels >= 1:
-            for i in range(extra_out_levels):
-                in_channels = (
-                    self.in_channels[self.backbone_end_level -
-                                     1] if i == 0 else out_channels)
-                extra_l_conv = ConvModule(
-                    in_channels,
-                    out_channels,
-                    3,
-                    stride=2,
-                    padding=1,
-                    norm_cfg=norm_cfg,
-                    bias=self.with_bias,
-                    act_cfg=act_cfg,
-                    inplace=False,
-                    order=self.order)
-                if self.upsample == 'deconv':
-                    upsampler_cfg_ = dict(
-                        in_channels=out_channels,
-                        out_channels=out_channels,
-                        kernel_size=self.upsample_kernel,
-                        stride=2,
-                        padding=(self.upsample_kernel - 1) // 2,
-                        output_padding=(self.upsample_kernel - 1) // 2)
-                elif self.upsample == 'pixel_shuffle':
-                    upsampler_cfg_ = dict(
-                        in_channels=out_channels,
-                        out_channels=out_channels,
-                        scale_factor=2,
-                        upsample_kernel=self.upsample_kernel)
-                elif self.upsample == 'carafe':
-                    upsampler_cfg_ = dict(
-                        channels=out_channels,
-                        scale_factor=2,
-                        **self.upsample_cfg)
-                else:
-                    # suppress warnings
-                    align_corners = (None
-                                     if self.upsample == 'nearest' else False)
-                    upsampler_cfg_ = dict(
-                        scale_factor=2,
-                        mode=self.upsample,
-                        align_corners=align_corners)
-                upsampler_cfg_['type'] = self.upsample
-                upsample_module = build_upsample_layer(upsampler_cfg_)
-                extra_fpn_conv = ConvModule(
-                    out_channels,
-                    out_channels,
-                    3,
-                    padding=1,
-                    norm_cfg=self.norm_cfg,
-                    bias=self.with_bias,
-                    act_cfg=act_cfg,
-                    inplace=False,
-                    order=self.order)
-                self.upsample_modules.append(upsample_module)
-                self.fpn_convs.append(extra_fpn_conv)
-                self.lateral_convs.append(extra_l_conv)
-
-    # default init_weights for conv(msra) and norm in ConvModule
-    def init_weights(self):
-        """Initialize the weights of module."""
-        for m in self.modules():
-            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                xavier_init(m, distribution='uniform')
-        for m in self.modules():
-            if isinstance(m, CARAFEPack):
-                m.init_weights()
-
-    def slice_as(self, src, dst):
-        """Slice ``src`` as ``dst``
-
-        Note:
-            ``src`` should have the same or larger size than ``dst``.
-
-        Args:
-            src (torch.Tensor): Tensors to be sliced.
-            dst (torch.Tensor): ``src`` will be sliced to have the same
-                size as ``dst``.
-
-        Returns:
-            torch.Tensor: Sliced tensor.
-        """
-        assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
-        if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
-            return src
-        else:
-            return src[:, :, :dst.size(2), :dst.size(3)]
-
-    def tensor_add(self, a, b):
-        """Add tensors ``a`` and ``b`` that might have different sizes."""
-        if a.size() == b.size():
-            c = a + b
-        else:
-            c = a + self.slice_as(b, a)
-        return c
-
-    def forward(self, inputs):
-        """Forward function."""
-        assert len(inputs) == len(self.in_channels)
-
-        # build laterals
-        laterals = []
-        for i, lateral_conv in enumerate(self.lateral_convs):
-            if i <= self.backbone_end_level - self.start_level:
-                input = inputs[min(i + self.start_level, len(inputs) - 1)]
-            else:
-                input = laterals[-1]
-            lateral = lateral_conv(input)
-            laterals.append(lateral)
-
-        # build top-down path
-        for i in range(len(laterals) - 1, 0, -1):
-            if self.upsample is not None:
-                upsample_feat = self.upsample_modules[i - 1](laterals[i])
-            else:
-                upsample_feat = laterals[i]
-            laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
-
-        # build outputs
-        num_conv_outs = len(self.fpn_convs)
-        outs = []
-        for i in range(num_conv_outs):
-            out = self.fpn_convs[i](laterals[i])
-            outs.append(out)
-        return tuple(outs)
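
For context, a minimal usage sketch of the neck above. It assumes mmcv-full is installed (the module imports the compiled CARAFE op at import time) and picks the 'nearest' upsample so the sketch runs without the CUDA kernel; the channel widths are the usual ResNet stage sizes, an assumption rather than anything this file pins down:

# Minimal sketch: run FPN_CARAFE on dummy backbone features.
import torch
from mmdet.models.necks.fpn_carafe import FPN_CARAFE

neck = FPN_CARAFE(
    in_channels=[256, 512, 1024, 2048],   # typical ResNet stage widths
    out_channels=256,
    num_outs=5,
    upsample_cfg=dict(type='nearest'),    # avoids the compiled CARAFE kernel
)
neck.init_weights()

feats = [torch.randn(1, c, 64 // 2**i, 64 // 2**i)
         for i, c in enumerate([256, 512, 1024, 2048])]
outs = neck(feats)
print([tuple(o.shape) for o in outs])     # five pyramid levels, 256 channels each
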
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/images.py
DELETED
@@ -1,68 +0,0 @@
-import os
-import time
-
-import requests
-from extensions.openai.errors import ServiceUnavailableError
-
-
-def generations(prompt: str, size: str, response_format: str, n: int):
-    # Stable Diffusion callout wrapper for txt2img
-    # Low effort implementation for compatibility. With only "prompt" being passed and assuming DALL-E
-    # the results will be limited and likely poor. SD has hundreds of models and dozens of settings.
-    # If you want high quality tailored results you should just use the Stable Diffusion API directly.
-    # it's too general an API to try and shape the result with specific tags like negative prompts
-    # or "masterpiece", etc. SD configuration is beyond the scope of this API.
-    # At this point I will not add the edits and variations endpoints (ie. img2img) because they
-    # require changing the form data handling to accept multipart form data, also to properly support
-    # url return types will require file management and a web serving files... Perhaps later!
-    base_model_size = 512 if 'SD_BASE_MODEL_SIZE' not in os.environ else int(os.environ.get('SD_BASE_MODEL_SIZE', 512))
-    sd_defaults = {
-        'sampler_name': 'DPM++ 2M Karras',  # vast improvement
-        'steps': 30,
-    }
-
-    width, height = [int(x) for x in size.split('x')]  # ignore the restrictions on size
-
-    # to hack on better generation, edit default payload.
-    payload = {
-        'prompt': prompt,  # ignore prompt limit of 1000 characters
-        'width': width,
-        'height': height,
-        'batch_size': n,
-    }
-    payload.update(sd_defaults)
-
-    scale = min(width, height) / base_model_size
-    if scale >= 1.2:
-        # for better performance with the default size (1024), and larger res.
-        scaler = {
-            'width': width // scale,
-            'height': height // scale,
-            'hr_scale': scale,
-            'enable_hr': True,
-            'hr_upscaler': 'Latent',
-            'denoising_strength': 0.68,
-        }
-        payload.update(scaler)
-
-    resp = {
-        'created': int(time.time()),
-        'data': []
-    }
-    from extensions.openai.script import params
-    # TODO: support SD_WEBUI_AUTH username:password pair.
-    sd_url = f"{os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', ''))}/sdapi/v1/txt2img"
-
-    response = requests.post(url=sd_url, json=payload)
-    r = response.json()
-    if response.status_code != 200 or 'images' not in r:
-        print(r)
-        raise ServiceUnavailableError(r.get('error', 'Unknown error calling Stable Diffusion'), code=response.status_code, internal_message=r.get('errors', None))
-    # r['parameters']...
-    for b64_json in r['images']:
-        if response_format == 'b64_json':
-            resp['data'].extend([{'b64_json': b64_json}])
-        else:
-            resp['data'].extend([{'url': f'data:image/png;base64,{b64_json}'}])  # yeah it's lazy. requests.get() will not work with this
-
-    return resp
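
For context, a rough usage sketch of the wrapper above. It normally runs behind the extension's OpenAI-compatible images route; calling it directly as below assumes a running Stable Diffusion webui, a reachable SD_WEBUI_URL, and that the extension's imports resolve outside the webui process, none of which this file guarantees:

# Rough sketch only; see the assumptions in the paragraph above.
import base64
import os

os.environ["SD_WEBUI_URL"] = "http://127.0.0.1:7860"   # illustrative address
from extensions.openai.images import generations

result = generations("a watercolor fox", size="512x512",
                     response_format="b64_json", n=1)
png_bytes = base64.b64decode(result["data"][0]["b64_json"])
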
spaces/Anonymous-sub/Rerender/ControlNet/ldm/models/diffusion/__init__.py
DELETED
File without changes
spaces/Anonymous-sub/Rerender/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Rerender
-emoji: ⚡
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Asifpa6/emotion-analyzer-app/emotion_analysis.py
DELETED
@@ -1,17 +0,0 @@
-
-from transformers import RobertaTokenizerFast, TFRobertaForSequenceClassification, pipeline
-
-tokenizer = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
-model = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
-
-emotion = pipeline('sentiment-analysis',
-                   model='arpanghoshal/EmoRoBERTa')
-
-
-def get_emotion(text):
-    emotion_labels = emotion(text)
-    emotion_detail = [item['label'] for item in emotion_labels]
-    print("The detected emotion is:", emotion_detail)
-    confidence_score = str(round([item['score'] for item in emotion_labels][0]*100, 2)) + "%"
-    print("The confidence score is:", confidence_score)
-    return emotion_detail, confidence_score
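
For context, a minimal usage sketch of the helper above; the sentence and the printed values are illustrative, and the first call downloads the EmoRoBERTa TensorFlow checkpoint:

# Minimal sketch: classify one string with the deleted helper.
from emotion_analysis import get_emotion

labels, confidence = get_emotion("I finally got the job!")
# e.g. labels == ['joy'], confidence == '97.31%' (values vary per run/model)
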
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/wheel.py
DELETED
@@ -1,136 +0,0 @@
-"""Support functions for working with wheel files.
-"""
-
-import logging
-from email.message import Message
-from email.parser import Parser
-from typing import Tuple
-from zipfile import BadZipFile, ZipFile
-
-from pip._vendor.packaging.utils import canonicalize_name
-
-from pip._internal.exceptions import UnsupportedWheel
-
-VERSION_COMPATIBLE = (1, 0)
-
-
-logger = logging.getLogger(__name__)
-
-
-def parse_wheel(wheel_zip: ZipFile, name: str) -> Tuple[str, Message]:
-    """Extract information from the provided wheel, ensuring it meets basic
-    standards.
-
-    Returns the name of the .dist-info directory and the parsed WHEEL metadata.
-    """
-    try:
-        info_dir = wheel_dist_info_dir(wheel_zip, name)
-        metadata = wheel_metadata(wheel_zip, info_dir)
-        version = wheel_version(metadata)
-    except UnsupportedWheel as e:
-        raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e)))
-
-    check_compatibility(version, name)
-
-    return info_dir, metadata
-
-
-def wheel_dist_info_dir(source: ZipFile, name: str) -> str:
-    """Returns the name of the contained .dist-info directory.
-
-    Raises AssertionError or UnsupportedWheel if not found, >1 found, or
-    it doesn't match the provided name.
-    """
-    # Zip file path separators must be /
-    subdirs = {p.split("/", 1)[0] for p in source.namelist()}
-
-    info_dirs = [s for s in subdirs if s.endswith(".dist-info")]
-
-    if not info_dirs:
-        raise UnsupportedWheel(".dist-info directory not found")
-
-    if len(info_dirs) > 1:
-        raise UnsupportedWheel(
-            "multiple .dist-info directories found: {}".format(", ".join(info_dirs))
-        )
-
-    info_dir = info_dirs[0]
-
-    info_dir_name = canonicalize_name(info_dir)
-    canonical_name = canonicalize_name(name)
-    if not info_dir_name.startswith(canonical_name):
-        raise UnsupportedWheel(
-            ".dist-info directory {!r} does not start with {!r}".format(
-                info_dir, canonical_name
-            )
-        )
-
-    return info_dir
-
-
-def read_wheel_metadata_file(source: ZipFile, path: str) -> bytes:
-    try:
-        return source.read(path)
-        # BadZipFile for general corruption, KeyError for missing entry,
-        # and RuntimeError for password-protected files
-    except (BadZipFile, KeyError, RuntimeError) as e:
-        raise UnsupportedWheel(f"could not read {path!r} file: {e!r}")
-
-
-def wheel_metadata(source: ZipFile, dist_info_dir: str) -> Message:
-    """Return the WHEEL metadata of an extracted wheel, if possible.
-    Otherwise, raise UnsupportedWheel.
-    """
-    path = f"{dist_info_dir}/WHEEL"
-    # Zip file path separators must be /
-    wheel_contents = read_wheel_metadata_file(source, path)
-
-    try:
-        wheel_text = wheel_contents.decode()
-    except UnicodeDecodeError as e:
-        raise UnsupportedWheel(f"error decoding {path!r}: {e!r}")
-
-    # FeedParser (used by Parser) does not raise any exceptions. The returned
-    # message may have .defects populated, but for backwards-compatibility we
-    # currently ignore them.
-    return Parser().parsestr(wheel_text)
-
-
-def wheel_version(wheel_data: Message) -> Tuple[int, ...]:
-    """Given WHEEL metadata, return the parsed Wheel-Version.
-    Otherwise, raise UnsupportedWheel.
-    """
-    version_text = wheel_data["Wheel-Version"]
-    if version_text is None:
-        raise UnsupportedWheel("WHEEL is missing Wheel-Version")
-
-    version = version_text.strip()
-
-    try:
-        return tuple(map(int, version.split(".")))
-    except ValueError:
-        raise UnsupportedWheel(f"invalid Wheel-Version: {version!r}")
-
-
-def check_compatibility(version: Tuple[int, ...], name: str) -> None:
-    """Raises errors or warns if called with an incompatible Wheel-Version.
-
-    pip should refuse to install a Wheel-Version that's a major series
-    ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
-    installing a version only minor version ahead (e.g 1.2 > 1.1).
-
-    version: a 2-tuple representing a Wheel-Version (Major, Minor)
-    name: name of wheel or package to raise exception about
-
-    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
-    """
-    if version[0] > VERSION_COMPATIBLE[0]:
-        raise UnsupportedWheel(
-            "{}'s Wheel-Version ({}) is not compatible with this version "
-            "of pip".format(name, ".".join(map(str, version)))
-        )
-    elif version > VERSION_COMPATIBLE:
-        logger.warning(
-            "Installing from a newer Wheel-Version (%s)",
-            ".".join(map(str, version)),
-        )
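
For context, a minimal usage sketch of `parse_wheel` as defined above; the wheel filename is illustrative:

# Minimal sketch: validate a wheel on disk and read its WHEEL metadata.
from zipfile import ZipFile

from pip._internal.utils.wheel import parse_wheel

with ZipFile("requests-2.31.0-py3-none-any.whl") as zf:
    info_dir, metadata = parse_wheel(zf, name="requests")
print(info_dir)                    # e.g. 'requests-2.31.0.dist-info'
print(metadata["Wheel-Version"])   # e.g. '1.0'
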
spaces/Ayaka2022/anime-aesthetic-predict/README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: Anime Aesthetic Predict
-emoji: ❤️🖼️
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: skytnt/anime-aesthetic-predict
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py
DELETED
@@ -1,334 +0,0 @@
-"""
-    pygments.lexers
-    ~~~~~~~~~~~~~~~
-
-    Pygments lexers.
-
-    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import sys
-import types
-from fnmatch import fnmatch
-from os.path import basename
-
-from pip._vendor.pygments.lexers._mapping import LEXERS
-from pip._vendor.pygments.modeline import get_filetype_from_buffer
-from pip._vendor.pygments.plugin import find_plugin_lexers
-from pip._vendor.pygments.util import ClassNotFound, guess_decode
-
-COMPAT = {
-    'Python3Lexer': 'PythonLexer',
-    'Python3TracebackLexer': 'PythonTracebackLexer',
-}
-
-__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
-           'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
-
-_lexer_cache = {}
-
-def _load_lexers(module_name):
-    """Load a lexer (and all others in the module too)."""
-    mod = __import__(module_name, None, None, ['__all__'])
-    for lexer_name in mod.__all__:
-        cls = getattr(mod, lexer_name)
-        _lexer_cache[cls.name] = cls
-
-
-def get_all_lexers(plugins=True):
-    """Return a generator of tuples in the form ``(name, aliases,
-    filenames, mimetypes)`` of all known lexers.
-
-    If *plugins* is true (the default), plugin lexers supplied by entrypoints
-    are also returned. Otherwise, only builtin ones are considered.
-    """
-    for item in LEXERS.values():
-        yield item[1:]
-    if plugins:
-        for lexer in find_plugin_lexers():
-            yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
-
-
-def find_lexer_class(name):
-    """Lookup a lexer class by name.
-
-    Return None if not found.
-    """
-    if name in _lexer_cache:
-        return _lexer_cache[name]
-    # lookup builtin lexers
-    for module_name, lname, aliases, _, _ in LEXERS.values():
-        if name == lname:
-            _load_lexers(module_name)
-            return _lexer_cache[name]
-    # continue with lexers from setuptools entrypoints
-    for cls in find_plugin_lexers():
-        if cls.name == name:
-            return cls
-
-
-def find_lexer_class_by_name(_alias):
-    """Lookup a lexer class by alias.
-
-    Like `get_lexer_by_name`, but does not instantiate the class.
-
-    .. versionadded:: 2.2
-    """
-    if not _alias:
-        raise ClassNotFound('no lexer for alias %r found' % _alias)
-    # lookup builtin lexers
-    for module_name, name, aliases, _, _ in LEXERS.values():
-        if _alias.lower() in aliases:
-            if name not in _lexer_cache:
-                _load_lexers(module_name)
-            return _lexer_cache[name]
-    # continue with lexers from setuptools entrypoints
-    for cls in find_plugin_lexers():
-        if _alias.lower() in cls.aliases:
-            return cls
-    raise ClassNotFound('no lexer for alias %r found' % _alias)
-
-
-def get_lexer_by_name(_alias, **options):
-    """Get a lexer by an alias.
-
-    Raises ClassNotFound if not found.
-    """
-    if not _alias:
-        raise ClassNotFound('no lexer for alias %r found' % _alias)
-
-    # lookup builtin lexers
-    for module_name, name, aliases, _, _ in LEXERS.values():
-        if _alias.lower() in aliases:
-            if name not in _lexer_cache:
-                _load_lexers(module_name)
-            return _lexer_cache[name](**options)
-    # continue with lexers from setuptools entrypoints
-    for cls in find_plugin_lexers():
-        if _alias.lower() in cls.aliases:
-            return cls(**options)
-    raise ClassNotFound('no lexer for alias %r found' % _alias)
-
-
-def load_lexer_from_file(filename, lexername="CustomLexer", **options):
-    """Load a lexer from a file.
-
-    This method expects a file located relative to the current working
-    directory, which contains a Lexer class. By default, it expects the
-    Lexer to be named CustomLexer; you can specify your own class name
-    as the second argument to this function.
-
-    Users should be very careful with the input, because this method
-    is equivalent to running eval on the input file.
-
-    Raises ClassNotFound if there are any problems importing the Lexer.
-
-    .. versionadded:: 2.2
-    """
-    try:
-        # This empty dict will contain the namespace for the exec'd file
-        custom_namespace = {}
-        with open(filename, 'rb') as f:
-            exec(f.read(), custom_namespace)
-        # Retrieve the class `lexername` from that namespace
-        if lexername not in custom_namespace:
-            raise ClassNotFound('no valid %s class found in %s' %
-                                (lexername, filename))
-        lexer_class = custom_namespace[lexername]
-        # And finally instantiate it with the options
-        return lexer_class(**options)
-    except OSError as err:
-        raise ClassNotFound('cannot read %s: %s' % (filename, err))
-    except ClassNotFound:
-        raise
-    except Exception as err:
-        raise ClassNotFound('error when loading custom lexer: %s' % err)
-
-
-def find_lexer_class_for_filename(_fn, code=None):
-    """Get a lexer for a filename.
-
-    If multiple lexers match the filename pattern, use ``analyse_text()`` to
-    figure out which one is more appropriate.
-
-    Returns None if not found.
-    """
-    matches = []
-    fn = basename(_fn)
-    for modname, name, _, filenames, _ in LEXERS.values():
-        for filename in filenames:
-            if fnmatch(fn, filename):
-                if name not in _lexer_cache:
-                    _load_lexers(modname)
-                matches.append((_lexer_cache[name], filename))
-    for cls in find_plugin_lexers():
-        for filename in cls.filenames:
-            if fnmatch(fn, filename):
-                matches.append((cls, filename))
-
-    if isinstance(code, bytes):
-        # decode it, since all analyse_text functions expect unicode
-        code = guess_decode(code)
-
-    def get_rating(info):
-        cls, filename = info
-        # explicit patterns get a bonus
-        bonus = '*' not in filename and 0.5 or 0
-        # The class _always_ defines analyse_text because it's included in
-        # the Lexer class. The default implementation returns None which
-        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
-        # to find lexers which need it overridden.
-        if code:
-            return cls.analyse_text(code) + bonus, cls.__name__
-        return cls.priority + bonus, cls.__name__
-
-    if matches:
-        matches.sort(key=get_rating)
-        # print "Possible lexers, after sort:", matches
-        return matches[-1][0]
-
-
-def get_lexer_for_filename(_fn, code=None, **options):
-    """Get a lexer for a filename.
-
-    If multiple lexers match the filename pattern, use ``analyse_text()`` to
-    figure out which one is more appropriate.
-
-    Raises ClassNotFound if not found.
-    """
-    res = find_lexer_class_for_filename(_fn, code)
-    if not res:
-        raise ClassNotFound('no lexer for filename %r found' % _fn)
-    return res(**options)
-
-
-def get_lexer_for_mimetype(_mime, **options):
-    """Get a lexer for a mimetype.
-
-    Raises ClassNotFound if not found.
-    """
-    for modname, name, _, _, mimetypes in LEXERS.values():
-        if _mime in mimetypes:
-            if name not in _lexer_cache:
-                _load_lexers(modname)
-            return _lexer_cache[name](**options)
-    for cls in find_plugin_lexers():
-        if _mime in cls.mimetypes:
-            return cls(**options)
-    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
-
-
-def _iter_lexerclasses(plugins=True):
-    """Return an iterator over all lexer classes."""
-    for key in sorted(LEXERS):
-        module_name, name = LEXERS[key][:2]
-        if name not in _lexer_cache:
-            _load_lexers(module_name)
-        yield _lexer_cache[name]
-    if plugins:
-        yield from find_plugin_lexers()
-
-
-def guess_lexer_for_filename(_fn, _text, **options):
-    """
-    Lookup all lexers that handle those filenames primary (``filenames``)
-    or secondary (``alias_filenames``). Then run a text analysis for those
-    lexers and choose the best result.
-
-    usage::
-
-        >>> from pygments.lexers import guess_lexer_for_filename
-        >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
-        <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
-        >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
-        <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
-        >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
-        <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
-    """
-    fn = basename(_fn)
-    primary = {}
-    matching_lexers = set()
-    for lexer in _iter_lexerclasses():
-        for filename in lexer.filenames:
-            if fnmatch(fn, filename):
-                matching_lexers.add(lexer)
-                primary[lexer] = True
-        for filename in lexer.alias_filenames:
-            if fnmatch(fn, filename):
-                matching_lexers.add(lexer)
-                primary[lexer] = False
-    if not matching_lexers:
-        raise ClassNotFound('no lexer for filename %r found' % fn)
-    if len(matching_lexers) == 1:
-        return matching_lexers.pop()(**options)
-    result = []
-    for lexer in matching_lexers:
-        rv = lexer.analyse_text(_text)
-        if rv == 1.0:
-            return lexer(**options)
-        result.append((rv, lexer))
-
-    def type_sort(t):
-        # sort by:
-        # - analyse score
-        # - is primary filename pattern?
-        # - priority
-        # - last resort: class name
-        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
-    result.sort(key=type_sort)
-
-    return result[-1][1](**options)
-
-
-def guess_lexer(_text, **options):
-    """Guess a lexer by strong distinctions in the text (eg, shebang)."""
-
-    if not isinstance(_text, str):
-        inencoding = options.get('inencoding', options.get('encoding'))
-        if inencoding:
-            _text = _text.decode(inencoding or 'utf8')
-        else:
-            _text, _ = guess_decode(_text)
-
-    # try to get a vim modeline first
-    ft = get_filetype_from_buffer(_text)
-
-    if ft is not None:
-        try:
-            return get_lexer_by_name(ft, **options)
-        except ClassNotFound:
-            pass
-
-    best_lexer = [0.0, None]
-    for lexer in _iter_lexerclasses():
-        rv = lexer.analyse_text(_text)
-        if rv == 1.0:
-            return lexer(**options)
-        if rv > best_lexer[0]:
-            best_lexer[:] = (rv, lexer)
-    if not best_lexer[0] or best_lexer[1] is None:
-        raise ClassNotFound('no lexer matching the text found')
-    return best_lexer[1](**options)
-
-
-class _automodule(types.ModuleType):
-    """Automatically import lexers."""
-
-    def __getattr__(self, name):
-        info = LEXERS.get(name)
-        if info:
-            _load_lexers(info[0])
-            cls = _lexer_cache[info[1]]
-            setattr(self, name, cls)
-            return cls
-        if name in COMPAT:
-            return getattr(self, COMPAT[name])
-        raise AttributeError(name)
-
-
-oldmod = sys.modules[__name__]
-newmod = _automodule(__name__)
-newmod.__dict__.update(oldmod.__dict__)
-sys.modules[__name__] = newmod
-del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
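
For context, a minimal usage sketch of the two most common entry points defined above (the vendored import path matches this file's location):

# Minimal sketch: look a lexer up by alias, then let the heuristics guess one.
from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer

lexer = get_lexer_by_name("python", stripall=True)
print(lexer.name)     # 'Python'

guessed = guess_lexer("#!/usr/bin/env python\nprint('hi')\n")
print(guessed.name)   # likely 'Python', via the shebang heuristic
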
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/diagram/__init__.py
DELETED
@@ -1,642 +0,0 @@
-import railroad
-from pip._vendor import pyparsing
-import typing
-from typing import (
-    List,
-    NamedTuple,
-    Generic,
-    TypeVar,
-    Dict,
-    Callable,
-    Set,
-    Iterable,
-)
-from jinja2 import Template
-from io import StringIO
-import inspect
-
-
-jinja2_template_source = """\
-<!DOCTYPE html>
-<html>
-<head>
-    {% if not head %}
-        <style type="text/css">
-            .railroad-heading {
-                font-family: monospace;
-            }
-        </style>
-    {% else %}
-        {{ head | safe }}
-    {% endif %}
-</head>
-<body>
-{{ body | safe }}
-{% for diagram in diagrams %}
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div class="railroad-description">{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %}
-</body>
-</html>
-"""
-
-template = Template(jinja2_template_source)
-
-# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
-NamedDiagram = NamedTuple(
-    "NamedDiagram",
-    [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
-)
-"""
-A simple structure for associating a name with a railroad diagram
-"""
-
-T = TypeVar("T")
-
-
-class EachItem(railroad.Group):
-    """
-    Custom railroad item to compose a:
-    - Group containing a
-      - OneOrMore containing a
-        - Choice of the elements in the Each
-    with the group label indicating that all must be matched
-    """
-
-    all_label = "[ALL]"
-
-    def __init__(self, *items):
-        choice_item = railroad.Choice(len(items) - 1, *items)
-        one_or_more_item = railroad.OneOrMore(item=choice_item)
-        super().__init__(one_or_more_item, label=self.all_label)
-
-
-class AnnotatedItem(railroad.Group):
-    """
-    Simple subclass of Group that creates an annotation label
-    """
-
-    def __init__(self, label: str, item):
-        super().__init__(item=item, label="[{}]".format(label) if label else label)
-
-
-class EditablePartial(Generic[T]):
-    """
-    Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
-    constructed.
-    """
-
-    # We need this here because the railroad constructors actually transform the data, so can't be called until the
-    # entire tree is assembled
-
-    def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
-        self.func = func
-        self.args = args
-        self.kwargs = kwargs
-
-    @classmethod
-    def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
-        """
-        If you call this function in the same way that you would call the constructor, it will store the arguments
-        as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
-        """
-        return EditablePartial(func=func, args=list(args), kwargs=kwargs)
-
-    @property
-    def name(self):
-        return self.kwargs["name"]
-
-    def __call__(self) -> T:
-        """
-        Evaluate the partial and return the result
-        """
-        args = self.args.copy()
-        kwargs = self.kwargs.copy()
-
-        # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
-        # args=['list', 'of', 'things'])
-        arg_spec = inspect.getfullargspec(self.func)
-        if arg_spec.varargs in self.kwargs:
-            args += kwargs.pop(arg_spec.varargs)
-
-        return self.func(*args, **kwargs)
-
-
-def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
-    """
-    Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
-    :params kwargs: kwargs to be passed in to the template
-    """
-    data = []
-    for diagram in diagrams:
-        if diagram.diagram is None:
-            continue
-        io = StringIO()
-        diagram.diagram.writeSvg(io.write)
-        title = diagram.name
-        if diagram.index == 0:
-            title += " (root)"
-        data.append({"title": title, "text": "", "svg": io.getvalue()})
-
-    return template.render(diagrams=data, **kwargs)
-
-
-def resolve_partial(partial: "EditablePartial[T]") -> T:
-    """
-    Recursively resolves a collection of Partials into whatever type they are
-    """
-    if isinstance(partial, EditablePartial):
-        partial.args = resolve_partial(partial.args)
-        partial.kwargs = resolve_partial(partial.kwargs)
-        return partial()
-    elif isinstance(partial, list):
-        return [resolve_partial(x) for x in partial]
-    elif isinstance(partial, dict):
-        return {key: resolve_partial(x) for key, x in partial.items()}
-    else:
-        return partial
-
-
-def to_railroad(
-    element: pyparsing.ParserElement,
-    diagram_kwargs: typing.Optional[dict] = None,
-    vertical: int = 3,
-    show_results_names: bool = False,
-    show_groups: bool = False,
-) -> List[NamedDiagram]:
-    """
-    Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
-    creation if you want to access the Railroad tree before it is converted to HTML
-    :param element: base element of the parser being diagrammed
-    :param diagram_kwargs: kwargs to pass to the Diagram() constructor
-    :param vertical: (optional) - int - limit at which number of alternatives should be
-       shown vertically instead of horizontally
-    :param show_results_names - bool to indicate whether results name annotations should be
-       included in the diagram
-    :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled
-       surrounding box
-    """
-    # Convert the whole tree underneath the root
-    lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
-    _to_diagram_element(
-        element,
-        lookup=lookup,
-        parent=None,
-        vertical=vertical,
-        show_results_names=show_results_names,
-        show_groups=show_groups,
-    )
-
-    root_id = id(element)
-    # Convert the root if it hasn't been already
-    if root_id in lookup:
-        if not element.customName:
-            lookup[root_id].name = ""
-        lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
-
-    # Now that we're finished, we can convert from intermediate structures into Railroad elements
-    diags = list(lookup.diagrams.values())
-    if len(diags) > 1:
-        # collapse out duplicate diags with the same name
-        seen = set()
-        deduped_diags = []
-        for d in diags:
-            # don't extract SkipTo elements, they are uninformative as subdiagrams
-            if d.name == "...":
-                continue
-            if d.name is not None and d.name not in seen:
-                seen.add(d.name)
-                deduped_diags.append(d)
-        resolved = [resolve_partial(partial) for partial in deduped_diags]
-    else:
-        # special case - if just one diagram, always display it, even if
-        # it has no name
-        resolved = [resolve_partial(partial) for partial in diags]
-    return sorted(resolved, key=lambda diag: diag.index)
-
-
-def _should_vertical(
-    specification: int, exprs: Iterable[pyparsing.ParserElement]
-) -> bool:
-    """
-    Returns true if we should return a vertical list of elements
-    """
-    if specification is None:
-        return False
-    else:
-        return len(_visible_exprs(exprs)) >= specification
-
-
-class ElementState:
-    """
-    State recorded for an individual pyparsing Element
-    """
-
-    # Note: this should be a dataclass, but we have to support Python 3.5
-    def __init__(
-        self,
-        element: pyparsing.ParserElement,
-        converted: EditablePartial,
-        parent: EditablePartial,
-        number: int,
-        name: str = None,
-        parent_index: typing.Optional[int] = None,
-    ):
-        #: The pyparsing element that this represents
-        self.element: pyparsing.ParserElement = element
-        #: The name of the element
-        self.name: typing.Optional[str] = name
-        #: The output Railroad element in an unconverted state
-        self.converted: EditablePartial = converted
-        #: The parent Railroad element, which we store so that we can extract this if it's duplicated
-        self.parent: EditablePartial = parent
-        #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
-        self.number: int = number
-        #: The index of this inside its parent
-        self.parent_index: typing.Optional[int] = parent_index
-        #: If true, we should extract this out into a subdiagram
-        self.extract: bool = False
-        #: If true, all of this element's children have been filled out
-        self.complete: bool = False
-
-    def mark_for_extraction(
-        self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
-    ):
-        """
-        Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
-        :param el_id: id of the element
-        :param state: element/diagram state tracker
-        :param name: name to use for this element's text
-        :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
-        root element when we know we're finished
-        """
-        self.extract = True
-
-        # Set the name
-        if not self.name:
-            if name:
-                # Allow forcing a custom name
-                self.name = name
-            elif self.element.customName:
-                self.name = self.element.customName
-            else:
-                self.name = ""
-
-        # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
-        # to be added
-        # Also, if this is just a string literal etc, don't bother extracting it
-        if force or (self.complete and _worth_extracting(self.element)):
-            state.extract_into_diagram(el_id)
-
-
-class ConverterState:
-    """
-    Stores some state that persists between recursions into the element tree
-    """
-
-    def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
-        #: A dictionary mapping ParserElements to state relating to them
-        self._element_diagram_states: Dict[int, ElementState] = {}
-        #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
-        self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
-        #: The index of the next unnamed element
-        self.unnamed_index: int = 1
-        #: The index of the next element. This is used for sorting
-        self.index: int = 0
-        #: Shared kwargs that are used to customize the construction of diagrams
-        self.diagram_kwargs: dict = diagram_kwargs or {}
-        self.extracted_diagram_names: Set[str] = set()
-
-    def __setitem__(self, key: int, value: ElementState):
-        self._element_diagram_states[key] = value
-
-    def __getitem__(self, key: int) -> ElementState:
-        return self._element_diagram_states[key]
-
-    def __delitem__(self, key: int):
-        del self._element_diagram_states[key]
-
-    def __contains__(self, key: int):
-        return key in self._element_diagram_states
-
-    def generate_unnamed(self) -> int:
-        """
-        Generate a number used in the name of an otherwise unnamed diagram
-        """
-        self.unnamed_index += 1
-        return self.unnamed_index
-
-    def generate_index(self) -> int:
-        """
-        Generate a number used to index a diagram
-        """
-        self.index += 1
-        return self.index
-
-    def extract_into_diagram(self, el_id: int):
-        """
-        Used when we encounter the same token twice in the same tree. When this
-        happens, we replace all instances of that token with a terminal, and
-        create a new subdiagram for the token
|
346 |
-
"""
|
347 |
-
position = self[el_id]
|
348 |
-
|
349 |
-
# Replace the original definition of this element with a regular block
|
350 |
-
if position.parent:
|
351 |
-
ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
|
352 |
-
if "item" in position.parent.kwargs:
|
353 |
-
position.parent.kwargs["item"] = ret
|
354 |
-
elif "items" in position.parent.kwargs:
|
355 |
-
position.parent.kwargs["items"][position.parent_index] = ret
|
356 |
-
|
357 |
-
# If the element we're extracting is a group, skip to its content but keep the title
|
358 |
-
if position.converted.func == railroad.Group:
|
359 |
-
content = position.converted.kwargs["item"]
|
360 |
-
else:
|
361 |
-
content = position.converted
|
362 |
-
|
363 |
-
self.diagrams[el_id] = EditablePartial.from_call(
|
364 |
-
NamedDiagram,
|
365 |
-
name=position.name,
|
366 |
-
diagram=EditablePartial.from_call(
|
367 |
-
railroad.Diagram, content, **self.diagram_kwargs
|
368 |
-
),
|
369 |
-
index=position.number,
|
370 |
-
)
|
371 |
-
|
372 |
-
del self[el_id]
|
373 |
-
|
374 |
-
|
375 |
-
def _worth_extracting(element: pyparsing.ParserElement) -> bool:
|
376 |
-
"""
|
377 |
-
Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
|
378 |
-
themselves have children, then its complex enough to extract
|
379 |
-
"""
|
380 |
-
children = element.recurse()
|
381 |
-
return any(child.recurse() for child in children)
|
382 |
-
|
383 |
-
|
384 |
-
def _apply_diagram_item_enhancements(fn):
|
385 |
-
"""
|
386 |
-
decorator to ensure enhancements to a diagram item (such as results name annotations)
|
387 |
-
get applied on return from _to_diagram_element (we do this since there are several
|
388 |
-
returns in _to_diagram_element)
|
389 |
-
"""
|
390 |
-
|
391 |
-
def _inner(
|
392 |
-
element: pyparsing.ParserElement,
|
393 |
-
parent: typing.Optional[EditablePartial],
|
394 |
-
lookup: ConverterState = None,
|
395 |
-
vertical: int = None,
|
396 |
-
index: int = 0,
|
397 |
-
name_hint: str = None,
|
398 |
-
show_results_names: bool = False,
|
399 |
-
show_groups: bool = False,
|
400 |
-
) -> typing.Optional[EditablePartial]:
|
401 |
-
|
402 |
-
ret = fn(
|
403 |
-
element,
|
404 |
-
parent,
|
405 |
-
lookup,
|
406 |
-
vertical,
|
407 |
-
index,
|
408 |
-
name_hint,
|
409 |
-
show_results_names,
|
410 |
-
show_groups,
|
411 |
-
)
|
412 |
-
|
413 |
-
# apply annotation for results name, if present
|
414 |
-
if show_results_names and ret is not None:
|
415 |
-
element_results_name = element.resultsName
|
416 |
-
if element_results_name:
|
417 |
-
# add "*" to indicate if this is a "list all results" name
|
418 |
-
element_results_name += "" if element.modalResults else "*"
|
419 |
-
ret = EditablePartial.from_call(
|
420 |
-
railroad.Group, item=ret, label=element_results_name
|
421 |
-
)
|
422 |
-
|
423 |
-
return ret
|
424 |
-
|
425 |
-
return _inner
|
426 |
-
|
427 |
-
|
428 |
-
def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
|
429 |
-
non_diagramming_exprs = (
|
430 |
-
pyparsing.ParseElementEnhance,
|
431 |
-
pyparsing.PositionToken,
|
432 |
-
pyparsing.And._ErrorStop,
|
433 |
-
)
|
434 |
-
return [
|
435 |
-
e
|
436 |
-
for e in exprs
|
437 |
-
if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
|
438 |
-
]
|
439 |
-
|
440 |
-
|
441 |
-
@_apply_diagram_item_enhancements
|
442 |
-
def _to_diagram_element(
|
443 |
-
element: pyparsing.ParserElement,
|
444 |
-
parent: typing.Optional[EditablePartial],
|
445 |
-
lookup: ConverterState = None,
|
446 |
-
vertical: int = None,
|
447 |
-
index: int = 0,
|
448 |
-
name_hint: str = None,
|
449 |
-
show_results_names: bool = False,
|
450 |
-
show_groups: bool = False,
|
451 |
-
) -> typing.Optional[EditablePartial]:
|
452 |
-
"""
|
453 |
-
Recursively converts a PyParsing Element to a railroad Element
|
454 |
-
:param lookup: The shared converter state that keeps track of useful things
|
455 |
-
:param index: The index of this element within the parent
|
456 |
-
:param parent: The parent of this element in the output tree
|
457 |
-
:param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
|
458 |
-
it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
|
459 |
-
do so
|
460 |
-
:param name_hint: If provided, this will override the generated name
|
461 |
-
:param show_results_names: bool flag indicating whether to add annotations for results names
|
462 |
-
:returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
|
463 |
-
:param show_groups: bool flag indicating whether to show groups using bounding box
|
464 |
-
"""
|
465 |
-
exprs = element.recurse()
|
466 |
-
name = name_hint or element.customName or element.__class__.__name__
|
467 |
-
|
468 |
-
# Python's id() is used to provide a unique identifier for elements
|
469 |
-
el_id = id(element)
|
470 |
-
|
471 |
-
element_results_name = element.resultsName
|
472 |
-
|
473 |
-
# Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
|
474 |
-
if not element.customName:
|
475 |
-
if isinstance(
|
476 |
-
element,
|
477 |
-
(
|
478 |
-
# pyparsing.TokenConverter,
|
479 |
-
# pyparsing.Forward,
|
480 |
-
pyparsing.Located,
|
481 |
-
),
|
482 |
-
):
|
483 |
-
# However, if this element has a useful custom name, and its child does not, we can pass it on to the child
|
484 |
-
if exprs:
|
485 |
-
if not exprs[0].customName:
|
486 |
-
propagated_name = name
|
487 |
-
else:
|
488 |
-
propagated_name = None
|
489 |
-
|
490 |
-
return _to_diagram_element(
|
491 |
-
element.expr,
|
492 |
-
parent=parent,
|
493 |
-
lookup=lookup,
|
494 |
-
vertical=vertical,
|
495 |
-
index=index,
|
496 |
-
name_hint=propagated_name,
|
497 |
-
show_results_names=show_results_names,
|
498 |
-
show_groups=show_groups,
|
499 |
-
)
|
500 |
-
|
501 |
-
# If the element isn't worth extracting, we always treat it as the first time we say it
|
502 |
-
if _worth_extracting(element):
|
503 |
-
if el_id in lookup:
|
504 |
-
# If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
|
505 |
-
# so we have to extract it into a new diagram.
|
506 |
-
looked_up = lookup[el_id]
|
507 |
-
looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
|
508 |
-
ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
|
509 |
-
return ret
|
510 |
-
|
511 |
-
elif el_id in lookup.diagrams:
|
512 |
-
# If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
|
513 |
-
# just put in a marker element that refers to the sub-diagram
|
514 |
-
ret = EditablePartial.from_call(
|
515 |
-
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
|
516 |
-
)
|
517 |
-
return ret
|
518 |
-
|
519 |
-
# Recursively convert child elements
|
520 |
-
# Here we find the most relevant Railroad element for matching pyparsing Element
|
521 |
-
# We use ``items=[]`` here to hold the place for where the child elements will go once created
|
522 |
-
if isinstance(element, pyparsing.And):
|
523 |
-
# detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
|
524 |
-
# (all will have the same name, and resultsName)
|
525 |
-
if not exprs:
|
526 |
-
return None
|
527 |
-
if len(set((e.name, e.resultsName) for e in exprs)) == 1:
|
528 |
-
ret = EditablePartial.from_call(
|
529 |
-
railroad.OneOrMore, item="", repeat=str(len(exprs))
|
530 |
-
)
|
531 |
-
elif _should_vertical(vertical, exprs):
|
532 |
-
ret = EditablePartial.from_call(railroad.Stack, items=[])
|
533 |
-
else:
|
534 |
-
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
535 |
-
elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
|
536 |
-
if not exprs:
|
537 |
-
return None
|
538 |
-
if _should_vertical(vertical, exprs):
|
539 |
-
ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
|
540 |
-
else:
|
541 |
-
ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
|
542 |
-
elif isinstance(element, pyparsing.Each):
|
543 |
-
if not exprs:
|
544 |
-
return None
|
545 |
-
ret = EditablePartial.from_call(EachItem, items=[])
|
546 |
-
elif isinstance(element, pyparsing.NotAny):
|
547 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
|
548 |
-
elif isinstance(element, pyparsing.FollowedBy):
|
549 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
|
550 |
-
elif isinstance(element, pyparsing.PrecededBy):
|
551 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
|
552 |
-
elif isinstance(element, pyparsing.Group):
|
553 |
-
if show_groups:
|
554 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
|
555 |
-
else:
|
556 |
-
ret = EditablePartial.from_call(railroad.Group, label="", item="")
|
557 |
-
elif isinstance(element, pyparsing.TokenConverter):
|
558 |
-
ret = EditablePartial.from_call(
|
559 |
-
AnnotatedItem, label=type(element).__name__.lower(), item=""
|
560 |
-
)
|
561 |
-
elif isinstance(element, pyparsing.Opt):
|
562 |
-
ret = EditablePartial.from_call(railroad.Optional, item="")
|
563 |
-
elif isinstance(element, pyparsing.OneOrMore):
|
564 |
-
ret = EditablePartial.from_call(railroad.OneOrMore, item="")
|
565 |
-
elif isinstance(element, pyparsing.ZeroOrMore):
|
566 |
-
ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
|
567 |
-
elif isinstance(element, pyparsing.Group):
|
568 |
-
ret = EditablePartial.from_call(
|
569 |
-
railroad.Group, item=None, label=element_results_name
|
570 |
-
)
|
571 |
-
elif isinstance(element, pyparsing.Empty) and not element.customName:
|
572 |
-
# Skip unnamed "Empty" elements
|
573 |
-
ret = None
|
574 |
-
elif len(exprs) > 1:
|
575 |
-
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
576 |
-
elif len(exprs) > 0 and not element_results_name:
|
577 |
-
ret = EditablePartial.from_call(railroad.Group, item="", label=name)
|
578 |
-
else:
|
579 |
-
terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
|
580 |
-
ret = terminal
|
581 |
-
|
582 |
-
if ret is None:
|
583 |
-
return
|
584 |
-
|
585 |
-
# Indicate this element's position in the tree so we can extract it if necessary
|
586 |
-
lookup[el_id] = ElementState(
|
587 |
-
element=element,
|
588 |
-
converted=ret,
|
589 |
-
parent=parent,
|
590 |
-
parent_index=index,
|
591 |
-
number=lookup.generate_index(),
|
592 |
-
)
|
593 |
-
if element.customName:
|
594 |
-
lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
|
595 |
-
|
596 |
-
i = 0
|
597 |
-
for expr in exprs:
|
598 |
-
# Add a placeholder index in case we have to extract the child before we even add it to the parent
|
599 |
-
if "items" in ret.kwargs:
|
600 |
-
ret.kwargs["items"].insert(i, None)
|
601 |
-
|
602 |
-
item = _to_diagram_element(
|
603 |
-
expr,
|
604 |
-
parent=ret,
|
605 |
-
lookup=lookup,
|
606 |
-
vertical=vertical,
|
607 |
-
index=i,
|
608 |
-
show_results_names=show_results_names,
|
609 |
-
show_groups=show_groups,
|
610 |
-
)
|
611 |
-
|
612 |
-
# Some elements don't need to be shown in the diagram
|
613 |
-
if item is not None:
|
614 |
-
if "item" in ret.kwargs:
|
615 |
-
ret.kwargs["item"] = item
|
616 |
-
elif "items" in ret.kwargs:
|
617 |
-
# If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
|
618 |
-
ret.kwargs["items"][i] = item
|
619 |
-
i += 1
|
620 |
-
elif "items" in ret.kwargs:
|
621 |
-
# If we're supposed to skip this element, remove it from the parent
|
622 |
-
del ret.kwargs["items"][i]
|
623 |
-
|
624 |
-
# If all this items children are none, skip this item
|
625 |
-
if ret and (
|
626 |
-
("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
|
627 |
-
or ("item" in ret.kwargs and ret.kwargs["item"] is None)
|
628 |
-
):
|
629 |
-
ret = EditablePartial.from_call(railroad.Terminal, name)
|
630 |
-
|
631 |
-
# Mark this element as "complete", ie it has all of its children
|
632 |
-
if el_id in lookup:
|
633 |
-
lookup[el_id].complete = True
|
634 |
-
|
635 |
-
if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
|
636 |
-
lookup.extract_into_diagram(el_id)
|
637 |
-
if ret is not None:
|
638 |
-
ret = EditablePartial.from_call(
|
639 |
-
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
|
640 |
-
)
|
641 |
-
|
642 |
-
return ret
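The machinery above is easiest to see end to end from pyparsing's public entry point. A minimal sketch, assuming pyparsing >= 3.0 with the railroad-diagrams and jinja2 extras installed; create_diagram() is the documented wrapper that drives this converter and writes the HTML:

import pyparsing as pp

# A small grammar with a repeated sub-expression, so the converter's
# duplicate-extraction path (mark_for_extraction / extract_into_diagram) fires.
number = pp.pyparsing_common.number("value")
operand = number | pp.Word(pp.alphas)("name")
expr = pp.infix_notation(
    operand,
    [
        (pp.one_of("* /"), 2, pp.OpAssoc.LEFT),
        (pp.one_of("+ -"), 2, pp.OpAssoc.LEFT),
    ],
)

# vertical=3 mirrors the `vertical` threshold documented above: a choice with
# three or more visible alternatives is drawn as a vertical Choice/Stack
# instead of a horizontal row.
expr.create_diagram("expr_diagram.html", vertical=3, show_results_names=True)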
spaces/Blessin/yes-and-improv-game/app.py
DELETED
@@ -1,50 +0,0 @@
-import gradio as gr
-import openai
-
-# Function to extract the last statement from the input
-def extract_last_statement(input_text):
-    lines = input_text.strip().split('\n')
-    last_line = lines[-1]
-    last_statement = last_line.split(':')[-1].strip() if ':' in last_line else last_line
-    return last_statement
-
-def yes_and_game(api_key, user_input):
-    # Initialize OpenAI API client
-    openai.api_key = api_key
-
-    # Extract the last statement from the user input
-    last_statement = extract_last_statement(user_input)
-
-    # Create the prompt for GPT
-    gpt_prompt = (f"Play the Yes, And improv game. "
-                  f"You will start your response with 'Yes, and'. "
-                  f"Keep your responses short. Not more than one statement. Responses can be funny or absurd. "
-                  f"The input statement can be a single line or a multi line statement.\n"
-                  f"Yes, And {last_statement}\n"
-                  f"Yes, And ")
-
-    # Generate GPT response
-    gpt_response = openai.Completion.create(
-        engine="text-davinci-002",
-        prompt=gpt_prompt,
-        max_tokens=20,
-        temperature=0.9  # Increased temperature for more randomness
-    )['choices'][0]['text'].strip()
-
-    # Format and return the result
-    result = f"{last_statement}\nYes, And {gpt_response}"
-    return result
-
-iface = gr.Interface(
-    fn=yes_and_game,
-    inputs=[
-        gr.Textbox(label="OpenAI API Key", type="password"),
-        gr.Textbox(lines=5, label="Statement"),
-    ],
-    outputs=gr.Textbox(label="Game Transcript", live=True, flagging=True),  # Setting live=True for real-time updates, flagging=True to allow copying
-    title="The Yes, And Game"  # Adding title here
-)
-
-
-# This will create a link to host your model on Hugging Face Spaces when executed
-iface.launch(share=True)
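Two caveats for anyone resurrecting this deleted app: openai.Completion with text-davinci-002 is a retired endpoint/model pairing, and live=True / flagging=True are not gr.Textbox arguments in recent Gradio releases (live belongs on gr.Interface; flagging is controlled by allow_flagging). A hedged sketch of the same "Yes, And" turn against the current chat-completions client, assuming openai>=1.0 (the model name is illustrative, not from the original):

from openai import OpenAI

def yes_and_turn(api_key: str, last_statement: str) -> str:
    # One improv turn via the chat-completions endpoint.
    client = OpenAI(api_key=api_key)
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # assumption: any available chat model works here
        messages=[
            {"role": "system",
             "content": "Play the Yes, And improv game. Reply with one short, "
                        "funny or absurd statement that starts with 'Yes, and'."},
            {"role": "user", "content": last_statement},
        ],
        max_tokens=20,
        temperature=0.9,  # keeps the original app's high-randomness setting
    )
    return response.choices[0].message.content.strip()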
spaces/CM-15/NLP-demo/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: NLP Demo
-emoji: 😻
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/CVPR2022_papers/style.css
DELETED
@@ -1,22 +0,0 @@
-h1 {
-  text-align: center;
-}
-table a {
-  background-color: transparent;
-  color: #58a6ff;
-  text-decoration: none;
-}
-a:active,
-a:hover {
-  outline-width: 0;
-}
-a:hover {
-  text-decoration: underline;
-}
-table, th, td {
-  border: 1px solid;
-}
-img#visitor-badge {
-  display: block;
-  margin: auto;
-}
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mmnasnet/adapter.py
DELETED
@@ -1,120 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Zhenwei Shao https://github.com/ParadoxZW
-# --------------------------------------------------------
-
-import torch.nn as nn
-import torch
-from openvqa.core.base_dataset import BaseAdapter
-from openvqa.utils.make_mask import make_mask
-
-
-class Adapter(BaseAdapter):
-    def __init__(self, __C):
-        super(Adapter, self).__init__(__C)
-        self.__C = __C
-
-    def relation_embedding(self, f_g):
-        x_min, y_min, x_max, y_max = torch.chunk(f_g, 4, dim=2)  # [bs, n_obj, 1]
-
-        cx = (x_min + x_max) * 0.5  # [bs, n_obj, 1]
-        cy = (y_min + y_max) * 0.5  # [bs, n_obj, 1]
-        w = (x_max - x_min) + 1.  # [bs, n_obj, 1]
-        h = (y_max - y_min) + 1.  # [bs, n_obj, 1]
-
-        delta_x = cx - cx.transpose(-1, -2)
-        delta_x = torch.clamp(torch.abs(delta_x / w), min=1e-3)
-        delta_x = torch.log(delta_x)  # [bs, n_obj, n_obj]
-
-        delta_y = cy - cy.transpose(-1, -2)
-        delta_y = torch.clamp(torch.abs(delta_y / h), min=1e-3)
-        delta_y = torch.log(delta_y)  # [bs, n_obj, n_obj]
-
-        delta_w = torch.log(w / w.transpose(-1, -2))  # [bs, n_obj, n_obj]
-        delta_h = torch.log(h / h.transpose(-1, -2))  # [bs, n_obj, n_obj]
-        size = delta_h.size()
-
-        delta_x = delta_x.view(size[0], size[1], size[2], 1)
-        delta_y = delta_y.view(size[0], size[1], size[2], 1)
-        delta_w = delta_w.view(size[0], size[1], size[2], 1)
-        delta_h = delta_h.view(size[0], size[1], size[2], 1)  # [bs, n_obj, n_obj, 1]
-        position_mat = torch.cat(
-            (delta_x, delta_y, delta_w, delta_h), -1)  # [bs, n_obj, n_obj, 4]
-
-        return position_mat
-
-    def vqa_init(self, __C):
-        imgfeat_linear_size = __C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'][1]
-        if __C.USE_BBOX_FEAT:
-            self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE)
-            imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE
-        self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE)
-
-    def gqa_init(self, __C):
-        imgfeat_linear_size = __C.FEAT_SIZE['gqa']['FRCN_FEAT_SIZE'][1]
-        if __C.USE_BBOX_FEAT:
-            self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE)
-            imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE
-        self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE)
-
-        if __C.USE_AUX_FEAT:
-            self.grid_linear = nn.Linear(__C.FEAT_SIZE['gqa']['GRID_FEAT_SIZE'][1], __C.HIDDEN_SIZE)
-
-    def clevr_init(self, __C):
-        self.grid_linear = nn.Linear(__C.FEAT_SIZE['clevr']['GRID_FEAT_SIZE'][1], __C.HIDDEN_SIZE)
-
-    def vqa_forward(self, feat_dict):
-        frcn_feat = feat_dict['FRCN_FEAT']
-        bbox_feat = feat_dict['BBOX_FEAT']
-
-        img_feat_mask = make_mask(frcn_feat)
-
-        if self.__C.USE_BBOX_FEAT:
-            bbox_feat = self.bbox_proc(bbox_feat)
-            bbox_feat = self.bbox_linear(bbox_feat)
-            frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1)
-        img_feat = self.frcn_linear(frcn_feat)
-        rel_embed = self.relation_embedding(bbox_feat)
-
-        return img_feat, rel_embed, img_feat_mask
-
-    def gqa_forward(self, feat_dict):
-        frcn_feat = feat_dict['FRCN_FEAT']
-        bbox_feat = feat_dict['BBOX_FEAT']
-        grid_feat = feat_dict['GRID_FEAT']
-
-        img_feat_mask = make_mask(frcn_feat)
-
-        if self.__C.USE_BBOX_FEAT:
-            bbox_feat = self.bbox_linear(bbox_feat)
-            frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1)
-        img_feat = self.frcn_linear(frcn_feat)
-
-        if self.__C.USE_AUX_FEAT:
-            grid_feat_mask = make_mask(grid_feat)
-            img_feat_mask = torch.cat((img_feat_mask, grid_feat_mask), dim=-1)
-            grid_feat = self.grid_linear(grid_feat)
-            img_feat = torch.cat((img_feat, grid_feat), dim=1)
-
-        rel_embed = self.relation_embedding(bbox_feat)
-
-        return img_feat, rel_embed, img_feat_mask
-
-    def clevr_forward(self, feat_dict):
-        grid_feat = feat_dict['GRID_FEAT']
-
-        img_feat_mask = make_mask(grid_feat)
-        img_feat = self.grid_linear(grid_feat)
-
-        rel_embed = self.relation_embedding(bbox_feat)
-
-        return img_feat, rel_embed, img_feat_mask
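relation_embedding above is the familiar 4-d relative-geometry encoding (log-scaled pairwise offsets and size ratios) used in relation-style attention for detection and VQA models. Note also that clevr_forward references bbox_feat, which is never defined in that scope, so the CLEVR path would raise a NameError as written. A standalone sketch of the encoding on two toy boxes, keeping the adapter's [bs, n_obj, 4] layout:

import torch

# Two boxes in (x_min, y_min, x_max, y_max) format, batch size 1.
f_g = torch.tensor([[[0., 0., 10., 10.],
                     [5., 5., 15., 25.]]])  # shape [1, 2, 4]

x_min, y_min, x_max, y_max = torch.chunk(f_g, 4, dim=2)   # each [1, 2, 1]
cx, cy = (x_min + x_max) * 0.5, (y_min + y_max) * 0.5
w, h = (x_max - x_min) + 1., (y_max - y_min) + 1.

# Pairwise log-scaled offsets and size ratios, clamped away from log(0).
delta_x = torch.log(torch.clamp(torch.abs((cx - cx.transpose(-1, -2)) / w), min=1e-3))
delta_y = torch.log(torch.clamp(torch.abs((cy - cy.transpose(-1, -2)) / h), min=1e-3))
delta_w = torch.log(w / w.transpose(-1, -2))
delta_h = torch.log(h / h.transpose(-1, -2))

position_mat = torch.stack((delta_x, delta_y, delta_w, delta_h), dim=-1)
print(position_mat.shape)  # torch.Size([1, 2, 2, 4])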
spaces/CVPR/LIVE/pybind11/tests/test_modules.py
DELETED
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-from pybind11_tests import modules as m
-from pybind11_tests.modules import subsubmodule as ms
-from pybind11_tests import ConstructorStats
-
-
-def test_nested_modules():
-    import pybind11_tests
-    assert pybind11_tests.__name__ == "pybind11_tests"
-    assert pybind11_tests.modules.__name__ == "pybind11_tests.modules"
-    assert pybind11_tests.modules.subsubmodule.__name__ == "pybind11_tests.modules.subsubmodule"
-    assert m.__name__ == "pybind11_tests.modules"
-    assert ms.__name__ == "pybind11_tests.modules.subsubmodule"
-
-    assert ms.submodule_func() == "submodule_func()"
-
-
-def test_reference_internal():
-    b = ms.B()
-    assert str(b.get_a1()) == "A[1]"
-    assert str(b.a1) == "A[1]"
-    assert str(b.get_a2()) == "A[2]"
-    assert str(b.a2) == "A[2]"
-
-    b.a1 = ms.A(42)
-    b.a2 = ms.A(43)
-    assert str(b.get_a1()) == "A[42]"
-    assert str(b.a1) == "A[42]"
-    assert str(b.get_a2()) == "A[43]"
-    assert str(b.a2) == "A[43]"
-
-    astats, bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B)
-    assert astats.alive() == 2
-    assert bstats.alive() == 1
-    del b
-    assert astats.alive() == 0
-    assert bstats.alive() == 0
-    assert astats.values() == ['1', '2', '42', '43']
-    assert bstats.values() == []
-    assert astats.default_constructions == 0
-    assert bstats.default_constructions == 1
-    assert astats.copy_constructions == 0
-    assert bstats.copy_constructions == 0
-    # assert astats.move_constructions >= 0  # Don't invoke any
-    # assert bstats.move_constructions >= 0  # Don't invoke any
-    assert astats.copy_assignments == 2
-    assert bstats.copy_assignments == 0
-    assert astats.move_assignments == 0
-    assert bstats.move_assignments == 0
-
-
-def test_importing():
-    from pybind11_tests.modules import OD
-    from collections import OrderedDict
-
-    assert OD is OrderedDict
-    assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
-
-
-def test_pydoc():
-    """Pydoc needs to be able to provide help() for everything inside a pybind11 module"""
-    import pybind11_tests
-    import pydoc
-
-    assert pybind11_tests.__name__ == "pybind11_tests"
-    assert pybind11_tests.__doc__ == "pybind11 test module"
-    assert pydoc.text.docmodule(pybind11_tests)
-
-
-def test_duplicate_registration():
-    """Registering two things with the same name"""
-
-    assert m.duplicate_registration() == []
spaces/CVPR/WALT/mmdet/datasets/samplers/distributed_sampler.py
DELETED
@@ -1,39 +0,0 @@
-import math
-
-import torch
-from torch.utils.data import DistributedSampler as _DistributedSampler
-
-
-class DistributedSampler(_DistributedSampler):
-
-    def __init__(self,
-                 dataset,
-                 num_replicas=None,
-                 rank=None,
-                 shuffle=True,
-                 seed=0):
-        super().__init__(
-            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
-        # for the compatibility from PyTorch 1.3+
-        self.seed = seed if seed is not None else 0
-
-    def __iter__(self):
-        # deterministically shuffle based on epoch
-        if self.shuffle:
-            g = torch.Generator()
-            g.manual_seed(self.epoch + self.seed)
-            indices = torch.randperm(len(self.dataset), generator=g).tolist()
-        else:
-            indices = torch.arange(len(self.dataset)).tolist()
-
-        # add extra samples to make it evenly divisible
-        # in case that indices is shorter than half of total_size
-        indices = (indices *
-                   math.ceil(self.total_size / len(indices)))[:self.total_size]
-        assert len(indices) == self.total_size
-
-        # subsample
-        indices = indices[self.rank:self.total_size:self.num_replicas]
-        assert len(indices) == self.num_samples
-
-        return iter(indices)
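This subclass exists mainly to make the shuffle seedable (self.epoch + self.seed) and to pad indices so every rank draws the same number of samples. A minimal usage sketch; passing num_replicas and rank explicitly avoids needing an initialized process group for a quick local test:

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(100))
sampler = DistributedSampler(dataset, num_replicas=4, rank=0, shuffle=True, seed=42)
loader = DataLoader(dataset, batch_size=8, sampler=sampler)

for epoch in range(2):
    sampler.set_epoch(epoch)  # inherited setter; changes the permutation per epoch
    for (batch,) in loader:
        pass  # each of the 4 ranks sees a disjoint quarter of the padded indices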
spaces/Candyraider/Proxy4/README.md
DELETED
@@ -1,10 +0,0 @@
----
-title: Proxy4
-emoji: 🏢
-colorFrom: purple
-colorTo: purple
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Chris4K/llms_compare/Antares Mic Mod Efx Mac ~UPD~ Crack Torrent.md
DELETED
@@ -1,84 +0,0 @@
-## Antares Mic Mod Efx Mac Crack Torrent
-
-**CLICK HERE ->>> [https://www.google.com/url?q=https%3A%2F%2Furlca.com%2F2txP1A&sa=D&sntz=1&usg=AOvVaw2UH1YkG1xYBKItn2Gwxll7](https://www.google.com/url?q=https%3A%2F%2Furlca.com%2F2txP1A&sa=D&sntz=1&usg=AOvVaw2UH1YkG1xYBKItn2Gwxll7)**
-
-# How to Get Antares Mic Mod Efx Mac Crack Torrent for Free
-
-Antares Mic Mod Efx is a popular plugin that allows you to emulate the sound of hundreds of different microphones with your existing mic. Whether you want to record vocals, guitars, drums, or any other instrument, you can use Mic Mod Efx to change the tone and character of your sound. But how can you get this plugin for free without paying the hefty price tag?
-
-One way is to download a cracked version of Antares Mic Mod Efx Mac from a torrent site. A torrent is a file that contains information about other files that are distributed across a network of computers. By using a torrent client, you can download the files you want from other users who have them. However, this method is not recommended for several reasons.
-
-First of all, downloading cracked software is illegal and unethical. You are violating the copyright and license agreement of the software developer, and you are depriving them of their rightful income. Secondly, downloading cracked software is risky and unsafe. You never know what kind of malware or viruses might be hidden in the files you download. You could end up infecting your computer or compromising your personal data. Thirdly, downloading cracked software is unreliable and unstable. You might encounter errors, bugs, or compatibility issues that could affect your performance or quality of your recordings.
-
-So what is the best way to get Antares Mic Mod Efx Mac for free? The answer is simple: use a trial version. Antares offers a free 14-day trial of Mic Mod Efx on their website. You can download and install the plugin on your Mac and use it for two weeks without any limitations or restrictions. You can try out all the features and functions of the plugin and see how it works for you. You can also compare the sound of different microphones and find the ones that suit your style and preference.
-
-After the trial period is over, you can decide whether you want to buy the full version of Antares Mic Mod Efx Mac or not. The full version costs $129 and comes with lifetime updates and support. You can also get it as part of the Antares AVOX bundle, which includes other vocal processing plugins such as Auto-Tune, Harmony Engine, Articulator, and more.
-
-If you are serious about your music production and want to get the best sound possible, then investing in Antares Mic Mod Efx Mac is worth it. You will get access to a huge collection of microphone models that will enhance your recordings and give you more creative options. You will also get a legal and safe software that will work smoothly and reliably on your Mac.
-
-So don't waste your time and risk your security by downloading Antares Mic Mod Efx Mac crack torrent from shady sites. Instead, go to the official Antares website and download the free trial version of Mic Mod Efx today. You will be amazed by what this plugin can do for your sound.
-
-## What Users Say About Antares Mic Mod Efx Mac
-
-If you are still not convinced by the benefits of Antares Mic Mod Efx Mac, you might want to hear what other users have to say about it. Many users have shared their positive experiences and reviews of this plugin on various platforms and websites. Here are some of the testimonials from real users who have tried Antares Mic Mod Efx Mac:
-
-- "I was just recording on the Sony C800g not too long ago and when I use this plugin at home (with my ml 770) and hear myself it sounds like I'm on the Sony. Blown away by how good this plugin is." - Michael from Newport Beach, CA[^1^]
-- "This tool is just that... A tool. I used it alongside my 1977 U87 and my U87ai. I was unable to tell the difference between my Ai and my Vintage U87 when I used this plugin to turn one into the other. Like a few others have stated... I'm shocked this tool doesn't get more exposure." - CC from Colorado[^1^]
-- "I'm using this plug-in with a Manley ref cad, I have no clue what the actual version of most of these mics are really supposed to sound like. All I know is they sound great!!" - Rony from Philadelphia[^1^]
-- "I'm astounded at the lack of credit Mic Mod has gotten. This software is really easy to use and also sounds extremely convincing to my ear. By no means does it sound like my own mic being EQ'ed. What I hear is dynamic frequency response change and saturation as well." - Anthony Lowery from Manteca, CA[^1^]
-- "This is clearly not something you could do in the real world, but if it creates a sound that works then it's more than justified. The mic models themselves are stored as separate files which, in the case of Mac users, are located within the Preferences folder in the System folder." - Paul White from Sound On Sound[^3^]
-
-As you can see, Antares Mic Mod Efx Mac has received rave reviews from users who have tried it and loved it. They have praised its ease of use, its realism, its versatility, and its quality. They have also compared it favorably to some of the most expensive and sought-after microphones in the world.
-
-dfd1c89656
spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/ssim.py
DELETED
@@ -1,84 +0,0 @@
-"""
-Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim
-"""
-
-from math import exp
-
-import torch
-import torch.nn.functional as F
-from torch.autograd import Variable
-
-
-def gaussian(window_size, sigma):
-    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
-    return gauss / gauss.sum()
-
-
-def create_window(window_size, channel):
-    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
-    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
-    window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
-    return window
-
-
-def _ssim(img1, img2, window, window_size, channel, size_average=True):
-    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
-    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
-
-    mu1_sq = mu1.pow(2)
-    mu2_sq = mu2.pow(2)
-    mu1_mu2 = mu1 * mu2
-
-    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
-    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
-    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
-
-    C1 = 0.01 ** 2
-    C2 = 0.03 ** 2
-
-    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
-
-    if size_average:
-        return ssim_map.mean()
-    else:
-        return ssim_map.mean(1)
-
-
-class SSIM(torch.nn.Module):
-    def __init__(self, window_size=11, size_average=True):
-        super(SSIM, self).__init__()
-        self.window_size = window_size
-        self.size_average = size_average
-        self.channel = 1
-        self.window = create_window(window_size, self.channel)
-
-    def forward(self, img1, img2):
-        (_, channel, _, _) = img1.size()
-
-        if channel == self.channel and self.window.data.type() == img1.data.type():
-            window = self.window
-        else:
-            window = create_window(self.window_size, channel)
-
-            if img1.is_cuda:
-                window = window.cuda(img1.get_device())
-            window = window.type_as(img1)
-
-            self.window = window
-            self.channel = channel
-
-        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
-
-
-window = None
-
-
-def ssim(img1, img2, window_size=11, size_average=True):
-    (_, channel, _, _) = img1.size()
-    global window
-    if window is None:
-        window = create_window(window_size, channel)
-        if img1.is_cuda:
-            window = window.cuda(img1.get_device())
-        window = window.type_as(img1)
-    return _ssim(img1, img2, window, window_size, channel, size_average)
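Both entry points operate on NCHW float tensors and return the mean SSIM over the Gaussian window (higher means more similar, 1.0 for identical images). A short usage sketch; 1 - SSIM(...) is the usual way this kind of module doubles as a differentiable reconstruction loss:

import torch

img1 = torch.rand(1, 1, 64, 64)                              # N, C, H, W in [0, 1]
img2 = (img1 + 0.05 * torch.randn_like(img1)).clamp(0., 1.)  # noisy copy

score = ssim(img1, img2, window_size=11)         # functional form above
loss = 1.0 - SSIM(window_size=11)(img1, img2)    # module form, usable as a loss
print(float(score), float(loss))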
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/segmentation_mask.py
DELETED
@@ -1,535 +0,0 @@
-import cv2
-import copy
-import torch
-import numpy as np
-from maskrcnn_benchmark.layers.misc import interpolate
-
-import pycocotools.mask as mask_utils
-
-# transpose
-FLIP_LEFT_RIGHT = 0
-FLIP_TOP_BOTTOM = 1
-
-
-""" ABSTRACT
-Segmentations come in either:
-1) Binary masks
-2) Polygons
-
-Binary masks can be represented in a contiguous array
-and operations can be carried out more efficiently,
-therefore BinaryMaskList handles them together.
-
-Polygons are handled separately for each instance,
-by PolygonInstance and instances are handled by
-PolygonList.
-
-SegmentationList is supposed to represent both,
-therefore it wraps the functions of BinaryMaskList
-and PolygonList to make it transparent.
-"""
-
-
-class BinaryMaskList(object):
-    """
-    This class handles binary masks for all objects in the image
-    """
-
-    def __init__(self, masks, size):
-        """
-        Arguments:
-            masks: Either torch.tensor of [num_instances, H, W]
-                or list of torch.tensors of [H, W] with num_instances elems,
-                or RLE (Run Length Encoding) - interpreted as list of dicts,
-                or BinaryMaskList.
-            size: absolute image size, width first
-
-        After initialization, a hard copy will be made, to leave the
-        initializing source data intact.
-        """
-
-        if isinstance(masks, torch.Tensor):
-            # The raw data representation is passed as argument
-            masks = masks.clone()
-        elif isinstance(masks, (list, tuple)):
-            if isinstance(masks[0], torch.Tensor):
-                masks = torch.stack(masks, dim=2).clone()
-            elif isinstance(masks[0], dict) and "count" in masks[0]:
-                # RLE interpretation
-
-                masks = mask_utils
-            else:
-                RuntimeError(
-                    "Type of `masks[0]` could not be interpreted: %s" % type(masks)
-                )
-        elif isinstance(masks, BinaryMaskList):
-            # just hard copy the BinaryMaskList instance's underlying data
-            masks = masks.masks.clone()
-        else:
-            RuntimeError(
-                "Type of `masks` argument could not be interpreted:%s" % type(masks)
-            )
-
-        if len(masks.shape) == 2:
-            # if only a single instance mask is passed
-            masks = masks[None]
-
-        assert len(masks.shape) == 3
-        assert masks.shape[1] == size[1], "%s != %s" % (masks.shape[1], size[1])
-        assert masks.shape[2] == size[0], "%s != %s" % (masks.shape[2], size[0])
-
-        self.masks = masks
-        self.size = tuple(size)
-
-    def transpose(self, method):
-        dim = 1 if method == FLIP_TOP_BOTTOM else 2
-        flipped_masks = self.masks.flip(dim)
-        return BinaryMaskList(flipped_masks, self.size)
-
-    def crop(self, box):
-        assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
-        # box is assumed to be xyxy
-        current_width, current_height = self.size
-        xmin, ymin, xmax, ymax = [round(float(b)) for b in box]
-
-        assert xmin <= xmax and ymin <= ymax, str(box)
-        xmin = min(max(xmin, 0), current_width - 1)
-        ymin = min(max(ymin, 0), current_height - 1)
-
-        xmax = min(max(xmax, 0), current_width)
-        ymax = min(max(ymax, 0), current_height)
-
-        xmax = max(xmax, xmin + 1)
-        ymax = max(ymax, ymin + 1)
-
-        width, height = xmax - xmin, ymax - ymin
-        cropped_masks = self.masks[:, ymin:ymax, xmin:xmax]
-        cropped_size = width, height
-        return BinaryMaskList(cropped_masks, cropped_size)
-
-    def resize(self, size):
-        try:
-            iter(size)
-        except TypeError:
-            assert isinstance(size, (int, float))
-            size = size, size
-        width, height = map(int, size)
-
-        assert width > 0
-        assert height > 0
-
-        # Height comes first here!
-        resized_masks = torch.nn.functional.interpolate(
-            input=self.masks[None].float(),
-            size=(height, width),
-            mode="bilinear",
-            align_corners=False,
-        )[0].type_as(self.masks)
-        resized_size = width, height
-        return BinaryMaskList(resized_masks, resized_size)
-
-    def convert_to_polygon(self):
-        contours = self._findContours()
-        return PolygonList(contours, self.size)
-
-    def to(self, *args, **kwargs):
-        return self
-
-    def _findContours(self):
-        contours = []
-        masks = self.masks.detach().numpy()
-        for mask in masks:
-            mask = cv2.UMat(mask)
-            contour, hierarchy = cv2.findContours(
-                mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
-            )
-
-            reshaped_contour = []
-            for entity in contour:
-                assert len(entity.shape) == 3
-                assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
-                reshaped_contour.append(entity.reshape(-1).tolist())
-            contours.append(reshaped_contour)
-        return contours
-
-    def __len__(self):
-        return len(self.masks)
-
-    def __getitem__(self, index):
-        # Probably it can cause some overhead
-        # but preserves consistency
-        masks = self.masks[index].clone()
-        return BinaryMaskList(masks, self.size)
-
-    def __iter__(self):
-        return iter(self.masks)
-
-    def __repr__(self):
-        s = self.__class__.__name__ + "("
-        s += "num_instances={}, ".format(len(self.masks))
-        s += "image_width={}, ".format(self.size[0])
-        s += "image_height={})".format(self.size[1])
-        return s
-
-
-class PolygonInstance(object):
-    """
-    This class holds a set of polygons that represents a single instance
-    of an object mask. The object can be represented as a set of
-    polygons
-    """
-
-    def __init__(self, polygons, size):
-        """
-        Arguments:
-            a list of lists of numbers.
-            The first level refers to all the polygons that compose the
-            object, and the second level to the polygon coordinates.
-        """
-        if isinstance(polygons, (list, tuple)):
-            valid_polygons = []
-            for p in polygons:
-                p = torch.as_tensor(p, dtype=torch.float32)
-                if len(p) >= 6:  # 3 * 2 coordinates
-                    valid_polygons.append(p)
-            polygons = valid_polygons
-
-        elif isinstance(polygons, PolygonInstance):
-            polygons = copy.copy(polygons.polygons)
-        else:
-            RuntimeError(
-                "Type of argument `polygons` is not allowed:%s" % (type(polygons))
-            )
-
-        """ This crashes the training way too many times...
-        for p in polygons:
-            assert p[::2].min() >= 0
-            assert p[::2].max() < size[0]
-            assert p[1::2].min() >= 0
-            assert p[1::2].max() , size[1]
-        """
-
-        self.polygons = polygons
-        self.size = tuple(size)
-
-    def transpose(self, method):
-        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
-            raise NotImplementedError(
-                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
-            )
-
-        flipped_polygons = []
-        width, height = self.size
-        if method == FLIP_LEFT_RIGHT:
-            dim = width
-            idx = 0
-        elif method == FLIP_TOP_BOTTOM:
-            dim = height
-            idx = 1
-
-        for poly in self.polygons:
-            p = poly.clone()
-            TO_REMOVE = 1
-            p[idx::2] = dim - poly[idx::2] - TO_REMOVE
-            flipped_polygons.append(p)
-
-        return PolygonInstance(flipped_polygons, size=self.size)
-
-    def crop(self, box):
-        assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
-
-        # box is assumed to be xyxy
-        current_width, current_height = self.size
-        xmin, ymin, xmax, ymax = map(float, box)
-
-        assert xmin <= xmax and ymin <= ymax, str(box)
-        xmin = min(max(xmin, 0), current_width - 1)
-        ymin = min(max(ymin, 0), current_height - 1)
-
-        xmax = min(max(xmax, 0), current_width)
-        ymax = min(max(ymax, 0), current_height)
-
-        xmax = max(xmax, xmin + 1)
-        ymax = max(ymax, ymin + 1)
-
-        w, h = xmax - xmin, ymax - ymin
-
-        cropped_polygons = []
-        for poly in self.polygons:
-            p = poly.clone()
-            p[0::2] = p[0::2] - xmin  # .clamp(min=0, max=w)
-            p[1::2] = p[1::2] - ymin  # .clamp(min=0, max=h)
-            cropped_polygons.append(p)
-
-        return PolygonInstance(cropped_polygons, size=(w, h))
-
-    def resize(self, size):
-        try:
-            iter(size)
-        except TypeError:
-            assert isinstance(size, (int, float))
-            size = size, size
-
-        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
-
-        if ratios[0] == ratios[1]:
-            ratio = ratios[0]
-            scaled_polys = [p * ratio for p in self.polygons]
-            return PolygonInstance(scaled_polys, size)
-
-        ratio_w, ratio_h = ratios
-        scaled_polygons = []
-        for poly in self.polygons:
-            p = poly.clone()
-            p[0::2] *= ratio_w
-            p[1::2] *= ratio_h
-            scaled_polygons.append(p)
-
-        return PolygonInstance(scaled_polygons, size=size)
-
-    def convert_to_binarymask(self):
-        width, height = self.size
-        # formatting for COCO PythonAPI
-        polygons = [p.numpy() for p in self.polygons]
-        rles = mask_utils.frPyObjects(polygons, height, width)
-        rle = mask_utils.merge(rles)
-        mask = mask_utils.decode(rle)
-        mask = torch.from_numpy(mask)
-        return mask
-
-    def __len__(self):
-        return len(self.polygons)
-
-    def __repr__(self):
-        s = self.__class__.__name__ + "("
-        s += "num_groups={}, ".format(len(self.polygons))
-        s += "image_width={}, ".format(self.size[0])
-        s += "image_height={}, ".format(self.size[1])
-        return s
-
-
-class PolygonList(object):
-    """
-    This class handles PolygonInstances for all objects in the image
-    """
-
-    def __init__(self, polygons, size):
-        """
-        Arguments:
-            polygons:
-                a list of list of lists of numbers. The first
-                level of the list correspond to individual instances,
-                the second level to all the polygons that compose the
-                object, and the third level to the polygon coordinates.
-
-            OR
-
-            a list of PolygonInstances.
-
-            OR
-
-            a PolygonList
-
-            size: absolute image size
-
-        """
-        if isinstance(polygons, (list, tuple)):
-            if len(polygons) == 0:
-                polygons = [[[]]]
-            if isinstance(polygons[0], (list, tuple)):
-                assert isinstance(polygons[0][0], (list, tuple)), str(
-                    type(polygons[0][0])
-                )
-            else:
-                assert isinstance(polygons[0], PolygonInstance), str(type(polygons[0]))
-
-        elif isinstance(polygons, PolygonList):
-            size = polygons.size
-            polygons = polygons.polygons
-
-        else:
-            RuntimeError(
-                "Type of argument `polygons` is not allowed:%s" % (type(polygons))
-            )
-
-        assert isinstance(size, (list, tuple)), str(type(size))
-
-        self.polygons = []
-        for p in polygons:
-            p = PolygonInstance(p, size)
-            if len(p) > 0:
-                self.polygons.append(p)
-
-        self.size = tuple(size)
-
-    def transpose(self, method):
-        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
-            raise NotImplementedError(
-                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
-            )
-
-        flipped_polygons = []
-        for polygon in self.polygons:
-            flipped_polygons.append(polygon.transpose(method))
-
-        return PolygonList(flipped_polygons, size=self.size)
-
-    def crop(self, box):
-        w, h = box[2] - box[0], box[3] - box[1]
-        cropped_polygons = []
-        for polygon in self.polygons:
-            cropped_polygons.append(polygon.crop(box))
-
-        cropped_size = w, h
-        return PolygonList(cropped_polygons, cropped_size)
-
-    def resize(self, size):
-        resized_polygons = []
-        for polygon in self.polygons:
-            resized_polygons.append(polygon.resize(size))
-
-        resized_size = size
-        return PolygonList(resized_polygons, resized_size)
-
-    def to(self, *args, **kwargs):
-        return self
-
-    def convert_to_binarymask(self):
-        if len(self) > 0:
-            masks = torch.stack([p.convert_to_binarymask() for p in self.polygons])
-        else:
-            size = self.size
-            masks = torch.empty([0, size[1], size[0]], dtype=torch.uint8)
-
-        return BinaryMaskList(masks, size=self.size)
-
-    def __len__(self):
-        return len(self.polygons)
-
-    def __getitem__(self, item):
-        if isinstance(item, int):
-            selected_polygons = [self.polygons[item]]
-        elif isinstance(item, slice):
-            selected_polygons = self.polygons[item]
-        else:
-            # advanced indexing on a single dimension
-            selected_polygons = []
-            if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
-                item = item.nonzero()
-                item = item.squeeze(1) if item.numel() > 0 else item
-                item = item.tolist()
-            for i in item:
-                selected_polygons.append(self.polygons[i])
-        return PolygonList(selected_polygons, size=self.size)
-
-    def __iter__(self):
-        return iter(self.polygons)
-
-    def __repr__(self):
-        s = self.__class__.__name__ + "("
-        s += "num_instances={}, ".format(len(self.polygons))
-        s += "image_width={}, ".format(self.size[0])
-        s += "image_height={})".format(self.size[1])
-        return s
-
-
-class SegmentationMask(object):
-
-    """
-    This class stores the segmentations for all objects in the image.
-    It wraps BinaryMaskList and PolygonList conveniently.
-    """
-
-    def __init__(self, instances, size, mode="poly"):
-        """
-        Arguments:
-            instances: two types
-                (1) polygon
-                (2) binary mask
-            size: (width, height)
-            mode: 'poly', 'mask'. if mode is 'mask', convert mask of any format to binary mask
-        """
-
-        assert isinstance(size, (list, tuple))
-        assert len(size) == 2
-        if isinstance(size[0], torch.Tensor):
-            assert isinstance(size[1], torch.Tensor)
-            size = size[0].item(), size[1].item()
-
-        assert isinstance(size[0], (int, float))
-        assert isinstance(size[1], (int, float))
-
-        if mode == "poly":
-            self.instances = PolygonList(instances, size)
-        elif mode == "mask":
-            self.instances = BinaryMaskList(instances, size)
-        else:
-            raise NotImplementedError("Unknown mode: %s" % str(mode))
-
-        self.mode = mode
-        self.size = tuple(size)
-
-    def transpose(self, method):
-        flipped_instances = self.instances.transpose(method)
-        return SegmentationMask(flipped_instances, self.size, self.mode)
-
-    def crop(self, box):
-        cropped_instances = self.instances.crop(box)
-        cropped_size = cropped_instances.size
-        return SegmentationMask(cropped_instances, cropped_size, self.mode)
-
-    def resize(self, size, *args, **kwargs):
-        resized_instances = self.instances.resize(size)
-        resized_size = size
-        return SegmentationMask(resized_instances, resized_size, self.mode)
-
-    def to(self, *args, **kwargs):
-        return self
-
-    def convert(self, mode):
-        if mode == self.mode:
-            return self
-
-        if mode == "poly":
-            converted_instances = self.instances.convert_to_polygon()
-        elif mode == "mask":
-            converted_instances = self.instances.convert_to_binarymask()
-        else:
-            raise NotImplementedError("Unknown mode: %s" % str(mode))
-
-        return SegmentationMask(converted_instances, self.size, mode)
-
-    def get_mask_tensor(self):
-        instances = self.instances
-        if self.mode == "poly":
-            instances = instances.convert_to_binarymask()
-        # If there is only 1 instance
-        return instances.masks.squeeze(0)
-
-    def __len__(self):
-        return len(self.instances)
-
-    def __getitem__(self, item):
-        selected_instances = self.instances.__getitem__(item)
-        return SegmentationMask(selected_instances, self.size, self.mode)
|
515 |
-
|
516 |
-
def __iter__(self):
|
517 |
-
self.iter_idx = 0
|
518 |
-
return self
|
519 |
-
|
520 |
-
def __next__(self):
|
521 |
-
if self.iter_idx < self.__len__():
|
522 |
-
next_segmentation = self.__getitem__(self.iter_idx)
|
523 |
-
self.iter_idx += 1
|
524 |
-
return next_segmentation
|
525 |
-
raise StopIteration()
|
526 |
-
|
527 |
-
next = __next__ # Python 2 compatibility
|
528 |
-
|
529 |
-
def __repr__(self):
|
530 |
-
s = self.__class__.__name__ + "("
|
531 |
-
s += "num_instances={}, ".format(len(self.instances))
|
532 |
-
s += "image_width={}, ".format(self.size[0])
|
533 |
-
s += "image_height={}, ".format(self.size[1])
|
534 |
-
s += "mode={})".format(self.mode)
|
535 |
-
return s
|
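
A minimal usage sketch for the wrapper above. It assumes the PolygonInstance and BinaryMaskList halves of this module (defined earlier in the file) and their pycocotools dependency are available; the polygon coordinates are made up for illustration:

import torch

# one instance, one polygon: x0, y0, x1, y1, ... in absolute pixels
polygons = [[[10.0, 10.0, 60.0, 10.0, 60.0, 40.0, 10.0, 40.0]]]
masks = SegmentationMask(polygons, size=(100, 80), mode="poly")  # size is (width, height)

masks = masks.resize((50, 40))      # geometry ops return new SegmentationMask objects
masks = masks.crop([0, 0, 30, 30])  # box is [x0, y0, x1, y1]
binary = masks.convert("mask")      # PolygonList -> BinaryMaskList
print(len(binary), binary.get_mask_tensor().shape)  # 1 instance, a 30x30 mask tensor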
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/background.py
DELETED
@@ -1 +0,0 @@
from starlette.background import BackgroundTasks as BackgroundTasks  # noqa
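
For context, FastAPI simply re-exports Starlette's BackgroundTasks here. A minimal sketch of how an endpoint typically uses it (the write_log helper and the /items route are hypothetical):

from fastapi import BackgroundTasks, FastAPI

app = FastAPI()

def write_log(message: str) -> None:
    # hypothetical helper: executed after the response has been sent
    with open("log.txt", "a") as f:
        f.write(message + "\n")

@app.post("/items")
def create_item(background_tasks: BackgroundTasks):
    background_tasks.add_task(write_log, "item created")
    return {"status": "queued"}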
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I_C_.py
DELETED
@@ -1,5 +0,0 @@
from .otBase import BaseTTXConverter


class table_T_S_I_C_(BaseTTXConverter):
    pass
spaces/DaleChen/AutoGPT/autogpt/__main__.py
DELETED
@@ -1,5 +0,0 @@
"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli

if __name__ == "__main__":
    autogpt.cli.main()
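
Because the entry point lives in __main__.py, the package can be started with python -m autogpt, which triggers the autogpt.cli.main() call above.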
spaces/DanteOz/Minimal-Endpoint/app.py
DELETED
@@ -1,14 +0,0 @@
from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "<p>Hello, World!</p>"

@app.route("/predict")
def predict():
    return {"output": "prediction"}

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
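
A minimal client sketch, assuming the app above is running locally on port 7860 (Flask serializes the dict returned by /predict to JSON automatically):

import requests  # third-party HTTP client

print(requests.get("http://localhost:7860/").text)            # the HTML greeting
print(requests.get("http://localhost:7860/predict").json())   # {'output': 'prediction'}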
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/boundary_loss.py
DELETED
@@ -1,51 +0,0 @@
"""
@Date: 2021/08/12
@description: For HorizonNet, using latitudes to calculate loss.
"""
import torch
import torch.nn as nn
from utils.conversion import depth2xyz, xyz2lonlat


class BoundaryLoss(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.L1Loss()

    def forward(self, gt, dt):
        gt_floor_xyz = depth2xyz(gt['depth'])
        gt_ceil_xyz = gt_floor_xyz.clone()
        gt_ceil_xyz[..., 1] = -gt['ratio']

        gt_floor_boundary = xyz2lonlat(gt_floor_xyz)[..., -1:]
        gt_ceil_boundary = xyz2lonlat(gt_ceil_xyz)[..., -1:]

        gt_boundary = torch.cat([gt_floor_boundary, gt_ceil_boundary], dim=-1).permute(0, 2, 1)
        dt_boundary = dt['boundary']

        loss = self.loss(gt_boundary, dt_boundary)
        return loss


if __name__ == '__main__':
    import numpy as np
    from dataset.mp3d_dataset import MP3DDataset

    mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train')
    gt = mp3d_dataset.__getitem__(0)

    gt['depth'] = torch.from_numpy(gt['depth'][np.newaxis])  # batch size is 1
    gt['ratio'] = torch.from_numpy(gt['ratio'][np.newaxis])  # batch size is 1

    dummy_dt = {
        'depth': gt['depth'].clone(),
        'boundary': torch.cat([
            xyz2lonlat(depth2xyz(gt['depth']))[..., -1:],
            xyz2lonlat(depth2xyz(gt['depth'], plan_y=-gt['ratio']))[..., -1:]
        ], dim=-1).permute(0, 2, 1)
    }
    # dummy_dt['boundary'][:, :, :20] /= 1.2  # make some columns differ from the ground truth

    boundary_loss = BoundaryLoss()
    loss = boundary_loss(gt, dummy_dt)
    print(loss)
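
Note that the __main__ demo builds dummy_dt['boundary'] from the ground truth itself, so the printed L1 loss should be (near) zero; uncommenting the /= 1.2 line perturbs the first 20 boundary columns and yields a non-zero loss, which makes for a quick sanity check of the loss wiring.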
spaces/DemoLou/moe-tts/text/shanghainese.py
DELETED
@@ -1,64 +0,0 @@
import re
import cn2an
import opencc


converter = opencc.OpenCC('chinese_dialect_lexicons/zaonhe')

# List of (Latin alphabet, ipa) pairs:
_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('A', 'ᴇ'),
    ('B', 'bi'),
    ('C', 'si'),
    ('D', 'di'),
    ('E', 'i'),
    ('F', 'ᴇf'),
    ('G', 'dʑi'),
    ('H', 'ᴇtɕʰ'),
    ('I', 'ᴀi'),
    ('J', 'dʑᴇ'),
    ('K', 'kʰᴇ'),
    ('L', 'ᴇl'),
    ('M', 'ᴇm'),
    ('N', 'ᴇn'),
    ('O', 'o'),
    ('P', 'pʰi'),
    ('Q', 'kʰiu'),
    ('R', 'ᴀl'),
    ('S', 'ᴇs'),
    ('T', 'tʰi'),
    ('U', 'ɦiu'),
    ('V', 'vi'),
    ('W', 'dᴀbɤliu'),
    ('X', 'ᴇks'),
    ('Y', 'uᴀi'),
    ('Z', 'zᴇ')
]]


def _number_to_shanghainese(num):
    num = cn2an.an2cn(num).replace('一十', '十').replace('二十', '廿').replace('二', '两')
    return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num)


def number_to_shanghainese(text):
    return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text)


def latin_to_ipa(text):
    for regex, replacement in _latin_to_ipa:
        text = re.sub(regex, replacement, text)
    return text


def shanghainese_to_ipa(text):
    text = number_to_shanghainese(text.upper())
    text = converter.convert(text).replace('-', '').replace('$', ' ')
    text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text)
    text = re.sub(r'[、;:]', ',', text)
    text = re.sub(r'\s*,\s*', ', ', text)
    text = re.sub(r'\s*。\s*', '. ', text)
    text = re.sub(r'\s*?\s*', '? ', text)
    text = re.sub(r'\s*!\s*', '! ', text)
    text = re.sub(r'\s*$', '', text)
    return text
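
A minimal usage sketch, assuming the chinese_dialect_lexicons/zaonhe OpenCC config shipped with the Space is on the working path (the exact IPA output depends on that lexicon and on cn2an's number rendering):

# digits are first rewritten as Shanghainese-style Chinese numerals,
# e.g. '22' -> '二十二' -> '廿二' after the replace/re.sub chain above
print(number_to_shanghainese('22点'))   # expected: '廿二点'
print(shanghainese_to_ipa('侬好!'))    # prints the IPA rendering of the sentence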
spaces/DiamondYin/AnewGame/index.html
DELETED
@@ -1,122 +0,0 @@
<!DOCTYPE html>
<html lang="en-us">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>Unity WebGL Player | New Unity Project</title>
    <link rel="shortcut icon" href="TemplateData/favicon.ico">
    <link rel="stylesheet" href="TemplateData/style.css">
  </head>
  <body>
    <div id="unity-container" class="unity-desktop">
      <canvas id="unity-canvas" width=960 height=600 tabindex="-1"></canvas>
      <div id="unity-loading-bar">
        <div id="unity-logo"></div>
        <div id="unity-progress-bar-empty">
          <div id="unity-progress-bar-full"></div>
        </div>
      </div>
      <div id="unity-warning"> </div>
      <div id="unity-footer">
        <div id="unity-webgl-logo"></div>
        <div id="unity-fullscreen-button"></div>
        <div id="unity-build-title">New Unity Project</div>
      </div>
    </div>
    <script>

      var container = document.querySelector("#unity-container");
      var canvas = document.querySelector("#unity-canvas");
      var loadingBar = document.querySelector("#unity-loading-bar");
      var progressBarFull = document.querySelector("#unity-progress-bar-full");
      var fullscreenButton = document.querySelector("#unity-fullscreen-button");
      var warningBanner = document.querySelector("#unity-warning");

      // Shows a temporary message banner/ribbon for a few seconds, or
      // a permanent error message on top of the canvas if type=='error'.
      // If type=='warning', a yellow highlight color is used.
      // Modify or remove this function to customize the visually presented
      // way that non-critical warnings and error messages are presented to the
      // user.
      function unityShowBanner(msg, type) {
        function updateBannerVisibility() {
          warningBanner.style.display = warningBanner.children.length ? 'block' : 'none';
        }
        var div = document.createElement('div');
        div.innerHTML = msg;
        warningBanner.appendChild(div);
        if (type == 'error') div.style = 'background: red; padding: 10px;';
        else {
          if (type == 'warning') div.style = 'background: yellow; padding: 10px;';
          setTimeout(function() {
            warningBanner.removeChild(div);
            updateBannerVisibility();
          }, 5000);
        }
        updateBannerVisibility();
      }

      var buildUrl = "Build";
      var loaderUrl = buildUrl + "/WaliwebGLgameFPS.loader.js";
      var config = {
        dataUrl: buildUrl + "/WaliwebGLgameFPS.data",
        frameworkUrl: buildUrl + "/WaliwebGLgameFPS.framework.js",
        codeUrl: buildUrl + "/WaliwebGLgameFPS.wasm",
        streamingAssetsUrl: "StreamingAssets",
        companyName: "DefaultCompany",
        productName: "New Unity Project",
        productVersion: "0.1",
        showBanner: unityShowBanner,
      };

      // By default Unity keeps WebGL canvas render target size matched with
      // the DOM size of the canvas element (scaled by window.devicePixelRatio)
      // Set this to false if you want to decouple this synchronization from
      // happening inside the engine, and you would instead like to size up
      // the canvas DOM size and WebGL render target sizes yourself.
      // config.matchWebGLToCanvasSize = false;

      if (/iPhone|iPad|iPod|Android/i.test(navigator.userAgent)) {
        // Mobile device style: fill the whole browser client area with the game canvas:

        var meta = document.createElement('meta');
        meta.name = 'viewport';
        meta.content = 'width=device-width, height=device-height, initial-scale=1.0, user-scalable=no, shrink-to-fit=yes';
        document.getElementsByTagName('head')[0].appendChild(meta);
        container.className = "unity-mobile";
        canvas.className = "unity-mobile";

        // To lower canvas resolution on mobile devices to gain some
        // performance, uncomment the following line:
        // config.devicePixelRatio = 1;


      } else {
        // Desktop style: Render the game canvas in a window that can be maximized to fullscreen:

        canvas.style.width = "960px";
        canvas.style.height = "600px";
      }

      loadingBar.style.display = "block";

      var script = document.createElement("script");
      script.src = loaderUrl;
      script.onload = () => {
        createUnityInstance(canvas, config, (progress) => {
          progressBarFull.style.width = 100 * progress + "%";
        }).then((unityInstance) => {
          loadingBar.style.display = "none";
          fullscreenButton.onclick = () => {
            unityInstance.SetFullscreen(1);
          };
        }).catch((message) => {
          alert(message);
        });
      };

      document.body.appendChild(script);

    </script>
  </body>
</html>
spaces/DragGan/DragGan-Inversion/stylegan_human/training_scripts/sg3/training/networks_stylegan2.py
DELETED
@@ -1,1007 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
4 |
-
#
|
5 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
6 |
-
# and proprietary rights in and to this software, related documentation
|
7 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
8 |
-
# distribution of this software and related documentation without an express
|
9 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
10 |
-
|
11 |
-
"""Network architectures from the paper
|
12 |
-
"Analyzing and Improving the Image Quality of StyleGAN".
|
13 |
-
Matches the original implementation of configs E-F by Karras et al. at
|
14 |
-
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
|
15 |
-
|
16 |
-
import numpy as np
|
17 |
-
import torch
|
18 |
-
from torch_utils import misc
|
19 |
-
from torch_utils import persistence
|
20 |
-
from torch_utils.ops import conv2d_resample
|
21 |
-
from torch_utils.ops import upfirdn2d
|
22 |
-
from torch_utils.ops import bias_act
|
23 |
-
from torch_utils.ops import fma
|
24 |
-
|
25 |
-
# ----------------------------------------------------------------------------
|
26 |
-
|
27 |
-
|
28 |
-
@misc.profiled_function
|
29 |
-
def normalize_2nd_moment(x, dim=1, eps=1e-8):
|
30 |
-
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
|
31 |
-
|
32 |
-
# ----------------------------------------------------------------------------
|
33 |
-
|
34 |
-
|
35 |
-
@misc.profiled_function
|
36 |
-
def modulated_conv2d(
|
37 |
-
# Input tensor of shape [batch_size, in_channels, in_height, in_width].
|
38 |
-
x,
|
39 |
-
# Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
|
40 |
-
weight,
|
41 |
-
# Modulation coefficients of shape [batch_size, in_channels].
|
42 |
-
styles,
|
43 |
-
noise=None, # Optional noise tensor to add to the output activations.
|
44 |
-
up=1, # Integer upsampling factor.
|
45 |
-
down=1, # Integer downsampling factor.
|
46 |
-
padding=0, # Padding with respect to the upsampled image.
|
47 |
-
# Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
|
48 |
-
resample_filter=None,
|
49 |
-
demodulate=True, # Apply weight demodulation?
|
50 |
-
# False = convolution, True = correlation (matches torch.nn.functional.conv2d).
|
51 |
-
flip_weight=True,
|
52 |
-
# Perform modulation, convolution, and demodulation as a single fused operation?
|
53 |
-
fused_modconv=True,
|
54 |
-
):
|
55 |
-
batch_size = x.shape[0]
|
56 |
-
out_channels, in_channels, kh, kw = weight.shape
|
57 |
-
misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
|
58 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
|
59 |
-
misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
|
60 |
-
|
61 |
-
# Pre-normalize inputs to avoid FP16 overflow.
|
62 |
-
if x.dtype == torch.float16 and demodulate:
|
63 |
-
weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
|
64 |
-
weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk
|
65 |
-
styles = styles / \
|
66 |
-
styles.norm(float('inf'), dim=1, keepdim=True) # max_I
|
67 |
-
|
68 |
-
# Calculate per-sample weights and demodulation coefficients.
|
69 |
-
w = None
|
70 |
-
dcoefs = None
|
71 |
-
if demodulate or fused_modconv:
|
72 |
-
w = weight.unsqueeze(0) # [NOIkk]
|
73 |
-
w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
|
74 |
-
if demodulate:
|
75 |
-
dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
|
76 |
-
if demodulate and fused_modconv:
|
77 |
-
w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
|
78 |
-
|
79 |
-
# Execute by scaling the activations before and after the convolution.
|
80 |
-
if not fused_modconv:
|
81 |
-
x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
|
82 |
-
x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
|
83 |
-
x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
|
84 |
-
if demodulate and noise is not None:
|
85 |
-
x = fma.fma(x, dcoefs.to(x.dtype).reshape(
|
86 |
-
batch_size, -1, 1, 1), noise.to(x.dtype))
|
87 |
-
elif demodulate:
|
88 |
-
x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
|
89 |
-
elif noise is not None:
|
90 |
-
x = x.add_(noise.to(x.dtype))
|
91 |
-
return x
|
92 |
-
|
93 |
-
# Execute as one fused op using grouped convolution.
|
94 |
-
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
|
95 |
-
batch_size = int(batch_size)
|
96 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None])
|
97 |
-
x = x.reshape(1, -1, *x.shape[2:])
|
98 |
-
w = w.reshape(-1, in_channels, kh, kw)
|
99 |
-
x = conv2d_resample.conv2d_resample(x=x, w=w.to(
|
100 |
-
x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
|
101 |
-
x = x.reshape(batch_size, -1, *x.shape[2:])
|
102 |
-
if noise is not None:
|
103 |
-
x = x.add_(noise)
|
104 |
-
return x
|
105 |
-
|
106 |
-
# ----------------------------------------------------------------------------
|
107 |
-
|
108 |
-
|
109 |
-
@persistence.persistent_class
|
110 |
-
class FullyConnectedLayer(torch.nn.Module):
|
111 |
-
def __init__(self,
|
112 |
-
in_features, # Number of input features.
|
113 |
-
out_features, # Number of output features.
|
114 |
-
bias=True, # Apply additive bias before the activation function?
|
115 |
-
# Activation function: 'relu', 'lrelu', etc.
|
116 |
-
activation='linear',
|
117 |
-
lr_multiplier=1, # Learning rate multiplier.
|
118 |
-
bias_init=0, # Initial value for the additive bias.
|
119 |
-
):
|
120 |
-
super().__init__()
|
121 |
-
self.in_features = in_features
|
122 |
-
self.out_features = out_features
|
123 |
-
self.activation = activation
|
124 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
125 |
-
[out_features, in_features]) / lr_multiplier)
|
126 |
-
self.bias = torch.nn.Parameter(torch.full(
|
127 |
-
[out_features], np.float32(bias_init))) if bias else None
|
128 |
-
self.weight_gain = lr_multiplier / np.sqrt(in_features)
|
129 |
-
self.bias_gain = lr_multiplier
|
130 |
-
|
131 |
-
def forward(self, x):
|
132 |
-
w = self.weight.to(x.dtype) * self.weight_gain
|
133 |
-
b = self.bias
|
134 |
-
if b is not None:
|
135 |
-
b = b.to(x.dtype)
|
136 |
-
if self.bias_gain != 1:
|
137 |
-
b = b * self.bias_gain
|
138 |
-
|
139 |
-
if self.activation == 'linear' and b is not None:
|
140 |
-
x = torch.addmm(b.unsqueeze(0), x, w.t())
|
141 |
-
else:
|
142 |
-
x = x.matmul(w.t())
|
143 |
-
x = bias_act.bias_act(x, b, act=self.activation)
|
144 |
-
return x
|
145 |
-
|
146 |
-
def extra_repr(self):
|
147 |
-
return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
|
148 |
-
|
149 |
-
# ----------------------------------------------------------------------------
|
150 |
-
|
151 |
-
|
152 |
-
@persistence.persistent_class
|
153 |
-
class Conv2dLayer(torch.nn.Module):
|
154 |
-
def __init__(self,
|
155 |
-
in_channels, # Number of input channels.
|
156 |
-
out_channels, # Number of output channels.
|
157 |
-
# Width and height of the convolution kernel.
|
158 |
-
kernel_size,
|
159 |
-
bias=True, # Apply additive bias before the activation function?
|
160 |
-
# Activation function: 'relu', 'lrelu', etc.
|
161 |
-
activation='linear',
|
162 |
-
up=1, # Integer upsampling factor.
|
163 |
-
down=1, # Integer downsampling factor.
|
164 |
-
# Low-pass filter to apply when resampling activations.
|
165 |
-
resample_filter=[1, 3, 3, 1],
|
166 |
-
# Clamp the output to +-X, None = disable clamping.
|
167 |
-
conv_clamp=None,
|
168 |
-
channels_last=False, # Expect the input to have memory_format=channels_last?
|
169 |
-
trainable=True, # Update the weights of this layer during training?
|
170 |
-
):
|
171 |
-
super().__init__()
|
172 |
-
self.in_channels = in_channels
|
173 |
-
self.out_channels = out_channels
|
174 |
-
self.activation = activation
|
175 |
-
self.up = up
|
176 |
-
self.down = down
|
177 |
-
self.conv_clamp = conv_clamp
|
178 |
-
self.register_buffer(
|
179 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
180 |
-
self.padding = kernel_size // 2
|
181 |
-
self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
|
182 |
-
self.act_gain = bias_act.activation_funcs[activation].def_gain
|
183 |
-
|
184 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
185 |
-
weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
|
186 |
-
memory_format=memory_format)
|
187 |
-
bias = torch.zeros([out_channels]) if bias else None
|
188 |
-
if trainable:
|
189 |
-
self.weight = torch.nn.Parameter(weight)
|
190 |
-
self.bias = torch.nn.Parameter(bias) if bias is not None else None
|
191 |
-
else:
|
192 |
-
self.register_buffer('weight', weight)
|
193 |
-
if bias is not None:
|
194 |
-
self.register_buffer('bias', bias)
|
195 |
-
else:
|
196 |
-
self.bias = None
|
197 |
-
|
198 |
-
def forward(self, x, gain=1):
|
199 |
-
w = self.weight * self.weight_gain
|
200 |
-
b = self.bias.to(x.dtype) if self.bias is not None else None
|
201 |
-
flip_weight = (self.up == 1) # slightly faster
|
202 |
-
x = conv2d_resample.conv2d_resample(x=x, w=w.to(
|
203 |
-
x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
|
204 |
-
|
205 |
-
act_gain = self.act_gain * gain
|
206 |
-
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
|
207 |
-
x = bias_act.bias_act(x, b, act=self.activation,
|
208 |
-
gain=act_gain, clamp=act_clamp)
|
209 |
-
return x
|
210 |
-
|
211 |
-
def extra_repr(self):
|
212 |
-
return ' '.join([
|
213 |
-
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
|
214 |
-
f'up={self.up}, down={self.down}'])
|
215 |
-
|
216 |
-
# ----------------------------------------------------------------------------
|
217 |
-
|
218 |
-
|
219 |
-
@persistence.persistent_class
|
220 |
-
class MappingNetwork(torch.nn.Module):
|
221 |
-
def __init__(self,
|
222 |
-
# Input latent (Z) dimensionality, 0 = no latent.
|
223 |
-
z_dim,
|
224 |
-
# Conditioning label (C) dimensionality, 0 = no label.
|
225 |
-
c_dim,
|
226 |
-
# Intermediate latent (W) dimensionality.
|
227 |
-
w_dim,
|
228 |
-
# Number of intermediate latents to output, None = do not broadcast.
|
229 |
-
num_ws,
|
230 |
-
num_layers=8, # Number of mapping layers.
|
231 |
-
# Label embedding dimensionality, None = same as w_dim.
|
232 |
-
embed_features=None,
|
233 |
-
# Number of intermediate features in the mapping layers, None = same as w_dim.
|
234 |
-
layer_features=None,
|
235 |
-
# Activation function: 'relu', 'lrelu', etc.
|
236 |
-
activation='lrelu',
|
237 |
-
# Learning rate multiplier for the mapping layers.
|
238 |
-
lr_multiplier=0.01,
|
239 |
-
# Decay for tracking the moving average of W during training, None = do not track.
|
240 |
-
w_avg_beta=0.998,
|
241 |
-
):
|
242 |
-
super().__init__()
|
243 |
-
self.z_dim = z_dim
|
244 |
-
self.c_dim = c_dim
|
245 |
-
self.w_dim = w_dim
|
246 |
-
self.num_ws = num_ws
|
247 |
-
self.num_layers = num_layers
|
248 |
-
self.w_avg_beta = w_avg_beta
|
249 |
-
|
250 |
-
if embed_features is None:
|
251 |
-
embed_features = w_dim
|
252 |
-
if c_dim == 0:
|
253 |
-
embed_features = 0
|
254 |
-
if layer_features is None:
|
255 |
-
layer_features = w_dim
|
256 |
-
features_list = [z_dim + embed_features] + \
|
257 |
-
[layer_features] * (num_layers - 1) + [w_dim]
|
258 |
-
|
259 |
-
if c_dim > 0:
|
260 |
-
self.embed = FullyConnectedLayer(c_dim, embed_features)
|
261 |
-
for idx in range(num_layers):
|
262 |
-
in_features = features_list[idx]
|
263 |
-
out_features = features_list[idx + 1]
|
264 |
-
layer = FullyConnectedLayer(
|
265 |
-
in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
|
266 |
-
setattr(self, f'fc{idx}', layer)
|
267 |
-
|
268 |
-
if num_ws is not None and w_avg_beta is not None:
|
269 |
-
self.register_buffer('w_avg', torch.zeros([w_dim]))
|
270 |
-
|
271 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
|
272 |
-
# Embed, normalize, and concat inputs.
|
273 |
-
x = None
|
274 |
-
with torch.autograd.profiler.record_function('input'):
|
275 |
-
if self.z_dim > 0:
|
276 |
-
misc.assert_shape(z, [None, self.z_dim])
|
277 |
-
x = normalize_2nd_moment(z.to(torch.float32))
|
278 |
-
if self.c_dim > 0:
|
279 |
-
misc.assert_shape(c, [None, self.c_dim])
|
280 |
-
y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
|
281 |
-
x = torch.cat([x, y], dim=1) if x is not None else y
|
282 |
-
|
283 |
-
# Main layers.
|
284 |
-
for idx in range(self.num_layers):
|
285 |
-
layer = getattr(self, f'fc{idx}')
|
286 |
-
x = layer(x)
|
287 |
-
|
288 |
-
# Update moving average of W.
|
289 |
-
if update_emas and self.w_avg_beta is not None:
|
290 |
-
with torch.autograd.profiler.record_function('update_w_avg'):
|
291 |
-
self.w_avg.copy_(x.detach().mean(
|
292 |
-
dim=0).lerp(self.w_avg, self.w_avg_beta))
|
293 |
-
|
294 |
-
# Broadcast.
|
295 |
-
if self.num_ws is not None:
|
296 |
-
with torch.autograd.profiler.record_function('broadcast'):
|
297 |
-
x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
|
298 |
-
|
299 |
-
# Apply truncation.
|
300 |
-
if truncation_psi != 1:
|
301 |
-
with torch.autograd.profiler.record_function('truncate'):
|
302 |
-
assert self.w_avg_beta is not None
|
303 |
-
if self.num_ws is None or truncation_cutoff is None:
|
304 |
-
x = self.w_avg.lerp(x, truncation_psi)
|
305 |
-
else:
|
306 |
-
x[:, :truncation_cutoff] = self.w_avg.lerp(
|
307 |
-
x[:, :truncation_cutoff], truncation_psi)
|
308 |
-
return x
|
309 |
-
|
310 |
-
def extra_repr(self):
|
311 |
-
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
|
312 |
-
|
313 |
-
# ----------------------------------------------------------------------------
|
314 |
-
|
315 |
-
|
316 |
-
@persistence.persistent_class
|
317 |
-
class SynthesisLayer(torch.nn.Module):
|
318 |
-
def __init__(self,
|
319 |
-
in_channels, # Number of input channels.
|
320 |
-
out_channels, # Number of output channels.
|
321 |
-
# Intermediate latent (W) dimensionality.
|
322 |
-
w_dim,
|
323 |
-
resolution, # Resolution of this layer.
|
324 |
-
kernel_size=3, # Convolution kernel size.
|
325 |
-
up=1, # Integer upsampling factor.
|
326 |
-
use_noise=True, # Enable noise input?
|
327 |
-
# Activation function: 'relu', 'lrelu', etc.
|
328 |
-
activation='lrelu',
|
329 |
-
# Low-pass filter to apply when resampling activations.
|
330 |
-
resample_filter=[1, 3, 3, 1],
|
331 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
332 |
-
conv_clamp=None,
|
333 |
-
channels_last=False, # Use channels_last format for the weights?
|
334 |
-
square=False, # default if for rectangle images
|
335 |
-
):
|
336 |
-
super().__init__()
|
337 |
-
self.in_channels = in_channels
|
338 |
-
self.out_channels = out_channels
|
339 |
-
self.w_dim = w_dim
|
340 |
-
self.resolution = resolution
|
341 |
-
self.up = up
|
342 |
-
self.use_noise = use_noise
|
343 |
-
self.activation = activation
|
344 |
-
self.conv_clamp = conv_clamp
|
345 |
-
self.register_buffer(
|
346 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
347 |
-
self.padding = kernel_size // 2
|
348 |
-
self.act_gain = bias_act.activation_funcs[activation].def_gain
|
349 |
-
self.square = square
|
350 |
-
|
351 |
-
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
|
352 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
353 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
354 |
-
[out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
|
355 |
-
if use_noise:
|
356 |
-
if self.square:
|
357 |
-
self.register_buffer(
|
358 |
-
'noise_const', torch.randn([resolution, resolution]))
|
359 |
-
else:
|
360 |
-
self.register_buffer('noise_const', torch.randn(
|
361 |
-
[resolution, resolution // 2]))
|
362 |
-
self.noise_strength = torch.nn.Parameter(torch.zeros([]))
|
363 |
-
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
|
364 |
-
|
365 |
-
def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
|
366 |
-
assert noise_mode in ['random', 'const', 'none']
|
367 |
-
in_resolution = self.resolution // self.up
|
368 |
-
if self.square:
|
369 |
-
misc.assert_shape(
|
370 |
-
x, [None, self.weight.shape[1], in_resolution, in_resolution])
|
371 |
-
else:
|
372 |
-
misc.assert_shape(
|
373 |
-
x, [None, self.weight.shape[1], in_resolution, in_resolution // 2])
|
374 |
-
styles = self.affine(w)
|
375 |
-
|
376 |
-
noise = None
|
377 |
-
if self.use_noise and noise_mode == 'random':
|
378 |
-
if self.square:
|
379 |
-
noise = torch.randn(
|
380 |
-
[x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
|
381 |
-
else:
|
382 |
-
noise = torch.randn(
|
383 |
-
[x.shape[0], 1, self.resolution, self.resolution // 2], device=x.device) * self.noise_strength
|
384 |
-
if self.use_noise and noise_mode == 'const':
|
385 |
-
noise = self.noise_const * self.noise_strength
|
386 |
-
|
387 |
-
flip_weight = (self.up == 1) # slightly faster
|
388 |
-
x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
|
389 |
-
padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
|
390 |
-
|
391 |
-
act_gain = self.act_gain * gain
|
392 |
-
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
|
393 |
-
x = bias_act.bias_act(x, self.bias.to(
|
394 |
-
x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
|
395 |
-
return x
|
396 |
-
|
397 |
-
def extra_repr(self):
|
398 |
-
return ' '.join([
|
399 |
-
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
|
400 |
-
f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
|
401 |
-
|
402 |
-
# ----------------------------------------------------------------------------
|
403 |
-
|
404 |
-
|
405 |
-
@persistence.persistent_class
|
406 |
-
class ToRGBLayer(torch.nn.Module):
|
407 |
-
def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
|
408 |
-
super().__init__()
|
409 |
-
self.in_channels = in_channels
|
410 |
-
self.out_channels = out_channels
|
411 |
-
self.w_dim = w_dim
|
412 |
-
self.conv_clamp = conv_clamp
|
413 |
-
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
|
414 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
415 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
416 |
-
[out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
|
417 |
-
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
|
418 |
-
self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
|
419 |
-
|
420 |
-
def forward(self, x, w, fused_modconv=True):
|
421 |
-
styles = self.affine(w) * self.weight_gain
|
422 |
-
x = modulated_conv2d(x=x, weight=self.weight, styles=styles,
|
423 |
-
demodulate=False, fused_modconv=fused_modconv)
|
424 |
-
x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
|
425 |
-
return x
|
426 |
-
|
427 |
-
def extra_repr(self):
|
428 |
-
return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
|
429 |
-
|
430 |
-
# ----------------------------------------------------------------------------
|
431 |
-
|
432 |
-
|
433 |
-
@persistence.persistent_class
|
434 |
-
class SynthesisBlock(torch.nn.Module):
|
435 |
-
def __init__(self,
|
436 |
-
# Number of input channels, 0 = first block.
|
437 |
-
in_channels,
|
438 |
-
# Number of output channels.
|
439 |
-
out_channels,
|
440 |
-
# Intermediate latent (W) dimensionality.
|
441 |
-
w_dim,
|
442 |
-
# Resolution of this block.
|
443 |
-
resolution,
|
444 |
-
# Number of output color channels.
|
445 |
-
img_channels,
|
446 |
-
is_last, # Is this the last block?
|
447 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
448 |
-
architecture='skip',
|
449 |
-
# Low-pass filter to apply when resampling activations.
|
450 |
-
resample_filter=[1, 3, 3, 1],
|
451 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
452 |
-
conv_clamp=256,
|
453 |
-
use_fp16=False, # Use FP16 for this block?
|
454 |
-
fp16_channels_last=False, # Use channels-last memory format with FP16?
|
455 |
-
square=False, # default is for rectangle images
|
456 |
-
# Default value of fused_modconv. 'inference_only' = True for inference, False for training.
|
457 |
-
fused_modconv_default=True,
|
458 |
-
# Arguments for SynthesisLayer.
|
459 |
-
**layer_kwargs,
|
460 |
-
):
|
461 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
462 |
-
super().__init__()
|
463 |
-
self.in_channels = in_channels
|
464 |
-
self.w_dim = w_dim
|
465 |
-
self.resolution = resolution
|
466 |
-
self.img_channels = img_channels
|
467 |
-
self.is_last = is_last
|
468 |
-
self.architecture = architecture
|
469 |
-
self.use_fp16 = use_fp16
|
470 |
-
self.channels_last = (use_fp16 and fp16_channels_last)
|
471 |
-
self.fused_modconv_default = fused_modconv_default
|
472 |
-
self.register_buffer(
|
473 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
474 |
-
self.num_conv = 0
|
475 |
-
self.num_torgb = 0
|
476 |
-
self.square = square
|
477 |
-
|
478 |
-
if in_channels == 0:
|
479 |
-
if self.square:
|
480 |
-
self.const = torch.nn.Parameter(torch.randn(
|
481 |
-
[out_channels, resolution, resolution]))
|
482 |
-
else: # rectangle
|
483 |
-
self.const = torch.nn.Parameter(torch.randn(
|
484 |
-
[out_channels, resolution, resolution // 2]))
|
485 |
-
|
486 |
-
if in_channels != 0:
|
487 |
-
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
|
488 |
-
resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
|
489 |
-
self.num_conv += 1
|
490 |
-
|
491 |
-
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
|
492 |
-
conv_clamp=conv_clamp, channels_last=self.channels_last, square=square, **layer_kwargs)
|
493 |
-
self.num_conv += 1
|
494 |
-
|
495 |
-
if is_last or architecture == 'skip':
|
496 |
-
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
|
497 |
-
conv_clamp=conv_clamp, channels_last=self.channels_last)
|
498 |
-
self.num_torgb += 1
|
499 |
-
|
500 |
-
if in_channels != 0 and architecture == 'resnet':
|
501 |
-
self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
|
502 |
-
resample_filter=resample_filter, channels_last=self.channels_last)
|
503 |
-
|
504 |
-
def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
|
505 |
-
_ = update_emas # unused
|
506 |
-
misc.assert_shape(
|
507 |
-
ws, [None, self.num_conv + self.num_torgb, self.w_dim])
|
508 |
-
w_iter = iter(ws.unbind(dim=1))
|
509 |
-
if ws.device.type != 'cuda':
|
510 |
-
force_fp32 = True
|
511 |
-
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
|
512 |
-
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
|
513 |
-
if fused_modconv is None:
|
514 |
-
fused_modconv = self.fused_modconv_default
|
515 |
-
if fused_modconv == 'inference_only':
|
516 |
-
fused_modconv = (not self.training)
|
517 |
-
|
518 |
-
# Input.
|
519 |
-
if self.in_channels == 0:
|
520 |
-
x = self.const.to(dtype=dtype, memory_format=memory_format)
|
521 |
-
x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
|
522 |
-
else:
|
523 |
-
if self.square:
|
524 |
-
misc.assert_shape(
|
525 |
-
x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
|
526 |
-
else: # rectangle
|
527 |
-
misc.assert_shape(
|
528 |
-
x, [None, self.in_channels, self.resolution // 2, self.resolution // 4])
|
529 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
530 |
-
|
531 |
-
# Main layers.
|
532 |
-
if self.in_channels == 0:
|
533 |
-
x = self.conv1(x, next(w_iter),
|
534 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
535 |
-
elif self.architecture == 'resnet':
|
536 |
-
y = self.skip(x, gain=np.sqrt(0.5))
|
537 |
-
x = self.conv0(x, next(w_iter),
|
538 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
539 |
-
x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
|
540 |
-
gain=np.sqrt(0.5), **layer_kwargs)
|
541 |
-
x = y.add_(x)
|
542 |
-
else:
|
543 |
-
x = self.conv0(x, next(w_iter),
|
544 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
545 |
-
x = self.conv1(x, next(w_iter),
|
546 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
547 |
-
|
548 |
-
# ToRGB.
|
549 |
-
if img is not None:
|
550 |
-
if self.square:
|
551 |
-
misc.assert_shape(
|
552 |
-
img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
|
553 |
-
else:
|
554 |
-
misc.assert_shape(
|
555 |
-
img, [None, self.img_channels, self.resolution // 2, self.resolution // 4])
|
556 |
-
img = upfirdn2d.upsample2d(img, self.resample_filter)
|
557 |
-
if self.is_last or self.architecture == 'skip':
|
558 |
-
y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
|
559 |
-
y = y.to(dtype=torch.float32,
|
560 |
-
memory_format=torch.contiguous_format)
|
561 |
-
img = img.add_(y) if img is not None else y
|
562 |
-
|
563 |
-
assert x.dtype == dtype
|
564 |
-
assert img is None or img.dtype == torch.float32
|
565 |
-
return x, img
|
566 |
-
|
567 |
-
def extra_repr(self):
|
568 |
-
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
|
569 |
-
|
570 |
-
# ----------------------------------------------------------------------------
|
571 |
-
|
572 |
-
|
573 |
-
@persistence.persistent_class
|
574 |
-
class SynthesisNetwork(torch.nn.Module):
|
575 |
-
def __init__(self,
|
576 |
-
# Intermediate latent (W) dimensionality.
|
577 |
-
w_dim,
|
578 |
-
img_resolution, # Output image resolution.
|
579 |
-
img_channels, # Number of color channels.
|
580 |
-
square,
|
581 |
-
# Overall multiplier for the number of channels.
|
582 |
-
channel_base=32768,
|
583 |
-
# Maximum number of channels in any layer.
|
584 |
-
channel_max=512,
|
585 |
-
# Use FP16 for the N highest resolutions.
|
586 |
-
num_fp16_res=4,
|
587 |
-
**block_kwargs, # Arguments for SynthesisBlock.
|
588 |
-
):
|
589 |
-
assert img_resolution >= 4 and img_resolution & (
|
590 |
-
img_resolution - 1) == 0
|
591 |
-
super().__init__()
|
592 |
-
self.w_dim = w_dim
|
593 |
-
self.img_resolution = img_resolution
|
594 |
-
self.img_resolution_log2 = int(np.log2(img_resolution))
|
595 |
-
self.img_channels = img_channels
|
596 |
-
self.square = square
|
597 |
-
self.num_fp16_res = num_fp16_res
|
598 |
-
self.block_resolutions = [
|
599 |
-
2 ** i for i in range(2, self.img_resolution_log2 + 1)]
|
600 |
-
channels_dict = {res: min(channel_base // res, channel_max)
|
601 |
-
for res in self.block_resolutions}
|
602 |
-
fp16_resolution = max(
|
603 |
-
2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
|
604 |
-
|
605 |
-
self.num_ws = 0
|
606 |
-
for res in self.block_resolutions:
|
607 |
-
in_channels = channels_dict[res // 2] if res > 4 else 0
|
608 |
-
out_channels = channels_dict[res]
|
609 |
-
use_fp16 = (res >= fp16_resolution)
|
610 |
-
is_last = (res == self.img_resolution)
|
611 |
-
block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
|
612 |
-
img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, square=square, **block_kwargs)
|
613 |
-
self.num_ws += block.num_conv
|
614 |
-
if is_last:
|
615 |
-
self.num_ws += block.num_torgb
|
616 |
-
setattr(self, f'b{res}', block)
|
617 |
-
|
618 |
-
def forward(self, ws, **block_kwargs):
|
619 |
-
block_ws = []
|
620 |
-
with torch.autograd.profiler.record_function('split_ws'):
|
621 |
-
misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
|
622 |
-
ws = ws.to(torch.float32)
|
623 |
-
w_idx = 0
|
624 |
-
for res in self.block_resolutions:
|
625 |
-
block = getattr(self, f'b{res}')
|
626 |
-
block_ws.append(
|
627 |
-
ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
|
628 |
-
w_idx += block.num_conv
|
629 |
-
|
630 |
-
x = img = None
|
631 |
-
for res, cur_ws in zip(self.block_resolutions, block_ws):
|
632 |
-
block = getattr(self, f'b{res}')
|
633 |
-
x, img = block(x, img, cur_ws, **block_kwargs)
|
634 |
-
return img
|
635 |
-
|
636 |
-
def extra_repr(self):
|
637 |
-
return ' '.join([
|
638 |
-
f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
|
639 |
-
f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
|
640 |
-
f'num_fp16_res={self.num_fp16_res:d}'])
|
641 |
-
|
642 |
-
# ----------------------------------------------------------------------------
|
643 |
-
|
644 |
-
|
645 |
-
@persistence.persistent_class
|
646 |
-
class Generator(torch.nn.Module):
|
647 |
-
def __init__(self,
|
648 |
-
z_dim, # Input latent (Z) dimensionality.
|
649 |
-
# Conditioning label (C) dimensionality.
|
650 |
-
c_dim,
|
651 |
-
# Intermediate latent (W) dimensionality.
|
652 |
-
w_dim,
|
653 |
-
square,
|
654 |
-
img_resolution, # Output resolution.
|
655 |
-
img_channels, # Number of output color channels.
|
656 |
-
mapping_kwargs={}, # Arguments for MappingNetwork.
|
657 |
-
**synthesis_kwargs, # Arguments for SynthesisNetwork.
|
658 |
-
):
|
659 |
-
super().__init__()
|
660 |
-
self.z_dim = z_dim
|
661 |
-
self.c_dim = c_dim
|
662 |
-
self.w_dim = w_dim
|
663 |
-
self.square = square
|
664 |
-
self.img_resolution = img_resolution
|
665 |
-
self.img_channels = img_channels
|
666 |
-
self.synthesis = SynthesisNetwork(
|
667 |
-
w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, square=square, **synthesis_kwargs)
|
668 |
-
self.num_ws = self.synthesis.num_ws
|
669 |
-
self.mapping = MappingNetwork(
|
670 |
-
z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
|
671 |
-
|
672 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
|
673 |
-
ws = self.mapping(z, c, truncation_psi=truncation_psi,
|
674 |
-
truncation_cutoff=truncation_cutoff, update_emas=update_emas)
|
675 |
-
img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
|
676 |
-
return img
|
677 |
-
|
678 |
-
# ----------------------------------------------------------------------------
|
679 |
-
|
680 |
-
|
681 |
-
@persistence.persistent_class
|
682 |
-
class DiscriminatorBlock(torch.nn.Module):
|
683 |
-
def __init__(self,
|
684 |
-
# Number of input channels, 0 = first block.
|
685 |
-
in_channels,
|
686 |
-
# Number of intermediate channels.
|
687 |
-
tmp_channels,
|
688 |
-
# Number of output channels.
|
689 |
-
out_channels,
|
690 |
-
# Resolution of this block.
|
691 |
-
resolution,
|
692 |
-
# Number of input color channels.
|
693 |
-
img_channels,
|
694 |
-
# Index of the first layer.
|
695 |
-
first_layer_idx,
|
696 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
697 |
-
architecture='resnet',
|
698 |
-
# Activation function: 'relu', 'lrelu', etc.
|
699 |
-
activation='lrelu',
|
700 |
-
# Low-pass filter to apply when resampling activations.
|
701 |
-
resample_filter=[1, 3, 3, 1],
|
702 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
703 |
-
conv_clamp=None,
|
704 |
-
use_fp16=False, # Use FP16 for this block?
|
705 |
-
fp16_channels_last=False, # Use channels-last memory format with FP16?
|
706 |
-
# Freeze-D: Number of layers to freeze.
|
707 |
-
freeze_layers=0,
|
708 |
-
square=False,
|
709 |
-
):
|
710 |
-
assert in_channels in [0, tmp_channels]
|
711 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
712 |
-
super().__init__()
|
713 |
-
self.in_channels = in_channels
|
714 |
-
self.resolution = resolution
|
715 |
-
self.img_channels = img_channels
|
716 |
-
self.first_layer_idx = first_layer_idx
|
717 |
-
self.architecture = architecture
|
718 |
-
self.use_fp16 = use_fp16
|
719 |
-
self.channels_last = (use_fp16 and fp16_channels_last)
|
720 |
-
self.register_buffer(
|
721 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
722 |
-
self.square = square
|
723 |
-
|
724 |
-
self.num_layers = 0
|
725 |
-
|
726 |
-
def trainable_gen():
|
727 |
-
while True:
|
728 |
-
layer_idx = self.first_layer_idx + self.num_layers
|
729 |
-
trainable = (layer_idx >= freeze_layers)
|
730 |
-
self.num_layers += 1
|
731 |
-
yield trainable
|
732 |
-
trainable_iter = trainable_gen()
|
733 |
-
|
734 |
-
if in_channels == 0 or architecture == 'skip':
|
735 |
-
self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
|
736 |
-
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
|
737 |
-
|
738 |
-
self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
|
739 |
-
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
|
740 |
-
|
741 |
-
self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
|
742 |
-
trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
|
743 |
-
|
744 |
-
if architecture == 'resnet':
|
745 |
-
self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
|
746 |
-
trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
|
747 |
-
|
748 |
-
def forward(self, x, img, force_fp32=False):
|
749 |
-
if (x if x is not None else img).device.type != 'cuda':
|
750 |
-
force_fp32 = True
|
751 |
-
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
|
752 |
-
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
|
753 |
-
|
754 |
-
# Input.
|
755 |
-
if x is not None:
|
756 |
-
if self.square:
|
757 |
-
misc.assert_shape(
|
758 |
-
x, [None, self.in_channels, self.resolution, self.resolution])
|
759 |
-
else:
|
760 |
-
misc.assert_shape(
|
761 |
-
x, [None, self.in_channels, self.resolution, self.resolution // 2])
|
762 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
763 |
-
|
764 |
-
# FromRGB.
|
765 |
-
if self.in_channels == 0 or self.architecture == 'skip':
|
766 |
-
if self.square:
|
767 |
-
misc.assert_shape(
|
768 |
-
img, [None, self.img_channels, self.resolution, self.resolution])
|
769 |
-
else:
|
770 |
-
misc.assert_shape(
|
771 |
-
img, [None, self.img_channels, self.resolution, self.resolution // 2])
|
772 |
-
img = img.to(dtype=dtype, memory_format=memory_format)
|
773 |
-
y = self.fromrgb(img)
|
774 |
-
x = x + y if x is not None else y
|
775 |
-
img = upfirdn2d.downsample2d(
|
776 |
-
img, self.resample_filter) if self.architecture == 'skip' else None
|
777 |
-
|
778 |
-
# Main layers.
|
779 |
-
if self.architecture == 'resnet':
|
780 |
-
y = self.skip(x, gain=np.sqrt(0.5))
|
781 |
-
x = self.conv0(x)
|
782 |
-
x = self.conv1(x, gain=np.sqrt(0.5))
|
783 |
-
x = y.add_(x)
|
784 |
-
else:
|
785 |
-
x = self.conv0(x)
|
786 |
-
x = self.conv1(x)
|
787 |
-
|
788 |
-
assert x.dtype == dtype
|
789 |
-
return x, img
|
790 |
-
|
791 |
-
def extra_repr(self):
|
792 |
-
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
|
793 |
-
|
794 |
-
# ----------------------------------------------------------------------------
|
795 |
-
|
796 |
-
|
797 |
-
@persistence.persistent_class
|
798 |
-
class MinibatchStdLayer(torch.nn.Module):
|
799 |
-
def __init__(self, group_size, num_channels=1):
|
800 |
-
super().__init__()
|
801 |
-
self.group_size = group_size
|
802 |
-
self.num_channels = num_channels
|
803 |
-
|
804 |
-
def forward(self, x):
|
805 |
-
N, C, H, W = x.shape
|
806 |
-
with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
|
807 |
-
G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(
|
808 |
-
N)) if self.group_size is not None else N
|
809 |
-
F = self.num_channels
|
810 |
-
c = C // F
|
811 |
-
|
812 |
-
# [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
|
813 |
-
y = x.reshape(G, -1, F, c, H, W)
|
814 |
-
# [GnFcHW] Subtract mean over group.
|
815 |
-
y = y - y.mean(dim=0)
|
816 |
-
# [nFcHW] Calc variance over group.
|
817 |
-
y = y.square().mean(dim=0)
|
818 |
-
y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
|
819 |
-
# [nF] Take average over channels and pixels.
|
820 |
-
y = y.mean(dim=[2, 3, 4])
|
821 |
-
y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
|
822 |
-
# [NFHW] Replicate over group and pixels.
|
823 |
-
y = y.repeat(G, 1, H, W)
|
824 |
-
# [NCHW] Append to input as new channels.
|
825 |
-
x = torch.cat([x, y], dim=1)
|
826 |
-
return x
|
827 |
-
|
828 |
-
def extra_repr(self):
|
829 |
-
return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
|
830 |
-
|
831 |
-
# ----------------------------------------------------------------------------
|
832 |
-
|
833 |
-
|
834 |
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture='resnet',          # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size=4,             # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,           # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation='lrelu',             # Activation function: 'relu', 'lrelu', etc.
        conv_clamp=None,                # Clamp the output of convolution layers to +-X, None = disable clamping.
        square=False,
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        self.square = square

        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
                                kernel_size=3, activation=activation, conv_clamp=conv_clamp)

        if self.square:
            self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        else:
            self.fc = FullyConnectedLayer(in_channels * (resolution ** 2 // 2), in_channels, activation=activation)

        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)

    def forward(self, x, img, cmap, force_fp32=False):
        if self.square:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution // 2])  # [NCHW]

        _ = force_fp32  # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format

        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == 'skip':
            if self.square:
                misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            else:
                misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution // 2])
            img = img.to(dtype=dtype, memory_format=memory_format)
            x = x + self.fromrgb(img)

        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)

        # Conditioning.
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        assert x.dtype == dtype
        return x

    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
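# Editor's note: the conditioning branch above is the projection-discriminator
# trick: rather than concatenating the label, the epilogue output is projected
# onto the mapped label embedding. A hedged sketch of just that step, using
# plain tensors (shapes are illustrative, not taken from a real config):
#
#   cmap_dim = 512
#   x = torch.randn(8, cmap_dim)                 # epilogue output per sample
#   cmap = torch.randn(8, cmap_dim)              # mapped conditioning label
#   score = (x * cmap).sum(dim=1, keepdim=True) / np.sqrt(cmap_dim)
#   assert score.shape == (8, 1)                 # one real/fake logit each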

# ----------------------------------------------------------------------------

@persistence.persistent_class
class Discriminator(torch.nn.Module):
    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture='resnet',          # Architecture: 'orig', 'skip', 'resnet'.
        channel_base=32768,             # Overall multiplier for the number of channels.
        channel_max=512,                # Maximum number of channels in any layer.
        num_fp16_res=4,                 # Use FP16 for the N highest resolutions.
        conv_clamp=256,                 # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim=None,                  # Dimensionality of mapped conditioning label, None = default.
        square=False,                   # Default is for rectangular images.
        block_kwargs={},                # Arguments for DiscriminatorBlock.
        mapping_kwargs={},              # Arguments for MappingNetwork.
        epilogue_kwargs={},             # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.square = square
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0

        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                                       first_layer_idx=cur_layer_idx, use_fp16=use_fp16, square=square,
                                       **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, square=square,
                                        **epilogue_kwargs, **common_kwargs)

    def forward(self, img, c, update_emas=False, **block_kwargs):
        _ = update_emas  # unused
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x

    def extra_repr(self):
        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'

# ----------------------------------------------------------------------------
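# Editor's note: a hedged end-to-end sketch of the unconditional, rectangular
# (square=False) configuration used by StyleGAN-Human; it assumes the imports
# of this module (DiscriminatorBlock, MappingNetwork, and friends) resolve.
#
#   D = Discriminator(c_dim=0, img_resolution=1024, img_channels=3)
#   img = torch.randn(2, 3, 1024, 512)           # H x W = res x res // 2
#   logits = D(img, c=None)
#   assert logits.shape == (2, 1)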
spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/fused_act.py
DELETED
@@ -1,99 +0,0 @@
# Copyright (c) SenseTime Research. All rights reserved.

import os

import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load


module_path = os.path.dirname(__file__)
fused = load(
    "fused",
    sources=[
        os.path.join(module_path, "fused_bias_act.cpp"),
        os.path.join(module_path, "fused_bias_act_kernel.cu"),
    ],
)

class FusedLeakyReLUFunctionBackward(Function):
|
23 |
-
@staticmethod
|
24 |
-
def forward(ctx, grad_output, out, negative_slope, scale):
|
25 |
-
ctx.save_for_backward(out)
|
26 |
-
ctx.negative_slope = negative_slope
|
27 |
-
ctx.scale = scale
|
28 |
-
|
29 |
-
empty = grad_output.new_empty(0)
|
30 |
-
|
31 |
-
grad_input = fused.fused_bias_act(
|
32 |
-
grad_output, empty, out, 3, 1, negative_slope, scale
|
33 |
-
)
|
34 |
-
|
35 |
-
dim = [0]
|
36 |
-
|
37 |
-
if grad_input.ndim > 2:
|
38 |
-
dim += list(range(2, grad_input.ndim))
|
39 |
-
|
40 |
-
grad_bias = grad_input.sum(dim).detach()
|
41 |
-
|
42 |
-
return grad_input, grad_bias
|
43 |
-
|
44 |
-
@staticmethod
|
45 |
-
def backward(ctx, gradgrad_input, gradgrad_bias):
|
46 |
-
(out,) = ctx.saved_tensors
|
47 |
-
gradgrad_out = fused.fused_bias_act(
|
48 |
-
gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
|
49 |
-
)
|
50 |
-
|
51 |
-
return gradgrad_out, None, None, None
|
52 |
-
|
53 |
-
|
54 |
-
class FusedLeakyReLUFunction(Function):
|
55 |
-
@staticmethod
|
56 |
-
def forward(ctx, input, bias, negative_slope, scale):
|
57 |
-
empty = input.new_empty(0)
|
58 |
-
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
|
59 |
-
ctx.save_for_backward(out)
|
60 |
-
ctx.negative_slope = negative_slope
|
61 |
-
ctx.scale = scale
|
62 |
-
|
63 |
-
return out
|
64 |
-
|
65 |
-
@staticmethod
|
66 |
-
def backward(ctx, grad_output):
|
67 |
-
(out,) = ctx.saved_tensors
|
68 |
-
|
69 |
-
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
|
70 |
-
grad_output, out, ctx.negative_slope, ctx.scale
|
71 |
-
)
|
72 |
-
|
73 |
-
return grad_input, grad_bias, None, None
|
74 |
-
|
75 |
-
|
76 |
-
class FusedLeakyReLU(nn.Module):
|
77 |
-
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
|
78 |
-
super().__init__()
|
79 |
-
|
80 |
-
self.bias = nn.Parameter(torch.zeros(channel))
|
81 |
-
self.negative_slope = negative_slope
|
82 |
-
self.scale = scale
|
83 |
-
|
84 |
-
def forward(self, input):
|
85 |
-
return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
|
86 |
-
|
87 |
-
|
88 |
-
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
|
89 |
-
if input.device.type == "cpu":
|
90 |
-
rest_dim = [1] * (input.ndim - bias.ndim - 1)
|
91 |
-
return (
|
92 |
-
F.leaky_relu(
|
93 |
-
input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2
|
94 |
-
)
|
95 |
-
* scale
|
96 |
-
)
|
97 |
-
|
98 |
-
else:
|
99 |
-
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
|
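# Editor's note: a minimal sketch of the CPU fallback path, which needs no
# compiled extension; the CUDA branch requires the fused_bias_act build above.
#
#   act = FusedLeakyReLU(channel=16)
#   x = torch.randn(4, 16, 8, 8)
#   y = act(x)                                   # leaky_relu(x + bias) * sqrt(2)
#   assert y.shape == x.shape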