Commit · 3c1bcb0
1 Parent(s): 7bf4d45

Update parquet files (step 46 of 397)

This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/((HOT)) Download Easy Office Recovery 2.0 Full Crack.md +0 -14
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aster V7 Keygen Unlock the Full Potential of ASTER Multiseat Software.md +0 -121
- spaces/1gistliPinn/ChatGPT4/Examples/Call Of Duty Black Ops 2 Crack File Download.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Catia P3 V5-6r2014 Free Crack 412.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Chief Architect X6 Crack Keygens.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe Flash Professional CS6 Whats New in the Latest Version?.md +0 -227
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Badminton League Mod APK with Unlimited Money and Coins.md +0 -106
- spaces/1phancelerku/anime-remove-background/Download 6th Tamil Book PDF for Free - Samacheer Kalvi New Syllabus 2021 to 2022.md +0 -100
- spaces/1phancelerku/anime-remove-background/Drive Through Highway Traffic with Stunning 3D Graphics in Traffic Racer Pro Car Games.md +0 -134
- spaces/4RiZ4/stabilityai-stable-diffusion-2/app.py +0 -3
- spaces/4Taps/SadTalker/src/facerender/animate.py +0 -182
- spaces/801artistry/RVC801/infer/modules/vc/__init__.py +0 -0
- spaces/AICODER009/food_detection/README.md +0 -13
- spaces/AIWaves/SOP_Generation-single/Component/ExtraComponent.py +0 -128
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py +0 -79
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet34.py +0 -17
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/shufflenet-v2-1x_4xb32_2000e_3c_noF/__init__.py +0 -0
- spaces/Aashiue/speech_to_text/README.md +0 -12
- spaces/Adeeb-F/AI-Genrated-Image-Detector/README.md +0 -13
- spaces/Afnaan/chatbots/README.md +0 -12
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/dots/Factory.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/los/Factory.d.ts +0 -6
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/Factory.d.ts +0 -7
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/FadeCallbacks.js +0 -26
- spaces/Alpaca233/SadTalker/src/gradio_demo.py +0 -155
- spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatgpt.py +0 -272
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/__init__.py +0 -24
- spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py +0 -3
- spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/README.md +0 -35
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py +0 -10
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py +0 -10
- spaces/AnnasBlackHat/Image-Similarity/src/similarity/model_implements/mobilenet_v3.py +0 -14
- spaces/Anthony7906/MengHuiMXD_GPT/modules/webui_locale.py +0 -26
- spaces/AriaMei/TTSdemo/commons.py +0 -161
- spaces/Artrajz/vits-simple-api/vits/text/cleaners.py +0 -278
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/queue.py +0 -22
- spaces/AtlasUnified/DeforumPromptGenerator/README.md +0 -12
- spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/transform.py +0 -45
- spaces/Audio-AGI/WavJourney/APIs.py +0 -202
- spaces/Averyng/averyng/app.py +0 -3
- spaces/BAAI/vid2vid-zero/vid2vid_zero/models/attention_2d.py +0 -434
- spaces/Bart92/RVC_HF/tools/rvc_for_realtime.py +0 -381
- spaces/Benson/text-generation/Examples/Decision Mod Apk.md +0 -47
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/__init__.py +0 -2
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/style.py +0 -197
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py +0 -97
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/syntax.py +0 -950
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/ssltransport.py +0 -221
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/README.md +0 -5
- spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/proc_dict_gqa.py +0 -85
spaces/1acneusushi/gradio-2dmoleculeeditor/data/((HOT)) Download Easy Office Recovery 2.0 Full Crack.md
DELETED
@@ -1,14 +0,0 @@
-
-<h1>Download Easy Office Recovery 2.0 Full Crack: A Risky and Illegal Way to Recover Your Office Files</h1>
-<p>Easy Office Recovery is a software that claims to recover deleted or corrupted Microsoft Office files, such as Word documents, Excel spreadsheets, PowerPoint presentations and Outlook emails. It supports various file formats, such as DOC, DOCX, XLS, XLSX, PPT, PPTX and PST. It also claims to recover files from formatted or damaged disks, memory cards and USB drives.</p>
-<p>If you have lost some important Office files due to accidental deletion, virus infection, power failure or other reasons, you might be looking for a way to get them back. You might have come across a website that offers to download Easy Office Recovery 2.0 full crack for free. A crack is a software that bypasses the security and licensing mechanisms of a program, allowing you to use it without paying. However, downloading and using Easy Office Recovery 2.0 full crack is not a good idea. Here are some reasons why:</p>
-<h2>download easy office recovery 2.0 full crack</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://byltly.com/2uKw8q">https://byltly.com/2uKw8q</a></b></p><br /><br />
-<ul>
-<li>It is illegal. Downloading and using Easy Office Recovery 2.0 full crack violates the copyright laws and the terms of service of MunSoft, the developer of Easy Office Recovery. You could face legal consequences if you are caught.</li>
-<li>It is unsafe. Downloading Easy Office Recovery 2.0 full crack from unknown sources exposes your computer to malware, viruses, spyware and other threats. You could lose your data, compromise your privacy or damage your system.</li>
-<li>It is unreliable. Easy Office Recovery 2.0 full crack might not work properly or at all. You could experience crashes, glitches, errors, compatibility issues or missing features. You could also lose your files or corrupt them further.</li>
-<li>It is unethical. Downloading and using Easy Office Recovery 2.0 full crack deprives the developers of their rightful income and recognition. You are also hurting the software industry and the users who pay for the software.</li>
-</ul>
-<p>Therefore, downloading and using Easy Office Recovery 2.0 full crack is not worth it. If you want to use Easy Office Recovery, you should buy it from the official website or a trusted reseller. You will get a legal, safe, reliable and ethical product that will help you recover your Office files.</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Aster V7 Keygen Unlock the Full Potential of ASTER Multiseat Software.md
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Use Aster V7 Keygen to Activate Multiple Workstations on One PC</h1>
|
3 |
-
<p>Have you ever wanted to use multiple workstations on one PC without buying extra hardware or software? If so, you might be interested in Aster V7, a powerful and easy-to-use program that allows you to create several independent workspaces on a single computer. With Aster V7, you can share your PC with your family members, friends, or colleagues, and enjoy different tasks and applications at the same time. You can also save money, space, and energy by using one PC instead of several.</p>
|
4 |
-
<h2>aster v7 keygen</h2><br /><p><b><b>DOWNLOAD</b> ☑ <a href="https://byltly.com/2uKzcA">https://byltly.com/2uKzcA</a></b></p><br /><br />
|
5 |
-
<p>However, there is one catch: Aster V7 is not free. You need to purchase a license for each workstation you want to create, which can be quite expensive if you need many of them. That's why some people look for alternative ways to activate Aster V7 without paying for it. One of these ways is using a keygen.</p>
|
6 |
-
<p>A keygen is a small program that generates a valid license key for a software product. By using a keygen, you can bypass the activation process and use the software for free. However, using a keygen is illegal and risky, as it may contain viruses or malware that can harm your PC or compromise your privacy. Therefore, we do not recommend using a keygen for any software product, including Aster V7.</p>
|
7 |
-
<p>But if you still want to use a keygen for Aster V7, we will show you how to do it in this article. We will also show you how to set up multiple workstations on one PC with Aster V7, and give you some tips and tricks for using it. Please note that we are not responsible for any consequences that may arise from using a keygen for Aster V7. Use it at your own risk.</p>
|
8 |
-
<h2>How to Download and Install Aster V7 Keygen</h2>
|
9 |
-
<p>The first step is to download and install Aster V7 keygen on your PC. You can find many websites that offer Aster V7 keygen for free download, but be careful as some of them may be fake or malicious. To avoid getting infected by viruses or malware, you should only download Aster V7 keygen from trusted sources.</p>
|
10 |
-
<p>One of the most popular websites that provide Aster V7 keygen is <a href="https://cracknest.com/2018/01/aster-v7-crack.html">CrackNest.com</a>. This website claims that its keygen is 100% working and safe, and that it can generate unlimited license keys for Aster V7. However, we cannot guarantee the authenticity or reliability of this website or its keygen. Use it at your own discretion.</p>
|
11 |
-
<p>To download and install Aster V7 keygen from CrackNest.com, follow these steps:</p>
|
12 |
-
<ul>
|
13 |
-
<li>Go to <a href="https://cracknest.com/2018/01/aster-v7-crack.html">CrackNest.com</a> and click on the "Download" button.</li>
|
14 |
-
<li>Wait for the download to complete and then extract the ZIP file.</li>
|
15 |
-
<li>Open the extracted folder and run the "Setup.exe" file.</li>
|
16 |
-
<li>Follow the instructions on the screen and complete the installation.</li>
|
17 |
-
<li>After the installation is done, run the "Keygen.exe" file.</li>
|
18 |
-
</ul>
|
19 |
-
<p>You have now successfully installed Aster V7 keygen on your PC. The next step is to activate Aster V7 with it.</p>
|
20 |
-
<p>aster v7 crack download free 2021<br />
|
21 |
-
aster v7 activation key 2023<br />
|
22 |
-
aster v7 patch latest version<br />
|
23 |
-
aster v7 multiseat software for windows<br />
|
24 |
-
aster v7 full version with keygen<br />
|
25 |
-
aster v7 serial key 2021 crack<br />
|
26 |
-
aster v7 license key free download<br />
|
27 |
-
aster v7 pre-activated full version<br />
|
28 |
-
aster v7 2.31 crack with activation key<br />
|
29 |
-
aster v7 2.28 crack with keygen<br />
|
30 |
-
aster v7 2.25 crack with activation<br />
|
31 |
-
aster v7 2.23 crack latest full version<br />
|
32 |
-
aster v7 sensor aboard NASA's Terra satellite<br />
|
33 |
-
aster v7 data for Earth observation<br />
|
34 |
-
aster v7 images of the Earth's surface<br />
|
35 |
-
aster v7 data for geology and mineralogy<br />
|
36 |
-
aster v7 data for environmental monitoring<br />
|
37 |
-
aster v7 data for vegetation and land use<br />
|
38 |
-
aster v7 data for natural disasters and hazards<br />
|
39 |
-
aster v7 data for climate change and ecosystems<br />
|
40 |
-
aster v7 high spatial resolution and accuracy<br />
|
41 |
-
aster v7 14 different wavelength regions<br />
|
42 |
-
aster v7 stereoscopic images of the Earth's surface<br />
|
43 |
-
aster v7 study of volcanoes and earthquakes<br />
|
44 |
-
aster v7 map of mineral deposits and resources<br />
|
45 |
-
aster v7 monitor vegetation health and crop conditions<br />
|
46 |
-
aster v7 identify areas at risk of wildfires and floods<br />
|
47 |
-
aster v7 assess land use changes and conservation efforts<br />
|
48 |
-
aster v7 one computer multiple users station<br />
|
49 |
-
aster v7 internet cafes offices libraries setup<br />
|
50 |
-
aster v7 video and games pvp mode setup<br />
|
51 |
-
aster v7 scanners printers scanners sharing setup<br />
|
52 |
-
aster v7 separate displays controllers audio cards setup<br />
|
53 |
-
aster v7 VGA DVI HDMI DP video cards supported setup<br />
|
54 |
-
aster v7 external monitors via USB or WIFI LAN setup<br />
|
55 |
-
aster v7 unique configuration for each user setup<br />
|
56 |
-
aster v7 compatible with windows xp 7 8 10 setup<br />
|
57 |
-
aster v7 easy to use tool for multi-user station setup<br />
|
58 |
-
aster v7 improve your computer performance setup <br />
|
59 |
-
how to install and use aster v7 keygen</p>
|
60 |
-
<h2>How to Activate Aster V7 with Keygen</h2>
|
61 |
-
<p>The second step is to activate Aster V7 with the keygen you have installed. To do this, follow these steps:</p>
|
62 |
-
<ul>
|
63 |
-
<li>Run Aster V on your PC </li>
|
64 |
-
<li>Go to the "Help" menu and click on "Enter License Key" </li>
|
65 |
-
<li>A window will pop up asking you to enter your license key </li>
|
66 |
-
<li>Run the "Keygen.exe" file again and click on the "Generate" button </li>
|
67 |
-
<li>A random license key will be generated by the keygen.</li>
|
68 |
-
<li>Copy the license key and paste it in the window of Aster V7.</li>
|
69 |
-
<li>Click on "OK" and then "Apply".</li>
|
70 |
-
<li>Restart your PC.</li>
|
71 |
-
</ul>
|
72 |
-
<p>You have now successfully activated Aster V7 with the keygen. The next step is to set up multiple workstations on one PC with it.</p>
|
73 |
-
<h2>How to Set Up Multiple Workstations on One PC with Aster V7</h2>
|
74 |
-
<p>The third step is to set up multiple workstations on one PC with Aster V7. To do this, you need to have multiple monitors, keyboards, mice, and speakers connected to your PC. You also need to configure Aster V7 settings and assign resources to each workstation. To do this, follow these steps:</p>
|
75 |
-
<ul>
|
76 |
-
<li>Connect your monitors, keyboards, mice, and speakers to your PC using HDMI cables, USB ports, or wireless adapters.</li>
|
77 |
-
<li>Run Aster V7 on your PC.</li>
|
78 |
-
<li>Go to the "Settings" menu and click on "Workplace Layout".</li>
|
79 |
-
<li>A window will show you a graphical representation of your PC and its connected devices.</li>
|
80 |
-
<li>Drag and drop each device icon onto one of the numbered squares at the bottom of the window. Each square represents a workstation.</li>
|
81 |
-
<li>Assign each workstation a name and a color by clicking on its square.</li>
|
82 |
-
<li>Click on "OK" and then "Apply".</li>
|
83 |
-
<li>Restart your PC.</li>
|
84 |
-
</ul>
|
85 |
-
<p>You have now successfully set up multiple workstations on one PC with Aster V7. The next step is to switch between workstations and use them simultaneously.</p>
|
86 |
-
<h2>How to Switch Between Workstations and Use Them Simultaneously</h2>
|
87 |
-
<p>The fourth step is to switch between workstations and use them simultaneously with Aster V7. To do this, follow these steps:</p>
|
88 |
-
<ul>
|
89 |
-
<li>After restarting your PC, you will see a login screen for each workstation on each monitor. </li>
|
90 |
-
<li>Log in with your username and password for each workstation. </li>
|
91 |
-
<li>You can now use each workstation independently as if they were separate PCs. </li>
|
92 |
-
<li>You can switch between workstations by pressing Ctrl+Alt+Shift+<number>, where <number> is the number of the workstation you want to switch to. </li>
|
93 |
-
<li>You can also use a mouse gesture by moving your mouse cursor from one monitor edge to another monitor edge in a straight line. </li>
|
94 |
-
<li>You can use different applications and tasks on each workstation simultaneously without interfering with each other. </li>
|
95 |
-
</ul>
|
96 |
-
<p>You have now successfully learned how to switch between workstations and use them simultaneously with Aster V7. The next step is to learn some tips and tricks for using it.</p>
|
97 |
-
<h2>Tips and Tricks for Using Aster V7 Keygen</h2>
|
98 |
-
<p>The fifth step is to learn some tips and tricks for using Aster V7 keygen. Here are some of them:</p>
|
99 |
-
<ul>
|
100 |
-
<li>To update Aster V7 without losing the activation, - To update Aster V7 without losing the activation, you need to run the "Keygen.exe" file again before updating it. Then copy the new license key generated by the keygen and paste it in the window of Aster V7 after updating it. - To backup and restore your license key, you need to copy the file named "aster.vl" from the folder where you installed Aster V7 (usually C:\Program Files\ASTER) and save it somewhere else. Then if you need to restore your license key, you need to copy this file back into the same folder. - To troubleshoot common issues with Aster V7 keygen, such as invalid license keys or activation errors, you need to check if your antivirus or firewall software is blocking or deleting the keygen or its files. If so, you need to disable or whitelist them before running the keygen or activating Aster V7. These are some of the tips and tricks for using Aster V7 keygen. However, we remind you again that using a keygen is illegal and risky, and we do not endorse or support it in any way. <h2>Conclusion</h2>
|
101 |
-
<p>In this article, we have shown you how to use Aster V key to activate multiple workstations on one PC with Aster V You have learned how to download and install Aster V key how to activate Aster V with key how to set up multiple workstations on one PC with Aster V how to switch between workstations and use them simultaneously with Aster V, and how to use some tips and tricks for using Aster V7 keygen.</p>
|
102 |
-
<p>We hope that this article has been helpful and informative for you. However, we also hope that you will reconsider using a keygen for Aster V7, as it is illegal and risky. Instead, we suggest that you purchase a legitimate license for Aster V7 from its official website, and support the developers who created this amazing software. By doing so, you will not only enjoy the full features and benefits of Aster V7, but also avoid any legal or technical problems that may arise from using a keygen.</p>
|
103 |
-
<p>If you are interested in buying a license for Aster V7, you can visit its official website at <a href="https://www.ibik.ru/">https://www.ibik.ru/</a> and choose the best option for you. You can also find more information and support about Aster V7 on its website.</p>
|
104 |
-
<p>Thank you for reading this article. We hope that you have learned something new and useful today. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
|
105 |
-
<h2>FAQs</h2>
|
106 |
-
<p>Here are some frequently asked questions about Aster V7 keygen:</p>
|
107 |
-
<ol>
|
108 |
-
<li><b>What is Aster V7?</b></li>
|
109 |
-
<p>Aster V7 is a software program that allows you to create multiple independent workspaces on a single PC. You can use different applications and tasks on each workspace simultaneously without interfering with each other. You can also share your PC with other users by connecting multiple monitors, keyboards, mice, and speakers to it.</p>
|
110 |
-
<li><b>What is a keygen?</b></li>
|
111 |
-
<p>A keygen is a small program that generates a valid license key for a software product. By using a keygen, you can bypass the activation process and use the software for free.</p>
|
112 |
-
<li><b>Is using a keygen legal?</b></li>
|
113 |
-
<p>No, using a keygen is illegal. It violates the copyright and intellectual property rights of the software developers. It also exposes your PC to viruses or malware that may harm it or compromise your privacy.</p>
|
114 |
-
<li><b>Where can I download Aster V7 keygen?</b></li>
|
115 |
-
<p>You can find many websites that offer Aster V7 keygen for free download, but be careful as some of them may be fake or malicious. To avoid getting infected by viruses or malware, you should only download Aster V7 keygen from trusted sources.</p>
|
116 |
-
<li><b>How can I buy a license for Aster V7?</b></li>
|
117 |
-
<p>You can buy a license for Aster V7 from its official website at <a href="https://www.ibik.ru/">https://www.ibik.ru/</a>. You can choose the best option for you depending on how many workspaces you want to create and how long you want to use them.</p>
|
118 |
-
</ol>
|
119 |
-
</p> 0a6ba089eb<br />
|
120 |
-
<br />
|
121 |
-
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Call Of Duty Black Ops 2 Crack File Download.md
DELETED
@@ -1,6 +0,0 @@
-<h2>call of duty black ops 2 crack file download</h2><br /><p><b><b>Download File</b> === <a href="https://imgfil.com/2uy1lx">https://imgfil.com/2uy1lx</a></b></p><br /><br />
-
-Successful conversion of password encrypted OST files. Safe and fast scanning. Supports c... powered by Peatix : More than a ticket. 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Catia P3 V5-6r2014 Free Crack 412.md
DELETED
@@ -1,6 +0,0 @@
-<h2>catia p3 v5-6r2014 crack 412</h2><br /><p><b><b>Download Zip</b> ⚙ <a href="https://imgfil.com/2uy1Xg">https://imgfil.com/2uy1Xg</a></b></p><br /><br />
-<br />
-d5da3c52bf<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Chief Architect X6 Crack Keygens.md
DELETED
@@ -1,6 +0,0 @@
-<h2>chief architect x6 crack keygens</h2><br /><p><b><b>DOWNLOAD</b> ↔ <a href="https://imgfil.com/2uy0Nw">https://imgfil.com/2uy0Nw</a></b></p><br /><br />
-<br />
-Chief Architect Premier X6 serial numbers, cracks and keygens are presented here. No registration is needed. Just download and enjoy. 4d29de3e1b<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Adobe Flash Professional CS6 Whats New in the Latest Version?.md
DELETED
@@ -1,227 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Introduction</h1>
|
3 |
-
<p>Adobe Flash Pro CS6 is a software that allows you to create animation and multimedia content for web, desktop, and mobile platforms. You can use it to design interactive experiences that present consistently across devices and browsers. You can also use it to create games, cartoons, banners, presentations, e-learning materials, and more.</p>
|
4 |
-
<p>Some of the features of Adobe Flash Pro CS6 include:</p>
|
5 |
-
<h2>adobe flash pro cs6 free download</h2><br /><p><b><b>DOWNLOAD</b> · <a href="https://urlin.us/2uT0D2">https://urlin.us/2uT0D2</a></b></p><br /><br />
|
6 |
-
<ul>
|
7 |
-
<li>Support for HTML5: You can use the Toolkit for CreateJS extension to export your content as JavaScript that can run on HTML5 canvas.</li>
|
8 |
-
<li>Sprite sheet generation: You can export your symbols and animation sequences as sprite sheets that help improve the gaming experience, workflow, and performance.</li>
|
9 |
-
<li>Wide platform and device support: You can reach Android and iOS devices by targeting the latest Adobe Flash Player and AIR runtimes. You can also create and deliver applications with a prepackaged Adobe AIR captive runtime for a better user experience.</li>
|
10 |
-
<li>Adobe AIR mobile simulation: You can simulate common mobile application interactions like screen orientation, touch gestures, and accelerometer to help speed up testing.</li>
|
11 |
-
<li>Stage 3D targeting: You can turbocharge rendering performance by using direct mode to leverage the open source Starling Framework for hardware-accelerated 2D content.</li>
|
12 |
-
</ul>
|
13 |
-
<h1>Downloading and installing Adobe Flash Pro CS6</h1>
|
14 |
-
<p>If you want to use Adobe Flash Pro CS6 for free, you can download it from a Google Drive link that contains the setup file and the crack file. Here are the steps to download and install it:</p>
|
15 |
-
<ol>
|
16 |
-
<li>Click on the Google Drive link and download the Adobe.Flash.Professional.CS6.rar file.</li>
|
17 |
-
<li>Extract the rar file using a software like WinRAR or 7-Zip.</li>
|
18 |
-
<li>Open the extracted folder and run Set-up.exe as administrator.</li>
|
19 |
-
<li>Follow the on-screen instructions to complete the installation. When prompted to enter a serial number, enter any of the serial numbers provided in the Serial Key.txt file.</li>
|
20 |
-
<li>After the installation is finished, do not launch the program yet.</li>
|
21 |
-
<li>Copy the amtlib.dll file from the Crack folder and paste it in the installation directory (usually C:\Program Files\Adobe\Adobe Flash CS6).</li>
|
22 |
-
<li>Replace the existing file if asked.</li>
|
23 |
-
<li>You can now launch Adobe Flash Pro CS6 and enjoy using it for free.</li>
|
24 |
-
</ol>
|
25 |
-
<h1>Getting started with Adobe Flash Pro CS6</h1>
|
26 |
-
<p>Now that you have installed Adobe Flash Pro CS6, you can start creating your own projects. Here are some basic steps to get you started:</p>
|
27 |
-
<h2>Creating a new document</h2>
|
28 |
-
<p>To create a new document in Adobe Flash Pro CS6, follow these steps:</p>
|
29 |
-
<ol>
|
30 |
-
<li>Launch Adobe Flash Pro CS6.</li>
|
31 |
-
<li>Select File > New or press Ctrl+N on your keyboard.</li>
|
32 |
-
<li>In the New Document dialog box, choose a document type from the tabs. You can choose from ActionScript 3.0, ActionScript 2.0, AIR for Desktop, AIR for Android, AIR for iOS, or HTML5 Canvas.</li>
|
33 |
-
<li>Click OK to create a new document with default settings or click Change to customize the settings such as the size, frame rate, background color, and ruler units of your document.</li>
|
34 |
-
</ol>
|
35 |
-
<h2>Setting up the stage</h2>
|
36 |
-
<p>The stage is the rectangular area where you create and arrange the objects that appear in your animation. You can customize the appearance and behavior of the stage by using the Properties panel and the View menu. Here are some things you can do with the stage:</p>
|
37 |
-
<ul>
|
38 |
-
<li>Change the stage size: You can change the width and height of the stage by entering the values in the Size section of the Properties panel or by dragging the edges of the stage.</li>
|
39 |
-
<li>Change the stage color: You can change the background color of the stage by clicking on the color box in the Background Color section of the Properties panel and choosing a color from the color picker.</li>
|
40 |
-
<li>Change the stage alignment: You can change how the stage is aligned within the browser window by clicking on the Align button in the Align section of the Properties panel and choosing an alignment option from the drop-down menu.</li>
|
41 |
-
<li>Change the stage scale mode: You can change how the stage scales when the browser window is resized by clicking on the Scale button in the Scale section of the Properties panel and choosing a scale mode from the drop-down menu. The options are Show All, No Border, Exact Fit, and No Scale.</li>
|
42 |
-
<li>Zoom in or out of the stage: You can zoom in or out of the stage by using the Zoom tool from the Tools panel or by choosing a zoom level from the View > Magnification menu.</li>
|
43 |
-
<li>Show or hide rulers, guides, and grids: You can show or hide rulers, guides, and grids on the stage by choosing View > Rulers, View > Guides, or View > Grid. You can also customize their settings by choosing Edit > Preferences > Guides & Grid.</li>
|
44 |
-
</ul>
|
45 |
-
<h2>Using the tools</h2>
|
46 |
-
<p>The Tools panel contains various tools that you can use to create and modify objects on the stage. You can access it by choosing Window > Tools or by pressing Ctrl+F2 on your keyboard. Here are some of the tools you can use:</p>
|
47 |
-
<table>
|
48 |
-
<tr><th>Tool</th><th>Description</th></tr>
|
49 |
-
<tr><td>Selection tool</td><td>Lets you select and move objects on the stage. You can also use it to resize, rotate, skew, or distort objects by dragging their handles.</td></tr>
|
50 |
-
<tr><td>Subselection tool</td><td>Lets you select and edit individual anchor points and segments of an object. You can also use it to add or delete anchor points or convert them between corner and smooth points.</td></tr>
|
51 |
-
<tr><td>Free Transform tool</td><td>Lets you transform objects on the stage using various options such as scale, rotate, skew, distort, envelope, and perspective. You can also use it to flip or rotate objects in 3D space.</td></tr>
|
52 |
-
<tr><td>Lasso tool</td><td>Lets you select objects or parts of objects by drawing a freehand shape around them. You can also use it to select pixels in bitmap images.</td></tr>
|
53 |
-
<tr><td>Pen tool</td><td>Lets you draw straight or curved lines by placing anchor points on the stage. You can also use it to modify existing lines by adding, deleting, or moving anchor points.</td></tr>
|
54 |
-
<tr><td>Text tool</td><td>Lets you create and edit text on the stage. You can also use it to format text using various options such as font, size, color, alignment, and style.</td></tr>
|
55 |
-
<tr><td>Line tool</td><td>Lets you draw straight lines on the stage. You can also use it to set the stroke color, width, and style of the lines.</td></tr>
|
56 |
-
<tr><td>Oval tool</td><td>Lets you draw ovals and circles on the stage. You can also use it to set the fill and stroke color, width, and style of the ovals.</td></tr>
|
57 |
-
<tr><td>Rectangle tool</td><td>Lets you draw rectangles and squares on the stage. You can also use it to set the fill and stroke color, width, and style of the rectangles. You can also use it to adjust the corner radius of the rectangles.</td></tr>
|
58 |
-
<tr><td>Pencil tool</td><td>Lets you draw freehand lines on the stage. You can also use it to set the stroke color, width, and style of the lines. You can also use it to choose a drawing mode from straighten, smooth, or ink.</td></tr>
|
59 |
-
<tr><td>Brush tool</td><td>Lets you draw freehand shapes with a fill on the stage. You can also use it to set the fill color and style of the shapes. You can also use it to choose a brush size, shape, and mode from paint normal, paint fills, paint behind, paint selection, or paint inside.</td></tr>
|
60 |
-
<tr><td>Paint Bucket tool</td><td>Lets you fill an enclosed area or shape with a color on the stage. You can also use it to set the fill color and style of the area or shape. You can also use it to choose a fill mode from paint normal, paint fills, paint behind, paint selection, or paint inside.</td></tr>
|
61 |
-
<tr><td>Eyedropper tool</td><td>Lets you pick up a color from an object on the stage or from the color picker. You can also use it to apply the picked color to another object on the stage.</td></tr>
|
62 |
-
<tr><td>Eraser tool</td><td>Lets you erase parts of an object or a bitmap image on the stage. You can also use it to set the eraser mode from erase normal, erase fills, erase lines, or erase selected fills and lines. You can also use it to choose an eraser size and shape.</td></tr>
|
63 |
-
<tr><td>Hand tool</td><td>Lets you move the stage view by dragging it with your mouse. You can also use it to zoom in or out of the stage by holding down the Alt key and scrolling your mouse wheel.</td></tr>
|
64 |
-
<tr><td>Zoom tool</td><td>Lets you zoom in or out of a specific area on the stage by clicking or dragging your mouse. You can also use it to zoom out by holding down the Alt key and clicking your mouse.</td></tr>
|
65 |
-
</table>
|
66 |
-
<h1>Creating animation with Adobe Flash Pro CS6</h1>
|
67 |
-
<p>One of the main features of Adobe Flash Pro CS6 is that you can create animation using various techniques such as frames, keyframes, layers, tweens, and symbols. Here are some basic concepts and steps to help you create animation with Adobe Flash Pro CS6:</p>
|
68 |
-
<h2>Frames and keyframes</h2>
|
69 |
-
<p>Frames are the basic units of time in an animation. Each frame represents a single image or state of your animation. Keyframes are special frames that mark the beginning or end of a change in your animation. You can create frames and keyframes by using the Timeline panel at the bottom of your screen. Here are some things you can do with frames and keyframes:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Add frames: You can add frames by selecting a frame on the Timeline panel and choosing Insert > Timeline > Frame or pressing F5 on your keyboard.</li>
|
72 |
-
<li>Add keyframes: You can add keyframes by selecting a frame on the Timeline panel and choosing Insert > Timeline > Keyframe or pressing F6 on your keyboard.</li>
|
73 |
-
<li>Delete frames: You can delete frames by selecting them on the Timeline panel and choosing Edit > Timeline > Remove Frames or pressing Shift+F5 on your keyboard.</li>
|
74 |
-
<li>Copy and paste frames: You can copy and paste frames by selecting them on the Timeline panel and choosing Edit > Timeline > Copy Frames or Edit > Timeline > Paste Frames or pressing Ctrl+Alt+C or Ctrl+Alt+V on your keyboard.</li>
|
75 |
-
<li>Move frames: You can move frames by selecting them on the Timeline panel and dragging them to a new location.</li>
|
76 |
-
<li>Extend frames: You can extend frames by selecting them on the Timeline panel and dragging their right edge to a new position.</li>
|
77 |
-
<li>Insert blank keyframes: You can insert blank keyframes by selecting a frame on the Timeline panel and choosing Insert > Timeline > Blank Keyframe or pressing F7 on your keyboard.</li>
|
78 |
-
<li>Convert frames to keyframes: You can convert frames to keyframes by selecting them on the Timeline panel and choosing Modify > Timeline > Convert to Keyframes or pressing Ctrl+Alt+K on your keyboard.</li>
|
79 |
-
</ul>
|
80 |
-
<h2>Layers</h2>
|
81 |
-
<p>Layers are the vertical stacks of frames on the Timeline panel that help you organize and control the visibility and order of your objects on the stage. You can create and manage layers by using the Layer panel at the bottom of your screen. Here are some things you can do with layers:</p>
|
82 |
-
<p>adobe flash professional cs6 download<br />
|
83 |
-
adobe flash pro cs6 free trial<br />
|
84 |
-
adobe flash pro cs6 full version<br />
|
85 |
-
adobe flash pro cs6 crack<br />
|
86 |
-
adobe flash pro cs6 serial number<br />
|
87 |
-
adobe flash pro cs6 portable<br />
|
88 |
-
adobe flash pro cs6 tutorial<br />
|
89 |
-
adobe flash pro cs6 system requirements<br />
|
90 |
-
adobe flash pro cs6 animation<br />
|
91 |
-
adobe flash pro cs6 keygen<br />
|
92 |
-
adobe flash pro cs6 mac<br />
|
93 |
-
adobe flash pro cs6 offline installer<br />
|
94 |
-
adobe flash pro cs6 for windows 10<br />
|
95 |
-
adobe flash pro cs6 update<br />
|
96 |
-
adobe flash pro cs6 license key<br />
|
97 |
-
adobe flash pro cs6 iso<br />
|
98 |
-
adobe flash pro cs6 patch<br />
|
99 |
-
adobe flash pro cs6 online<br />
|
100 |
-
adobe flash pro cs6 features<br />
|
101 |
-
adobe flash pro cs6 activation code<br />
|
102 |
-
adobe flash pro cs6 rar<br />
|
103 |
-
adobe flash pro cs6 setup<br />
|
104 |
-
adobe flash pro cs6 software<br />
|
105 |
-
adobe flash pro cs6 toolkit for createjs<br />
|
106 |
-
adobe flash pro cs6 video editing<br />
|
107 |
-
adobe flash pro cs6 game development<br />
|
108 |
-
adobe flash pro cs6 classroom in a book pdf<br />
|
109 |
-
adobe flash pro cs6 ebook<br />
|
110 |
-
adobe flash pro cs6 templates<br />
|
111 |
-
adobe flash pro cs6 extensions<br />
|
112 |
-
adobe flash pro cs6 sprite sheet generator<br />
|
113 |
-
adobe flash pro cs6 export to html5<br />
|
114 |
-
adobe flash pro cs6 stage 3d targeting<br />
|
115 |
-
adobe flash pro cs6 air mobile simulation<br />
|
116 |
-
adobe flash pro cs6 prepackaged air application creation<br />
|
117 |
-
adobe flash pro cs6 wide platform and device support<br />
|
118 |
-
adobe flash pro cs6 actionscript 3.0 reference<br />
|
119 |
-
adobe flash pro cs6 bone tool tutorial<br />
|
120 |
-
adobe flash pro cs6 motion editor tutorial<br />
|
121 |
-
adobe flash pro cs6 motion presets tutorial<br />
|
122 |
-
adobe flash pro cs6 shape tween tutorial<br />
|
123 |
-
adobe flash pro cs6 classic tween tutorial<br />
|
124 |
-
adobe flash pro cs6 frame by frame animation tutorial<br />
|
125 |
-
adobe flash pro cs6 button tutorial<br />
|
126 |
-
adobe flash pro cs6 movie clip tutorial<br />
|
127 |
-
adobe flash pro cs6 symbol tutorial<br />
|
128 |
-
adobe flash pro cs6 text tool tutorial<br />
|
129 |
-
adobe flash pro cs6 mask layer tutorial<br />
|
130 |
-
adobe flash pro cs6 filters and blend modes tutorial</p>
|
131 |
-
<ul>
|
132 |
-
<li>Add layers: You can add layers by clicking on the New Layer button at the bottom of the Layer panel or by choosing Insert > Timeline > Layer.</li>
|
133 |
-
<li>Delete layers: You can delete layers by selecting them on the Layer panel and clicking on the Delete Layer button at the bottom of the Layer panel or by choosing Edit > Timeline > Remove Layer.</li>
|
134 |
-
<li>Rename layers: You can rename layers by double-clicking on their names on the Layer panel and typing a new name.</li>
|
135 |
-
<li>Lock layers: You can lock layers by clicking on the Lock icon next to their names on the Layer panel. This prevents you from accidentally modifying or selecting the objects on those layers.</li>
|
136 |
-
<li>Hide layers: You can hide layers by clicking on the Eye icon next to their names on the Layer panel. This makes the objects on those layers invisible on the stage and in the output.</li>
|
137 |
-
<li>Reorder layers: You can reorder layers by selecting them on the Layer panel and dragging them up or down to a new position.</li>
|
138 |
-
<li>Create layer folders: You can create layer folders by clicking on the New Folder button at the bottom of the Layer panel or by choosing Insert > Timeline > Folder. This helps you group related layers together and collapse or expand them as needed.</li>
|
139 |
-
<li>Create guide layers: You can create guide layers by right-clicking on a layer on the Layer panel and choosing Guide. This makes the objects on that layer visible only on the stage and not in the output. You can use guide layers to place reference images, sketches, or notes that help you design your animation.</li>
|
140 |
-
<li>Create mask layers: You can create mask layers by right-clicking on a layer on the Layer panel and choosing Mask. This makes the objects on that layer act as a mask that reveals or hides the objects on the layer below it. You can use mask layers to create effects such as spotlight, cutout, or transition.</li>
|
141 |
-
</ul>
|
142 |
-
<h2>Tweens</h2>
|
143 |
-
<p>Tweens are a type of animation that lets you create smooth transitions between two or more keyframes. You can create tweens by using the Motion Editor panel at the bottom of your screen. There are two types of tweens in Adobe Flash Pro CS6: motion tweens and classic tweens. Here are some differences between them:</p>
|
144 |
-
<table>
|
145 |
-
<tr><th>Motion tweens</th><th>Classic tweens</th></tr>
|
146 |
-
<tr><td>Use property keyframes to define changes in properties such as position, rotation, scale, color, and filters.</td><td>Use regular keyframes to define changes in properties such as position, rotation, scale, color, and filters.</td></tr>
|
147 |
-
<tr><td>Apply to symbols, text fields, groups, or instances.</td><td>Apply to symbols or instances only.</td></tr>
|
148 |
-
<tr><td>Use motion paths to define curved or complex paths for animation.</td><td>Use straight lines or shape hints to define curved or complex paths for animation.</td></tr>
|
149 |
-
<tr><td>Use eases to control the speed and acceleration of animation.</td><td>Use eases to control the speed and acceleration of animation.</td></tr>
|
150 |
-
<tr><td>Use 3D rotation and translation tools to create 3D effects.</td><td>Do not support 3D effects.</td></tr>
|
151 |
-
</table>
|
152 |
-
<h2>Symbols</h2>
|
153 |
-
<p>Symbols are reusable objects that you can create and store in the Library panel at the right side of your screen. You can use symbols to save time and reduce file size by reusing them in different parts of your animation. There are three types of symbols in Adobe Flash Pro CS6: graphic symbols, button symbols, and movie clip symbols. Here are some differences between them:</p>
|
154 |
-
<table>
|
155 |
-
<tr><th>Graphic symbols</th><th>Button symbols</th><th>Movie clip symbols</th></tr>
|
156 |
-
<tr><td>Contain static or animated graphics that play in sync with the main Timeline.</td><td>Contain graphics that change appearance based on user interaction such as mouse over, click, or release.</td><td>Contain graphics that play independently from li>Design your button states: You can design your button states by adding graphics or text to the four frames in the button symbol Timeline. The four frames correspond to the four states of the button: Up, Over, Down, and Hit. The Up state is how the button appears normally, the Over state is how the button appears when the mouse pointer is over it, the Down state is how the button appears when the mouse button is pressed, and the Hit state is the area that responds to mouse clicks. You can use any of the tools or symbols to create your button states.</li>
|
157 |
-
<li>Add actions to your button: You can add actions to your button by selecting a frame in the button symbol Timeline and opening the Actions panel by choosing Window > Actions or by pressing F9 on your keyboard. In the Actions panel, you can write your own ActionScript code or use code snippets to add functionality to your button. For example, you can use the following code snippet to make your button go to a specific frame and play: //This code snippet goes to a specific frame and plays //Assign this code snippet to a keyframe in a button symbol this.addEventListener(MouseEvent.CLICK, fl_ClickToGoToAndPlayFromFrame); function fl_ClickToGoToAndPlayFromFrame(event:MouseEvent):void gotoAndPlay(5); This code snippet will make your button go to frame 5 and play when it is clicked.</li>
|
158 |
-
<li>Test your button: You can test your button by choosing Control > Test Movie > In Flash Professional or by pressing Ctrl+Enter on your keyboard. This will open a new window where you can see how your button works and interacts with the user.</li>
|
159 |
-
</ol>
|
160 |
-
<h2>Using actions</h2>
|
161 |
-
<p>Actions are commands that control the behavior of your project. You can use actions to add logic, interactivity, and functionality to your project. You can write actions using ActionScript, a scripting language that is based on JavaScript. You can access actions by using the Actions panel at the right side of your screen. Here are some things you can do with actions:</p>
|
162 |
-
<ul>
|
163 |
-
<li>Add actions to frames: You can add actions to frames by selecting a frame on the main Timeline or on a symbol Timeline and opening the Actions panel by choosing Window > Actions or by pressing F9 on your keyboard. In the Actions panel, you can write your own ActionScript code or use code snippets to add functionality to your frame. For example, you can use the following code snippet to stop your project from playing: //This code snippet stops at this frame //Assign this code snippet to a keyframe stop(); This code snippet will make your project stop at the frame where it is placed.</li>
|
164 |
-
<li>Add actions to objects: You can add actions to objects by selecting an object on the stage or in the Library panel and opening the Actions panel by choosing Window > Actions or by pressing F9 on your keyboard. In the Actions panel, you can write your own ActionScript code or use code snippets to add functionality to your object. For example, you can use the following code snippet to make an object draggable: //This code snippet makes an object draggable //Assign this code snippet to an object this.addEventListener(MouseEvent.MOUSE_DOWN, fl_ClickToDrag); function fl_ClickToDrag(event:MouseEvent):void this.startDrag(); this.addEventListener(MouseEvent.MOUSE_UP, fl_ReleaseToDrop); function fl_ReleaseToDrop(event:MouseEvent):void this.stopDrag(); This code snippet will make your object draggable when it is clicked and dropped when it is released.</li>
|
165 |
-
<li>Add actions to events: You can add actions to events by using event listeners and event handlers. Event listeners are commands that listen for specific events such as mouse clicks, keyboard presses, or timer ticks. Event handlers are functions that execute when an event occurs. You can use event listeners and event handlers to trigger actions based on user input or other conditions. For example, you can use the following code snippet to play a sound when a timer event occurs: //This code snippet plays a sound when a timer event occurs //Assign this code snippet to a keyframe var myTimer:Timer = new Timer(1000); //create a timer that ticks every second var mySound:Sound = new Sound(new URLRequest("sound.mp3")); //create a sound object that loads a sound file myTimer.addEventListener(TimerEvent.TIMER, fl_TimerHandler); //add an event listener that listens for timer events function fl_TimerHandler(event:TimerEvent):void //define an event handler function that executes when a timer event occurs mySound.play(); //play the sound myTimer.start(); //start the timer This code snippet will play a sound every second using a timer.</li>
|
166 |
-
</ul>
|
167 |
-
<h1>Exporting and publishing your project with Adobe Flash Pro CS6</h1>
|
168 |
-
<p>After you have finished creating your project with Adobe Flash Pro CS6, you can export and publish it in different formats and platforms. You can export your project as a SWF file or an HTML5 canvas file that can be viewed in a web browser. You can also publish your project as an AIR application that can be installed and run on desktop or mobile devices. Here are some basic steps to help you export and publish your project with Adobe Flash Pro CS6:</p>
|
169 |
-
<h2>Exporting your project as a SWF file</h2>
|
170 |
-
<p>A SWF file is a compressed file format that contains your project content and code. You can export your project as a SWF file by following these steps:</p>
|
171 |
-
<ol>
|
172 |
-
<li>Choose File > Export > Export Movie from the main menu or press Ctrl+Alt+Shift+S on your keyboard.</li>
|
173 |
-
<li>In the Export Movie dialog box, choose a location and a name for your SWF file.</li>
|
174 |
-
<li>Click Save to export your project as a SWF file.</li>
|
175 |
-
</ol>
|
176 |
-
<p>You can view your SWF file in a web browser that has the Adobe Flash Player plugin installed. You can also embed your SWF file in an HTML page by using the Publish Settings dialog box.</p>
|
177 |
-
<h2>Exporting your project as an HTML5 canvas file</h2>
|
178 |
-
<p>An HTML5 canvas file is a JavaScript file that contains your project content and code converted to HTML5 standards. You can export your project as an HTML5 canvas file by following these steps:</p>
|
179 |
-
<ol>
|
180 |
-
<li>Choose File > Convert To > HTML5 Canvas from the main menu or press Ctrl+Alt+Shift+C on your keyboard.</li>
|
181 |
-
<li>In the Convert To HTML5 Canvas dialog box, click OK to confirm the conversion.</li>
|
182 |
-
<li>Choose File > Export > Export HTML5 Canvas from the main menu or press Ctrl+Alt+Shift+H on your keyboard.</li>
|
183 |
-
<li>In the Export HTML5 Canvas dialog box, choose a location and a name for your HTML5 canvas file.</li>
|
184 |
-
<li>Click Save to export your project as an HTML5 canvas file.</li>
|
185 |
-
</ol>
|
186 |
-
<p>You can view your HTML5 canvas file in a web browser that supports the HTML5 canvas element. You can also embed your HTML5 canvas file in an HTML page by using the Publish Settings dialog box.</p>
|
187 |
-
<h2>Publishing your project as an AIR application</h2>
|
188 |
-
<p>An AIR application is a standalone application that contains your project content and code packaged with the Adobe AIR runtime. You can publish your project as an AIR application by following these steps:</p>
|
189 |
-
<ol>
|
190 |
-
<li>Choose File > Publish Settings from the main menu or press Ctrl+Shift+F12 on your keyboard.</li>
|
191 |
-
<li>In the Publish Settings dialog box, select AIR for Desktop, AIR for Android, or AIR for iOS from the Formats tab depending on your target platform.</li>
|
192 |
-
<li>Click on the wrench icon next to the format name to open the AIR Settings dialog box.</li>
|
193 |
-
<li>In the AIR Settings dialog box, enter the details of your application such as name, version, description, icons, and certificates.</li>
|
194 |
-
<li>Click OK to close the AIR Settings dialog box.</li>
|
195 |
-
<li>Click Publish to publish your project as an AIR application.</li>
|
196 |
-
</ol>
|
197 |
-
<p>You can install and run your AIR application on desktop or mobile devices that have the Adobe AIR runtime installed. You can also distribute your AIR application through online stores or websites by using the appropriate installer files.</p>
|
198 |
-
<h1>Conclusion</h1>
|
199 |
-
<p>In this article, you learned how to use Adobe Flash Pro CS6 to create animation and multimedia content. You learned how to download, install, and use Adobe Flash Pro CS6 for free. You learned how to create and modify objects on the stage using various tools. You learned how to create animation using frames, keyframes, layers, tweens, and symbols. You learned how to add interactivity using code snippets, buttons, and actions. You learned how to export and publish your project in different formats and platforms. You also learned some useful resources and tips to help you master this software and create amazing projects.</p>
|
200 |
-
<h1>FAQs</h1>
|
201 |
-
<p>Here are some common questions and answers about Adobe Flash Pro CS6:</p>
|
202 |
-
<h2>Q: Is Adobe Flash Pro CS6 still supported by Adobe?</h2>
|
203 |
-
<p>A: No, Adobe Flash Pro CS6 is not supported by Adobe anymore. Adobe discontinued support for Flash Player on December 31, 2020. Adobe also recommends uninstalling Flash Player from your devices. However, you can still use Adobe Flash Pro CS6 to create content for other platforms such as HTML5 canvas or AIR applications.</p>
|
204 |
-
<h2>Q: How can I learn more about Adobe Flash Pro CS6?</h2>
|
205 |
-
<p>A: You can learn more about Adobe Flash Pro CS6 by visiting the official website, reading the user guide, watching the video tutorials or browsing the online forums. You can also take online courses or read books that teach you how to use Adobe Flash Pro CS6 for various purposes such as animation, game development, web design, and more.</p>
|
206 |
-
<h2>Q: What are some alternatives to Adobe Flash Pro CS6?</h2>
|
207 |
-
<p>A: Some alternatives to Adobe Flash Pro CS6 are:</p>
|
208 |
-
<ul>
|
209 |
-
<li>Animate CC: This is the successor of Adobe Flash Pro CS6 that lets you create animation and interactive content using HTML5 canvas, WebGL, AIR, and more. You can access it through a subscription to Adobe Creative Cloud.</li>
|
210 |
-
<li>Toon Boom Harmony: This is a software that lets you create animation and multimedia content for TV, film, games, and web. You can use it to draw, rig, animate, and export your projects in various formats. You can access it through a subscription or a perpetual license.</li>
|
211 |
-
<li>Blender: This is a free and open source software that lets you create 3D animation and multimedia content. You can use it to model, sculpt, texture, rig, animate, render, and export your projects in various formats. You can download it from the official website.</li>
|
212 |
-
</ul>
|
213 |
-
<h2>Q: How can I improve my skills in Adobe Flash Pro CS6?</h2>
|
214 |
-
<p>A: You can improve your skills in Adobe Flash Pro CS6 by practicing regularly, experimenting with different features and techniques, and seeking feedback from others. You can also join online communities and groups that share tips, tutorials, and projects related to Adobe Flash Pro CS6. You can also participate in challenges and contests that test your creativity and skills in Adobe Flash Pro CS6.</p>
|
215 |
-
<h2>Q: How can I troubleshoot problems in Adobe Flash Pro CS6?</h2>
|
216 |
-
<p>A: You can troubleshoot problems in Adobe Flash Pro CS6 by following these steps:</p>
|
217 |
-
<ol>
|
218 |
-
<li>Check the system requirements and compatibility of Adobe Flash Pro CS6 with your device and operating system.</li>
|
219 |
-
<li>Update Adobe Flash Pro CS6 to the latest version and install any patches or fixes available.</li>
|
220 |
-
<li>Check the settings and preferences of Adobe Flash Pro CS6 and make sure they are correct and appropriate for your project.</li>
|
221 |
-
<li>Check the code and syntax of your ActionScript code and make sure they are error-free and follow the best practices.</li>
|
222 |
-
<li>Check the output and performance of your project and make sure they are optimal and consistent across different platforms and browsers.</li>
|
223 |
-
<li>Search for solutions online or contact Adobe support if you encounter any issues or errors that you cannot resolve.</li>
|
224 |
-
</ol>
|
225 |
-
<p>I hope you found this article helpful and informative. Thank you for reading!</p> 197e85843d<br />
|
226 |
-
<br />
|
227 |
-
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Badminton League Mod APK with Unlimited Money and Coins.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Badminton League Mod Apk</h1>
|
3 |
-
<p>Badminton is a fun and exciting sport that can be played by anyone, anywhere. But if you want to take your badminton game to the next level, you might want to try Badminton League Mod Apk, a modified version of the popular badminton game app that gives you unlimited money, coins, and gems. With Badminton League Mod Apk, you can customize your character, unlock new rackets and outfits, upgrade your skills, and challenge your friends or other players online. In this article, we will show you how to download and install Badminton League Mod Apk on your Android device, as well as how to play it and some tips and tricks to win at badminton.</p>
|
4 |
-
<h2>how to download badminton league mod apk</h2><br /><p><b><b>Download Zip</b> ✅ <a href="https://urlin.us/2uST6j">https://urlin.us/2uST6j</a></b></p><br /><br />
|
5 |
-
<h2>What is Badminton League Mod Apk?</h2>
|
6 |
-
<p>Badminton League is a 3D badminton game app that lets you play badminton with realistic physics and graphics. You can create your own character, choose your racket and outfit, and compete in various modes, such as tournament, league, or 1v1. You can also play with your friends or other players online in real-time matches.</p>
|
7 |
-
<h3>Features of Badminton League Mod Apk</h3>
|
8 |
-
<p>Badminton League Mod Apk is a modified version of the original game that gives you some extra features that are not available in the official version. Some of these features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Unlimited money, coins, and gems. You can use these resources to buy new rackets, outfits, and skills for your character.</li>
|
11 |
-
<li>All rackets and outfits unlocked. You can choose from a variety of rackets and outfits that have different stats and effects.</li>
|
12 |
-
<li>All skills unlocked. You can upgrade your skills to improve your performance on the court.</li>
|
13 |
-
<li>No ads. You can enjoy the game without any interruptions or distractions.</li>
|
14 |
-
</ul>
|
15 |
-
<h3>Benefits of Badminton League Mod Apk</h3>
|
16 |
-
<p>Badminton League Mod Apk has some benefits that make it more enjoyable and fun than the original game. Some of these benefits are:</p>
|
17 |
-
<ul>
|
18 |
-
<li>You can customize your character according to your preferences and style.</li>
|
19 |
-
<li>You can access more rackets and outfits that can enhance your gameplay.</li>
|
20 |
-
<li>You can improve your skills faster and easier.</li>
|
21 |
-
<li>You can play with more confidence and challenge higher-level opponents.</li>
|
22 |
-
<li>You can save your time and money by not having to watch ads or make in-app purchases.</li>
|
23 |
-
</ul>
|
24 |
-
<h2>How to Download and Install Badminton League Mod Apk</h2>
|
25 |
-
<p>If you want to download and install Badminton League Mod Apk on your Android device, you need to follow these steps:</p>
|
26 |
-
<p>how to install badminton league mod apk on android<br />
|
27 |
-
badminton league mod apk unlimited money and gems<br />
|
28 |
-
badminton league hack mod apk download latest version<br />
|
29 |
-
how to get badminton league mod apk for free<br />
|
30 |
-
badminton league mod apk offline mode<br />
|
31 |
-
badminton league mod apk with unlimited coins and diamonds<br />
|
32 |
-
how to update badminton league mod apk<br />
|
33 |
-
badminton league mod apk no root required<br />
|
34 |
-
badminton league mod apk online multiplayer<br />
|
35 |
-
how to play badminton league mod apk on pc<br />
|
36 |
-
badminton league mod apk all characters unlocked<br />
|
37 |
-
badminton league mod apk unlimited everything<br />
|
38 |
-
badminton league mod apk download for android 2023<br />
|
39 |
-
how to uninstall badminton league mod apk<br />
|
40 |
-
badminton league mod apk with cheat menu<br />
|
41 |
-
how to backup badminton league mod apk data<br />
|
42 |
-
badminton league mod apk new features and modes<br />
|
43 |
-
badminton league mod apk without ads<br />
|
44 |
-
how to fix badminton league mod apk not working<br />
|
45 |
-
badminton league mod apk best settings and tips<br />
|
46 |
-
how to download badminton league original apk<br />
|
47 |
-
badminton league mod apk latest version 5.51.5081.0<br />
|
48 |
-
how to transfer badminton league mod apk to another device<br />
|
49 |
-
badminton league mod apk with custom skins and outfits<br />
|
50 |
-
how to join badminton league mod apk community<br />
|
51 |
-
badminton league mod apk vs real badminton game<br />
|
52 |
-
how to download badminton league mod apk from google play store<br />
|
53 |
-
badminton league mod apk with realistic physics and graphics<br />
|
54 |
-
how to hack badminton league game without mod apk<br />
|
55 |
-
badminton league mod apk with different courts and rackets<br />
|
56 |
-
how to download badminton league mod apk for ios<br />
|
57 |
-
badminton league mod apk with advanced controls and gameplay<br />
|
58 |
-
how to create your own character in badminton league mod apk<br />
|
59 |
-
badminton league mod apk with special skills and abilities<br />
|
60 |
-
how to challenge your friends in badminton league mod apk<br />
|
61 |
-
badminton league mod apk with tournaments and leagues<br />
|
62 |
-
how to download badminton league lite mod apk for low-end devices<br />
|
63 |
-
badminton league mod apk with fun mini-games and events<br />
|
64 |
-
how to earn rewards and bonuses in badminton league mod apk<br />
|
65 |
-
badminton league mod apk with voice chat and emojis</p>
|
66 |
-
<h3>Step 1: Enable Unknown Sources</h3>
|
67 |
-
<p>Since Badminton League Mod Apk is not available on the Google Play Store, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the official store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
|
68 |
-
<h3>Step 2: Download Badminton League Mod Apk File</h3>
|
69 |
-
<p>Next, you need to download the Badminton League Mod Apk file from a reliable source. You can use this link to download the latest version of the file. Make sure you have enough storage space on your device before downloading.</p>
|
70 |
-
<h3>Step 3: Install Badminton League Mod Apk File</h3>
|
71 |
-
<p>After downloading the file, you need to install it on your device. To do this, locate the file in your file manager and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on Install and wait for the process to finish.</p>
|
72 |
-
<h3>Step 4: Launch Badminton League Mod Apk and Enjoy</h3>
|
73 |
-
<p>Once the installation is done, you can launch the game from your app drawer or home screen. You will see a welcome screen with some instructions and options. You can choose your language, sign in with your Facebook account, or play as a guest. You can also adjust the sound and graphics settings. After that, you can start playing Badminton League Mod Apk and enjoy its features.</p>
|
74 |
-
<h2>How to Play Badminton League Mod Apk</h2>
|
75 |
-
<p>Badminton League Mod Apk is easy to play and has a simple interface. You can use the virtual joystick on the left side of the screen to move your character, and the buttons on the right side to hit the shuttlecock. You can also swipe on the screen to perform different shots, such as smash, drop, or lob. You can play in different modes, such as tournament, league, or 1v1. You can also play with your friends or other players online in real-time matches.</p>
|
76 |
-
<h3>Basic Rules and Scoring System</h3>
|
77 |
-
<p>The basic rules and scoring system of Badminton League Mod Apk are similar to the real badminton game. Here are some of them:</p>
|
78 |
-
<ul>
|
79 |
-
<li>A match consists of three games, each with 21 points. The first player to reach 21 points wins the game.</li>
|
80 |
-
<li>A point is scored when the shuttlecock lands inside the opponent's court or when the opponent commits a fault, such as hitting the shuttlecock out of bounds, into the net, or before it crosses the net.</li>
|
81 |
-
<li>The server must hit the shuttlecock from below the waist level and diagonally across the net. The receiver must stand inside the service court opposite to the server.</li>
|
82 |
-
<li>The server changes after each point. The player who wins a game serves first in the next game.</li>
|
83 |
-
<li>If the score is tied at 20-20, the game continues until one player has a two-point lead or reaches 30 points.</li>
|
84 |
-
</ul>
|
85 |
-
<h3>Tips and Tricks to Win at Badminton</h3>
|
86 |
-
<p>Badminton is a game of skill, speed, and strategy. Here are some tips and tricks to help you win at Badminton League Mod Apk:</p>
|
87 |
-
<ul>
|
88 |
-
<li>Choose your racket and outfit wisely. Different rackets and outfits have different stats and effects, such as power, speed, control, stamina, and luck. You can also upgrade your skills to improve your performance on the court.</li>
|
89 |
-
<li>Vary your shots and angles. Don't be predictable and use different shots and angles to confuse your opponent. For example, you can use smashes to attack, drops to deceive, lobs to defend, or drives to counterattack.</li>
|
90 |
-
<li>Use your coins and gems wisely. You can use your coins and gems to buy new rackets, outfits, and skills for your character. You can also use them to revive yourself if you lose a match or to get extra rewards.</li>
|
91 |
-
<li>Play with your friends or other players online. You can challenge your friends or other players online in real-time matches. This will help you improve your skills and have more fun.</li>
|
92 |
-
</ul>
|
93 |
-
<h2>Conclusion</h2>
|
94 |
-
<p>Badminton League Mod Apk is a great game for badminton lovers and casual gamers alike. It has realistic physics and graphics, various modes and features, and unlimited money, coins, and gems. You can download and install it easily on your Android device by following our guide above. You can also play it with your friends or other players online in real-time matches. If you want to enjoy badminton anytime, anywhere, Badminton League Mod Apk is the game for you.</p>
|
95 |
-
<h2>FAQs</h2>
|
96 |
-
<p>Here are some frequently asked questions about Badminton League Mod Apk:</p>
|
97 |
-
<table>
|
98 |
-
<tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
|
99 |
-
<tr><td>Is Badminton League Mod Apk safe to download and install?</td><td>Yes, Badminton League Mod Apk is safe to download and install as long as you use a reliable source like this link. However, you should always be careful when downloading apps from unknown sources and scan them for viruses before installing them.</td></tr>
|
100 |
-
<tr><td>Do I need an internet connection to play Badminton League Mod Apk?</td><td>No, you don't need an internet connection to play Badminton League Mod Apk in offline modes, such as tournament or league. However, you do need an internet connection to play online modes, such as 1v1 or friends.</td></tr>
|
101 |
-
<tr><td>Can I play Badminton League Mod Apk on other devices, such as PC or iOS?</td><td>No, Badminton League Mod Apk is only compatible with Android devices. However, you can use an Android emulator to play it on your PC or Mac. You can also use a VPN to bypass the regional restrictions and play it on your iOS device.</td></tr>
|
102 |
-
<tr><td>Will I get banned for using Badminton League Mod Apk?</td><td>No, you will not get banned for using Badminton League Mod Apk as long as you don't abuse its features or cheat in online matches. However, you should always use it at your own risk and discretion.</td></tr>
|
103 |
-
<tr><td>How can I update Badminton League Mod Apk?</td><td>You can update Badminton League Mod Apk by downloading and installing the latest version of the file from this link. You can also check for updates from the game settings or the app store.</td></tr>
|
104 |
-
</table>
|
105 |
-
<br />
|
106 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download 6th Tamil Book PDF for Free - Samacheer Kalvi New Syllabus 2021 to 2022.md
DELETED
@@ -1,100 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>6th Tamil Book PDF Download 2021 to 2022</h1>
|
3 |
-
<p>If you are a student of class 6 in Tamil Nadu, you might be looking for the 6th Tamil book PDF download 2021 to 2022. The 6th Tamil book is one of the essential textbooks that you need to study the Tamil language and literature. In this article, we will tell you everything you need to know about the 6th Tamil book PDF download, including its benefits, challenges, and how to do it online. Read on to find out more.</p>
|
4 |
-
<h2>6th tamil book pdf download 2021 to 2022</h2><br /><p><b><b>Download File</b> • <a href="https://jinyurl.com/2uNQJ4">https://jinyurl.com/2uNQJ4</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<h3>What is the 6th Tamil book?</h3>
|
7 |
-
<p>The 6th Tamil book is a textbook that covers the Tamil language and literature for class 6 students in Tamil Nadu. It is based on the new syllabus and curriculum that was announced by the Tamil Nadu State Council of Educational Research and Training (TNSCERT) for the academic year 2021 to 2022. The 6th Tamil book contains various chapters that teach you the basics of grammar, vocabulary, reading comprehension, writing skills, and literary appreciation. It also includes poems, stories, essays, and other forms of literature that showcase the rich culture and heritage of Tamil Nadu.</p>
|
8 |
-
<h3>Why do you need to download the 6th Tamil book PDF?</h3>
|
9 |
-
<p>You might be wondering why you need to download the 6th Tamil book PDF when you can get a printed copy from your school or bookstore. Well, there are many reasons why downloading the 6th Tamil book PDF can be beneficial for you. For instance, you might want to download the 6th Tamil book PDF if:</p>
|
10 |
-
<ul>
|
11 |
-
<li>You want to access the latest version of the textbook that is updated with the new syllabus and curriculum.</li>
|
12 |
-
<li>You want to study at your own pace and convenience without carrying a heavy book around.</li>
|
13 |
-
<li>You want to save money and paper by avoiding buying or printing a hard copy of the textbook.</li>
|
14 |
-
<li>You want to prepare for your exams and assessments by revising the chapters and practicing the exercises online.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>How to download the 6th Tamil book PDF online?</h3>
|
17 |
-
<p>Now that you know why you need to download the 6th Tamil book PDF, you might be wondering how to do it online. Well, there are many websites that offer free download of the 6th Tamil book PDF online. However, not all of them are reliable or safe. Therefore, you need to be careful and choose a trusted source for downloading the 6th Tamil book PDF online. Here are some of the steps that you can follow to download the 6th Tamil book PDF online:</p>
|
18 |
-
<p>6th standard tamil nadu revised text books 2020-2021 pdf<br />
|
19 |
-
6th new books free download samacheer kalvi term 1 2 3<br />
|
20 |
-
6th std new books tamil medium english medium all subjects<br />
|
21 |
-
tamilnadu 6th class new syllabus books online pdf<br />
|
22 |
-
tn school 6th samacheer kalvi textbooks term wise<br />
|
23 |
-
6th book new syllabus pdf 2022 - 2023 tn state board<br />
|
24 |
-
tamilnadu education board 6th standard books revised syllabus<br />
|
25 |
-
6th std new samacheer kalvi books pdf download online<br />
|
26 |
-
tnscert 6th new books feedback from teachers and parents<br />
|
27 |
-
tamil nadu 6th class new books for academic year 2022 - 2023<br />
|
28 |
-
how to download tamilnadu 6th new books online in pdf format<br />
|
29 |
-
tn 6th new books to be given to students through schools<br />
|
30 |
-
tamilnadu 6th std new books term i ii iii free download<br />
|
31 |
-
tamilnadu 6th new books for all subjects like tamil english maths science social science<br />
|
32 |
-
tn school ebooks are available for new syllabus 2022 - 2023<br />
|
33 |
-
tamilnadu sixth standard new samacheer kalvi books pdf tamil in medium & english medium<br />
|
34 |
-
tn school class 6 samacheer kalvi textbooks all terms subjects<br />
|
35 |
-
tamilnadu sixth std new books more practical type of learning lessons<br />
|
36 |
-
tamilnadu sixth class new syllabus books zip download link<br />
|
37 |
-
tn nic in tamilnadu 6th new books free download online<br />
|
38 |
-
korea institute of fusion energy tamilnadu 6th new books for term i ii iii<br />
|
39 |
-
tamilnadu sixth standard textbooks for all subjects in pdf format for free download<br />
|
40 |
-
padavelai tntet arts tamilnadu sixth standard new syllabus english medium book<br />
|
41 |
-
kalvinews in tamilnadu sixth standard revised text books download pdf<br />
|
42 |
-
tntextbooks in tamilnadu sixth std new books free download pdf online<br />
|
43 |
-
samacheer kalvi term wise textbooks for sixth standard students in tamil nadu<br />
|
44 |
-
tnscert prepared the new syllabus for the sixth standard and posted online<br />
|
45 |
-
sixth standard books are revised with the new syllabus for this academic year by tamilnadu education board<br />
|
46 |
-
sixth class students are expected to learn completely by understanding each chapters with practical examples<br />
|
47 |
-
sixth std new books might play an important role in tamilnadu students life<br />
|
48 |
-
how to get the sixth standard new books online from tnscert website<br />
|
49 |
-
where to find the latest version of sixth standard textbooks for all subjects in tamil nadu<br />
|
50 |
-
what are the changes in the chapters of sixth std new books compared to old books<br />
|
51 |
-
how to read the sixth class new books from computer or smart phones in pdf format<br />
|
52 |
-
what are the benefits of using the sixth std new samacheer kalvi books for learning<br />
|
53 |
-
how to print the sixth standard new books from online in pdf format for offline reading<br />
|
54 |
-
what are the feedbacks and reviews of the sixth class new syllabus books from students and teachers<br />
|
55 |
-
how to study effectively with the sixth std new books for all subjects in tamil nadu state board syllabus<br />
|
56 |
-
what are the best websites to download the sixth standard new books for free online in pdf format<br />
|
57 |
-
how to access the tn school ebooks for new syllabus 2022 - 2023 for sixth class students online</p>
|
58 |
-
<ol>
|
59 |
-
<li>Go to a reputable website that provides free download of the 6th Tamil book PDF online. For example, you can visit [Tamilnadu 6th New Books Free Download Samacheer Kalvi Term 1 2 3](^1^) or [6th Standard Books - PDF Download](^3^).</li>
|
60 |
-
<li>Select the term and medium that you want to download. For example, if you want to download the term I English medium book, click on "Term I" and then "English Medium".</li>
|
61 |
-
<li>Click on the subject that you want to download. For example, if you want to download the Tamil subject book, click on "Tamil".</li>
|
62 |
-
<li>A new page will open with a link to download the PDF file of the book. Click on "Download" or "Click Here" to start downloading the file.</li>
|
63 |
-
<li>Save the file on your device or cloud storage for future access. You can also open the file with a PDF reader or browser to view the contents of the book.</li>
|
64 |
-
</ol>
|
65 |
-
<h2>Benefits of downloading the 6th Tamil book PDF</h2>
|
66 |
-
<h3>Access the latest syllabus and curriculum</h3>
|
67 |
-
<p>One of the main benefits of downloading the 6th Tamil book PDF is that you can access the latest syllabus and curriculum that is prescribed by the TNSCERT for the academic year 2021 to 2022. This way, you can ensure that you are studying the most relevant and updated topics and concepts that are required for your class 6 education. You can also compare the old and new syllabus and curriculum to see what changes have been made and how they affect your learning outcomes.</p>
|
68 |
-
<h3>Study anytime and anywhere</h3>
|
69 |
-
<p>Another benefit of downloading the 6th Tamil book PDF is that you can study anytime and anywhere without depending on a physical copy of the book. You can access the book from your smartphone, tablet, laptop, or desktop and read it whenever you want. You can also study offline by saving the file on your device or cloud storage. This way, you can avoid missing out on any important lessons or assignments due to lack of availability or accessibility of the book.</p>
|
70 |
-
<h3>Save money and paper</h3>
|
71 |
-
<p>A third benefit of downloading the 6th Tamil book PDF is that you can save money and paper by avoiding buying or printing a hard copy of the book. You can download the book for free from various websites and use it as many times as you want. You can also share it with your friends or classmates who need it. By doing so, you can reduce the cost of education and also contribute to environmental conservation by saving paper and ink.</p>
|
72 |
-
<h3>Prepare for exams and assessments</h3>
|
73 |
-
<p>A fourth benefit of downloading the 6th Tamil book PDF is that you can prepare for your exams and assessments by revising the chapters and practicing the exercises online. You can easily access the book from your device or cloud storage and review the key points and summaries of each chapter. You can also test your knowledge and skills by solving the questions and activities given at the end of each chapter. You can also refer to other online resources such as sample papers, mock tests, previous year papers, etc. to enhance your preparation.</p>
|
74 |
-
<h2>Challenges of downloading the 6th Tamil book PDF</h2>
|
75 |
-
<h3>Internet connectivity and speed</h3>
|
76 |
-
<p>One of the main challenges of downloading the 6th Tamil book PDF is that you need a good internet connection and speed to do it online. If you have a slow or unstable internet connection, you might face difficulties in downloading the file or opening it with a PDF reader or browser. You might also experience interruptions or errors during the download process that might affect the quality or completeness of the file. Therefore, you need to ensure that you have a reliable and fast internet connection before downloading the 6th Tamil book PDF online.</p>
|
77 |
-
<h3>Storage space and compatibility</h3>
|
78 |
-
<p>Another challenge of downloading the 6th Tamil book PDF is that you need enough storage space and compatibility on your device or cloud storage to save and access the file. The 6th Tamil book PDF file might be large in size and require a lot of space on your device or cloud storage. If you have limited space or memory on your device or cloud storage, you might not be able to download or save the file properly. You might also need a compatible PDF reader or browser to open and view the file on your device or cloud storage. If you have an incompatible PDF reader or browser, you might not be able to see or read the contents of the file clearly.</p>
|
79 |
-
<h3>Quality and accuracy of the PDF files</h3>
|
80 |
-
<p>A third challenge of downloading the 6th Tamil book PDF is that you need to check the quality and accuracy of the PDF files that you download online. Not all websites that offer free download of the 6th Tamil book PDF online are trustworthy or authentic. Some of them might provide low-quality or inaccurate PDF files that might contain errors, omissions, or distortions. Some of them might also provide outdated or obsolete PDF files that might not match the latest syllabus and curriculum. Therefore, you need to verify the quality and accuracy of the PDF files that you download online by checking the source, date, and content of the files.</p>
|
81 |
-
<h3>Security and privacy issues</h3>
|
82 |
-
<p>A fourth challenge of downloading the 6th Tamil book PDF is that you need to be aware of the security and privacy issues that might arise from downloading the file online. Some websites that offer free download of the 6th Tamil book PDF online might not have proper security measures or encryption to protect the file from hackers, viruses, or malware. Some of them might also have hidden or malicious links or ads that might redirect you to unsafe or inappropriate sites or download unwanted or harmful software on your device or cloud storage. Therefore, you need to be careful and cautious when downloading the 6th Tamil book PDF online and avoid clicking on any suspicious or unknown links or ads.</p>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Downloading the 6th Tamil book PDF online can be a great way to study the Tamil language and literature for class 6 students in Tamil Nadu. It can help you access the latest syllabus and curriculum, study anytime and anywhere, save money and paper, and prepare for exams and assessments. However, it can also pose some challenges such as internet connectivity and speed, storage space and compatibility, quality and accuracy of the PDF files, and security and privacy issues. Therefore, you need to weigh the pros and cons of downloading the 6th Tamil book PDF online and choose a reliable and safe source for doing so. We hope this article has helped you understand everything you need to know about the 6th Tamil book PDF download 2021 to 2022.</p>
|
85 |
-
<h2>FAQs</h2>
|
86 |
-
<p>Here are some of the frequently asked questions about the 6th Tamil book PDF download 2021 to 2022:</p>
|
87 |
-
<ol>
|
88 |
-
<li>What is the official website for downloading the 6th Tamil book PDF online?</li>
|
89 |
-
<p>The official website for downloading the 6th Tamil book PDF online is [TNSCERT], which is the official portal of the Tamil Nadu State Council of Educational Research and Training. You can find all the textbooks for class 1 to 12 in various subjects and mediums on this website.</p>
|
90 |
-
<li>How can I download the 6th Tamil book PDF offline?</li>
|
91 |
-
<p>If you want to download the 6th Tamil book PDF offline, you can visit your nearest school or bookstore and ask for a CD or DVD that contains the PDF file of the book. You can also ask your teacher or friend who has already downloaded the file to share it with you via a pen drive or email.</p>
|
92 |
-
<li>How can I print the 6th Tamil book PDF?</li>
|
93 |
-
<p>If you want to print the 6th Tamil book PDF, you can open the file with a PDF reader or browser and select the print option from the menu. You can also adjust the settings such as page size, orientation, margins, etc. before printing. However, we recommend that you avoid printing the 6th Tamil book PDF as it might waste paper and ink and harm the environment. You can instead use the digital version of the book on your device or cloud storage.</p>
|
94 |
-
<li>How can I edit or annotate the 6th Tamil book PDF?</li>
|
95 |
-
<p>If you want to edit or annotate the 6th Tamil book PDF, you can use a PDF editor or annotator software or app that allows you to modify, highlight, comment, or draw on the PDF file. You can also use online tools such as [PDFescape] or [PDF Candy] that let you edit or annotate the PDF file for free.</p>
|
96 |
-
<li>How can I convert the 6th Tamil book PDF to other formats?</li>
|
97 |
-
<p>If you want to convert the 6th Tamil book PDF to other formats such as Word, Excel, PowerPoint, etc., you can use a PDF converter software or app that allows you to change the format of the PDF file. You can also use online tools such as [Smallpdf] or [ILovePDF] that let you convert the PDF file to other formats for free.</p>
|
98 |
-
</ol>
|
99 |
-
<br />
|
100 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Drive Through Highway Traffic with Stunning 3D Graphics in Traffic Racer Pro Car Games.md
DELETED
@@ -1,134 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Traffic Racer Pro: Car Games - A Review</h1>
|
3 |
-
<p>If you are looking for a thrilling and realistic car racing game that will keep you entertained for hours, you might want to check out <strong>Traffic Racer Pro</strong>. This game is a milestone in the genre of endless car racing games, where you can drive through highway traffic, upgrade and tune cars, participate in online races, and more. In this article, we will review the game and tell you everything you need to know about it.</p>
|
4 |
-
<h2>What is Traffic Racer Pro?</h2>
|
5 |
-
<p>Traffic Racer Pro is a car racing game developed by TOJGAMES, a studio that specializes in car racing games and driving simulators. The game was released in 2021 and has been downloaded by over 1 million players worldwide. The game features stunning 3D graphics, smooth and realistic car handling, extreme car driving in highway traffic, easy to learn and drive controls, 3D realistic car interior views, endless game mode, different locations and cars to choose from, realistic car controls, 40+ different cars to choose from, advanced car customization through paint, decals, wheels, etc., online multiplayer mode, career mode, and more.</p>
|
6 |
-
<h2>traffic racer pro car game download</h2><br /><p><b><b>Download File</b> 🗹 <a href="https://jinyurl.com/2uNJT3">https://jinyurl.com/2uNJT3</a></b></p><br /><br />
|
7 |
-
<h2>How to download and install Traffic Racer Pro?</h2>
|
8 |
-
<p>Traffic Racer Pro is available for both Android and iOS devices. You can download it from the <a href="(^1^)">Google Play Store</a> or the <a href="(^2^)">App Store</a> depending on your device. The game is free to play but it contains ads and in-app purchases that you can disable or buy if you want. The game requires an internet connection to play online multiplayer mode.</p>
|
9 |
-
<p>To install the game on your device, follow these steps:</p>
<ol>
<li>Open the Google Play Store or the App Store on your device and search for Traffic Racer Pro.</li>
<li>Tap on the game icon and then tap on the Install or Get button.</li>
<li>Wait for the game to download and install on your device.</li>
<li>Once the game is installed, tap on the Open or Play button to launch the game.</li>
<li>Enjoy playing Traffic Racer Pro!</li>
</ol>
|
10 |
-
<h2>How to play Traffic Racer Pro?</h2>
|
11 |
-
<p>Traffic Racer Pro is easy to play but hard to master. The game has two modes: endless and career. In endless mode, you can drive as long as you can without crashing into other cars or obstacles. In career mode, you can complete various missions and challenges to earn coins and cash. You can use the coins and cash to buy and upgrade new cars and locations.</p>
|
12 |
-
<p>The game has four control options: tilt, touch, steering wheel, and buttons. You can choose the one that suits you best from the settings menu. You can also adjust the sensitivity, sound, graphics, and language settings from there. To accelerate, you can either tap on the gas pedal or use the auto-acceleration option. To brake, you can tap on the brake pedal or use the auto-brake option. To steer, you can either tilt your device, touch the screen, use the steering wheel, or use the buttons.</p>
|
13 |
-
<p>The game has four camera views: first person, third person, hood, and rear. You can switch between them by tapping on the camera icon. You can also use the horn, headlights, and indicators by tapping on their icons. To perform a nitro boost, you can tap on the nitro icon or swipe up on the screen.</p>
|
14 |
-
<p>The game has different traffic scenarios: light, medium, heavy, and extreme. You can choose the one that you prefer from the main menu. The more traffic there is, the more difficult it is to drive but also the more points you get. You can also choose between different weather conditions: sunny, rainy, foggy, and snowy. The weather affects the visibility and handling of your car.</p>
|
15 |
-
<p>traffic racer pro extreme car driving tour<br />
|
16 |
-
traffic racer pro online multiplayer racing<br />
|
17 |
-
traffic racer pro car customization and tuning<br />
|
18 |
-
traffic racer pro 3D realistic car interior views<br />
|
19 |
-
traffic racer pro 40+ different cars to choose from<br />
|
20 |
-
traffic racer pro career mode with 50 levels<br />
|
21 |
-
traffic racer pro endless game mode with highway traffic<br />
|
22 |
-
traffic racer pro smooth and realistic car handling<br />
|
23 |
-
traffic racer pro stunning 3D graphics and sound effects<br />
|
24 |
-
traffic racer pro nitro boost and auto accelerate features<br />
|
25 |
-
traffic racer pro racing in car and racing limits<br />
|
26 |
-
traffic racer pro free car racing game for android<br />
|
27 |
-
traffic racer pro app store download for iOS devices<br />
|
28 |
-
traffic racer pro appbrain download for android devices<br />
|
29 |
-
traffic racer pro official website of TOJGAMES<br />
|
30 |
-
traffic racer pro tips and tricks for better performance<br />
|
31 |
-
traffic racer pro feedback and ratings from players<br />
|
32 |
-
traffic racer pro latest updates and news from developers<br />
|
33 |
-
traffic racer pro hypercars and supercars collection<br />
|
34 |
-
traffic racer pro muscle cars and classic cars collection<br />
|
35 |
-
traffic racer pro join free driving mode with friends<br />
|
36 |
-
traffic racer pro compete against real racers for the first place<br />
|
37 |
-
traffic racer pro create a unique look for your car with decals, stickers, etc.<br />
|
38 |
-
traffic racer pro upgrade cars engines, brakes, and max speed<br />
|
39 |
-
traffic racer pro different locations and environments to race in<br />
|
40 |
-
traffic racer pro realistic car controls with tilt or touch options<br />
|
41 |
-
traffic racer pro overtake cars closely to get bonus scores and cash<br />
|
42 |
-
traffic racer pro drive in the opposite direction in two-way mode for extra challenge<br />
|
43 |
-
traffic racer pro best car racing game of 2023<br />
|
44 |
-
traffic racer pro addictive and fun gameplay for all ages<br />
|
45 |
-
traffic racer pro how to install and play on your device<br />
|
46 |
-
traffic racer pro compare your scores with global leaderboards<br />
|
47 |
-
traffic racer pro new car customization system available now<br />
|
48 |
-
traffic racer pro participate in online races with other players<br />
|
49 |
-
traffic racer pro drive through highway traffic with lifelike physics<br />
|
50 |
-
traffic racer pro learn to drive fast and furious cars <br />
|
51 |
-
traffic racer pro enjoy the thrill of speed and adrenaline <br />
|
52 |
-
traffic racer pro become the king of the road and the racing world <br />
|
53 |
-
traffic racer pro challenge yourself with different game modes and difficulties <br />
|
54 |
-
traffic racer pro experience the most realistic car racing game ever</p>
|
55 |
-
<p>The game has different locations: city, desert, snow, forest, and night. You can unlock them by completing certain missions or by paying with coins or cash. Each location has its own scenery and challenges.</p>
|
56 |
-
<p>The game has different goals: distance, speed, time, overtaking, near miss, etc. You can see your current goal on the top of the screen. You can also see your speedometer, score multiplier, nitro meter, distance traveled, time elapsed, coins earned, and cash earned on the screen.</p>
|
57 |
-
<h2>What are the benefits of playing Traffic Racer Pro?</h2>
|
58 |
-
<p>Traffic Racer Pro is a fun and exciting game that will keep you hooked for hours. Here are some of the benefits of playing it:</p>
|
59 |
-
<ul>
|
60 |
-
<li>It improves your reflexes and hand-eye coordination as you have to react quickly and accurately to avoid crashing.</li>
|
61 |
-
<li>It enhances your concentration and focus as you have to pay attention to every detail on the road.</li>
|
62 |
-
<li>It stimulates your creativity and imagination as you can customize your car and explore different locations.</li>
|
63 |
-
<li>It provides you with a sense of achievement and satisfaction as you complete missions and challenges and unlock new cars and locations.</li>
|
64 |
-
<li>It entertains you with its stunning graphics, realistic sounds, and immersive gameplay.</li>
|
65 |
-
</ul> <h2>What are the drawbacks of playing Traffic Racer Pro?</h2>
|
66 |
-
<p>Traffic Racer Pro is not a perfect game and it has some drawbacks that you should be aware of. Here are some of the drawbacks of playing it:</p>
|
67 |
-
<ul>
|
68 |
-
<li>It contains ads that can be annoying and distracting. You can remove them by paying a small fee or by turning off your internet connection.</li>
|
69 |
-
<li>It offers in-app purchases that can give you an advantage over other players. You can buy coins, cash, cars, locations, and more with real money. However, you can also earn them by playing the game or by watching videos.</li>
|
70 |
-
<li>It drains your battery quickly as it uses a lot of resources and power. You should play the game with a charger nearby or with a low battery mode on.</li>
|
71 |
-
<li>It can cause eye strain and headaches as it has bright colors and fast movements. You should play the game in a well-lit room and with a comfortable distance from the screen.</li>
|
72 |
-
<li>It can be addictive and interfere with your daily life. You should play the game in moderation and with a timer on. You should also take breaks and do other activities.</li>
|
73 |
-
</ul>
|
74 |
-
<h2>How to customize your car in Traffic Racer Pro?</h2>
|
75 |
-
<p>Traffic Racer Pro allows you to customize your car in various ways. You can change the appearance and performance of your car to suit your preferences and style. Here are some of the options for customizing your car:</p>
|
76 |
-
<ul>
|
77 |
-
<li>You can paint your car with different colors and patterns. You can also add decals, stickers, logos, and numbers to your car.</li>
|
78 |
-
<li>You can change the wheels of your car with different designs and sizes. You can also adjust the tire pressure and suspension of your car.</li>
|
79 |
-
<li>You can upgrade the engine, transmission, brakes, turbo, and nitro of your car to improve its speed, acceleration, handling, and braking.</li>
|
80 |
-
<li>You can tune your car with different settings such as gear ratio, steering angle, camber angle, and more.</li>
|
81 |
-
</ul>
|
82 |
-
<p>To customize your car, you need to go to the garage menu and select the car that you want to modify. Then, you can choose from the different tabs such as paint, wheels, upgrade, and tune. You can see the changes in real time on the screen. You can also test drive your car before saving the changes.</p> <h2>How to join online multiplayer mode in Traffic Racer Pro?</h2>
|
83 |
-
<p>Traffic Racer Pro also has an online multiplayer mode where you can race against other players from around the world. You can join the online multiplayer mode by tapping on the multiplayer icon on the main menu. You can choose between two modes: quick race and tournament.</p>
|
84 |
-
<p>In quick race mode, you can join a random race with up to 8 players. You can choose the traffic scenario, weather condition, and location of the race. You can also see the names, ranks, and cars of the other players. The race lasts for 3 minutes and the player with the highest score wins.</p>
|
85 |
-
<p>In tournament mode, you can join a seasonal competition with up to 100 players. You can choose the traffic scenario, weather condition, and location of the race. You can also see the names, ranks, and cars of the other players. The race lasts for 5 minutes and the player with the highest score wins. You can also earn trophies and rewards based on your performance.</p>
|
86 |
-
<p>To play online multiplayer mode, you need to have a stable internet connection and a registered account. You can create an account by tapping on the profile icon on the main menu and entering your username, email, and password. You can also log in with your Facebook or Google account.</p>
|
87 |
-
<h2>How to earn coins and cash in Traffic Racer Pro?</h2>
|
88 |
-
<p>Coins and cash are the two currencies in Traffic Racer Pro. You can use them to buy and upgrade new cars and locations. You can earn coins and cash by playing the game or by watching videos. Here are some of the ways to earn coins and cash in Traffic Racer Pro:</p>
|
89 |
-
<ul>
|
90 |
-
<li>You can earn coins by driving in traffic, overtaking other cars, near missing other cars, driving in opposite direction, driving fast, driving long distance, completing missions, winning races, etc.</li>
|
91 |
-
<li>You can earn cash by completing achievements, leveling up, winning tournaments, etc.</li>
|
92 |
-
<li>You can earn both coins and cash by watching videos that appear on the screen from time to time.</li>
|
93 |
-
<li>You can also buy coins and cash with real money by tapping on the shop icon on the main menu.</li>
|
94 |
-
</ul>
|
95 |
-
<h2>How to unlock new cars and locations in Traffic Racer Pro?</h2>
|
96 |
-
<p>Traffic Racer Pro has 40+ different cars and 5 different locations that you can unlock by playing the game. Each car and location has its own price, specifications, and requirements. Here are some of the ways to unlock new cars and locations in Traffic Racer Pro:</p>
|
97 |
-
<ul>
|
98 |
-
<li>You can unlock new cars by paying with coins or cash. You can see the price of each car on the garage menu. Some cars also require you to reach a certain level or complete a certain mission before you can buy them.</li>
|
99 |
-
<li>You can unlock new locations by paying with coins or cash. You can see the price of each location on the main menu. Some locations also require you to reach a certain level or complete a certain mission before you can access them.</li>
|
100 |
-
<li>You can also unlock new cars and locations by winning them as rewards in tournaments or as gifts in special events.</li>
|
101 |
-
</ul> <h2>How to contact the developers of Traffic Racer Pro?</h2>
|
102 |
-
<p>If you have any questions, suggestions, feedback, or issues with Traffic Racer Pro, you can contact the developers of the game by using the following methods:</p>
|
103 |
-
<ul>
|
104 |
-
<li>You can send an email to <a href="mailto:[email protected]">[email protected]</a> and they will reply to you as soon as possible.</li>
|
105 |
-
<li>You can visit their website at <a href="https://www.tojgames.com">https://www.tojgames.com</a> and fill out the contact form or read the FAQ section.</li>
|
106 |
-
<li>You can follow them on social media platforms such as Facebook, Twitter, Instagram, and YouTube and leave a comment or message them directly.</li>
|
107 |
-
<li>You can rate and review the game on the Google Play Store or the App Store and share your opinion and experience with other players.</li>
|
108 |
-
</ul>
|
109 |
-
<p>The developers of Traffic Racer Pro are very responsive and friendly and they appreciate your feedback and support. They are constantly working on improving the game and adding new features and content.</p>
|
110 |
-
<h2>What are some alternatives to Traffic Racer Pro?</h2>
|
111 |
-
<p>If you enjoy playing Traffic Racer Pro, you might also like some other car racing games that are similar or different in some aspects. Here are some of the alternatives to Traffic Racer Pro that you can try:</p>
|
112 |
-
<ul>
|
113 |
-
<li><strong>Traffic Rider</strong>: This is another game by TOJGAMES that is similar to Traffic Racer Pro but with motorcycles instead of cars. You can ride your bike through traffic, perform wheelies, upgrade and buy new bikes, and more.</li>
|
114 |
-
<li><strong>Asphalt 9: Legends</strong>: This is a popular game by Gameloft that is different from Traffic Racer Pro in terms of graphics, gameplay, and features. You can race with supercars, perform stunts, customize your car, join clubs, and more.</li>
|
115 |
-
<li><strong>Real Racing 3</strong>: This is a realistic game by Electronic Arts that is different from Traffic Racer Pro in terms of physics, simulation, and modes. You can race with real cars, tracks, and events, compete with other players online, and more.</li>
|
116 |
-
<li><strong>Need for Speed: No Limits</strong>: This is an action-packed game by Electronic Arts that is different from Traffic Racer Pro in terms of story, missions, and challenges. You can race with street cars, evade cops, build your reputation, and more.</li>
|
117 |
-
<li><strong>Crazy for Speed 2</strong>: This is a fun game by MAGIC SEVEN that is similar to Traffic Racer Pro but with more variety and excitement. You can race with different cars, locations, and modes, perform drifts, collect coins, and more.</li>
|
118 |
-
</ul>
|
119 |
-
<h2>Conclusion</h2>
|
120 |
-
<p>Traffic Racer Pro is a car racing game that will give you an adrenaline rush and a sense of adventure. You can drive through highway traffic, upgrade and tune cars, participate in online races, and more. The game has stunning 3D graphics, smooth and realistic car handling, easy to learn and drive controls, 3D realistic car interior views, an endless game mode, different locations to explore, 40+ different cars to choose from, advanced car customization through paint, decals, wheels, and more, plus online multiplayer and career modes.</p>
|
121 |
-
<p>If you are looking for a thrilling and realistic car racing game that will keep you entertained for hours, you should download Traffic Racer Pro today. You will not regret it!</p>
|
122 |
-
<h3>FAQs</h3>
|
123 |
-
<h4>Q1: Is Traffic Racer Pro free to play?</h4>
|
124 |
-
<p>A1: Yes, Traffic Racer Pro is free to play, but it contains ads and in-app purchases that you can disable or buy if you want.</p>
|
125 |
-
<h4>Q2: Is Traffic Racer Pro compatible with my device?</h4>
|
126 |
-
<p>A2: It depends on your device's specifications and operating system. The game requires Android 4.4 or higher or iOS 9.0 or higher to run smoothly.</p>
|
127 |
-
<h4>Q3: Is Traffic Racer Pro safe to download?</h4>
|
128 |
-
<p>A3: Yes, but make sure you download it from a trusted source such as the Google Play Store or the App Store. Do not download it from unknown or suspicious websites or links.</p>
|
129 |
-
<h4>Q4: Is Traffic Racer Pro updated regularly?</h4>
|
130 |
-
<p>A4: Yes, the developers are constantly working on improving the game and adding new features and content. You can check the latest updates on the game's page on the Google Play Store or the App Store, or on their social media accounts.</p>
|
131 |
-
<h4>Q5: Is Traffic Racer Pro addictive?</h4>
|
132 |
-
<p>A5: It can be, so play responsibly and take breaks. The game is very fun and exciting, but it can also be distracting and time-consuming. Do not let the game interfere with your daily life, work, or studies.</p>
|
133 |
-
<br />
|
134 |
-
<br />
|
spaces/4RiZ4/stabilityai-stable-diffusion-2/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/stabilityai/stable-diffusion-2").launch()
|
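Note: the deleted app.py above is a two-line Space that loads the hosted stabilityai/stable-diffusion-2 model through Gradio's Interface.load shortcut and launches it. A minimal sketch of the same Space written out in separate steps is shown below; the added request queue is an illustrative assumption (useful for slow image generations), not something the original file configured.

    import gradio as gr

    # Load the hosted model as a ready-made demo (the same shortcut the
    # deleted app.py used), then enable a request queue before launching.
    # The queue() call is an illustrative addition, not part of the original Space.
    demo = gr.Interface.load("models/stabilityai/stable-diffusion-2")
    demo.queue()
    demo.launch()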
spaces/4Taps/SadTalker/src/facerender/animate.py
DELETED
@@ -1,182 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import cv2
|
3 |
-
import yaml
|
4 |
-
import numpy as np
|
5 |
-
import warnings
|
6 |
-
from skimage import img_as_ubyte
|
7 |
-
warnings.filterwarnings('ignore')
|
8 |
-
|
9 |
-
import imageio
|
10 |
-
import torch
|
11 |
-
|
12 |
-
from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
|
13 |
-
from src.facerender.modules.mapping import MappingNet
|
14 |
-
from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
|
15 |
-
from src.facerender.modules.make_animation import make_animation
|
16 |
-
|
17 |
-
from pydub import AudioSegment
|
18 |
-
from src.utils.face_enhancer import enhancer as face_enhancer
|
19 |
-
|
20 |
-
|
21 |
-
class AnimateFromCoeff():
|
22 |
-
|
23 |
-
def __init__(self, free_view_checkpoint, mapping_checkpoint,
|
24 |
-
config_path, device):
|
25 |
-
|
26 |
-
with open(config_path) as f:
|
27 |
-
config = yaml.safe_load(f)
|
28 |
-
|
29 |
-
generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
|
30 |
-
**config['model_params']['common_params'])
|
31 |
-
kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
|
32 |
-
**config['model_params']['common_params'])
|
33 |
-
mapping = MappingNet(**config['model_params']['mapping_params'])
|
34 |
-
|
35 |
-
|
36 |
-
generator.to(device)
|
37 |
-
kp_extractor.to(device)
|
38 |
-
mapping.to(device)
|
39 |
-
for param in generator.parameters():
|
40 |
-
param.requires_grad = False
|
41 |
-
for param in kp_extractor.parameters():
|
42 |
-
param.requires_grad = False
|
43 |
-
for param in mapping.parameters():
|
44 |
-
param.requires_grad = False
|
45 |
-
|
46 |
-
if free_view_checkpoint is not None:
|
47 |
-
self.load_cpk_facevid2vid(free_view_checkpoint, kp_detector=kp_extractor, generator=generator)
|
48 |
-
else:
|
49 |
-
raise AttributeError("Checkpoint should be specified for video head pose estimator.")
|
50 |
-
|
51 |
-
if mapping_checkpoint is not None:
|
52 |
-
self.load_cpk_mapping(mapping_checkpoint, mapping=mapping)
|
53 |
-
else:
|
54 |
-
raise AttributeError("Checkpoint should be specified for video head pose estimator.")
|
55 |
-
|
56 |
-
self.kp_extractor = kp_extractor
|
57 |
-
self.generator = generator
|
58 |
-
self.mapping = mapping
|
59 |
-
|
60 |
-
self.kp_extractor.eval()
|
61 |
-
self.generator.eval()
|
62 |
-
self.mapping.eval()
|
63 |
-
|
64 |
-
self.device = device
|
65 |
-
|
66 |
-
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
|
67 |
-
kp_detector=None, he_estimator=None, optimizer_generator=None,
|
68 |
-
optimizer_discriminator=None, optimizer_kp_detector=None,
|
69 |
-
optimizer_he_estimator=None, device="cpu"):
|
70 |
-
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
|
71 |
-
if generator is not None:
|
72 |
-
generator.load_state_dict(checkpoint['generator'])
|
73 |
-
if kp_detector is not None:
|
74 |
-
kp_detector.load_state_dict(checkpoint['kp_detector'])
|
75 |
-
if he_estimator is not None:
|
76 |
-
he_estimator.load_state_dict(checkpoint['he_estimator'])
|
77 |
-
if discriminator is not None:
|
78 |
-
try:
|
79 |
-
discriminator.load_state_dict(checkpoint['discriminator'])
|
80 |
-
except:
|
81 |
-
print ('No discriminator in the state-dict. Discriminator will be randomly initialized')
|
82 |
-
if optimizer_generator is not None:
|
83 |
-
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
|
84 |
-
if optimizer_discriminator is not None:
|
85 |
-
try:
|
86 |
-
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
|
87 |
-
except RuntimeError as e:
|
88 |
-
print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')
|
89 |
-
if optimizer_kp_detector is not None:
|
90 |
-
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
|
91 |
-
if optimizer_he_estimator is not None:
|
92 |
-
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
|
93 |
-
|
94 |
-
return checkpoint['epoch']
|
95 |
-
|
96 |
-
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
|
97 |
-
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
|
98 |
-
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
|
99 |
-
if mapping is not None:
|
100 |
-
mapping.load_state_dict(checkpoint['mapping'])
|
101 |
-
if discriminator is not None:
|
102 |
-
discriminator.load_state_dict(checkpoint['discriminator'])
|
103 |
-
if optimizer_mapping is not None:
|
104 |
-
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
|
105 |
-
if optimizer_discriminator is not None:
|
106 |
-
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
|
107 |
-
|
108 |
-
return checkpoint['epoch']
|
109 |
-
|
110 |
-
def generate(self, x, video_save_dir, enhancer=None, original_size=None):
|
111 |
-
|
112 |
-
source_image=x['source_image'].type(torch.FloatTensor)
|
113 |
-
source_semantics=x['source_semantics'].type(torch.FloatTensor)
|
114 |
-
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
|
115 |
-
yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
|
116 |
-
pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
|
117 |
-
roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
|
118 |
-
source_image=source_image.to(self.device)
|
119 |
-
source_semantics=source_semantics.to(self.device)
|
120 |
-
target_semantics=target_semantics.to(self.device)
|
121 |
-
yaw_c_seq = x['yaw_c_seq'].to(self.device)
|
122 |
-
pitch_c_seq = x['pitch_c_seq'].to(self.device)
|
123 |
-
roll_c_seq = x['roll_c_seq'].to(self.device)
|
124 |
-
|
125 |
-
frame_num = x['frame_num']
|
126 |
-
|
127 |
-
predictions_video = make_animation(source_image, source_semantics, target_semantics,
|
128 |
-
self.generator, self.kp_extractor, self.mapping,
|
129 |
-
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True,)
|
130 |
-
|
131 |
-
predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
|
132 |
-
predictions_video = predictions_video[:frame_num]
|
133 |
-
|
134 |
-
video = []
|
135 |
-
for idx in range(predictions_video.shape[0]):
|
136 |
-
image = predictions_video[idx]
|
137 |
-
image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
|
138 |
-
video.append(image)
|
139 |
-
result = img_as_ubyte(video)
|
140 |
-
|
141 |
-
### the generated video is 256x256, so we keep the aspect ratio,
|
142 |
-
if original_size:
|
143 |
-
result = [ cv2.resize(result_i,(256, int(256.0 * original_size[1]/original_size[0]) )) for result_i in result ]
|
144 |
-
|
145 |
-
video_name = x['video_name'] + '.mp4'
|
146 |
-
path = os.path.join(video_save_dir, 'temp_'+video_name)
|
147 |
-
imageio.mimsave(path, result, fps=float(25))
|
148 |
-
|
149 |
-
if enhancer:
|
150 |
-
video_name_enhancer = x['video_name'] + '_enhanced.mp4'
|
151 |
-
av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer)
|
152 |
-
enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)
|
153 |
-
enhanced_images = face_enhancer(result, method=enhancer)
|
154 |
-
|
155 |
-
if original_size:
|
156 |
-
enhanced_images = [ cv2.resize(result_i,(256, int(256.0 * original_size[1]/original_size[0]) )) for result_i in enhanced_images ]
|
157 |
-
|
158 |
-
imageio.mimsave(enhanced_path, enhanced_images, fps=float(25))
|
159 |
-
|
160 |
-
av_path = os.path.join(video_save_dir, video_name)
|
161 |
-
audio_path = x['audio_path']
|
162 |
-
audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
|
163 |
-
new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
|
164 |
-
start_time = 0
|
165 |
-
sound = AudioSegment.from_mp3(audio_path)
|
166 |
-
frames = frame_num
|
167 |
-
end_time = start_time + frames*1/25*1000
|
168 |
-
word1=sound.set_frame_rate(16000)
|
169 |
-
word = word1[start_time:end_time]
|
170 |
-
word.export(new_audio_path, format="wav")
|
171 |
-
|
172 |
-
cmd = r'ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (path, new_audio_path, av_path)
|
173 |
-
os.system(cmd)
|
174 |
-
|
175 |
-
if enhancer:
|
176 |
-
cmd = r'ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (enhanced_path, new_audio_path, av_path_enhancer)
|
177 |
-
os.system(cmd)
|
178 |
-
os.remove(enhanced_path)
|
179 |
-
|
180 |
-
os.remove(path)
|
181 |
-
os.remove(new_audio_path)
|
182 |
-
|
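Note: the tail of generate() above is where the silent rendered clip gets its soundtrack: pydub trims the driving audio to frame_num/25 seconds at 16 kHz, and ffmpeg muxes it in while copying the video stream. A standalone sketch of that step is below, assuming the same 25 fps convention; the helper name and paths are illustrative, not part of the deleted module.

    import os
    from pydub import AudioSegment

    def mux_audio(video_path, audio_path, out_path, frame_num, fps=25):
        # Trim the driving audio to exactly frame_num / fps seconds,
        # resampled to 16 kHz as the deleted generate() method did.
        clip = AudioSegment.from_file(audio_path).set_frame_rate(16000)
        clip = clip[:int(frame_num / fps * 1000)]  # pydub slices in milliseconds
        wav_path = os.path.splitext(out_path)[0] + ".wav"
        clip.export(wav_path, format="wav")

        # Copy the video stream untouched and attach the trimmed audio track.
        os.system('ffmpeg -y -i "%s" -i "%s" -vcodec copy "%s"' % (video_path, wav_path, out_path))
        os.remove(wav_path)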
spaces/801artistry/RVC801/infer/modules/vc/__init__.py
DELETED
File without changes
|
spaces/AICODER009/food_detection/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Food Detection
|
3 |
-
emoji: 📉
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.29.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
spaces/AIWaves/SOP_Generation-single/Component/ExtraComponent.py
DELETED
@@ -1,128 +0,0 @@
-from .ToolComponent import ToolComponent
-import json
-from utils import flatten_dict,get_embedding,matching_category,search_with_api,limit_keys,limit_values
-import os
-
-
-class CategoryRequirementsComponent(ToolComponent):
-    def __init__(self, information_path):
-        super().__init__()
-        self.information_dataset = []
-        self.leaf_name = []
-        for toy_path in information_path:
-            with open(toy_path, encoding="utf-8") as json_file:
-                data = json.load(json_file)
-                for d in data:
-                    if "/" in d["cat_leaf_name"]:
-                        leaf_names = d["cat_leaf_name"].split("/") + [d["cat_leaf_name"]]
-                    else:
-                        leaf_names = [d["cat_leaf_name"]]
-                    for name in leaf_names:
-                        self.leaf_name.append(name)
-                        new_d = d.copy()
-                        new_d["cat_leaf_name"] = name
-                        new_d["information"] = flatten_dict(new_d["information"])
-                        self.information_dataset.append(new_d)
-
-        self.target_embbeding = get_embedding(
-            self.leaf_name
-        )
-
-    def search_information(self, category, information_dataset):
-        knowledge = {}
-        for d in information_dataset:
-            if category == d["cat_leaf_name"]:
-                knowledge = d["information"]
-                knowledge = {
-                    key: value
-                    for key, value in knowledge.items()
-                    if (value and key != "相关分类")
-                }
-                break
-        return knowledge
-
-    def func(self, agent):
-        prompt = ""
-        messages = agent.long_term_memory
-        outputdict = {}
-        functions = [
-            {
-                "name": "search_information",
-                "description": "根据用户所需要购买商品的种类跟用户的需求去寻找用户所需要的商品",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "category": {
-                            "type": "string",
-                            "description": "用户现在所需要的商品类别,比如纸尿布,笔记本电脑等,注意,只能有一个",
-                        },
-                        "requirements": {
-                            "type": "string",
-                            "description": "用户现在的需求,比如说便宜,安踏品牌等等,可以有多个需求,中间以“ ”分隔",
-                        },
-                    },
-                    "required": ["category", "requirements"],
-                },
-            }
-        ]
-
-        response = agent.LLM.get_response(
-            messages,
-            None,
-            None,
-            functions=functions,
-            stream=False,
-            function_call={"name": "search_information"},
-        )
-        response_message = json.loads(response["function_call"]["arguments"])
-        category = (
-            response_message["category"] if response_message["category"] else None
-        )
-        requirements = (
-            response_message["requirements"]
-            if response_message["requirements"]
-            else category
-        )
-        if not (category or requirements):
-            return {}
-
-        topk_result = matching_category(
-            category, self.leaf_name, None, self.target_embbeding, top_k=3
-        )
-
-        top1_score = topk_result[1][0]
-        request_items, top_category = search_with_api(requirements, category)
-
-
-        MIN_CATEGORY_SIM = eval(os.environ["MIN_CATEGORY_SIM"]
-                                ) if "MIN_CATEGORY_SIM" in os.environ else 0.7
-
-        if top1_score > MIN_CATEGORY_SIM:
-            agent.environment.shared_memory["category"] = topk_result[0][0]
-            category = topk_result[0][0]
-            information = self.search_information(
-                topk_result[0][0], self.information_dataset
-            )
-            information = limit_keys(information, 3)
-            information = limit_values(information, 2)
-            prompt += f"""你需要知道的是:用户目前选择的商品是{category},该商品信息为{information}。你需要根据这些商品信息来详细介绍商品,比如详细介绍商品有哪些品牌,有哪些分类等等,并且询问用户是否有更多的需求。"""
-            if category in top_category:
-                top_category.remove(category)
-
-            recommend = "\n经过搜索后,推荐商品如下:\n"
-            prompt += "筛选出的商品如下:\n"
-
-            for i, request_item in enumerate(request_items):
-
-                itemTitle = request_item["itemTitle"]
-                itemPrice = request_item["itemPrice"]
-                itemPicUrl = request_item["itemPicUrl"]
-                recommend += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]({itemPicUrl})\n"
-                prompt += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]\n"
-            outputdict["recommend"] = recommend
-            print(recommend)
-        else:
-            prompt += f"""你需要知道的是:用户目前选择的商品是{category},而我们店里没有这类商品,但是我们店里有一些近似商品,如{top_category},{topk_result[0][0]},你需要对这些近似商品进行介绍,并引导用户购买"""
-        outputdict["prompt"] = prompt
-        return outputdict
-
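A small detail in the removed component above: the category-similarity threshold is read from the MIN_CATEGORY_SIM environment variable via eval, defaulting to 0.7. A hedged sketch of the same override using float instead of eval (the helper name is hypothetical):

```python
import os

def min_category_sim(default=0.7):
    # Read the similarity threshold from the environment, falling back to the default.
    # float() avoids evaluating arbitrary expressions, unlike the eval() in the removed code.
    return float(os.environ.get("MIN_CATEGORY_SIM", default))
```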
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py
DELETED
@@ -1,79 +0,0 @@
-_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
-
-# ========================modified parameters======================
-deepen_factor = 0.67
-widen_factor = 0.75
-lr_factor = 0.1
-affine_scale = 0.9
-loss_cls_weight = 0.3
-loss_obj_weight = 0.7
-mixup_prob = 0.1
-
-# =======================Unmodified in most cases==================
-num_classes = _base_.num_classes
-num_det_layers = _base_.num_det_layers
-img_scale = _base_.img_scale
-
-model = dict(
-    backbone=dict(
-        deepen_factor=deepen_factor,
-        widen_factor=widen_factor,
-    ),
-    neck=dict(
-        deepen_factor=deepen_factor,
-        widen_factor=widen_factor,
-    ),
-    bbox_head=dict(
-        head_module=dict(widen_factor=widen_factor),
-        loss_cls=dict(loss_weight=loss_cls_weight *
-                      (num_classes / 80 * 3 / num_det_layers)),
-        loss_obj=dict(loss_weight=loss_obj_weight *
-                      ((img_scale[0] / 640)**2 * 3 / num_det_layers))))
-
-pre_transform = _base_.pre_transform
-albu_train_transforms = _base_.albu_train_transforms
-
-mosaic_affine_pipeline = [
-    dict(
-        type='Mosaic',
-        img_scale=img_scale,
-        pad_val=114.0,
-        pre_transform=pre_transform),
-    dict(
-        type='YOLOv5RandomAffine',
-        max_rotate_degree=0.0,
-        max_shear_degree=0.0,
-        scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
-        # img_scale is (width, height)
-        border=(-img_scale[0] // 2, -img_scale[1] // 2),
-        border_val=(114, 114, 114))
-]
-
-# enable mixup
-train_pipeline = [
-    *pre_transform, *mosaic_affine_pipeline,
-    dict(
-        type='YOLOv5MixUp',
-        prob=mixup_prob,
-        pre_transform=[*pre_transform, *mosaic_affine_pipeline]),
-    dict(
-        type='mmdet.Albu',
-        transforms=albu_train_transforms,
-        bbox_params=dict(
-            type='BboxParams',
-            format='pascal_voc',
-            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
-        keymap={
-            'img': 'image',
-            'gt_bboxes': 'bboxes'
-        }),
-    dict(type='YOLOv5HSVRandomAug'),
-    dict(type='mmdet.RandomFlip', prob=0.5),
-    dict(
-        type='mmdet.PackDetInputs',
-        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
-                   'flip_direction'))
-]
-
-train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
-default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
-
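As a quick arithmetic check on the loss-weight scaling in this removed config (not part of the original file): with the COCO defaults num_classes = 80, num_det_layers = 3 and img_scale[0] = 640, both scale factors equal 1, so the effective weights stay at 0.3 and 0.7.

```python
# Assumed COCO defaults, for illustration only.
num_classes, num_det_layers, img_w = 80, 3, 640
loss_cls = 0.3 * (num_classes / 80 * 3 / num_det_layers)      # -> 0.3
loss_obj = 0.7 * ((img_w / 640) ** 2 * 3 / num_det_layers)    # -> 0.7
print(loss_cls, loss_obj)
```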
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet34.py
DELETED
@@ -1,17 +0,0 @@
-# model settings
-model = dict(
-    type='ImageClassifier',
-    backbone=dict(
-        type='ResNet',
-        depth=34,
-        num_stages=4,
-        out_indices=(3, ),
-        style='pytorch'),
-    neck=dict(type='GlobalAveragePooling'),
-    head=dict(
-        type='LinearClsHead',
-        num_classes=1000,
-        in_channels=512,
-        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
-        topk=(1, 5),
-    ))
-
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/shufflenet-v2-1x_4xb32_2000e_3c_noF/__init__.py
DELETED
File without changes
spaces/Aashiue/speech_to_text/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Speech To Text
-emoji: 📚
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Adeeb-F/AI-Genrated-Image-Detector/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: AI Genrated Image Detector
-emoji: 👁
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Afnaan/chatbots/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Chatbots
-emoji: 🏢
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/dots/Factory.js
DELETED
@@ -1,13 +0,0 @@
-import Dots from './Dots.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('dots', function (config) {
-    var gameObject = new Dots(this.scene, config);
-    this.scene.add.existing(gameObject);
-    return gameObject;
-});
-
-SetValue(window, 'RexPlugins.Spinner.Dots', Dots);
-
-export default Dots;
-
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/los/Factory.d.ts
DELETED
@@ -1,6 +0,0 @@
-import Los from './Los';
-import Base from '../base/Base';
-
-export default function Factory(
-    config?: Base.IConfig
-): Los;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/intouching/Factory.d.ts
DELETED
@@ -1,7 +0,0 @@
-// import * as Phaser from 'phaser';
-import InTouching from './InTouching';
-
-export default function (
-    gameObject: Phaser.GameObjects.GameObject,
-    config?: InTouching.IConfig
-): InTouching;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/FadeCallbacks.js
DELETED
@@ -1,26 +0,0 @@
-var GetShowCallback = function (duration, alpha) {
-    if (alpha === undefined) {
-        alpha = 1;
-    }
-    return function (child, key, sides, reset) {
-        if (key !== 'panel') {
-            sides.fadeChild(child, ((reset) ? 0 : duration), alpha);
-        }
-    }
-}
-
-var GetHideCallback = function (duration, alpha) {
-    if (alpha === undefined) {
-        alpha = 0;
-    }
-    return function (child, key, sides, reset) {
-        if (key !== 'panel') {
-            sides.fadeChild(child, ((reset) ? 0 : duration), alpha);
-        }
-    }
-}
-
-export default {
-    show: GetShowCallback,
-    hide: GetHideCallback
-}
-
spaces/Alpaca233/SadTalker/src/gradio_demo.py
DELETED
@@ -1,155 +0,0 @@
-import torch, uuid
-import os, sys, shutil
-from src.utils.preprocess import CropAndExtract
-from src.test_audio2coeff import Audio2Coeff
-from src.facerender.animate import AnimateFromCoeff
-from src.generate_batch import get_data
-from src.generate_facerender_batch import get_facerender_data
-
-from src.utils.init_path import init_path
-
-from pydub import AudioSegment
-
-
-def mp3_to_wav(mp3_filename,wav_filename,frame_rate):
-    mp3_file = AudioSegment.from_file(file=mp3_filename)
-    mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav")
-
-
-class SadTalker():
-
-    def __init__(self, checkpoint_path='checkpoints', config_path='src/config', lazy_load=False):
-
-        if torch.cuda.is_available() :
-            device = "cuda"
-        else:
-            device = "cpu"
-
-        self.device = device
-
-        os.environ['TORCH_HOME']= checkpoint_path
-
-        self.checkpoint_path = checkpoint_path
-        self.config_path = config_path
-
-
-    def test(self, source_image, driven_audio, preprocess='crop',
-             still_mode=False, use_enhancer=False, batch_size=1, size=256,
-             pose_style = 0, exp_scale=1.0,
-             use_ref_video = False,
-             ref_video = None,
-             ref_info = None,
-             use_idle_mode = False,
-             length_of_audio = 0, use_blink=True,
-             result_dir='./results/'):
-
-        self.sadtalker_paths = init_path(self.checkpoint_path, self.config_path, size, False, preprocess)
-        print(self.sadtalker_paths)
-
-        self.audio_to_coeff = Audio2Coeff(self.sadtalker_paths, self.device)
-        self.preprocess_model = CropAndExtract(self.sadtalker_paths, self.device)
-        self.animate_from_coeff = AnimateFromCoeff(self.sadtalker_paths, self.device)
-
-        time_tag = str(uuid.uuid4())
-        save_dir = os.path.join(result_dir, time_tag)
-        os.makedirs(save_dir, exist_ok=True)
-
-        input_dir = os.path.join(save_dir, 'input')
-        os.makedirs(input_dir, exist_ok=True)
-
-        print(source_image)
-        pic_path = os.path.join(input_dir, os.path.basename(source_image))
-        shutil.move(source_image, input_dir)
-
-        if driven_audio is not None and os.path.isfile(driven_audio):
-            audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
-
-            #### mp3 to wav
-            if '.mp3' in audio_path:
-                mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
-                audio_path = audio_path.replace('.mp3', '.wav')
-            else:
-                shutil.move(driven_audio, input_dir)
-
-        elif use_idle_mode:
-            audio_path = os.path.join(input_dir, 'idlemode_'+str(length_of_audio)+'.wav') ## generate audio from this new audio_path
-            from pydub import AudioSegment
-            one_sec_segment = AudioSegment.silent(duration=1000*length_of_audio)  #duration in milliseconds
-            one_sec_segment.export(audio_path, format="wav")
-        else:
-            print(use_ref_video, ref_info)
-            assert use_ref_video == True and ref_info == 'all'
-
-        if use_ref_video and ref_info == 'all': # full ref mode
-            ref_video_videoname = os.path.basename(ref_video)
-            audio_path = os.path.join(save_dir, ref_video_videoname+'.wav')
-            print('new audiopath:',audio_path)
-            # if ref_video contains audio, set the audio from ref_video.
-            cmd = r"ffmpeg -y -hide_banner -loglevel error -i %s %s"%(ref_video, audio_path)
-            os.system(cmd)
-
-        os.makedirs(save_dir, exist_ok=True)
-
-        #crop image and extract 3dmm from image
-        first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
-        os.makedirs(first_frame_dir, exist_ok=True)
-        first_coeff_path, crop_pic_path, crop_info = self.preprocess_model.generate(pic_path, first_frame_dir, preprocess, True, size)
-
-        if first_coeff_path is None:
-            raise AttributeError("No face is detected")
-
-        if use_ref_video:
-            print('using ref video for genreation')
-            ref_video_videoname = os.path.splitext(os.path.split(ref_video)[-1])[0]
-            ref_video_frame_dir = os.path.join(save_dir, ref_video_videoname)
-            os.makedirs(ref_video_frame_dir, exist_ok=True)
-            print('3DMM Extraction for the reference video providing pose')
-            ref_video_coeff_path, _, _ = self.preprocess_model.generate(ref_video, ref_video_frame_dir, preprocess, source_image_flag=False)
-        else:
-            ref_video_coeff_path = None
-
-        if use_ref_video:
-            if ref_info == 'pose':
-                ref_pose_coeff_path = ref_video_coeff_path
-                ref_eyeblink_coeff_path = None
-            elif ref_info == 'blink':
-                ref_pose_coeff_path = None
-                ref_eyeblink_coeff_path = ref_video_coeff_path
-            elif ref_info == 'pose+blink':
-                ref_pose_coeff_path = ref_video_coeff_path
-                ref_eyeblink_coeff_path = ref_video_coeff_path
-            elif ref_info == 'all':
-                ref_pose_coeff_path = None
-                ref_eyeblink_coeff_path = None
-            else:
-                raise('error in refinfo')
-        else:
-            ref_pose_coeff_path = None
-            ref_eyeblink_coeff_path = None
-
-        #audio2ceoff
-        if use_ref_video and ref_info == 'all':
-            coeff_path = ref_video_coeff_path # self.audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
-        else:
-            batch = get_data(first_coeff_path, audio_path, self.device, ref_eyeblink_coeff_path=ref_eyeblink_coeff_path, still=still_mode, idlemode=use_idle_mode, length_of_audio=length_of_audio, use_blink=use_blink) # longer audio?
-            coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)
-
-        #coeff2video
-        data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode, preprocess=preprocess, size=size, expression_scale = exp_scale)
-        return_path = self.animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None, preprocess=preprocess, img_size=size)
-        video_name = data['video_name']
-        print(f'The generated video is named {video_name} in {save_dir}')
-
-        del self.preprocess_model
-        del self.audio_to_coeff
-        del self.animate_from_coeff
-
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-
-        import gc; gc.collect()
-
-        return return_path
-
-
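The mp3_to_wav helper at the top of this removed file is self-contained, so its use is easy to illustrate. A hedged sketch (file names are hypothetical; assumes pydub and an ffmpeg backend are installed):

```python
from pydub import AudioSegment

def mp3_to_wav(mp3_filename, wav_filename, frame_rate):
    # Same logic as the deleted helper: decode, resample, and write a WAV file.
    AudioSegment.from_file(file=mp3_filename).set_frame_rate(frame_rate).export(wav_filename, format="wav")

# Hypothetical usage: convert driving audio to 16 kHz WAV before inference.
mp3_to_wav("speech.mp3", "speech.wav", 16000)
```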
spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_chatgpt.py
DELETED
@@ -1,272 +0,0 @@
-# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
-
-"""
-    该文件中主要包含三个函数
-
-    不具备多线程能力的函数:
-    1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
-
-    具备多线程调用能力的函数
-    2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑
-    3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
-"""
-
-import json
-import time
-import gradio as gr
-import logging
-import traceback
-import requests
-import importlib
-
-# config_private.py放自己的秘密如API和代理网址
-# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key
-proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
-    get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
-
-timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
-                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
-
-def get_full_error(chunk, stream_response):
-    """
-        获取完整的从Openai返回的报错
-    """
-    while True:
-        try:
-            chunk += next(stream_response)
-        except:
-            break
-    return chunk
-
-
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
-    """
-    发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
-    inputs:
-        是本次问询的输入
-    sys_prompt:
-        系统静默prompt
-    llm_kwargs:
-        chatGPT的内部调优参数
-    history:
-        是之前的对话列表
-    observe_window = None:
-        用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
-    """
-    watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
-    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
-    retry = 0
-    while True:
-        try:
-            # make a POST request to the API endpoint, stream=False
-            from .bridge_all import model_info
-            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-            response = requests.post(endpoint, headers=headers, proxies=proxies,
-                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
-        except requests.exceptions.ReadTimeout as e:
-            retry += 1
-            traceback.print_exc()
-            if retry > MAX_RETRY: raise TimeoutError
-            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
-
-    stream_response = response.iter_lines()
-    result = ''
-    while True:
-        try: chunk = next(stream_response).decode()
-        except StopIteration:
-            break
-        except requests.exceptions.ConnectionError:
-            chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
-        if len(chunk)==0: continue
-        if not chunk.startswith('data:'):
-            error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
-            if "reduce the length" in error_msg:
-                raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
-            else:
-                raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
-        if ('data: [DONE]' in chunk): break # api2d 正常完成
-        json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
-        delta = json_data["delta"]
-        if len(delta) == 0: break
-        if "role" in delta: continue
-        if "content" in delta:
-            result += delta["content"]
-            if not console_slience: print(delta["content"], end='')
-            if observe_window is not None:
-                # 观测窗,把已经获取的数据显示出去
-                if len(observe_window) >= 1: observe_window[0] += delta["content"]
-                # 看门狗,如果超过期限没有喂狗,则终止
-                if len(observe_window) >= 2:
-                    if (time.time()-observe_window[1]) > watch_dog_patience:
-                        raise RuntimeError("用户取消了程序。")
-        else: raise RuntimeError("意外Json结构:"+delta)
-        if json_data['finish_reason'] == 'length':
-            raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
-    return result
-
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
-    """
-    发送至chatGPT,流式获取输出。
-    用于基础的对话功能。
-    inputs 是本次问询的输入
-    top_p, temperature是chatGPT的内部调优参数
-    history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
-    chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
-    additional_fn代表点击的哪个按钮,按钮见functional.py
-    """
-    if is_any_api_key(inputs):
-        chatbot._cookies['api_key'] = inputs
-        chatbot.append(("输入已识别为openai的api_key", "api_key已导入"))
-        yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
-        return
-    elif not is_any_api_key(chatbot._cookies['api_key']):
-        chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
-        yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
-        return
-
-    if additional_fn is not None:
-        import core_functional
-        importlib.reload(core_functional)    # 热更新prompt
-        core_functional = core_functional.get_core_functions()
-        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
-        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
-    raw_input = inputs
-    logging.info(f'[raw_input] {raw_input}')
-    chatbot.append((inputs, ""))
-    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
-
-    try:
-        headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
-    except RuntimeError as e:
-        chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。")
-        yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
-        return
-
-    history.append(inputs); history.append(" ")
-
-    retry = 0
-    while True:
-        try:
-            # make a POST request to the API endpoint, stream=True
-            from .bridge_all import model_info
-            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
-            response = requests.post(endpoint, headers=headers, proxies=proxies,
-                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
-        except:
-            retry += 1
-            chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
-            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
-            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
-            if retry > MAX_RETRY: raise TimeoutError
-
-    gpt_replying_buffer = ""
-
-    is_head_of_the_stream = True
-    if stream:
-        stream_response = response.iter_lines()
-        while True:
-            chunk = next(stream_response)
-            # print(chunk.decode()[6:])
-            if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
-                # 数据流的第一帧不携带content
-                is_head_of_the_stream = False; continue
-
-            if chunk:
-                try:
-                    chunk_decoded = chunk.decode()
-                    # 前者API2D的
-                    if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
-                        # 判定为数据流的结束,gpt_replying_buffer也写完了
-                        logging.info(f'[response] {gpt_replying_buffer}')
-                        break
-                    # 处理数据流的主体
-                    chunkjson = json.loads(chunk_decoded[6:])
-                    status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
-                    # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
-                    gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
-                    history[-1] = gpt_replying_buffer
-                    chatbot[-1] = (history[-2], history[-1])
-                    yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
-
-                except Exception as e:
-                    traceback.print_exc()
-                    yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
-                    chunk = get_full_error(chunk, stream_response)
-                    chunk_decoded = chunk.decode()
-                    error_msg = chunk_decoded
-                    if "reduce the length" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长,或历史数据过长. 历史缓存数据现已释放,您可以请再次尝试.")
-                        history = []    # 清除历史
-                    elif "does not exist" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在,或者您没有获得体验资格.")
-                    elif "Incorrect API key" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由,拒绝服务.")
-                    elif "exceeded your current quota" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
-                    elif "bad forward key" in error_msg:
-                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
-                    else:
-                        from toolbox import regular_txt_to_markdown
-                        tb_str = '```\n' + traceback.format_exc() + '```'
-                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}")
-                    yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
-                    return
-
-def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
-    """
-    整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
-    """
-    if not is_any_api_key(llm_kwargs['api_key']):
-        raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
-
-    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
-
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}"
-    }
-
-    conversation_cnt = len(history) // 2
-
-    messages = [{"role": "system", "content": system_prompt}]
-    if conversation_cnt:
-        for index in range(0, 2*conversation_cnt, 2):
-            what_i_have_asked = {}
-            what_i_have_asked["role"] = "user"
-            what_i_have_asked["content"] = history[index]
-            what_gpt_answer = {}
-            what_gpt_answer["role"] = "assistant"
-            what_gpt_answer["content"] = history[index+1]
-            if what_i_have_asked["content"] != "":
-                if what_gpt_answer["content"] == "": continue
-                if what_gpt_answer["content"] == timeout_bot_msg: continue
-                messages.append(what_i_have_asked)
-                messages.append(what_gpt_answer)
-            else:
-                messages[-1]['content'] = what_gpt_answer['content']
-
-    what_i_ask_now = {}
-    what_i_ask_now["role"] = "user"
-    what_i_ask_now["content"] = inputs
-    messages.append(what_i_ask_now)
-
-    payload = {
-        "model": llm_kwargs['llm_model'].strip('api2d-'),
-        "messages": messages,
-        "temperature": llm_kwargs['temperature'],  # 1.0,
-        "top_p": llm_kwargs['top_p'],  # 1.0,
-        "n": 1,
-        "stream": stream,
-        "presence_penalty": 0,
-        "frequency_penalty": 0,
-    }
-    try:
-        print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
-    except:
-        print('输入中可能存在乱码。')
-    return headers,payload
-
-
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/__init__.py
DELETED
@@ -1,24 +0,0 @@
-from ...utils import (
-    OptionalDependencyNotAvailable,
-    is_torch_available,
-    is_transformers_available,
-    is_transformers_version,
-)
-
-
-try:
-    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    from ...utils.dummy_torch_and_transformers_objects import (
-        VersatileDiffusionDualGuidedPipeline,
-        VersatileDiffusionImageVariationPipeline,
-        VersatileDiffusionPipeline,
-        VersatileDiffusionTextToImagePipeline,
-    )
-else:
-    from .modeling_text_unet import UNetFlatConditionModel
-    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
-    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
-    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
-    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
-
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
DELETED
@@ -1,3 +0,0 @@
-_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
-model = dict(
-    pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101))
-
spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/README.md
DELETED
@@ -1,35 +0,0 @@
-# Grid R-CNN
-
-## Introduction
-
-[ALGORITHM]
-
-```latex
-@inproceedings{lu2019grid,
-  title={Grid r-cnn},
-  author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie},
-  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
-  year={2019}
-}
-
-@article{lu2019grid,
-  title={Grid R-CNN Plus: Faster and Better},
-  author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie},
-  journal={arXiv preprint arXiv:1906.05688},
-  year={2019}
-}
-```
-
-## Results and Models
-
-| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
-|:-----------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
-| R-50 | 2x | 5.1 | 15.0 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130_221140.log.json) |
-| R-101 | 2x | 7.0 | 12.6 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309_164224.log.json) |
-| X-101-32x4d | 2x | 8.3 | 10.8 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130_215413.log.json) |
-| X-101-64x4d | 2x | 11.3 | 7.7 | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204_080641.log.json) |
-
-**Notes:**
-
-- All models are trained with 8 GPUs instead of 32 GPUs in the original paper.
-- The warming up lasts for 1 epoch and `2x` here indicates 25 epochs.
-
spaces/Andy1621/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
DELETED
@@ -1,10 +0,0 @@
-_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py'
-# model settings
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w18',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(18, 36)),
-            stage3=dict(num_channels=(18, 36, 72)),
-            stage4=dict(num_channels=(18, 36, 72, 144)))),
-    neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
-
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py
DELETED
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_480x480_40k_pascal_context.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w48',
-    backbone=dict(
-        extra=dict(
-            stage2=dict(num_channels=(48, 96)),
-            stage3=dict(num_channels=(48, 96, 192)),
-            stage4=dict(num_channels=(48, 96, 192, 384)))),
-    decode_head=dict(
-        in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
-
spaces/AnnasBlackHat/Image-Similarity/src/similarity/model_implements/mobilenet_v3.py
DELETED
@@ -1,14 +0,0 @@
-import tensorflow_hub as hub
-import numpy as np
-
-class ModelnetV3():
-    def __init__(self):
-        module_handle = "https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5"
-        self.module = hub.load(module_handle)
-
-    def extract_feature(self, imgs):
-        print('getting with ModelnetV3...')
-        features = []
-        for img in imgs:
-            features.append(np.squeeze(self.module(img)))
-        return features
-
spaces/Anthony7906/MengHuiMXD_GPT/modules/webui_locale.py
DELETED
@@ -1,26 +0,0 @@
-import os
-import locale
-import commentjson as json
-
-class I18nAuto:
-    def __init__(self):
-        if os.path.exists("config.json"):
-            with open("config.json", "r", encoding='utf-8') as f:
-                config = json.load(f)
-        else:
-            config = {}
-        lang_config = config.get("language", "auto")
-        language = os.environ.get("LANGUAGE", lang_config)
-        if language == "auto":
-            language = locale.getdefaultlocale()[0] # get the language code of the system (ex. zh_CN)
-        self.language_map = {}
-        self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
-        if self.file_is_exists:
-            with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
-                self.language_map.update(json.load(f))
-
-    def __call__(self, key):
-        if self.file_is_exists and key in self.language_map:
-            return self.language_map[key]
-        else:
-            return key
-
spaces/AriaMei/TTSdemo/commons.py
DELETED
@@ -1,161 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
-  classname = m.__class__.__name__
-  if classname.find("Conv") != -1:
-    m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-  return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
-  l = pad_shape[::-1]
-  pad_shape = [item for sublist in l for item in sublist]
-  return pad_shape
-
-
-def intersperse(lst, item):
-  result = [item] * (len(lst) * 2 + 1)
-  result[1::2] = lst
-  return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
-  """KL(P||Q)"""
-  kl = (logs_q - logs_p) - 0.5
-  kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
-  return kl
-
-
-def rand_gumbel(shape):
-  """Sample from the Gumbel distribution, protect from overflows."""
-  uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-  return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
-  g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-  return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
-  ret = torch.zeros_like(x[:, :, :segment_size])
-  for i in range(x.size(0)):
-    idx_str = ids_str[i]
-    idx_end = idx_str + segment_size
-    ret[i] = x[i, :, idx_str:idx_end]
-  return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-  b, d, t = x.size()
-  if x_lengths is None:
-    x_lengths = t
-  ids_str_max = x_lengths - segment_size + 1
-  ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-  ret = slice_segments(x, ids_str, segment_size)
-  return ret, ids_str
-
-
-def get_timing_signal_1d(
-    length, channels, min_timescale=1.0, max_timescale=1.0e4):
-  position = torch.arange(length, dtype=torch.float)
-  num_timescales = channels // 2
-  log_timescale_increment = (
-      math.log(float(max_timescale) / float(min_timescale)) /
-      (num_timescales - 1))
-  inv_timescales = min_timescale * torch.exp(
-      torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
-  scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-  signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-  signal = F.pad(signal, [0, 0, 0, channels % 2])
-  signal = signal.view(1, channels, length)
-  return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-  b, channels, length = x.size()
-  signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-  return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-  b, channels, length = x.size()
-  signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-  return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-  mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-  return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-  n_channels_int = n_channels[0]
-  in_act = input_a + input_b
-  t_act = torch.tanh(in_act[:, :n_channels_int, :])
-  s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-  acts = t_act * s_act
-  return acts
-
-
-def convert_pad_shape(pad_shape):
-  l = pad_shape[::-1]
-  pad_shape = [item for sublist in l for item in sublist]
-  return pad_shape
-
-
-def shift_1d(x):
-  x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-  return x
-
-
-def sequence_mask(length, max_length=None):
-  if max_length is None:
-    max_length = length.max()
-  x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-  return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-  """
-  duration: [b, 1, t_x]
-  mask: [b, 1, t_y, t_x]
-  """
-  device = duration.device
-
-  b, _, t_y, t_x = mask.shape
-  cum_duration = torch.cumsum(duration, -1)
-
-  cum_duration_flat = cum_duration.view(b * t_x)
-  path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-  path = path.view(b, t_x, t_y)
-  path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-  path = path.unsqueeze(1).transpose(2,3) * mask
-  return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-  if isinstance(parameters, torch.Tensor):
-    parameters = [parameters]
-  parameters = list(filter(lambda p: p.grad is not None, parameters))
-  norm_type = float(norm_type)
-  if clip_value is not None:
-    clip_value = float(clip_value)
-
-  total_norm = 0
-  for p in parameters:
-    param_norm = p.grad.data.norm(norm_type)
-    total_norm += param_norm.item() ** norm_type
-    if clip_value is not None:
-      p.grad.data.clamp_(min=-clip_value, max=clip_value)
-  total_norm = total_norm ** (1. / norm_type)
-  return total_norm
-
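Among the removed VITS utilities above, sequence_mask is the one most often reused elsewhere: it turns a batch of lengths into a boolean padding mask. A hedged usage sketch with made-up lengths (the function body is copied from the deleted file):

```python
import torch

def sequence_mask(length, max_length=None):
    # Same logic as the deleted helper: True where position < length.
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)

lengths = torch.tensor([2, 4])
print(sequence_mask(lengths, 4))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])
```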
spaces/Artrajz/vits-simple-api/vits/text/cleaners.py
DELETED
@@ -1,278 +0,0 @@
-import re
-import config
-from unidecode import unidecode
-from phonemizer import phonemize
-from phonemizer.backend.espeak.wrapper import EspeakWrapper
-
-ESPEAK_LIBRARY = getattr(config, "ESPEAK_LIBRARY", "")
-if ESPEAK_LIBRARY != "":
-    EspeakWrapper.set_library(ESPEAK_LIBRARY)
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
-    ('mrs', 'misess'),
-    ('mr', 'mister'),
-    ('dr', 'doctor'),
-    ('st', 'saint'),
-    ('co', 'company'),
-    ('jr', 'junior'),
-    ('maj', 'major'),
-    ('gen', 'general'),
-    ('drs', 'doctors'),
-    ('rev', 'reverend'),
-    ('lt', 'lieutenant'),
-    ('hon', 'honorable'),
-    ('sgt', 'sergeant'),
-    ('capt', 'captain'),
-    ('esq', 'esquire'),
-    ('ltd', 'limited'),
-    ('col', 'colonel'),
-    ('ft', 'fort'),
-]]
-
-
-def expand_abbreviations(text):
-    for regex, replacement in _abbreviations:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def transliteration_cleaners(text):
-    '''Pipeline for non-English text that transliterates to ASCII.'''
-    text = unidecode(text)
-    text = text.lower()
-    text = re.sub(r'\s+', ' ', text)
-    text = expand_abbreviations(text)
-    return text
-
-
-# for English text
-def english_cleaners(text):
-    '''Pipeline for English text, including abbreviation expansion.'''
-    text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: transliteration_cleaners(x.group(1)) + ' ', text)
-    phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
-    return phonemes
-
-
-# for non-English text that can be transliterated to ASCII
-def english_cleaners2(text):
-    '''Pipeline for English text, including abbreviation expansion. + punctuation + stress'''
-    text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: transliteration_cleaners(x.group(1)) + ' ', text)
-    phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True,
-                         with_stress=True)
-    return phonemes
-
-
-def japanese_cleaners(text):
-    from vits.text.japanese import japanese_to_romaji_with_accent
-
-    def clean(text):
-        text = japanese_to_romaji_with_accent(text)
-        text = re.sub(r'([A-Za-z])$', r'\1.', text)
-        return text
-
-    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: clean(x.group(1)) + ' ', text)
-    return text
-
-
-def japanese_cleaners2(text):
-    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
-
-
-def korean_cleaners(text):
-    '''Pipeline for Korean text'''
-    from vits.text.korean import latin_to_hangul, number_to_hangul, divide_hangul
-
-    def clean(text):
-        text = latin_to_hangul(text)
-        text = number_to_hangul(text)
-        text = divide_hangul(text)
-        text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
-        return text
-
-    text = re.sub(r'\[KO\](.*?)\[KO\]', lambda x: clean(x.group(1)) + ' ', text)
-    return text
-
-
-def chinese_cleaners(text):
-    '''Pipeline for Chinese text'''
-    from vits.text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, symbols_to_chinese
-
-    def clean(text):
-        text = symbols_to_chinese(text)
-        text = number_to_chinese(text)
-        text = chinese_to_bopomofo(text)
-        text = latin_to_bopomofo(text)
-        text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
-        return text
-
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: clean(x.group(1)) + ' ', text)
-    return text
-
-
-def zh_ja_mixture_cleaners(text):
-    from vits.text.mandarin import chinese_to_romaji
-    from vits.text.japanese import japanese_to_romaji_with_accent
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
-                  lambda x: chinese_to_romaji(x.group(1)) + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
-        x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…') + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def sanskrit_cleaners(text):
-    text = text.replace('॥', '।').replace('ॐ', 'ओम्')
-    text = re.sub(r'([^।])$', r'\1।', text)
-    return text
-
-
-def cjks_cleaners(text):
-    from vits.text.mandarin import chinese_to_lazy_ipa
-    from vits.text.japanese import japanese_to_ipa
-    from vits.text.korean import korean_to_lazy_ipa
-    from vits.text.sanskrit import devanagari_to_ipa
-    from vits.text.english import english_to_lazy_ipa
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
-                  lambda x: chinese_to_lazy_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]',
-                  lambda x: japanese_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[KO\](.*?)\[KO\]',
-                  lambda x: korean_to_lazy_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[SA\](.*?)\[SA\]',
-                  lambda x: devanagari_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[EN\](.*?)\[EN\]',
-                  lambda x: english_to_lazy_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def cjke_cleaners(text):
-    from vits.text.mandarin import chinese_to_lazy_ipa
-    from vits.text.japanese import japanese_to_ipa
-    from vits.text.korean import korean_to_ipa
-    from vits.text.english import english_to_ipa2
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
-        'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
-        'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + ' ', text)
-    text = re.sub(r'\[KO\](.*?)\[KO\]',
-                  lambda x: korean_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
-        'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def cjke_cleaners2(text):
-    from vits.text.mandarin import chinese_to_ipa
-    from vits.text.japanese import japanese_to_ipa2
-    from vits.text.korean import korean_to_ipa
-    from vits.text.english import english_to_ipa2
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
-                  lambda x: chinese_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]',
-                  lambda x: japanese_to_ipa2(x.group(1)) + ' ', text)
-    text = re.sub(r'\[KO\](.*?)\[KO\]',
-                  lambda x: korean_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[EN\](.*?)\[EN\]',
-                  lambda x: english_to_ipa2(x.group(1)) + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def cje_cleaners(text):
-    from vits.text.mandarin import chinese_to_lazy_ipa
-    from vits.text.japanese import japanese_to_ipa
-    from vits.text.english import english_to_ipa2
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
-        'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
-        'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + ' ', text)
-    text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
-        'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def cje_cleaners2(text):
-    from vits.text.mandarin import chinese_to_ipa
-    from vits.text.japanese import japanese_to_ipa2
-    from vits.text.english import english_to_ipa2
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
-                  lambda x: chinese_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]',
-                  lambda x: japanese_to_ipa2(x.group(1)) + ' ', text)
-    text = re.sub(r'\[EN\](.*?)\[EN\]',
-                  lambda x: english_to_ipa2(x.group(1)) + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def thai_cleaners(text):
-    from vits.text.thai import num_to_thai, latin_to_thai
-
-    def clean(text):
-        text = num_to_thai(text)
-        text = latin_to_thai(text)
-        return text
-
-    text = re.sub(r'\[TH\](.*?)\[TH\]', lambda x: clean(x.group(1)) + ' ', text)
-    return text
-
-
-def shanghainese_cleaners(text):
-    from vits.text.shanghainese import shanghainese_to_ipa
-
-    def clean(text):
-        text = shanghainese_to_ipa(text)
-        text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-        return text
-
-    text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: clean(x.group(1)) + ' ', text)
-    return text
-
-
-def chinese_dialect_cleaners(text):
-    from vits.text.mandarin import chinese_to_ipa2
-    from vits.text.japanese import japanese_to_ipa3
-    from vits.text.shanghainese import shanghainese_to_ipa
-    from vits.text.cantonese import cantonese_to_ipa
-    from vits.text.english import english_to_lazy_ipa2
-    from vits.text.ngu_dialect import ngu_dialect_to_ipa
-    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
-                  lambda x: chinese_to_ipa2(x.group(1)) + ' ', text)
-    text = re.sub(r'\[JA\](.*?)\[JA\]',
-                  lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ') + ' ', text)
-    text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
-                                                                                                             '˧˧˦').replace(
-        '6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e') + ' ', text)
-    text = re.sub(r'\[GD\](.*?)\[GD\]',
-                  lambda x: cantonese_to_ipa(x.group(1)) + ' ', text)
-    text = re.sub(r'\[EN\](.*?)\[EN\]',
-                  lambda x: english_to_lazy_ipa2(x.group(1)) + ' ', text)
-    text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
-        1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ') + ' ', text)
-    text = re.sub(r'\s+$', '', text)
-    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
-    return text
-
-
-def bert_chinese_cleaners(text):
-    from vits.text import mandarin
-    matches = re.findall(r"\[ZH\](.*?)\[ZH\]", text)
-    text = "".join(matches)
-    if text[-1] not in [".", "。", ",", ","]: text += "."
-    text = mandarin.symbols_to_chinese(text)
-    text = mandarin.number_transform_to_chinese(text)
-    if not hasattr(bert_chinese_cleaners, "tts_front"):
-        bert_chinese_cleaners.tts_front = mandarin.VITS_PinYin_model()
-    tts_front = bert_chinese_cleaners.tts_front
-    cleaned_text, char_embeds = tts_front.chinese_to_phonemes(text)
-    return cleaned_text, char_embeds
|
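All of the cleaners above share one pattern: language-tagged spans such as [ZH]...[ZH], [JA]...[JA] or [EN]...[EN] are converted to IPA, trailing whitespace is stripped, and a final punctuation mark is appended. A minimal usage sketch, not part of this commit; it assumes the vits.text package from this repository is importable, and the tagged string is made up:

# Illustrative only: run a tagged multilingual string through one of the cleaners above.
from vits.text.cleaners import cjke_cleaners2

tagged = '[ZH]你好[ZH] [EN]hello world[EN]'
ipa = cjke_cleaners2(tagged)
print(ipa)  # IPA phoneme string, guaranteed to end in punctuation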
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/queue.py
DELETED
@@ -1,22 +0,0 @@
import collections

from ..packages import six
from ..packages.six.moves import queue

if six.PY2:
    # Queue is imported for side effects on MS Windows. See issue #229.
    import Queue as _unused_module_Queue  # noqa: F401


class LifoQueue(queue.Queue):
    def _init(self, _):
        self.queue = collections.deque()

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()
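The subclass above only swaps Queue's internal container for a deque so the newest item comes back first, which is how urllib3's connection pool prefers to reuse connections. A Python 3-only sketch of the same idea, illustrative and without the six compatibility shim:

# Illustrative only: a deque-backed LIFO queue equivalent to the class above.
import collections
import queue


class LifoQueue(queue.Queue):
    def _init(self, _):
        self.queue = collections.deque()

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()


q = LifoQueue()
q.put('a')
q.put('b')
assert q.get() == 'b'  # last in, first out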
spaces/AtlasUnified/DeforumPromptGenerator/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: DeforumPromptGenerator
emoji: 💻
colorFrom: red
colorTo: green
sdk: gradio
sdk_version: 3.28.3
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/transform.py
DELETED
@@ -1,45 +0,0 @@
from torchvision.transforms import (
    Normalize,
    Compose,
    RandomResizedCrop,
    InterpolationMode,
    ToTensor,
    Resize,
    CenterCrop,
)


def _convert_to_rgb(image):
    return image.convert("RGB")


def image_transform(
    image_size: int,
    is_train: bool,
    mean=(0.48145466, 0.4578275, 0.40821073),
    std=(0.26862954, 0.26130258, 0.27577711),
):
    normalize = Normalize(mean=mean, std=std)
    if is_train:
        return Compose(
            [
                RandomResizedCrop(
                    image_size,
                    scale=(0.9, 1.0),
                    interpolation=InterpolationMode.BICUBIC,
                ),
                _convert_to_rgb,
                ToTensor(),
                normalize,
            ]
        )
    else:
        return Compose(
            [
                Resize(image_size, interpolation=InterpolationMode.BICUBIC),
                CenterCrop(image_size),
                _convert_to_rgb,
                ToTensor(),
                normalize,
            ]
        )
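A quick sketch of applying the returned pipeline, illustrative only: the 224-pixel size and the grey test image are arbitrary choices, and image_transform is assumed to be in scope from the module above.

# Illustrative only: build the eval-time transform and push a dummy PIL image through it.
from PIL import Image

preprocess = image_transform(image_size=224, is_train=False)
img = Image.new('RGB', (640, 480), color=(128, 128, 128))
tensor = preprocess(img)
print(tensor.shape)  # torch.Size([3, 224, 224])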
spaces/Audio-AGI/WavJourney/APIs.py
DELETED
@@ -1,202 +0,0 @@
import os
import numpy as np
import requests
import yaml
import pyloudnorm as pyln
from scipy.io.wavfile import write
import torchaudio
from retrying import retry
from utils import get_service_port, get_service_url


os.environ['OPENBLAS_NUM_THREADS'] = '1'
SAMPLE_RATE = 32000


with open('config.yaml', 'r') as file:
    config = yaml.safe_load(file)
service_port = get_service_port()
localhost_addr = get_service_url()
enable_sr = config['Speech-Restoration']['Enable']


def LOUDNESS_NORM(audio, sr=32000, volumn=-25):
    # peak normalize audio to -1 dB
    peak_normalized_audio = pyln.normalize.peak(audio, -10.0)
    # measure the loudness first
    meter = pyln.Meter(sr)  # create BS.1770 meter
    loudness = meter.integrated_loudness(peak_normalized_audio)
    # loudness normalize audio to -12 dB LUFS
    normalized_audio = pyln.normalize.loudness(peak_normalized_audio, loudness, volumn)
    return normalized_audio


def WRITE_AUDIO(wav, name=None, sr=SAMPLE_RATE):
    """
    function: write audio numpy to .wav file
    @params:
        wav: np.array [samples]
    """
    if name is None:
        name = 'output.wav'

    if len(wav.shape) > 1:
        wav = wav[0]

    # declipping
    max_value = np.max(np.abs(wav))
    if max_value > 1:
        wav *= 0.9 / max_value

    # write audio
    write(name, sr, np.round(wav * 32767).astype(np.int16))


def READ_AUDIO_NUMPY(wav, sr=SAMPLE_RATE):
    """
    function: read audio numpy
    return: np.array [samples]
    """
    waveform, sample_rate = torchaudio.load(wav)

    if sample_rate != sr:
        waveform = torchaudio.functional.resample(waveform, orig_freq=sample_rate, new_freq=sr)

    wav_numpy = waveform[0].numpy()

    return wav_numpy


def MIX(wavs=[['1.wav', 0.], ['2.wav', 10.]], out_wav='out.wav', sr=SAMPLE_RATE):
    """
    wavs: [[wav_name, absolute_offset], ...]
    """
    max_length = max([int(wav[1] * sr + len(READ_AUDIO_NUMPY(wav[0]))) for wav in wavs])
    template_wav = np.zeros(max_length)

    for wav in wavs:
        cur_name, cur_offset = wav
        cur_wav = READ_AUDIO_NUMPY(cur_name)
        cur_len = len(cur_wav)
        cur_offset = int(cur_offset * sr)

        # mix
        template_wav[cur_offset:cur_offset + cur_len] += cur_wav

    WRITE_AUDIO(template_wav, name=out_wav)


def CAT(wavs, out_wav='out.wav'):
    """
    wavs: List of wav file ['1.wav', '2.wav', ...]
    """
    wav_num = len(wavs)

    segment0 = READ_AUDIO_NUMPY(wavs[0])

    cat_wav = segment0

    if wav_num > 1:
        for i in range(1, wav_num):
            next_wav = READ_AUDIO_NUMPY(wavs[i])
            cat_wav = np.concatenate((cat_wav, next_wav), axis=-1)

    WRITE_AUDIO(cat_wav, name=out_wav)


def COMPUTE_LEN(wav):
    wav = READ_AUDIO_NUMPY(wav)
    return len(wav) / 32000


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def TTM(text, length=10, volume=-28, out_wav='out.wav'):
    url = f'http://{localhost_addr}:{service_port}/generate_music'
    data = {
        'text': f'{text}',
        'length': f'{length}',
        'volume': f'{volume}',
        'output_wav': f'{out_wav}',
    }

    response = requests.post(url, json=data)

    if response.status_code == 200:
        print('Success:', response.json()['message'])
    else:
        print('Error:', response.json()['API error'])
        raise RuntimeError(response.json()['API error'])


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def TTA(text, length=5, volume=-35, out_wav='out.wav'):
    url = f'http://{localhost_addr}:{service_port}/generate_audio'
    data = {
        'text': f'{text}',
        'length': f'{length}',
        'volume': f'{volume}',
        'output_wav': f'{out_wav}',
    }

    response = requests.post(url, json=data)

    if response.status_code == 200:
        print('Success:', response.json()['message'])
    else:
        print('Error:', response.json()['API error'])
        raise RuntimeError(response.json()['API error'])


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def TTS(text, volume=-20, out_wav='out.wav', enhanced=enable_sr, speaker_id='', speaker_npz=''):
    url = f'http://{localhost_addr}:{service_port}/generate_speech'
    data = {
        'text': f'{text}',
        'speaker_id': f'{speaker_id}',
        'speaker_npz': f'{speaker_npz}',
        'volume': f'{volume}',
        'output_wav': f'{out_wav}',
    }

    response = requests.post(url, json=data)

    if response.status_code == 200:
        print('Success:', response.json()['message'])
    else:
        print('Error:', response.json()['API error'])
        raise RuntimeError(response.json()['API error'])

    if enhanced:
        SR(processfile=out_wav)


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def SR(processfile):
    url = f'http://{localhost_addr}:{service_port}/fix_audio'
    data = {'processfile': f'{processfile}'}

    response = requests.post(url, json=data)

    if response.status_code == 200:
        print('Success:', response.json()['message'])
    else:
        print('Error:', response.json()['API error'])
        raise RuntimeError(response.json()['API error'])


@retry(stop_max_attempt_number=5, wait_fixed=2000)
def VP(wav_path, out_dir):
    url = f'http://{localhost_addr}:{service_port}/parse_voice'
    data = {
        'wav_path': f'{wav_path}',
        'out_dir': f'{out_dir}'
    }

    response = requests.post(url, json=data)

    if response.status_code == 200:
        print('Success:', response.json()['message'])
    else:
        print('Error:', response.json()['API error'])
        raise RuntimeError(response.json()['API error'])
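The functions above are thin HTTP wrappers around local WavJourney services plus some numpy post-processing. A sketch of how a script might chain them, illustrative only: it assumes the services configured in config.yaml are already running, and the prompt text and file names are made up.

# Illustrative only: synthesize speech and music, then mix them with a 2-second offset.
TTS('Welcome to the show.', out_wav='speech.wav')
TTM('calm piano intro', length=10, out_wav='music.wav')
MIX(wavs=[['music.wav', 0.0], ['speech.wav', 2.0]], out_wav='scene.wav')
print(COMPUTE_LEN('scene.wav'), 'seconds')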
spaces/Averyng/averyng/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("huggingface/EleutherAI/gpt-neo-2.7B", title="averyng", description="Input name and submit").launch()
spaces/BAAI/vid2vid-zero/vid2vid_zero/models/attention_2d.py
DELETED
@@ -1,434 +0,0 @@
# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py

from dataclasses import dataclass
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput
from diffusers.utils.import_utils import is_xformers_available
from diffusers.models.attention import CrossAttention, FeedForward, AdaLayerNorm

from einops import rearrange, repeat


@dataclass
class Transformer2DModelOutput(BaseOutput):
    sample: torch.FloatTensor


if is_xformers_available():
    import xformers
    import xformers.ops
else:
    xformers = None


class Transformer2DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        use_sc_attn: bool = False,
        use_st_attn: bool = False,
    ):
        super().__init__()
        self.use_linear_projection = use_linear_projection
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        # 1. Transformer2DModel can process both standard continous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
        # Define whether input is continuous or discrete depending on configuration
        self.is_input_continuous = in_channels is not None
        self.is_input_vectorized = num_vector_embeds is not None

        if self.is_input_continuous and self.is_input_vectorized:
            raise ValueError(
                f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is None."
            )
        elif not self.is_input_continuous and not self.is_input_vectorized:
            raise ValueError(
                f"Has to define either `in_channels`: {in_channels} or `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is not None."
            )

        # 2. Define input layers
        if self.is_input_continuous:
            self.in_channels = in_channels

            self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
            if use_linear_projection:
                self.proj_in = nn.Linear(in_channels, inner_dim)
            else:
                self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
        else:
            raise NotImplementedError

        # Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                    attention_bias=attention_bias,
                    only_cross_attention=only_cross_attention,
                    upcast_attention=upcast_attention,
                    use_sc_attn=use_sc_attn,
                    use_st_attn=True if (d == 0 and use_st_attn) else False,
                )
                for d in range(num_layers)
            ]
        )

        # 4. Define output layers
        if use_linear_projection:
            self.proj_out = nn.Linear(in_channels, inner_dim)
        else:
            self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True, normal_infer: bool = False):
        # Input
        assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
        video_length = hidden_states.shape[2]
        hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
        encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)

        batch, channel, height, weight = hidden_states.shape
        residual = hidden_states

        hidden_states = self.norm(hidden_states)
        if not self.use_linear_projection:
            hidden_states = self.proj_in(hidden_states)
            inner_dim = hidden_states.shape[1]
            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
        else:
            inner_dim = hidden_states.shape[1]
            hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
            hidden_states = self.proj_in(hidden_states)

        # Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                video_length=video_length,
                normal_infer=normal_infer,
            )

        # Output
        if not self.use_linear_projection:
            hidden_states = (
                hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
            )
            hidden_states = self.proj_out(hidden_states)
        else:
            hidden_states = self.proj_out(hidden_states)
            hidden_states = (
                hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
            )

        output = hidden_states + residual

        output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)


class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        use_sc_attn: bool = False,
        use_st_attn: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm = num_embeds_ada_norm is not None

        # Attn with temporal modeling
        self.use_sc_attn = use_sc_attn
        self.use_st_attn = use_st_attn

        attn_type = SparseCausalAttention if self.use_sc_attn else CrossAttention
        attn_type = SpatialTemporalAttention if self.use_st_attn else attn_type
        self.attn1 = attn_type(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )  # is a self-attention
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn)

        # Cross-Attn
        if cross_attention_dim is not None:
            self.attn2 = CrossAttention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.attn2 = None

        self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)

        if cross_attention_dim is not None:
            self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim)
        else:
            self.norm2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim)

    def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool):
        if not is_xformers_available():
            print("Here is how to install it")
            raise ModuleNotFoundError(
                "Refer to https://github.com/facebookresearch/xformers for more information on how to install"
                " xformers",
                name="xformers",
            )
        elif not torch.cuda.is_available():
            raise ValueError(
                "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is only"
                " available for GPU "
            )
        else:
            try:
                # Make sure we can run the memory efficient attention
                _ = xformers.ops.memory_efficient_attention(
                    torch.randn((1, 2, 40), device="cuda"),
                    torch.randn((1, 2, 40), device="cuda"),
                    torch.randn((1, 2, 40), device="cuda"),
                )
            except Exception as e:
                raise e
            self.attn1._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
            if self.attn2 is not None:
                self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
            # self.attn_temp._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, attention_mask=None, video_length=None, normal_infer=False):
        # SparseCausal-Attention
        norm_hidden_states = (
            self.norm1(hidden_states, timestep) if self.use_ada_layer_norm else self.norm1(hidden_states)
        )

        if self.only_cross_attention:
            hidden_states = (
                self.attn1(norm_hidden_states, encoder_hidden_states, attention_mask=attention_mask) + hidden_states
            )
        else:
            if self.use_sc_attn or self.use_st_attn:
                hidden_states = self.attn1(
                    norm_hidden_states, attention_mask=attention_mask, video_length=video_length, normal_infer=normal_infer,
                ) + hidden_states
            else:
                # shape of hidden_states: (b*f, len, dim)
                hidden_states = self.attn1(norm_hidden_states, attention_mask=attention_mask) + hidden_states

        if self.attn2 is not None:
            # Cross-Attention
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            hidden_states = (
                self.attn2(
                    norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask
                )
                + hidden_states
            )

        # Feed-forward
        hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states

        return hidden_states


class SparseCausalAttention(CrossAttention):
    def forward_sc_attn(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
        batch_size, sequence_length, _ = hidden_states.shape

        encoder_hidden_states = encoder_hidden_states

        if self.group_norm is not None:
            hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = self.to_q(hidden_states)
        dim = query.shape[-1]
        query = self.reshape_heads_to_batch_dim(query)

        if self.added_kv_proj_dim is not None:
            raise NotImplementedError

        encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
        key = self.to_k(encoder_hidden_states)
        value = self.to_v(encoder_hidden_states)

        former_frame_index = torch.arange(video_length) - 1
        former_frame_index[0] = 0

        key = rearrange(key, "(b f) d c -> b f d c", f=video_length)
        key = torch.cat([key[:, [0] * video_length], key[:, former_frame_index]], dim=2)
        key = rearrange(key, "b f d c -> (b f) d c")

        value = rearrange(value, "(b f) d c -> b f d c", f=video_length)
        value = torch.cat([value[:, [0] * video_length], value[:, former_frame_index]], dim=2)
        value = rearrange(value, "b f d c -> (b f) d c")

        key = self.reshape_heads_to_batch_dim(key)
        value = self.reshape_heads_to_batch_dim(value)

        if attention_mask is not None:
            if attention_mask.shape[-1] != query.shape[1]:
                target_length = query.shape[1]
                attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
                attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)

        # attention, what we cannot get enough of
        if self._use_memory_efficient_attention_xformers:
            hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
            # Some versions of xformers return output in fp32, cast it back to the dtype of the input
            hidden_states = hidden_states.to(query.dtype)
        else:
            if self._slice_size is None or query.shape[0] // self._slice_size == 1:
                hidden_states = self._attention(query, key, value, attention_mask)
            else:
                hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)

        # linear proj
        hidden_states = self.to_out[0](hidden_states)

        # dropout
        hidden_states = self.to_out[1](hidden_states)
        return hidden_states

    def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None, normal_infer=False):
        if normal_infer:
            return super().forward(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                # video_length=video_length,
            )
        else:
            return self.forward_sc_attn(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                video_length=video_length,
            )


class SpatialTemporalAttention(CrossAttention):
    def forward_dense_attn(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
        batch_size, sequence_length, _ = hidden_states.shape

        encoder_hidden_states = encoder_hidden_states

        if self.group_norm is not None:
            hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = self.to_q(hidden_states)
        dim = query.shape[-1]
        query = self.reshape_heads_to_batch_dim(query)

        if self.added_kv_proj_dim is not None:
            raise NotImplementedError

        encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
        key = self.to_k(encoder_hidden_states)
        value = self.to_v(encoder_hidden_states)

        key = rearrange(key, "(b f) n d -> b f n d", f=video_length)
        key = key.unsqueeze(1).repeat(1, video_length, 1, 1, 1)  # (b f f n d)
        key = rearrange(key, "b f g n d -> (b f) (g n) d")

        value = rearrange(value, "(b f) n d -> b f n d", f=video_length)
        value = value.unsqueeze(1).repeat(1, video_length, 1, 1, 1)  # (b f f n d)
        value = rearrange(value, "b f g n d -> (b f) (g n) d")

        key = self.reshape_heads_to_batch_dim(key)
        value = self.reshape_heads_to_batch_dim(value)

        if attention_mask is not None:
            if attention_mask.shape[-1] != query.shape[1]:
                target_length = query.shape[1]
                attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
                attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)

        # attention, what we cannot get enough of
        if self._use_memory_efficient_attention_xformers:
            hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
            # Some versions of xformers return output in fp32, cast it back to the dtype of the input
            hidden_states = hidden_states.to(query.dtype)
        else:
            if self._slice_size is None or query.shape[0] // self._slice_size == 1:
                hidden_states = self._attention(query, key, value, attention_mask)
            else:
                hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)

        # linear proj
        hidden_states = self.to_out[0](hidden_states)

        # dropout
        hidden_states = self.to_out[1](hidden_states)
        return hidden_states

    def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None, normal_infer=False):
        if normal_infer:
            return super().forward(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                # video_length=video_length,
            )
        else:
            return self.forward_dense_attn(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                video_length=video_length,
            )
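To make the expected tensor shapes concrete, here is a small instantiation sketch. It is illustrative only: the channel, head and token sizes are arbitrary choices, and it assumes a diffusers release that still exposes diffusers.modeling_utils.ModelMixin, as the imports above require.

# Illustrative only: random video features through the block in plain per-frame mode.
import torch

model = Transformer2DModel(
    num_attention_heads=8,
    attention_head_dim=40,   # inner_dim = 8 * 40 = 320 = in_channels
    in_channels=320,
    cross_attention_dim=768,
)
hidden = torch.randn(1, 320, 8, 16, 16)   # (batch, channels, frames, height, width)
context = torch.randn(1, 77, 768)         # (batch, text tokens, embedding dim)
out = model(hidden, encoder_hidden_states=context, normal_infer=True).sample
print(out.shape)                          # torch.Size([1, 320, 8, 16, 16])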
spaces/Bart92/RVC_HF/tools/rvc_for_realtime.py
DELETED
@@ -1,381 +0,0 @@
import os
import sys
import traceback
import logging

logger = logging.getLogger(__name__)

from time import time as ttime

import fairseq
import faiss
import numpy as np
import parselmouth
import pyworld
import scipy.signal as signal
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchcrepe

from infer.lib.infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)

now_dir = os.getcwd()
sys.path.append(now_dir)
from multiprocessing import Manager as M

from configs.config import Config

config = Config()

mm = M()
if config.dml == True:

    def forward_dml(ctx, x, scale):
        ctx.scale = scale
        res = x.clone().detach()
        return res

    fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml


# config.device=torch.device("cpu")########强制cpu测试
# config.is_half=False########强制cpu测试
class RVC:
    def __init__(
        self,
        key,
        pth_path,
        index_path,
        index_rate,
        n_cpu,
        inp_q,
        opt_q,
        device,
        last_rvc=None,
    ) -> None:
        """
        初始化
        """
        try:
            global config
            self.inp_q = inp_q
            self.opt_q = opt_q
            # device="cpu"########强制cpu测试
            self.device = device
            self.f0_up_key = key
            self.time_step = 160 / 16000 * 1000
            self.f0_min = 50
            self.f0_max = 1100
            self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
            self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
            self.sr = 16000
            self.window = 160
            self.n_cpu = n_cpu
            if index_rate != 0:
                self.index = faiss.read_index(index_path)
                self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
                logger.info("Index search enabled")
            self.pth_path = pth_path
            self.index_path = index_path
            self.index_rate = index_rate

            if last_rvc is None:
                models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
                    ["assets/hubert/hubert_base.pt"],
                    suffix="",
                )
                hubert_model = models[0]
                hubert_model = hubert_model.to(device)
                if config.is_half:
                    hubert_model = hubert_model.half()
                else:
                    hubert_model = hubert_model.float()
                hubert_model.eval()
                self.model = hubert_model
            else:
                self.model = last_rvc.model

            if last_rvc is None or last_rvc.pth_path != self.pth_path:
                cpt = torch.load(self.pth_path, map_location="cpu")
                self.tgt_sr = cpt["config"][-1]
                cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
                self.if_f0 = cpt.get("f0", 1)
                self.version = cpt.get("version", "v1")
                if self.version == "v1":
                    if self.if_f0 == 1:
                        self.net_g = SynthesizerTrnMs256NSFsid(
                            *cpt["config"], is_half=config.is_half
                        )
                    else:
                        self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
                elif self.version == "v2":
                    if self.if_f0 == 1:
                        self.net_g = SynthesizerTrnMs768NSFsid(
                            *cpt["config"], is_half=config.is_half
                        )
                    else:
                        self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
                del self.net_g.enc_q
                logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False))
                self.net_g.eval().to(device)
                # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device
                if config.is_half:
                    self.net_g = self.net_g.half()
                else:
                    self.net_g = self.net_g.float()
                self.is_half = config.is_half
            else:
                self.tgt_sr = last_rvc.tgt_sr
                self.if_f0 = last_rvc.if_f0
                self.version = last_rvc.version
                self.net_g = last_rvc.net_g
                self.is_half = last_rvc.is_half

            if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"):
                self.model_rmvpe = last_rvc.model_rmvpe
        except:
            logger.warn(traceback.format_exc())

    def change_key(self, new_key):
        self.f0_up_key = new_key

    def change_index_rate(self, new_index_rate):
        if new_index_rate != 0 and self.index_rate == 0:
            self.index = faiss.read_index(self.index_path)
            self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
            logger.info("Index search enabled")
        self.index_rate = new_index_rate

    def get_f0_post(self, f0):
        f0_min = self.f0_min
        f0_max = self.f0_max
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        f0bak = f0.copy()
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        f0_coarse = np.rint(f0_mel).astype(np.int32)
        return f0_coarse, f0bak

    def get_f0(self, x, f0_up_key, n_cpu, method="harvest"):
        n_cpu = int(n_cpu)
        if method == "crepe":
            return self.get_f0_crepe(x, f0_up_key)
        if method == "rmvpe":
            return self.get_f0_rmvpe(x, f0_up_key)
        if method == "pm":
            p_len = x.shape[0] // 160 + 1
            f0 = (
                parselmouth.Sound(x, 16000)
                .to_pitch_ac(
                    time_step=0.01,
                    voicing_threshold=0.6,
                    pitch_floor=50,
                    pitch_ceiling=1100,
                )
                .selected_array["frequency"]
            )

            pad_size = (p_len - len(f0) + 1) // 2
            if pad_size > 0 or p_len - len(f0) - pad_size > 0:
                # print(pad_size, p_len - len(f0) - pad_size)
                f0 = np.pad(
                    f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
                )

            f0 *= pow(2, f0_up_key / 12)
            return self.get_f0_post(f0)
        if n_cpu == 1:
            f0, t = pyworld.harvest(
                x.astype(np.double),
                fs=16000,
                f0_ceil=1100,
                f0_floor=50,
                frame_period=10,
            )
            f0 = signal.medfilt(f0, 3)
            f0 *= pow(2, f0_up_key / 12)
            return self.get_f0_post(f0)
        f0bak = np.zeros(x.shape[0] // 160 + 1, dtype=np.float64)
        length = len(x)
        part_length = 160 * ((length // 160 - 1) // n_cpu + 1)
        n_cpu = (length // 160 - 1) // (part_length // 160) + 1
        ts = ttime()
        res_f0 = mm.dict()
        for idx in range(n_cpu):
            tail = part_length * (idx + 1) + 320
            if idx == 0:
                self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts))
            else:
                self.inp_q.put(
                    (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts)
                )
        while 1:
            res_ts = self.opt_q.get()
            if res_ts == ts:
                break
        f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])]
        for idx, f0 in enumerate(f0s):
            if idx == 0:
                f0 = f0[:-3]
            elif idx != n_cpu - 1:
                f0 = f0[2:-3]
            else:
                f0 = f0[2:]
            f0bak[
                part_length * idx // 160 : part_length * idx // 160 + f0.shape[0]
            ] = f0
        f0bak = signal.medfilt(f0bak, 3)
        f0bak *= pow(2, f0_up_key / 12)
        return self.get_f0_post(f0bak)

    def get_f0_crepe(self, x, f0_up_key):
        if "privateuseone" in str(self.device):  ###不支持dml,cpu又太慢用不成,拿pm顶替
            return self.get_f0(x, f0_up_key, 1, "pm")
        audio = torch.tensor(np.copy(x))[None].float()
        # print("using crepe,device:%s"%self.device)
        f0, pd = torchcrepe.predict(
            audio,
            self.sr,
            160,
            self.f0_min,
            self.f0_max,
            "full",
            batch_size=512,
            # device=self.device if self.device.type!="privateuseone" else "cpu",###crepe不用半精度全部是全精度所以不愁###cpu延迟高到没法用
            device=self.device,
            return_periodicity=True,
        )
        pd = torchcrepe.filter.median(pd, 3)
        f0 = torchcrepe.filter.mean(f0, 3)
        f0[pd < 0.1] = 0
        f0 = f0[0].cpu().numpy()
        f0 *= pow(2, f0_up_key / 12)
        return self.get_f0_post(f0)

    def get_f0_rmvpe(self, x, f0_up_key):
        if hasattr(self, "model_rmvpe") == False:
            from infer.lib.rmvpe import RMVPE

            logger.info("Loading rmvpe model")
            self.model_rmvpe = RMVPE(
                # "rmvpe.pt", is_half=self.is_half if self.device.type!="privateuseone" else False, device=self.device if self.device.type!="privateuseone"else "cpu"####dml时强制对rmvpe用cpu跑
                # "rmvpe.pt", is_half=False, device=self.device####dml配置
                # "rmvpe.pt", is_half=False, device="cpu"####锁定cpu配置
                "assets/rmvpe/rmvpe.pt",
                is_half=self.is_half,
                device=self.device,  ####正常逻辑
            )
            # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device)
        f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
        f0 *= pow(2, f0_up_key / 12)
        return self.get_f0_post(f0)

    def infer(
        self,
        feats: torch.Tensor,
        indata: np.ndarray,
        block_frame_16k,
        rate,
        cache_pitch,
        cache_pitchf,
        f0method,
    ) -> np.ndarray:
        feats = feats.view(1, -1)
        if config.is_half:
            feats = feats.half()
        else:
            feats = feats.float()
        feats = feats.to(self.device)
        t1 = ttime()
        with torch.no_grad():
            padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
            inputs = {
                "source": feats,
                "padding_mask": padding_mask,
                "output_layer": 9 if self.version == "v1" else 12,
            }
            logits = self.model.extract_features(**inputs)
            feats = (
                self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
            )
            feats = F.pad(feats, (0, 0, 1, 0))
        t2 = ttime()
        try:
            if hasattr(self, "index") and self.index_rate != 0:
                leng_replace_head = int(rate * feats[0].shape[0])
                npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32")
                score, ix = self.index.search(npy, k=8)
                weight = np.square(1 / score)
                weight /= weight.sum(axis=1, keepdims=True)
                npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
                if config.is_half:
                    npy = npy.astype("float16")
                feats[0][-leng_replace_head:] = (
                    torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
                    + (1 - self.index_rate) * feats[0][-leng_replace_head:]
                )
            else:
                logger.warn("Index search FAILED or disabled")
        except:
            traceback.print_exc()
            logger.warn("Index search FAILED")
        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
        t3 = ttime()
        if self.if_f0 == 1:
            pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method)
            start_frame = block_frame_16k // 160
            end_frame = len(cache_pitch) - (pitch.shape[0] - 4) + start_frame
            cache_pitch[:] = np.append(cache_pitch[start_frame:end_frame], pitch[3:-1])
            cache_pitchf[:] = np.append(
                cache_pitchf[start_frame:end_frame], pitchf[3:-1]
            )
            p_len = min(feats.shape[1], 13000, cache_pitch.shape[0])
        else:
            cache_pitch, cache_pitchf = None, None
            p_len = min(feats.shape[1], 13000)
        t4 = ttime()
        feats = feats[:, :p_len, :]
        if self.if_f0 == 1:
            cache_pitch = cache_pitch[:p_len]
            cache_pitchf = cache_pitchf[:p_len]
            cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device)
            cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device)
        p_len = torch.LongTensor([p_len]).to(self.device)
        ii = 0  # sid
        sid = torch.LongTensor([ii]).to(self.device)
        with torch.no_grad():
            if self.if_f0 == 1:
                # print(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2)
                infered_audio = (
                    self.net_g.infer(
                        feats, p_len, cache_pitch, cache_pitchf, sid, rate
                    )[0][0, 0]
                    .data
                    .float()
                )
            else:
                infered_audio = (
                    self.net_g.infer(feats, p_len, sid, rate)[0][0, 0]
                    .data
                    .float()
                )
        t5 = ttime()
        logger.info(
            "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs",
            t2 - t1,
            t3 - t2,
            t4 - t3,
            t5 - t4,
        )
        return infered_audio
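Most of the class above needs HuBERT and RVC checkpoints to run, but the coarse-pitch quantisation in get_f0_post is self-contained. A standalone sketch of that mapping, illustrative only, using a synthetic pitch track:

# Illustrative only: map an f0 curve in Hz to the 1-255 coarse mel scale used by get_f0_post.
import numpy as np

f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.linspace(0, 400, num=100)  # fake pitch track; 0 Hz marks unvoiced frames
f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_coarse = np.rint(np.clip(f0_mel, 1, 255)).astype(np.int32)
print(f0_coarse.min(), f0_coarse.max())  # coarse bins stay inside [1, 255]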
|
spaces/Benson/text-generation/Examples/Decision Mod Apk.md
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar King’s Choice Mod Apk y regla de su propio imperio! </h1>
|
3 |
-
<p>¿Te gustan los juegos de simulación histórica con elementos RPG? ¿Quieres experimentar la vida de un rey y tomar decisiones que afectan el destino de tu reino? ¿Quieres reclutar héroes legendarios y bellezas a tu lado y disfrutar de recursos ilimitados? Si usted respondió sí a cualquiera de estas preguntas, entonces usted debe descargar King’s Choice mod apk y comenzar su aventura real! </p>
|
4 |
-
<h2>decision mod apk</h2><br /><p><b><b>DOWNLOAD</b> ⚡ <a href="https://bltlly.com/2v6Jud">https://bltlly.com/2v6Jud</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es King’s Choice? </h2>
|
6 |
-
<p>King’s Choice es un popular juego para móviles desarrollado por ONEMT. Es un juego de simulación histórico con elementos RPG, donde puedes jugar como rey de un país europeo medieval. Puede elegir entre diferentes países, como Inglaterra, Francia, Alemania, España y más. También puede personalizar su apariencia, nombre y título. </p>
|
7 |
-
<h3>Un juego de simulación histórica con elementos RPG</h3>
|
8 |
-
<p>Como rey, tienes que manejar tu reino, lidiar con asuntos políticos, expandir tu territorio y enfrentar varios desafíos. También puede interactuar con otros jugadores en tiempo real, unirse a alianzas, intercambiar recursos y participar en guerras. También puedes explorar el mapa del mundo, descubrir secretos y completar misiones. </p>
|
9 |
-
<h3>Un juego donde puedes tomar decisiones que afectan la historia</h3>
|
10 |
-
<p>Una de las características más interesantes de King’s Choice es que puedes tomar decisiones que afectan la historia y el resultado del juego. Puedes elegir cómo lidiar con diferentes situaciones, como rebeliones, invasiones, matrimonios, asesinatos y más. También puedes elegir cómo tratar a tus súbditos, aliados, enemigos y amantes. Tus elecciones determinarán tu reputación, popularidad, lealtad y romance. </p>
|
11 |
-
<h3>Un juego donde se puede reclutar héroes legendarios y bellezas</h3>
|
12 |
-
|
13 |
-
<h2>¿Por qué descargar King’s Choice mod apk? </h2>
|
14 |
-
<p>If you are a fan of King’s Choice, you may wonder why you should download King’s Choice mod apk instead of playing the original version. Well, there are many reasons why downloading King’s Choice mod apk is a good idea. Here are some of them:</p>
<h3>Unlimited resources to upgrade your kingdom</h3>
<p>One of the main advantages of downloading King’s Choice mod apk is that you will have unlimited resources to upgrade your kingdom. You will have unlimited gold, gems, food, wood, iron and more. You can use them to build and upgrade your buildings, research technologies, train troops and more. You will not have to worry about running out of resources or spending real money on them.</p>
<h3>Free VIP features to enjoy more benefits</h3>
<p>Another benefit of downloading King’s Choice mod apk is that you get free VIP features with extra benefits. VIP level 15 is unlocked from the start, which means you have access to exclusive privileges such as faster building speed, more daily rewards, more free draws, more storage space and more. You also get free VIP points to raise your VIP level even further.</p>
<h3>No ads to interrupt your gameplay</h3>
<p>A final advantage of downloading King’s Choice mod apk is that there are no ads to interrupt your gameplay. Ads can be annoying and distracting, especially when you are immersed in a game like King’s Choice. They can also consume your data and battery. By downloading King’s Choice mod apk, you will not have to deal with any ads. You can enjoy the game without interruptions or annoyances.</p>
<h2>How to download and install King’s Choice mod apk?</h2>
<p>If you are convinced by the benefits of downloading King’s Choice mod apk, you may wonder how to download and install it on your device. Don’t worry, it is very easy and simple. Just follow these steps:</p>
<h3>Step 1: Download the mod apk file from a trusted source</h3>
<p>The first step is to download the mod apk file from a trusted source. You can find many websites that offer King’s Choice mod apk, but not all of them are safe and reliable. Some of them may contain viruses, malware or spyware that can damage your device or steal your personal information. To avoid any risk, you should download King’s Choice mod apk from a reputable website, such as [this one].</p>
<h3>Step 2: Enable unknown sources on your device</h3>
<p>The second step is to enable unknown sources on your device. This is necessary because King’s Choice mod apk is not available on the official app store, so you have to install it from an external source. To do this, go to your device settings, then Security, then Unknown sources, and turn it on. This allows you to install apps from sources other than the app store.</p>
<h3>Step 3: Install the mod apk file and launch the game</h3>
<p>The third and final step is to install the mod apk file and launch the game. To do this, locate the downloaded mod apk file on your device, tap on it and follow the instructions. It takes a few seconds to install the game on your device. Once it is done, you can launch the game and enjoy all the features of King’s Choice mod apk.</p>
<h2>Conclusion</h2>
<p>King’s Choice is a fun and addictive game that lets you experience the life of a king in medieval Europe. You can make choices that affect the story, recruit legendary heroes and beauties, manage your kingdom and interact with other players. However, if you want to enjoy the game to the fullest, you should download King’s Choice mod apk and get unlimited resources, free VIP features, and no ads. It is easy and safe to download and install King’s Choice mod apk on your device. Just follow the steps above and start ruling your own empire!</p>
<h2>Frequently asked questions</h2>
<p>Here are some frequently asked questions about King’s Choice mod apk:</p>
<ul>
<li><b>Is King’s Choice mod apk safe?</b></li>
<p>Yes, King’s Choice mod apk is safe as long as you download it from a trusted source. It does not contain any viruses, malware or spyware that could damage your device or steal your personal information.</p>
<li><b>Is King’s Choice mod apk compatible with my device?</b></li>
<p>King’s Choice mod apk is compatible with most Android devices running Android 4.4 or higher. However, some devices may not support certain features or functions of the game.</p>
<li><b>Will I get banned for using King’s Choice mod apk?</b></li>
<p>No, you will not get banned for using King’s Choice mod apk. The mod apk has anti-ban features that prevent the game from detecting any modifications or cheats. You can play the game without any worries.</p>
<li><b>Can I update King’s Choice mod apk?</b></li>
<p>Yes, you can update King’s Choice mod apk whenever a new version is available. However, you have to download and install the new version manually from the same source as before.</p>
<li><b>Can I play King’s Choice mod apk offline?</b></li>
<p>No, you cannot play King’s Choice mod apk offline. The game requires an internet connection to work properly and access all its features.</p>
</ul>
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/__init__.py
DELETED
@@ -1,2 +0,0 @@
"""A package that contains models that represent entities.
"""

spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/style.py
DELETED
@@ -1,197 +0,0 @@
"""
    pygments.style
    ~~~~~~~~~~~~~~

    Basic style object.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pip._vendor.pygments.token import Token, STANDARD_TYPES

# Default mapping of ansixxx to RGB colors.
_ansimap = {
    # dark
    'ansiblack': '000000',
    'ansired': '7f0000',
    'ansigreen': '007f00',
    'ansiyellow': '7f7fe0',
    'ansiblue': '00007f',
    'ansimagenta': '7f007f',
    'ansicyan': '007f7f',
    'ansigray': 'e5e5e5',
    # normal
    'ansibrightblack': '555555',
    'ansibrightred': 'ff0000',
    'ansibrightgreen': '00ff00',
    'ansibrightyellow': 'ffff00',
    'ansibrightblue': '0000ff',
    'ansibrightmagenta': 'ff00ff',
    'ansibrightcyan': '00ffff',
    'ansiwhite': 'ffffff',
}
# mapping of deprecated #ansixxx colors to new color names
_deprecated_ansicolors = {
    # dark
    '#ansiblack': 'ansiblack',
    '#ansidarkred': 'ansired',
    '#ansidarkgreen': 'ansigreen',
    '#ansibrown': 'ansiyellow',
    '#ansidarkblue': 'ansiblue',
    '#ansipurple': 'ansimagenta',
    '#ansiteal': 'ansicyan',
    '#ansilightgray': 'ansigray',
    # normal
    '#ansidarkgray': 'ansibrightblack',
    '#ansired': 'ansibrightred',
    '#ansigreen': 'ansibrightgreen',
    '#ansiyellow': 'ansibrightyellow',
    '#ansiblue': 'ansibrightblue',
    '#ansifuchsia': 'ansibrightmagenta',
    '#ansiturquoise': 'ansibrightcyan',
    '#ansiwhite': 'ansiwhite',
}
ansicolors = set(_ansimap)


class StyleMeta(type):

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            if text in ansicolors:
                return text
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    return col[0] * 2 + col[1] * 2 + col[2] * 2
            elif text == '':
                return ''
            elif text.startswith('var') or text.startswith('calc'):
                return text
            assert False, "wrong color format %r" % text

        _styles = obj._styles = {}

        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    ndef = _styles[Token][:]
                else:
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        t = cls._styles[token]
        ansicolor = bgansicolor = None
        color = t[0]
        if color in _deprecated_ansicolors:
            color = _deprecated_ansicolors[color]
        if color in ansicolors:
            ansicolor = color
            color = _ansimap[color]
        bgcolor = t[4]
        if bgcolor in _deprecated_ansicolors:
            bgcolor = _deprecated_ansicolors[bgcolor]
        if bgcolor in ansicolors:
            bgansicolor = bgcolor
            bgcolor = _ansimap[bgcolor]

        return {
            'color': color or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': bgcolor or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
            'ansicolor': ansicolor,
            'bgansicolor': bgansicolor,
        }

    def list_styles(cls):
        return list(cls)

    def styles_token(cls, ttype):
        return ttype in cls._styles

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)


class Style(metaclass=StyleMeta):

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: line number font color
    line_number_color = 'inherit'

    #: line number background color
    line_number_background_color = 'transparent'

    #: special line number font color
    line_number_special_color = '#000000'

    #: special line number background color
    line_number_special_background_color = '#ffffc0'

    #: Style definitions for individual token types.
    styles = {}

    # Attribute for lexers defined within Pygments. If set
    # to True, the style is not shown in the style gallery
    # on the website. This is intended for language-specific
    # styles.
    web_style_gallery_exclude = False

spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py
DELETED
@@ -1,97 +0,0 @@
"""
    pygments.styles
    ~~~~~~~~~~~~~~~

    Contains built-in styles.

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pip._vendor.pygments.plugin import find_plugin_styles
from pip._vendor.pygments.util import ClassNotFound


#: Maps style names to 'submodule::classname'.
STYLE_MAP = {
    'default': 'default::DefaultStyle',
    'emacs': 'emacs::EmacsStyle',
    'friendly': 'friendly::FriendlyStyle',
    'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle',
    'colorful': 'colorful::ColorfulStyle',
    'autumn': 'autumn::AutumnStyle',
    'murphy': 'murphy::MurphyStyle',
    'manni': 'manni::ManniStyle',
    'material': 'material::MaterialStyle',
    'monokai': 'monokai::MonokaiStyle',
    'perldoc': 'perldoc::PerldocStyle',
    'pastie': 'pastie::PastieStyle',
    'borland': 'borland::BorlandStyle',
    'trac': 'trac::TracStyle',
    'native': 'native::NativeStyle',
    'fruity': 'fruity::FruityStyle',
    'bw': 'bw::BlackWhiteStyle',
    'vim': 'vim::VimStyle',
    'vs': 'vs::VisualStudioStyle',
    'tango': 'tango::TangoStyle',
    'rrt': 'rrt::RrtStyle',
    'xcode': 'xcode::XcodeStyle',
    'igor': 'igor::IgorStyle',
    'paraiso-light': 'paraiso_light::ParaisoLightStyle',
    'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle',
    'lovelace': 'lovelace::LovelaceStyle',
    'algol': 'algol::AlgolStyle',
    'algol_nu': 'algol_nu::Algol_NuStyle',
    'arduino': 'arduino::ArduinoStyle',
    'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
    'abap': 'abap::AbapStyle',
    'solarized-dark': 'solarized::SolarizedDarkStyle',
    'solarized-light': 'solarized::SolarizedLightStyle',
    'sas': 'sas::SasStyle',
    'staroffice': 'staroffice::StarofficeStyle',
    'stata': 'stata_light::StataLightStyle',
    'stata-light': 'stata_light::StataLightStyle',
    'stata-dark': 'stata_dark::StataDarkStyle',
    'inkpot': 'inkpot::InkPotStyle',
    'zenburn': 'zenburn::ZenburnStyle',
    'gruvbox-dark': 'gruvbox::GruvboxDarkStyle',
    'gruvbox-light': 'gruvbox::GruvboxLightStyle',
    'dracula': 'dracula::DraculaStyle',
    'one-dark': 'onedark::OneDarkStyle',
    'lilypond': 'lilypond::LilyPondStyle',
    'nord': 'nord::NordStyle',
    'nord-darker': 'nord::NordDarkerStyle',
    'github-dark': 'gh_dark::GhDarkStyle'
}


def get_style_by_name(name):
    if name in STYLE_MAP:
        mod, cls = STYLE_MAP[name].split('::')
        builtin = "yes"
    else:
        for found_name, style in find_plugin_styles():
            if name == found_name:
                return style
        # perhaps it got dropped into our styles package
        builtin = ""
        mod = name
        cls = name.title() + "Style"

    try:
        mod = __import__('pygments.styles.' + mod, None, None, [cls])
    except ImportError:
        raise ClassNotFound("Could not find style module %r" % mod +
                            (builtin and ", though it should be builtin") + ".")
    try:
        return getattr(mod, cls)
    except AttributeError:
        raise ClassNotFound("Could not find style class %r in style module." % cls)


def get_all_styles():
    """Return a generator for all styles by name,
    both builtin and plugin."""
    yield from STYLE_MAP
    for name, _ in find_plugin_styles():
        yield name

spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/syntax.py
DELETED
@@ -1,950 +0,0 @@
|
|
1 |
-
import os.path
|
2 |
-
import platform
|
3 |
-
import re
|
4 |
-
import sys
|
5 |
-
import textwrap
|
6 |
-
from abc import ABC, abstractmethod
|
7 |
-
from pathlib import Path
|
8 |
-
from typing import (
|
9 |
-
Any,
|
10 |
-
Dict,
|
11 |
-
Iterable,
|
12 |
-
List,
|
13 |
-
NamedTuple,
|
14 |
-
Optional,
|
15 |
-
Sequence,
|
16 |
-
Set,
|
17 |
-
Tuple,
|
18 |
-
Type,
|
19 |
-
Union,
|
20 |
-
)
|
21 |
-
|
22 |
-
from pip._vendor.pygments.lexer import Lexer
|
23 |
-
from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
|
24 |
-
from pip._vendor.pygments.style import Style as PygmentsStyle
|
25 |
-
from pip._vendor.pygments.styles import get_style_by_name
|
26 |
-
from pip._vendor.pygments.token import (
|
27 |
-
Comment,
|
28 |
-
Error,
|
29 |
-
Generic,
|
30 |
-
Keyword,
|
31 |
-
Name,
|
32 |
-
Number,
|
33 |
-
Operator,
|
34 |
-
String,
|
35 |
-
Token,
|
36 |
-
Whitespace,
|
37 |
-
)
|
38 |
-
from pip._vendor.pygments.util import ClassNotFound
|
39 |
-
|
40 |
-
from pip._vendor.rich.containers import Lines
|
41 |
-
from pip._vendor.rich.padding import Padding, PaddingDimensions
|
42 |
-
|
43 |
-
from ._loop import loop_first
|
44 |
-
from .cells import cell_len
|
45 |
-
from .color import Color, blend_rgb
|
46 |
-
from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
|
47 |
-
from .jupyter import JupyterMixin
|
48 |
-
from .measure import Measurement
|
49 |
-
from .segment import Segment, Segments
|
50 |
-
from .style import Style, StyleType
|
51 |
-
from .text import Text
|
52 |
-
|
53 |
-
TokenType = Tuple[str, ...]
|
54 |
-
|
55 |
-
WINDOWS = platform.system() == "Windows"
|
56 |
-
DEFAULT_THEME = "monokai"
|
57 |
-
|
58 |
-
# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py
|
59 |
-
# A few modifications were made
|
60 |
-
|
61 |
-
ANSI_LIGHT: Dict[TokenType, Style] = {
|
62 |
-
Token: Style(),
|
63 |
-
Whitespace: Style(color="white"),
|
64 |
-
Comment: Style(dim=True),
|
65 |
-
Comment.Preproc: Style(color="cyan"),
|
66 |
-
Keyword: Style(color="blue"),
|
67 |
-
Keyword.Type: Style(color="cyan"),
|
68 |
-
Operator.Word: Style(color="magenta"),
|
69 |
-
Name.Builtin: Style(color="cyan"),
|
70 |
-
Name.Function: Style(color="green"),
|
71 |
-
Name.Namespace: Style(color="cyan", underline=True),
|
72 |
-
Name.Class: Style(color="green", underline=True),
|
73 |
-
Name.Exception: Style(color="cyan"),
|
74 |
-
Name.Decorator: Style(color="magenta", bold=True),
|
75 |
-
Name.Variable: Style(color="red"),
|
76 |
-
Name.Constant: Style(color="red"),
|
77 |
-
Name.Attribute: Style(color="cyan"),
|
78 |
-
Name.Tag: Style(color="bright_blue"),
|
79 |
-
String: Style(color="yellow"),
|
80 |
-
Number: Style(color="blue"),
|
81 |
-
Generic.Deleted: Style(color="bright_red"),
|
82 |
-
Generic.Inserted: Style(color="green"),
|
83 |
-
Generic.Heading: Style(bold=True),
|
84 |
-
Generic.Subheading: Style(color="magenta", bold=True),
|
85 |
-
Generic.Prompt: Style(bold=True),
|
86 |
-
Generic.Error: Style(color="bright_red"),
|
87 |
-
Error: Style(color="red", underline=True),
|
88 |
-
}
|
89 |
-
|
90 |
-
ANSI_DARK: Dict[TokenType, Style] = {
|
91 |
-
Token: Style(),
|
92 |
-
Whitespace: Style(color="bright_black"),
|
93 |
-
Comment: Style(dim=True),
|
94 |
-
Comment.Preproc: Style(color="bright_cyan"),
|
95 |
-
Keyword: Style(color="bright_blue"),
|
96 |
-
Keyword.Type: Style(color="bright_cyan"),
|
97 |
-
Operator.Word: Style(color="bright_magenta"),
|
98 |
-
Name.Builtin: Style(color="bright_cyan"),
|
99 |
-
Name.Function: Style(color="bright_green"),
|
100 |
-
Name.Namespace: Style(color="bright_cyan", underline=True),
|
101 |
-
Name.Class: Style(color="bright_green", underline=True),
|
102 |
-
Name.Exception: Style(color="bright_cyan"),
|
103 |
-
Name.Decorator: Style(color="bright_magenta", bold=True),
|
104 |
-
Name.Variable: Style(color="bright_red"),
|
105 |
-
Name.Constant: Style(color="bright_red"),
|
106 |
-
Name.Attribute: Style(color="bright_cyan"),
|
107 |
-
Name.Tag: Style(color="bright_blue"),
|
108 |
-
String: Style(color="yellow"),
|
109 |
-
Number: Style(color="bright_blue"),
|
110 |
-
Generic.Deleted: Style(color="bright_red"),
|
111 |
-
Generic.Inserted: Style(color="bright_green"),
|
112 |
-
Generic.Heading: Style(bold=True),
|
113 |
-
Generic.Subheading: Style(color="bright_magenta", bold=True),
|
114 |
-
Generic.Prompt: Style(bold=True),
|
115 |
-
Generic.Error: Style(color="bright_red"),
|
116 |
-
Error: Style(color="red", underline=True),
|
117 |
-
}
|
118 |
-
|
119 |
-
RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK}
|
120 |
-
NUMBERS_COLUMN_DEFAULT_PADDING = 2
|
121 |
-
|
122 |
-
|
123 |
-
class SyntaxTheme(ABC):
|
124 |
-
"""Base class for a syntax theme."""
|
125 |
-
|
126 |
-
@abstractmethod
|
127 |
-
def get_style_for_token(self, token_type: TokenType) -> Style:
|
128 |
-
"""Get a style for a given Pygments token."""
|
129 |
-
raise NotImplementedError # pragma: no cover
|
130 |
-
|
131 |
-
@abstractmethod
|
132 |
-
def get_background_style(self) -> Style:
|
133 |
-
"""Get the background color."""
|
134 |
-
raise NotImplementedError # pragma: no cover
|
135 |
-
|
136 |
-
|
137 |
-
class PygmentsSyntaxTheme(SyntaxTheme):
|
138 |
-
"""Syntax theme that delegates to Pygments theme."""
|
139 |
-
|
140 |
-
def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None:
|
141 |
-
self._style_cache: Dict[TokenType, Style] = {}
|
142 |
-
if isinstance(theme, str):
|
143 |
-
try:
|
144 |
-
self._pygments_style_class = get_style_by_name(theme)
|
145 |
-
except ClassNotFound:
|
146 |
-
self._pygments_style_class = get_style_by_name("default")
|
147 |
-
else:
|
148 |
-
self._pygments_style_class = theme
|
149 |
-
|
150 |
-
self._background_color = self._pygments_style_class.background_color
|
151 |
-
self._background_style = Style(bgcolor=self._background_color)
|
152 |
-
|
153 |
-
def get_style_for_token(self, token_type: TokenType) -> Style:
|
154 |
-
"""Get a style from a Pygments class."""
|
155 |
-
try:
|
156 |
-
return self._style_cache[token_type]
|
157 |
-
except KeyError:
|
158 |
-
try:
|
159 |
-
pygments_style = self._pygments_style_class.style_for_token(token_type)
|
160 |
-
except KeyError:
|
161 |
-
style = Style.null()
|
162 |
-
else:
|
163 |
-
color = pygments_style["color"]
|
164 |
-
bgcolor = pygments_style["bgcolor"]
|
165 |
-
style = Style(
|
166 |
-
color="#" + color if color else "#000000",
|
167 |
-
bgcolor="#" + bgcolor if bgcolor else self._background_color,
|
168 |
-
bold=pygments_style["bold"],
|
169 |
-
italic=pygments_style["italic"],
|
170 |
-
underline=pygments_style["underline"],
|
171 |
-
)
|
172 |
-
self._style_cache[token_type] = style
|
173 |
-
return style
|
174 |
-
|
175 |
-
def get_background_style(self) -> Style:
|
176 |
-
return self._background_style
|
177 |
-
|
178 |
-
|
179 |
-
class ANSISyntaxTheme(SyntaxTheme):
|
180 |
-
"""Syntax theme to use standard colors."""
|
181 |
-
|
182 |
-
def __init__(self, style_map: Dict[TokenType, Style]) -> None:
|
183 |
-
self.style_map = style_map
|
184 |
-
self._missing_style = Style.null()
|
185 |
-
self._background_style = Style.null()
|
186 |
-
self._style_cache: Dict[TokenType, Style] = {}
|
187 |
-
|
188 |
-
def get_style_for_token(self, token_type: TokenType) -> Style:
|
189 |
-
"""Look up style in the style map."""
|
190 |
-
try:
|
191 |
-
return self._style_cache[token_type]
|
192 |
-
except KeyError:
|
193 |
-
# Styles form a hierarchy
|
194 |
-
# We need to go from most to least specific
|
195 |
-
# e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",)
|
196 |
-
get_style = self.style_map.get
|
197 |
-
token = tuple(token_type)
|
198 |
-
style = self._missing_style
|
199 |
-
while token:
|
200 |
-
_style = get_style(token)
|
201 |
-
if _style is not None:
|
202 |
-
style = _style
|
203 |
-
break
|
204 |
-
token = token[:-1]
|
205 |
-
self._style_cache[token_type] = style
|
206 |
-
return style
|
207 |
-
|
208 |
-
def get_background_style(self) -> Style:
|
209 |
-
return self._background_style
|
210 |
-
|
211 |
-
|
212 |
-
SyntaxPosition = Tuple[int, int]
|
213 |
-
|
214 |
-
|
215 |
-
class _SyntaxHighlightRange(NamedTuple):
|
216 |
-
"""
|
217 |
-
A range to highlight in a Syntax object.
|
218 |
-
`start` and `end` are 2-integers tuples, where the first integer is the line number
|
219 |
-
(starting from 1) and the second integer is the column index (starting from 0).
|
220 |
-
"""
|
221 |
-
|
222 |
-
style: StyleType
|
223 |
-
start: SyntaxPosition
|
224 |
-
end: SyntaxPosition
|
225 |
-
|
226 |
-
|
227 |
-
class Syntax(JupyterMixin):
|
228 |
-
"""Construct a Syntax object to render syntax highlighted code.
|
229 |
-
|
230 |
-
Args:
|
231 |
-
code (str): Code to highlight.
|
232 |
-
lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/)
|
233 |
-
theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
|
234 |
-
dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
|
235 |
-
line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
|
236 |
-
start_line (int, optional): Starting number for line numbers. Defaults to 1.
|
237 |
-
line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render.
|
238 |
-
A value of None in the tuple indicates the range is open in that direction.
|
239 |
-
highlight_lines (Set[int]): A set of line numbers to highlight.
|
240 |
-
code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
|
241 |
-
tab_size (int, optional): Size of tabs. Defaults to 4.
|
242 |
-
word_wrap (bool, optional): Enable word wrapping.
|
243 |
-
background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
|
244 |
-
indent_guides (bool, optional): Show indent guides. Defaults to False.
|
245 |
-
padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
|
246 |
-
"""
|
247 |
-
|
248 |
-
_pygments_style_class: Type[PygmentsStyle]
|
249 |
-
_theme: SyntaxTheme
|
250 |
-
|
251 |
-
@classmethod
|
252 |
-
def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme:
|
253 |
-
"""Get a syntax theme instance."""
|
254 |
-
if isinstance(name, SyntaxTheme):
|
255 |
-
return name
|
256 |
-
theme: SyntaxTheme
|
257 |
-
if name in RICH_SYNTAX_THEMES:
|
258 |
-
theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name])
|
259 |
-
else:
|
260 |
-
theme = PygmentsSyntaxTheme(name)
|
261 |
-
return theme
|
262 |
-
|
263 |
-
def __init__(
|
264 |
-
self,
|
265 |
-
code: str,
|
266 |
-
lexer: Union[Lexer, str],
|
267 |
-
*,
|
268 |
-
theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
|
269 |
-
dedent: bool = False,
|
270 |
-
line_numbers: bool = False,
|
271 |
-
start_line: int = 1,
|
272 |
-
line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
|
273 |
-
highlight_lines: Optional[Set[int]] = None,
|
274 |
-
code_width: Optional[int] = None,
|
275 |
-
tab_size: int = 4,
|
276 |
-
word_wrap: bool = False,
|
277 |
-
background_color: Optional[str] = None,
|
278 |
-
indent_guides: bool = False,
|
279 |
-
padding: PaddingDimensions = 0,
|
280 |
-
) -> None:
|
281 |
-
self.code = code
|
282 |
-
self._lexer = lexer
|
283 |
-
self.dedent = dedent
|
284 |
-
self.line_numbers = line_numbers
|
285 |
-
self.start_line = start_line
|
286 |
-
self.line_range = line_range
|
287 |
-
self.highlight_lines = highlight_lines or set()
|
288 |
-
self.code_width = code_width
|
289 |
-
self.tab_size = tab_size
|
290 |
-
self.word_wrap = word_wrap
|
291 |
-
self.background_color = background_color
|
292 |
-
self.background_style = (
|
293 |
-
Style(bgcolor=background_color) if background_color else Style()
|
294 |
-
)
|
295 |
-
self.indent_guides = indent_guides
|
296 |
-
self.padding = padding
|
297 |
-
|
298 |
-
self._theme = self.get_theme(theme)
|
299 |
-
self._stylized_ranges: List[_SyntaxHighlightRange] = []
|
300 |
-
|
301 |
-
@classmethod
|
302 |
-
def from_path(
|
303 |
-
cls,
|
304 |
-
path: str,
|
305 |
-
encoding: str = "utf-8",
|
306 |
-
lexer: Optional[Union[Lexer, str]] = None,
|
307 |
-
theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
|
308 |
-
dedent: bool = False,
|
309 |
-
line_numbers: bool = False,
|
310 |
-
line_range: Optional[Tuple[int, int]] = None,
|
311 |
-
start_line: int = 1,
|
312 |
-
highlight_lines: Optional[Set[int]] = None,
|
313 |
-
code_width: Optional[int] = None,
|
314 |
-
tab_size: int = 4,
|
315 |
-
word_wrap: bool = False,
|
316 |
-
background_color: Optional[str] = None,
|
317 |
-
indent_guides: bool = False,
|
318 |
-
padding: PaddingDimensions = 0,
|
319 |
-
) -> "Syntax":
|
320 |
-
"""Construct a Syntax object from a file.
|
321 |
-
|
322 |
-
Args:
|
323 |
-
path (str): Path to file to highlight.
|
324 |
-
encoding (str): Encoding of file.
|
325 |
-
lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content.
|
326 |
-
theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs".
|
327 |
-
dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True.
|
328 |
-
line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
|
329 |
-
start_line (int, optional): Starting number for line numbers. Defaults to 1.
|
330 |
-
line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render.
|
331 |
-
highlight_lines (Set[int]): A set of line numbers to highlight.
|
332 |
-
code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
|
333 |
-
tab_size (int, optional): Size of tabs. Defaults to 4.
|
334 |
-
word_wrap (bool, optional): Enable word wrapping of code.
|
335 |
-
background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
|
336 |
-
indent_guides (bool, optional): Show indent guides. Defaults to False.
|
337 |
-
padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
|
338 |
-
|
339 |
-
Returns:
|
340 |
-
[Syntax]: A Syntax object that may be printed to the console
|
341 |
-
"""
|
342 |
-
code = Path(path).read_text(encoding=encoding)
|
343 |
-
|
344 |
-
if not lexer:
|
345 |
-
lexer = cls.guess_lexer(path, code=code)
|
346 |
-
|
347 |
-
return cls(
|
348 |
-
code,
|
349 |
-
lexer,
|
350 |
-
theme=theme,
|
351 |
-
dedent=dedent,
|
352 |
-
line_numbers=line_numbers,
|
353 |
-
line_range=line_range,
|
354 |
-
start_line=start_line,
|
355 |
-
highlight_lines=highlight_lines,
|
356 |
-
code_width=code_width,
|
357 |
-
tab_size=tab_size,
|
358 |
-
word_wrap=word_wrap,
|
359 |
-
background_color=background_color,
|
360 |
-
indent_guides=indent_guides,
|
361 |
-
padding=padding,
|
362 |
-
)
|
363 |
-
|
364 |
-
@classmethod
|
365 |
-
def guess_lexer(cls, path: str, code: Optional[str] = None) -> str:
|
366 |
-
"""Guess the alias of the Pygments lexer to use based on a path and an optional string of code.
|
367 |
-
If code is supplied, it will use a combination of the code and the filename to determine the
|
368 |
-
best lexer to use. For example, if the file is ``index.html`` and the file contains Django
|
369 |
-
templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no
|
370 |
-
templating language is used, the "html" lexer will be used. If no string of code
|
371 |
-
is supplied, the lexer will be chosen based on the file extension..
|
372 |
-
|
373 |
-
Args:
|
374 |
-
path (AnyStr): The path to the file containing the code you wish to know the lexer for.
|
375 |
-
code (str, optional): Optional string of code that will be used as a fallback if no lexer
|
376 |
-
is found for the supplied path.
|
377 |
-
|
378 |
-
Returns:
|
379 |
-
str: The name of the Pygments lexer that best matches the supplied path/code.
|
380 |
-
"""
|
381 |
-
lexer: Optional[Lexer] = None
|
382 |
-
lexer_name = "default"
|
383 |
-
if code:
|
384 |
-
try:
|
385 |
-
lexer = guess_lexer_for_filename(path, code)
|
386 |
-
except ClassNotFound:
|
387 |
-
pass
|
388 |
-
|
389 |
-
if not lexer:
|
390 |
-
try:
|
391 |
-
_, ext = os.path.splitext(path)
|
392 |
-
if ext:
|
393 |
-
extension = ext.lstrip(".").lower()
|
394 |
-
lexer = get_lexer_by_name(extension)
|
395 |
-
except ClassNotFound:
|
396 |
-
pass
|
397 |
-
|
398 |
-
if lexer:
|
399 |
-
if lexer.aliases:
|
400 |
-
lexer_name = lexer.aliases[0]
|
401 |
-
else:
|
402 |
-
lexer_name = lexer.name
|
403 |
-
|
404 |
-
return lexer_name
|
405 |
-
|
406 |
-
def _get_base_style(self) -> Style:
|
407 |
-
"""Get the base style."""
|
408 |
-
default_style = self._theme.get_background_style() + self.background_style
|
409 |
-
return default_style
|
410 |
-
|
411 |
-
def _get_token_color(self, token_type: TokenType) -> Optional[Color]:
|
412 |
-
"""Get a color (if any) for the given token.
|
413 |
-
|
414 |
-
Args:
|
415 |
-
token_type (TokenType): A token type tuple from Pygments.
|
416 |
-
|
417 |
-
Returns:
|
418 |
-
Optional[Color]: Color from theme, or None for no color.
|
419 |
-
"""
|
420 |
-
style = self._theme.get_style_for_token(token_type)
|
421 |
-
return style.color
|
422 |
-
|
423 |
-
@property
|
424 |
-
def lexer(self) -> Optional[Lexer]:
|
425 |
-
"""The lexer for this syntax, or None if no lexer was found.
|
426 |
-
|
427 |
-
Tries to find the lexer by name if a string was passed to the constructor.
|
428 |
-
"""
|
429 |
-
|
430 |
-
if isinstance(self._lexer, Lexer):
|
431 |
-
return self._lexer
|
432 |
-
try:
|
433 |
-
return get_lexer_by_name(
|
434 |
-
self._lexer,
|
435 |
-
stripnl=False,
|
436 |
-
ensurenl=True,
|
437 |
-
tabsize=self.tab_size,
|
438 |
-
)
|
439 |
-
except ClassNotFound:
|
440 |
-
return None
|
441 |
-
|
442 |
-
def highlight(
|
443 |
-
self,
|
444 |
-
code: str,
|
445 |
-
line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
|
446 |
-
) -> Text:
|
447 |
-
"""Highlight code and return a Text instance.
|
448 |
-
|
449 |
-
Args:
|
450 |
-
code (str): Code to highlight.
|
451 |
-
line_range(Tuple[int, int], optional): Optional line range to highlight.
|
452 |
-
|
453 |
-
Returns:
|
454 |
-
Text: A text instance containing highlighted syntax.
|
455 |
-
"""
|
456 |
-
|
457 |
-
base_style = self._get_base_style()
|
458 |
-
justify: JustifyMethod = (
|
459 |
-
"default" if base_style.transparent_background else "left"
|
460 |
-
)
|
461 |
-
|
462 |
-
text = Text(
|
463 |
-
justify=justify,
|
464 |
-
style=base_style,
|
465 |
-
tab_size=self.tab_size,
|
466 |
-
no_wrap=not self.word_wrap,
|
467 |
-
)
|
468 |
-
_get_theme_style = self._theme.get_style_for_token
|
469 |
-
|
470 |
-
lexer = self.lexer
|
471 |
-
|
472 |
-
if lexer is None:
|
473 |
-
text.append(code)
|
474 |
-
else:
|
475 |
-
if line_range:
|
476 |
-
# More complicated path to only stylize a portion of the code
|
477 |
-
# This speeds up further operations as there are less spans to process
|
478 |
-
line_start, line_end = line_range
|
479 |
-
|
480 |
-
def line_tokenize() -> Iterable[Tuple[Any, str]]:
|
481 |
-
"""Split tokens to one per line."""
|
482 |
-
assert lexer # required to make MyPy happy - we know lexer is not None at this point
|
483 |
-
|
484 |
-
for token_type, token in lexer.get_tokens(code):
|
485 |
-
while token:
|
486 |
-
line_token, new_line, token = token.partition("\n")
|
487 |
-
yield token_type, line_token + new_line
|
488 |
-
|
489 |
-
def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]:
|
490 |
-
"""Convert tokens to spans."""
|
491 |
-
tokens = iter(line_tokenize())
|
492 |
-
line_no = 0
|
493 |
-
_line_start = line_start - 1 if line_start else 0
|
494 |
-
|
495 |
-
# Skip over tokens until line start
|
496 |
-
while line_no < _line_start:
|
497 |
-
try:
|
498 |
-
_token_type, token = next(tokens)
|
499 |
-
except StopIteration:
|
500 |
-
break
|
501 |
-
yield (token, None)
|
502 |
-
if token.endswith("\n"):
|
503 |
-
line_no += 1
|
504 |
-
# Generate spans until line end
|
505 |
-
for token_type, token in tokens:
|
506 |
-
yield (token, _get_theme_style(token_type))
|
507 |
-
if token.endswith("\n"):
|
508 |
-
line_no += 1
|
509 |
-
if line_end and line_no >= line_end:
|
510 |
-
break
|
511 |
-
|
512 |
-
text.append_tokens(tokens_to_spans())
|
513 |
-
|
514 |
-
else:
|
515 |
-
text.append_tokens(
|
516 |
-
(token, _get_theme_style(token_type))
|
517 |
-
for token_type, token in lexer.get_tokens(code)
|
518 |
-
)
|
519 |
-
if self.background_color is not None:
|
520 |
-
text.stylize(f"on {self.background_color}")
|
521 |
-
|
522 |
-
if self._stylized_ranges:
|
523 |
-
self._apply_stylized_ranges(text)
|
524 |
-
|
525 |
-
return text
|
526 |
-
|
527 |
-
def stylize_range(
|
528 |
-
self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition
|
529 |
-
) -> None:
|
530 |
-
"""
|
531 |
-
Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered.
|
532 |
-
Line numbers are 1-based, while column indexes are 0-based.
|
533 |
-
|
534 |
-
Args:
|
535 |
-
style (StyleType): The style to apply.
|
536 |
-
start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`.
|
537 |
-
end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`.
|
538 |
-
"""
|
539 |
-
self._stylized_ranges.append(_SyntaxHighlightRange(style, start, end))
|
540 |
-
|
541 |
-
def _get_line_numbers_color(self, blend: float = 0.3) -> Color:
|
542 |
-
background_style = self._theme.get_background_style() + self.background_style
|
543 |
-
background_color = background_style.bgcolor
|
544 |
-
if background_color is None or background_color.is_system_defined:
|
545 |
-
return Color.default()
|
546 |
-
foreground_color = self._get_token_color(Token.Text)
|
547 |
-
if foreground_color is None or foreground_color.is_system_defined:
|
548 |
-
return foreground_color or Color.default()
|
549 |
-
new_color = blend_rgb(
|
550 |
-
background_color.get_truecolor(),
|
551 |
-
foreground_color.get_truecolor(),
|
552 |
-
cross_fade=blend,
|
553 |
-
)
|
554 |
-
return Color.from_triplet(new_color)
|
555 |
-
|
556 |
-
@property
|
557 |
-
def _numbers_column_width(self) -> int:
|
558 |
-
"""Get the number of characters used to render the numbers column."""
|
559 |
-
column_width = 0
|
560 |
-
if self.line_numbers:
|
561 |
-
column_width = (
|
562 |
-
len(str(self.start_line + self.code.count("\n")))
|
563 |
-
+ NUMBERS_COLUMN_DEFAULT_PADDING
|
564 |
-
)
|
565 |
-
return column_width
|
566 |
-
|
567 |
-
def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
|
568 |
-
"""Get background, number, and highlight styles for line numbers."""
|
569 |
-
background_style = self._get_base_style()
|
570 |
-
if background_style.transparent_background:
|
571 |
-
return Style.null(), Style(dim=True), Style.null()
|
572 |
-
if console.color_system in ("256", "truecolor"):
|
573 |
-
number_style = Style.chain(
|
574 |
-
background_style,
|
575 |
-
self._theme.get_style_for_token(Token.Text),
|
576 |
-
Style(color=self._get_line_numbers_color()),
|
577 |
-
self.background_style,
|
578 |
-
)
|
579 |
-
highlight_number_style = Style.chain(
|
580 |
-
background_style,
|
581 |
-
self._theme.get_style_for_token(Token.Text),
|
582 |
-
Style(bold=True, color=self._get_line_numbers_color(0.9)),
|
583 |
-
self.background_style,
|
584 |
-
)
|
585 |
-
else:
|
586 |
-
number_style = background_style + Style(dim=True)
|
587 |
-
highlight_number_style = background_style + Style(dim=False)
|
588 |
-
return background_style, number_style, highlight_number_style
|
589 |
-
|
590 |
-
def __rich_measure__(
|
591 |
-
self, console: "Console", options: "ConsoleOptions"
|
592 |
-
) -> "Measurement":
|
593 |
-
|
594 |
-
_, right, _, left = Padding.unpack(self.padding)
|
595 |
-
padding = left + right
|
596 |
-
if self.code_width is not None:
|
597 |
-
width = self.code_width + self._numbers_column_width + padding + 1
|
598 |
-
return Measurement(self._numbers_column_width, width)
|
599 |
-
lines = self.code.splitlines()
|
600 |
-
width = (
|
601 |
-
self._numbers_column_width
|
602 |
-
+ padding
|
603 |
-
+ (max(cell_len(line) for line in lines) if lines else 0)
|
604 |
-
)
|
605 |
-
if self.line_numbers:
|
606 |
-
width += 1
|
607 |
-
return Measurement(self._numbers_column_width, width)
|
608 |
-
|
609 |
-
def __rich_console__(
|
610 |
-
self, console: Console, options: ConsoleOptions
|
611 |
-
) -> RenderResult:
|
612 |
-
segments = Segments(self._get_syntax(console, options))
|
613 |
-
if self.padding:
|
614 |
-
yield Padding(
|
615 |
-
segments, style=self._theme.get_background_style(), pad=self.padding
|
616 |
-
)
|
617 |
-
else:
|
618 |
-
yield segments
|
619 |
-
|
620 |
-
def _get_syntax(
|
621 |
-
self,
|
622 |
-
console: Console,
|
623 |
-
options: ConsoleOptions,
|
624 |
-
) -> Iterable[Segment]:
|
625 |
-
"""
|
626 |
-
Get the Segments for the Syntax object, excluding any vertical/horizontal padding
|
627 |
-
"""
|
628 |
-
transparent_background = self._get_base_style().transparent_background
|
629 |
-
code_width = (
|
630 |
-
(
|
631 |
-
(options.max_width - self._numbers_column_width - 1)
|
632 |
-
if self.line_numbers
|
633 |
-
else options.max_width
|
634 |
-
)
|
635 |
-
if self.code_width is None
|
636 |
-
else self.code_width
|
637 |
-
)
|
638 |
-
|
639 |
-
ends_on_nl, processed_code = self._process_code(self.code)
|
640 |
-
text = self.highlight(processed_code, self.line_range)
|
641 |
-
|
642 |
-
if not self.line_numbers and not self.word_wrap and not self.line_range:
|
643 |
-
if not ends_on_nl:
|
644 |
-
text.remove_suffix("\n")
|
645 |
-
# Simple case of just rendering text
|
646 |
-
style = (
|
647 |
-
self._get_base_style()
|
648 |
-
+ self._theme.get_style_for_token(Comment)
|
649 |
-
+ Style(dim=True)
|
650 |
-
+ self.background_style
|
651 |
-
)
|
652 |
-
if self.indent_guides and not options.ascii_only:
|
653 |
-
text = text.with_indent_guides(self.tab_size, style=style)
|
654 |
-
text.overflow = "crop"
|
655 |
-
if style.transparent_background:
|
656 |
-
yield from console.render(
|
657 |
-
text, options=options.update(width=code_width)
|
658 |
-
)
|
659 |
-
else:
|
660 |
-
syntax_lines = console.render_lines(
|
661 |
-
text,
|
662 |
-
options.update(width=code_width, height=None, justify="left"),
|
663 |
-
style=self.background_style,
|
664 |
-
pad=True,
|
665 |
-
new_lines=True,
|
666 |
-
)
|
667 |
-
for syntax_line in syntax_lines:
|
668 |
-
yield from syntax_line
|
669 |
-
return
|
670 |
-
|
671 |
-
start_line, end_line = self.line_range or (None, None)
|
672 |
-
line_offset = 0
|
673 |
-
if start_line:
|
674 |
-
line_offset = max(0, start_line - 1)
|
675 |
-
lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl)
|
676 |
-
if self.line_range:
|
677 |
-
if line_offset > len(lines):
|
678 |
-
return
|
679 |
-
lines = lines[line_offset:end_line]
|
680 |
-
|
681 |
-
if self.indent_guides and not options.ascii_only:
|
682 |
-
style = (
|
683 |
-
self._get_base_style()
|
684 |
-
+ self._theme.get_style_for_token(Comment)
|
685 |
-
+ Style(dim=True)
|
686 |
-
+ self.background_style
|
687 |
-
)
|
688 |
-
lines = (
|
689 |
-
Text("\n")
|
690 |
-
.join(lines)
|
691 |
-
.with_indent_guides(self.tab_size, style=style)
|
692 |
-
.split("\n", allow_blank=True)
|
693 |
-
)
|
694 |
-
|
695 |
-
numbers_column_width = self._numbers_column_width
|
696 |
-
render_options = options.update(width=code_width)
|
697 |
-
|
698 |
-
highlight_line = self.highlight_lines.__contains__
|
699 |
-
_Segment = Segment
|
700 |
-
new_line = _Segment("\n")
|
701 |
-
|
702 |
-
line_pointer = "> " if options.legacy_windows else "❱ "
|
703 |
-
|
704 |
-
(
|
705 |
-
background_style,
|
706 |
-
number_style,
|
707 |
-
highlight_number_style,
|
708 |
-
) = self._get_number_styles(console)
|
709 |
-
|
710 |
-
for line_no, line in enumerate(lines, self.start_line + line_offset):
|
711 |
-
if self.word_wrap:
|
712 |
-
wrapped_lines = console.render_lines(
|
713 |
-
line,
|
714 |
-
render_options.update(height=None, justify="left"),
|
715 |
-
style=background_style,
|
716 |
-
pad=not transparent_background,
|
717 |
-
)
|
718 |
-
else:
|
719 |
-
segments = list(line.render(console, end=""))
|
720 |
-
if options.no_wrap:
|
721 |
-
wrapped_lines = [segments]
|
722 |
-
else:
|
723 |
-
wrapped_lines = [
|
724 |
-
_Segment.adjust_line_length(
|
725 |
-
segments,
|
726 |
-
render_options.max_width,
|
727 |
-
style=background_style,
|
728 |
-
pad=not transparent_background,
|
729 |
-
)
|
730 |
-
]
|
731 |
-
|
732 |
-
if self.line_numbers:
|
733 |
-
wrapped_line_left_pad = _Segment(
|
734 |
-
" " * numbers_column_width + " ", background_style
|
735 |
-
)
|
736 |
-
for first, wrapped_line in loop_first(wrapped_lines):
|
737 |
-
if first:
|
738 |
-
line_column = str(line_no).rjust(numbers_column_width - 2) + " "
|
739 |
-
if highlight_line(line_no):
|
740 |
-
yield _Segment(line_pointer, Style(color="red"))
|
741 |
-
yield _Segment(line_column, highlight_number_style)
|
742 |
-
else:
|
743 |
-
yield _Segment(" ", highlight_number_style)
|
744 |
-
yield _Segment(line_column, number_style)
|
745 |
-
else:
|
746 |
-
yield wrapped_line_left_pad
|
747 |
-
yield from wrapped_line
|
748 |
-
yield new_line
|
749 |
-
else:
|
750 |
-
for wrapped_line in wrapped_lines:
|
751 |
-
yield from wrapped_line
|
752 |
-
yield new_line
|
753 |
-
|
754 |
-
def _apply_stylized_ranges(self, text: Text) -> None:
|
755 |
-
"""
|
756 |
-
Apply stylized ranges to a text instance,
|
757 |
-
using the given code to determine the right portion to apply the style to.
|
758 |
-
|
759 |
-
Args:
|
760 |
-
text (Text): Text instance to apply the style to.
|
761 |
-
"""
|
762 |
-
code = text.plain
|
763 |
-
newlines_offsets = [
|
764 |
-
# Let's add outer boundaries at each side of the list:
|
765 |
-
0,
|
766 |
-
# N.B. using "\n" here is much faster than using metacharacters such as "^" or "\Z":
|
767 |
-
*[
|
768 |
-
match.start() + 1
|
769 |
-
for match in re.finditer("\n", code, flags=re.MULTILINE)
|
770 |
-
],
|
771 |
-
len(code) + 1,
|
772 |
-
]
|
773 |
-
|
774 |
-
for stylized_range in self._stylized_ranges:
|
775 |
-
start = _get_code_index_for_syntax_position(
|
776 |
-
newlines_offsets, stylized_range.start
|
777 |
-
)
|
778 |
-
end = _get_code_index_for_syntax_position(
|
779 |
-
newlines_offsets, stylized_range.end
|
780 |
-
)
|
781 |
-
if start is not None and end is not None:
|
782 |
-
text.stylize(stylized_range.style, start, end)
|
783 |
-
|
784 |
-
def _process_code(self, code: str) -> Tuple[bool, str]:
|
785 |
-
"""
|
786 |
-
Applies various processing to a raw code string
|
787 |
-
(normalises it so it always ends with a line return, dedents it if necessary, etc.)
|
788 |
-
|
789 |
-
Args:
|
790 |
-
code (str): The raw code string to process
|
791 |
-
|
792 |
-
Returns:
|
793 |
-
Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return,
|
794 |
-
while the string is the processed code.
|
795 |
-
"""
|
796 |
-
ends_on_nl = code.endswith("\n")
|
797 |
-
processed_code = code if ends_on_nl else code + "\n"
|
798 |
-
processed_code = (
|
799 |
-
textwrap.dedent(processed_code) if self.dedent else processed_code
|
800 |
-
)
|
801 |
-
processed_code = processed_code.expandtabs(self.tab_size)
|
802 |
-
return ends_on_nl, processed_code
|
803 |
-
|
804 |
-
|
805 |
-
def _get_code_index_for_syntax_position(
|
806 |
-
newlines_offsets: Sequence[int], position: SyntaxPosition
|
807 |
-
) -> Optional[int]:
|
808 |
-
"""
|
809 |
-
Returns the index of the code string for the given positions.
|
810 |
-
|
811 |
-
Args:
|
812 |
-
newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet.
|
813 |
-
position (SyntaxPosition): The position to search for.
|
814 |
-
|
815 |
-
Returns:
|
816 |
-
Optional[int]: The index of the code string for this position, or `None`
|
817 |
-
if the given position's line number is out of range (if it's the column that is out of range
|
818 |
-
we silently clamp its value so that it reaches the end of the line)
|
819 |
-
"""
|
820 |
-
lines_count = len(newlines_offsets)
|
821 |
-
|
822 |
-
line_number, column_index = position
|
823 |
-
if line_number > lines_count or len(newlines_offsets) < (line_number + 1):
|
824 |
-
return None # `line_number` is out of range
|
825 |
-
line_index = line_number - 1
|
826 |
-
line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1
|
827 |
-
# If `column_index` is out of range: let's silently clamp it:
|
828 |
-
column_index = min(line_length, column_index)
|
829 |
-
return newlines_offsets[line_index] + column_index
|
830 |
-
|
831 |
-
|
832 |
-
if __name__ == "__main__": # pragma: no cover
|
833 |
-
|
834 |
-
import argparse
|
835 |
-
import sys
|
836 |
-
|
837 |
-
parser = argparse.ArgumentParser(
|
838 |
-
description="Render syntax to the console with Rich"
|
839 |
-
)
|
840 |
-
parser.add_argument(
|
841 |
-
"path",
|
842 |
-
metavar="PATH",
|
843 |
-
help="path to file, or - for stdin",
|
844 |
-
)
|
845 |
-
parser.add_argument(
|
846 |
-
"-c",
|
847 |
-
"--force-color",
|
848 |
-
dest="force_color",
|
849 |
-
action="store_true",
|
850 |
-
default=None,
|
851 |
-
help="force color for non-terminals",
|
852 |
-
)
|
853 |
-
parser.add_argument(
|
854 |
-
"-i",
|
855 |
-
"--indent-guides",
|
856 |
-
dest="indent_guides",
|
857 |
-
action="store_true",
|
858 |
-
default=False,
|
859 |
-
help="display indent guides",
|
860 |
-
)
|
861 |
-
parser.add_argument(
|
862 |
-
"-l",
|
863 |
-
"--line-numbers",
|
864 |
-
dest="line_numbers",
|
865 |
-
action="store_true",
|
866 |
-
help="render line numbers",
|
867 |
-
)
|
868 |
-
parser.add_argument(
|
869 |
-
"-w",
|
870 |
-
"--width",
|
871 |
-
type=int,
|
872 |
-
dest="width",
|
873 |
-
default=None,
|
874 |
-
help="width of output (default will auto-detect)",
|
875 |
-
)
|
876 |
-
parser.add_argument(
|
877 |
-
"-r",
|
878 |
-
"--wrap",
|
879 |
-
dest="word_wrap",
|
880 |
-
action="store_true",
|
881 |
-
default=False,
|
882 |
-
help="word wrap long lines",
|
883 |
-
)
|
884 |
-
parser.add_argument(
|
885 |
-
"-s",
|
886 |
-
"--soft-wrap",
|
887 |
-
action="store_true",
|
888 |
-
dest="soft_wrap",
|
889 |
-
default=False,
|
890 |
-
help="enable soft wrapping mode",
|
891 |
-
)
|
892 |
-
parser.add_argument(
|
893 |
-
"-t", "--theme", dest="theme", default="monokai", help="pygments theme"
|
894 |
-
)
|
895 |
-
parser.add_argument(
|
896 |
-
"-b",
|
897 |
-
"--background-color",
|
898 |
-
dest="background_color",
|
899 |
-
default=None,
|
900 |
-
help="Override background color",
|
901 |
-
)
|
902 |
-
parser.add_argument(
|
903 |
-
"-x",
|
904 |
-
"--lexer",
|
905 |
-
default=None,
|
906 |
-
dest="lexer_name",
|
907 |
-
help="Lexer name",
|
908 |
-
)
|
909 |
-
parser.add_argument(
|
910 |
-
"-p", "--padding", type=int, default=0, dest="padding", help="Padding"
|
911 |
-
)
|
912 |
-
parser.add_argument(
|
913 |
-
"--highlight-line",
|
914 |
-
type=int,
|
915 |
-
default=None,
|
916 |
-
dest="highlight_line",
|
917 |
-
help="The line number (not index!) to highlight",
|
918 |
-
)
|
919 |
-
args = parser.parse_args()
|
920 |
-
|
921 |
-
from pip._vendor.rich.console import Console
|
922 |
-
|
923 |
-
console = Console(force_terminal=args.force_color, width=args.width)
|
924 |
-
|
925 |
-
if args.path == "-":
|
926 |
-
code = sys.stdin.read()
|
927 |
-
syntax = Syntax(
|
928 |
-
code=code,
|
929 |
-
lexer=args.lexer_name,
|
930 |
-
line_numbers=args.line_numbers,
|
931 |
-
word_wrap=args.word_wrap,
|
932 |
-
theme=args.theme,
|
933 |
-
background_color=args.background_color,
|
934 |
-
indent_guides=args.indent_guides,
|
935 |
-
padding=args.padding,
|
936 |
-
highlight_lines={args.highlight_line},
|
937 |
-
)
|
938 |
-
else:
|
939 |
-
syntax = Syntax.from_path(
|
940 |
-
args.path,
|
941 |
-
lexer=args.lexer_name,
|
942 |
-
line_numbers=args.line_numbers,
|
943 |
-
word_wrap=args.word_wrap,
|
944 |
-
theme=args.theme,
|
945 |
-
background_color=args.background_color,
|
946 |
-
indent_guides=args.indent_guides,
|
947 |
-
padding=args.padding,
|
948 |
-
highlight_lines={args.highlight_line},
|
949 |
-
)
|
950 |
-
console.print(syntax, soft_wrap=args.soft_wrap)
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/ssltransport.py
DELETED
@@ -1,221 +0,0 @@
-import io
-import socket
-import ssl
-
-from ..exceptions import ProxySchemeUnsupported
-from ..packages import six
-
-SSL_BLOCKSIZE = 16384
-
-
-class SSLTransport:
-    """
-    The SSLTransport wraps an existing socket and establishes an SSL connection.
-
-    Contrary to Python's implementation of SSLSocket, it allows you to chain
-    multiple TLS connections together. It's particularly useful if you need to
-    implement TLS within TLS.
-
-    The class supports most of the socket API operations.
-    """
-
-    @staticmethod
-    def _validate_ssl_context_for_tls_in_tls(ssl_context):
-        """
-        Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
-        for TLS in TLS.
-
-        The only requirement is that the ssl_context provides the 'wrap_bio'
-        methods.
-        """
-
-        if not hasattr(ssl_context, "wrap_bio"):
-            if six.PY2:
-                raise ProxySchemeUnsupported(
-                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
-                    "supported on Python 2"
-                )
-            else:
-                raise ProxySchemeUnsupported(
-                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
-                    "available on non-native SSLContext"
-                )
-
-    def __init__(
-        self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
-    ):
-        """
-        Create an SSLTransport around socket using the provided ssl_context.
-        """
-        self.incoming = ssl.MemoryBIO()
-        self.outgoing = ssl.MemoryBIO()
-
-        self.suppress_ragged_eofs = suppress_ragged_eofs
-        self.socket = socket
-
-        self.sslobj = ssl_context.wrap_bio(
-            self.incoming, self.outgoing, server_hostname=server_hostname
-        )
-
-        # Perform initial handshake.
-        self._ssl_io_loop(self.sslobj.do_handshake)
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, *_):
-        self.close()
-
-    def fileno(self):
-        return self.socket.fileno()
-
-    def read(self, len=1024, buffer=None):
-        return self._wrap_ssl_read(len, buffer)
-
-    def recv(self, len=1024, flags=0):
-        if flags != 0:
-            raise ValueError("non-zero flags not allowed in calls to recv")
-        return self._wrap_ssl_read(len)
-
-    def recv_into(self, buffer, nbytes=None, flags=0):
-        if flags != 0:
-            raise ValueError("non-zero flags not allowed in calls to recv_into")
-        if buffer and (nbytes is None):
-            nbytes = len(buffer)
-        elif nbytes is None:
-            nbytes = 1024
-        return self.read(nbytes, buffer)
-
-    def sendall(self, data, flags=0):
-        if flags != 0:
-            raise ValueError("non-zero flags not allowed in calls to sendall")
-        count = 0
-        with memoryview(data) as view, view.cast("B") as byte_view:
-            amount = len(byte_view)
-            while count < amount:
-                v = self.send(byte_view[count:])
-                count += v
-
-    def send(self, data, flags=0):
-        if flags != 0:
-            raise ValueError("non-zero flags not allowed in calls to send")
-        response = self._ssl_io_loop(self.sslobj.write, data)
-        return response
-
-    def makefile(
-        self, mode="r", buffering=None, encoding=None, errors=None, newline=None
-    ):
-        """
-        Python's httpclient uses makefile and buffered io when reading HTTP
-        messages and we need to support it.
-
-        This is unfortunately a copy and paste of socket.py makefile with small
-        changes to point to the socket directly.
-        """
-        if not set(mode) <= {"r", "w", "b"}:
-            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
-
-        writing = "w" in mode
-        reading = "r" in mode or not writing
-        assert reading or writing
-        binary = "b" in mode
-        rawmode = ""
-        if reading:
-            rawmode += "r"
-        if writing:
-            rawmode += "w"
-        raw = socket.SocketIO(self, rawmode)
-        self.socket._io_refs += 1
-        if buffering is None:
-            buffering = -1
-        if buffering < 0:
-            buffering = io.DEFAULT_BUFFER_SIZE
-        if buffering == 0:
-            if not binary:
-                raise ValueError("unbuffered streams must be binary")
-            return raw
-        if reading and writing:
-            buffer = io.BufferedRWPair(raw, raw, buffering)
-        elif reading:
-            buffer = io.BufferedReader(raw, buffering)
-        else:
-            assert writing
-            buffer = io.BufferedWriter(raw, buffering)
-        if binary:
-            return buffer
-        text = io.TextIOWrapper(buffer, encoding, errors, newline)
-        text.mode = mode
-        return text
-
-    def unwrap(self):
-        self._ssl_io_loop(self.sslobj.unwrap)
-
-    def close(self):
-        self.socket.close()
-
-    def getpeercert(self, binary_form=False):
-        return self.sslobj.getpeercert(binary_form)
-
-    def version(self):
-        return self.sslobj.version()
-
-    def cipher(self):
-        return self.sslobj.cipher()
-
-    def selected_alpn_protocol(self):
-        return self.sslobj.selected_alpn_protocol()
-
-    def selected_npn_protocol(self):
-        return self.sslobj.selected_npn_protocol()
-
-    def shared_ciphers(self):
-        return self.sslobj.shared_ciphers()
-
-    def compression(self):
-        return self.sslobj.compression()
-
-    def settimeout(self, value):
-        self.socket.settimeout(value)
-
-    def gettimeout(self):
-        return self.socket.gettimeout()
-
-    def _decref_socketios(self):
-        self.socket._decref_socketios()
-
-    def _wrap_ssl_read(self, len, buffer=None):
-        try:
-            return self._ssl_io_loop(self.sslobj.read, len, buffer)
-        except ssl.SSLError as e:
-            if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
-                return 0  # eof, return 0.
-            else:
-                raise
-
-    def _ssl_io_loop(self, func, *args):
-        """Performs an I/O loop between incoming/outgoing and the socket."""
-        should_loop = True
-        ret = None
-
-        while should_loop:
-            errno = None
-            try:
-                ret = func(*args)
-            except ssl.SSLError as e:
-                if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
-                    # WANT_READ, and WANT_WRITE are expected, others are not.
-                    raise e
-                errno = e.errno
-
-            buf = self.outgoing.read()
-            self.socket.sendall(buf)
-
-            if errno is None:
-                should_loop = False
-            elif errno == ssl.SSL_ERROR_WANT_READ:
-                buf = self.socket.recv(SSL_BLOCKSIZE)
-                if buf:
-                    self.incoming.write(buf)
-                else:
-                    self.incoming.write_eof()
-        return ret
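For reference, a minimal, hedged sketch of how a transport like the one deleted above is typically driven for TLS-in-TLS: an outer TLS session to a proxy, then a second handshake to the origin layered inside it via wrap_bio(). The proxy and origin host names below are illustrative assumptions, the HTTP CONNECT step that would normally establish the tunnel is omitted, and the import path assumes a urllib3 1.26-style layout; this is a sketch of the idea, not the library's canonical usage.

import socket
import ssl

from urllib3.util.ssltransport import SSLTransport  # assumes urllib3 1.26-style layout

PROXY = ("proxy.example.com", 443)   # hypothetical HTTPS proxy
ORIGIN = "origin.example.com"        # hypothetical origin server

ctx = ssl.create_default_context()   # native SSLContext, so wrap_bio() is available
SSLTransport._validate_ssl_context_for_tls_in_tls(ctx)

# Outer TLS session: an ordinary SSLSocket to the proxy.
raw_sock = socket.create_connection(PROXY)
outer = ctx.wrap_socket(raw_sock, server_hostname=PROXY[0])

# ... an HTTP CONNECT request to ORIGIN would be sent over `outer` here ...

# Inner TLS session: a second handshake to the origin, tunnelled inside the first.
inner = SSLTransport(outer, ctx, server_hostname=ORIGIN)
inner.sendall(b"GET / HTTP/1.1\r\nHost: " + ORIGIN.encode() + b"\r\n\r\n")
print(inner.recv(4096))
inner.close()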
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/README.md
DELETED
@@ -1,5 +0,0 @@
-# Utility functions
-
-This folder contain utility functions that are not used in the
-core library, but are useful for building models or training
-code using the config system.
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/proc_dict_gqa.py
DELETED
@@ -1,85 +0,0 @@
-# --------------------------------------------------------
-# mcan-vqa (Deep Modular Co-Attention Networks)
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-import sys
-sys.path.append('../')
-from openvqa.utils.ans_punct import prep_ans
-from openvqa.core.path_cfgs import PATH
-import json, re
-
-path = PATH()
-
-
-ques_dict_preread = {
-    'train': json.load(open(path.RAW_PATH['gqa']['train'], 'r')),
-    'val': json.load(open(path.RAW_PATH['gqa']['val'], 'r')),
-    'testdev': json.load(open(path.RAW_PATH['gqa']['testdev'], 'r')),
-    'test': json.load(open(path.RAW_PATH['gqa']['test'], 'r')),
-}
-
-# Loading question word list
-stat_ques_dict = {
-    **ques_dict_preread['train'],
-    **ques_dict_preread['val'],
-    **ques_dict_preread['testdev'],
-    **ques_dict_preread['test'],
-}
-
-stat_ans_dict = {
-    **ques_dict_preread['train'],
-    **ques_dict_preread['val'],
-    **ques_dict_preread['testdev'],
-}
-
-
-def tokenize(stat_ques_dict):
-    token_to_ix = {
-        'PAD': 0,
-        'UNK': 1,
-        'CLS': 2,
-    }
-
-    max_token = 0
-    for qid in stat_ques_dict:
-        ques = stat_ques_dict[qid]['question']
-        words = re.sub(
-            r"([.,'!?\"()*#:;])",
-            '',
-            ques.lower()
-        ).replace('-', ' ').replace('/', ' ').split()
-
-        if len(words) > max_token:
-            max_token = len(words)
-
-        for word in words:
-            if word not in token_to_ix:
-                token_to_ix[word] = len(token_to_ix)
-
-    return token_to_ix, max_token
-
-
-def ans_stat(stat_ans_dict):
-    ans_to_ix = {}
-    ix_to_ans = {}
-
-    for qid in stat_ans_dict:
-        ans = stat_ans_dict[qid]['answer']
-        ans = prep_ans(ans)
-
-        if ans not in ans_to_ix:
-            ix_to_ans[ans_to_ix.__len__()] = ans
-            ans_to_ix[ans] = ans_to_ix.__len__()
-
-    return ans_to_ix, ix_to_ans
-
-token_to_ix, max_token = tokenize(stat_ques_dict)
-ans_to_ix, ix_to_ans = ans_stat(stat_ans_dict)
-# print(ans_to_ix)
-# print(ix_to_ans)
-# print(token_to_ix)
-# print(token_to_ix.__len__())
-# print(max_token)
-json.dump([ans_to_ix, ix_to_ans, token_to_ix, max_token], open('../openvqa/datasets/gqa/dicts.json', 'w'))
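To make the behaviour of tokenize() and ans_stat() in the deleted script concrete, here is a small self-contained sketch that runs the same token and answer indexing on an in-memory sample. The two questions and answers are made up, and the prep_ans() normalisation step is skipped so the snippet has no openvqa dependency; the regex and index scheme mirror the script above.

import re

# Tiny stand-in for the GQA question dicts the script loads from disk (contents are made up).
sample = {
    "q1": {"question": "What color is the dog's collar?", "answer": "red"},
    "q2": {"question": "Is the dog near the red car?", "answer": "yes"},
}

# Question tokens: reserved ids first, then each new word in order of appearance.
token_to_ix = {"PAD": 0, "UNK": 1, "CLS": 2}
max_token = 0
for qid in sample:
    words = re.sub(
        r"([.,'!?\"()*#:;])", "", sample[qid]["question"].lower()
    ).replace("-", " ").replace("/", " ").split()
    max_token = max(max_token, len(words))
    for word in words:
        if word not in token_to_ix:
            token_to_ix[word] = len(token_to_ix)

# Answer vocabulary: forward and reverse index maps.
ans_to_ix, ix_to_ans = {}, {}
for qid in sample:
    ans = sample[qid]["answer"]  # prep_ans() normalisation skipped in this sketch
    if ans not in ans_to_ix:
        ix_to_ans[len(ans_to_ix)] = ans
        ans_to_ix[ans] = len(ans_to_ix)

print(max_token, token_to_ix, ans_to_ix, ix_to_ans)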