parquet-converter commited on
Commit
6d7f8b5
·
1 Parent(s): de36af1

Update parquet files (step 101 of 397)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/123harsh/gradio-easywriter/app.py +0 -8
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cat Et 2009c Keygen A Complete Guide to Using CAT ET 2009C Software for Caterpillar Vehicles.md +0 -56
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clonebd Crack.md +0 -33
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download phn mm dit virus kaspersky full crack from a Trusted Website.md +0 -20
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FBX 2019 Free Download With Crack The Best Way to Convert and Export Your 3D Models.md +0 -165
  6. spaces/1gistliPinn/ChatGPT4/Examples/Dartfish Team Pro 5 5 Full Crack.md +0 -31
  7. spaces/1gistliPinn/ChatGPT4/Examples/Decent Icons Download Windows 7 Ultimate [BETTER].md +0 -6
  8. spaces/1gistliPinn/ChatGPT4/Examples/Edius 6 Full Indir Gezginler.md +0 -6
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator Gold APK The Best Way to Enjoy GameCube Classics on Your Phone.md +0 -136
  10. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Feel Good by Cleyton M - The Hottest Kuduro Song of 2023.md +0 -138
  11. spaces/3mrology/Chameleon_Text2Img_Generation_Demo/share_btn.py +0 -88
  12. spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_123821KB.py +0 -122
  13. spaces/A-Roucher/Quotes/README.md +0 -12
  14. spaces/AIGC-Audio/AudioGPT/NeuralSeq/README.md +0 -9
  15. spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/__init__.py +0 -1
  16. spaces/AIML-TUDA/does-clip-know-my-face/app.py +0 -611
  17. spaces/ASJMO/freegpt/README.md +0 -195
  18. spaces/ASJMO/freegpt/run.py +0 -48
  19. spaces/AdityaMahimkar/ParaPhraser/app.py +0 -36
  20. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/losses.py +0 -61
  21. spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/countless2d.py +0 -529
  22. spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/eval_wer.py +0 -0
  23. spaces/Amrrs/image-to-text-app/app.py +0 -47
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ipndm.py +0 -161
  25. spaces/Andy1621/uniformer_image_detection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py +0 -31
  26. spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py +0 -42
  27. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/grammar.py +0 -33
  28. spaces/Benson/text-generation/Examples/Acapella Sudfrica Askies I 39m Lo Siento Mama Mp3 Download.md +0 -200
  29. spaces/BhaskarKapri/Animal/README.md +0 -13
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/__init__.py +0 -19
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/_utils.py +0 -76
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/config/setupcfg.py +0 -762
  33. spaces/BobbyOleti/MyGenAIChatBot/README.md +0 -12
  34. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/coco.py +0 -462
  35. spaces/CVPR/LIVE/thrust/thrust/event.h +0 -26
  36. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/merge.h +0 -44
  37. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/count.h +0 -22
  38. spaces/CVPR/Text2Human/Text2Human/utils/logger.py +0 -112
  39. spaces/CVPR/WALT/cwalt/kmedoid.py +0 -55
  40. spaces/CVPR/unicl-zero-shot-img-recog/model/text_encoder/hf_model.py +0 -27
  41. spaces/CatNika/Asian_Proxy/greeting.md +0 -13
  42. spaces/Cpp4App/Cpp4App/CDM/result_processing/evaluation.py +0 -208
  43. spaces/DHEIVER/Alzheimer/README.md +0 -12
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/voltLib/voltToFea.py +0 -726
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-98c587a9.js +0 -7
  46. spaces/DaleChen/AutoGPT/autogpt/memory/local.py +0 -136
  47. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/logger.py +0 -49
  48. spaces/DpNaze/Dreamlikeart/app.py +0 -154
  49. spaces/DragGan/DragGan-Inversion/PTI/models/e4e/discriminator.py +0 -20
  50. spaces/ECCV2022/PSG/OpenPSG/configs/motifs/panoptic_fpn_r101_fpn_1x_sgdet_psg.py +0 -28
spaces/123harsh/gradio-easywriter/app.py DELETED
@@ -1,8 +0,0 @@
1
- import wikipedia
2
- import gradio as gr
3
- def ai_text(input):
4
- value = wikipedia.summary(input)
5
- print(type(value))
6
- return value
7
- iface = gr.Interface(fn = ai_text, inputs="text",outputs="text",title= "Answer Generator", description= "AI Generated Answer" )
8
- iface.launch(debug=False)
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cat Et 2009c Keygen A Complete Guide to Using CAT ET 2009C Software for Caterpillar Vehicles.md DELETED
@@ -1,56 +0,0 @@
1
- <br />
2
- <h1>Cat Et 2009c Keygen: What Is It and How to Use It?</h1>
3
- <p>If you are a CAT equipment owner or operator, you probably know how important it is to have a reliable diagnostic tool that can help you monitor, test, and troubleshoot your machines. That's where <strong>Cat Et 2009c Keygen</strong> comes in handy. In this article, we will explain what Cat Et 2009c Keygen is, what it does, how to download it, how to install it, and how to use it. By the end of this article, you will be able to use Cat Et 2009c Keygen and software like a pro.</p>
4
- <h2>Cat Et 2009c Keygen</h2><br /><p><b><b>DOWNLOAD</b> &middot;&middot;&middot; <a href="https://byltly.com/2uKzZn">https://byltly.com/2uKzZn</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <p><strong>Cat Et 2009c Keygen</strong> is a small program that can generate license keys for <strong>Cat Et 2009c software</strong>. Cat Et stands for Caterpillar Electronic Technician, which is a diagnostic software that allows you to communicate with your CAT equipment using a data link adapter. With Cat Et software, you can perform various tasks such as:</p>
7
- <ul>
8
- <li>View live data and status parameters of your CAT equipment</li>
9
- <li>Run diagnostic tests and calibrations on your CAT equipment</li>
10
- <li>Read and clear diagnostic codes and event logs of your CAT equipment</li>
11
- <li>Configure and customize settings of your CAT equipment</li>
12
- <li>Update firmware and software of your CAT equipment</li>
13
- </ul>
14
- <p>Cat Et software is compatible with most CAT equipment models such as engines, generators, trucks, loaders, excavators, graders, etc. However, Cat Et software is not free. You need to purchase a license key from Caterpillar or an authorized dealer to activate the software. This can be expensive and inconvenient for some users who want to use the software for personal or educational purposes.</p>
15
- <p>That's why some users resort to using <strong>Cat Et 2009c Keygen</strong>, which can generate license keys for free. By using Cat Et 2009c Keygen, you can activate Cat Et 2009c software without paying anything. However, you should be aware that using Cat Et 2009c Keygen may be illegal or unethical in some countries or regions. You should also be careful about downloading Cat Et 2009c Keygen from unknown or untrusted sources, as they may contain viruses or malware that can harm your computer or device.</p>
16
- <p>If you want to download <strong>Cat Et 2009c Keygen</strong>, you can find it on some online forums or websites that specialize in automotive software. One example is MHH AUTO (https://mhhauto.com/), which is a popular forum for automotive enthusiasts and professionals. You can find several threads on MHH AUTO that provide links to download Cat Et 2009c Keygen and software. However, you need to register as a member of MHH AUTO before you can access these links. You also need to use a torrent client such as uTorrent or BitTorrent to download the files.</p>
17
- <h2>How to Install Cat Et 2009c Keygen</h2>
18
- <p>Once you have downloaded <strong>Cat Et 2009c Keygen</strong> and software, you need to install them on your computer or device. Here are the steps to install Cat Et 2009c Keygen and software:</p>
19
- <ol>
20
- <li>Extract the downloaded files using a program such as WinRAR or WinZip.</li>
21
- <li>Open the folder that contains the extracted files.</li>
22
- <li>Run the setup.exe file to install Cat Et 2009c software.</li>
23
- <li>Follow the instructions on the screen to complete the installation.</li>
24
- <li>Do not launch the Cat Et software yet.</li>
25
- <li>Open the folder that contains <strong>Cat Et 2009c Keygen</strong>.</li>
26
- <li>Run the keygen.exe file.</li>
27
- <li>Select your version of Cat ET (in this case, select ET2K8C).</li>
28
- <li>Select your level of service (in this case, select Factory).</li>
29
- <li>Select your expiration date (in this case, select Never Expire).</li>
30
- <li>Click on Generate License Key button.</li>
31
- <li>Copy the generated license key.</li>
32
- <li>Paste the license key into a text file and save it as lic.dat.</li>
33
- <li>Copy the lic.dat file.</li>
34
- <li>Paste the lic.dat file into the folder where you installed Cat ET (usually C:\Program Files\Caterpillar\Electronic Technician).</li>
35
- <li>You have successfully installed <strong>Cat ET Software</strong>.</li>
36
- </ol>
37
- <h2>How to Use Cat ET Software</h2>
38
- <p>Now that you have installed <strong>Cat ET Software</strong>, you can start using it to communicate with your CAT equipment. Here are the steps to use Cat ET Software:</p>
39
- <ol>
40
- <li>Connect your data link adapter (such as Nexiq USB Link) to your computer or device using a USB cable.</li>
41
- <li>Connect your data link adapter to your CAT equipment using an appropriate cable (such as J1939 or J1708).</li>
42
- <li>Turn on your CAT equipment.</li>
43
- <li>Launch the <strong>Cat ET Software</strong>.</li>
44
- <li>Select Utilities > Preferences > Communications from the menu bar.</li>
45
- <li>Select your data link adapter from the drop-down list (such as Nexiq USB Link).</li>
46
- <li>Select OK.</li>
47
- <li>Select Connect > Connect from the menu bar or click on Connect icon on the toolbar.</li>
48
- <li>The <strong>Cat ET Software</strong> will scan for available devices on the data link.</li>
49
- <li>Select your desired device from the list (such as Engine ECM) and click OK.</li>
50
- <li>The <strong>Cat ET Software</strong> will establish communication with your selected device.</li>
51
- <li>You can now view live data, run tests, read codes, configure settings, update firmware, etc. using <strong>Cat ET Software</strong>.</li>
52
- <h2>Conclusion</h2>
53
- <p>In conclusion,<strong>Cat ET Software</strong> is a powerful diagnostic tool that can help you monitor,test,and troubleshoot your CAT equipment.<strong>Cat ET Software</strong> requires a license key to activate,but you can use<strong>Cat ET Software</strong><em><em><em><em><em><em><em><em><em><em><em><em><em><em><em><em><em><em><em></em></em></em></em></em></em></em></em></em></em></em></em></em></em></em></em></em></p>
54
- <p>If you want to use<strong>Cat ET Software</strong>,you need to download<strong>Cat ET Software</strong>,and<strong>Cat ET Software</strong>.You can find them on some online forums or websites that specialize in automotive software,such as MHH AUTO.You also need a data link adapter,such as Nexiq USB Link,to connect<strong>Cat ET Software</strong>,to</p> 0a6ba089eb<br />
55
- <br />
56
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Clonebd Crack.md DELETED
@@ -1,33 +0,0 @@
1
-
2
- <h1>CloneBD: A Complete Multimedia Solution for Blu-ray and DVD Movies</h1>
3
- <p>If you are looking for a software that can copy, rip, backup, burn, convert, and download Blu-ray and DVD movies, you should check out <strong>CloneBD</strong>. CloneBD is a comprehensive and versatile tool that can handle all your Blu-ray and DVD needs with ease and efficiency.</p>
4
- <p>CloneBD is a product of Elaborate Bytes, a company that has been developing high-quality software for optical media since 1998. CloneBD is one of their flagship products, along with CloneDVD and Virtual CloneDrive. CloneBD offers eight powerful and useful multimedia software in one pack:</p>
5
- <h2>clonebd crack</h2><br /><p><b><b>Download File</b> &#10026; <a href="https://byltly.com/2uKvoJ">https://byltly.com/2uKvoJ</a></b></p><br /><br />
6
- <ul>
7
- <li><strong>Blu-ray Copy</strong>: This software lets you copy any unprotected Blu-ray to your hard drive or any blank Blu-ray disc. You can choose to make a partial or complete copy of selected titles, audio languages, and subtitle languages. You can also compress BD-50 to a single BD-25, BD-9 or BD-5.</li>
8
- <li><strong>Blu-ray Ripper</strong>: This software lets you rip and convert Blu-ray movies to popular video formats, such as MP4, MKV, AVI, etc. You can also convert them to devices like Android, iPhone/iPad, Smart TV, etc. You can also extract audio from Blu-ray movies and save them as MP3, WAV, AAC, etc.</li>
9
- <li><strong>Blu-ray Creator</strong>: This software lets you convert and burn all popular videos to Blu-ray disc. You can also create Blu-ray menus and customize them with your own style.</li>
10
- <li><strong>DVD Copy</strong>: This software lets you copy any DVD movies to your computer or backup DVD to blank DVD disc. You can also make movie-only copies without trailers, bonuses, etc.</li>
11
- <li><strong>DVD Ripper</strong>: This software lets you rip and convert DVD movies to popular video formats, such as MP4, MKV, AVI, etc. You can also convert them to devices like Android, iPhone/iPad, Smart TV, etc. You can also extract audio from DVD movies and save them as MP3, WAV, AAC, etc.</li>
12
- <li><strong>DVD Creator</strong>: This software lets you convert and burn all popular videos to DVD disc. You can also create DVD menus and customize them with your own style.</li>
13
- <li><strong>Video Converter</strong>: This software lets you convert all popular common and HD video/audio files for play on mobile devices. You can also edit videos with features like crop, trim, merge, rotate, add watermark, subtitle, etc.</li>
14
- <li><strong>YouTube Downloader</strong>: This software lets you download YouTube videos and other online videos from Facebook, etc. You can also convert downloaded videos to 180+ formats.</li>
15
- </ul>
16
- <p>With CloneBD, you can enjoy your Blu-ray and DVD movies anytime and anywhere. You can also share them with your friends and family via email or social media. It's a must-have software for Blu-ray and DVD lovers.</p>
17
- <h2>How to Use CloneBD</h2>
18
- <p>Using CloneBD is very simple and intuitive. Here are the steps to follow:</p>
19
- <ol>
20
- <li>Download and install CloneBD on your computer. You can get it from the official website or click the button below.</li>
21
- <li>Launch the software and choose the function you want to use from the main interface.</li>
22
- <li>Follow the instructions on the screen to complete the task.</li>
23
- </ol>
24
- <p>That's it! You have successfully used CloneBD to handle your Blu-ray and DVD movies. You can repeat the same steps for other functions you want to use.</p>
25
- <h2>Why Choose CloneBD</h2>
26
- <p>CloneBD is not just another Blu-ray and DVD software. It has many advantages that make it stand out from the crowd. Here are some of them:</p>
27
- <ul>
28
- <li>It supports all regions (A,B,C) of Blu-ray and DVD discs.</li>
29
- <li>It supports copying/ripping/burning/converting HD and 4K videos with high quality and fast speed.</li>
30
- <li>It supports 3D Blu-ray movies (currently MKV output only).</li>
31
- <li>It supports UHD HEVC 10 bit HDR input and output</p> ddb901b051<br />
32
- <br />
33
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download phn mm dit virus kaspersky full crack from a Trusted Website.md DELETED
@@ -1,20 +0,0 @@
1
- <br />
2
- <h1>How to download and install phần mềm diệt virus kaspersky full crack for free</h1>
3
- <p>If you are looking for a reliable and powerful antivirus software for your PC, you might want to try phần mềm diệt virus kaspersky full crack. This is a cracked version of the popular Kaspersky antivirus software that can protect your computer from various threats such as viruses, malware, ransomware, spyware, and more.</p>
4
- <p>However, downloading and installing phần mềm diệt virus kaspersky full crack is not as easy as it sounds. You need to be careful about the source of the file, the compatibility of your system, and the activation process. In this article, we will show you how to download and install phần mềm diệt virus kaspersky full crack for free in a few simple steps.</p>
5
- <h2>phần mềm diệt virus kaspersky full crack</h2><br /><p><b><b>Download File</b> &bull; <a href="https://byltly.com/2uKyGh">https://byltly.com/2uKyGh</a></b></p><br /><br />
6
- <h2>Step 1: Download phần mềm diệt virus kaspersky full crack from a trusted website</h2>
7
- <p>The first thing you need to do is to find a website that offers phần mềm diệt virus kaspersky full crack for free. There are many websites that claim to provide this software, but some of them may contain viruses or malware that can harm your PC. Therefore, you should only download phần mềm diệt virus kaspersky full crack from a trusted website that has positive reviews and feedback from other users.</p>
8
- <p>One of the websites that we recommend is <a href="https://phanmemdietvirus.com/kaspersky-full-crack/">phanmemdietvirus.com</a>. This website has been providing phần mềm diệt virus kaspersky full crack for a long time and has a good reputation among users. You can download phần mềm diệt virus kaspersky full crack from this website by clicking on the link below:</p>
9
- <p><a href="https://phanmemdietvirus.com/kaspersky-full-crack/">Download phần mềm diệt virus kaspersky full crack</a></p>
10
- <h2>Step 2: Extract the downloaded file and run the setup file</h2>
11
- <p>After you have downloaded phần mềm diệt virus kaspersky full crack from the website, you need to extract the file using a software such as WinRAR or 7-Zip. You will get a folder containing several files, including the setup file for phần mềm diệt virus kaspersky full crack.</p>
12
- <p>You need to run the setup file as an administrator by right-clicking on it and choosing "Run as administrator". This will start the installation process of phần mềm diệt virus kaspersky full crack on your PC. You need to follow the instructions on the screen and choose the options that suit your preferences.</p>
13
- <h2>Step 3: Activate phần mềm diệt virus kaspersky full crack using the license key</h2>
14
- <p>The final step is to activate phần mềm diệt virus kaspersky full crack using the license key that is provided in the folder. You need to open phần mềm diệt virus kaspersky full crack after installing it and go to the "Settings" section. There, you need to click on the "License" option and enter the license key that is given in the folder.</p>
15
- <p>The license key will activate phần mềm diệt virus kaspersky full crack for a period of one year. You can enjoy all the features and benefits of phần mềm diệt virus kaspersky full crack without any limitations or restrictions.</p>
16
- <h2>Conclusion</h2>
17
- <p>Phần mềm diệt virus kaspersky full crack is a great antivirus software that can protect your PC from various threats and enhance its performance. However, you need to be careful when downloading and installing it, as some websites may offer fake or infected files. You should only download phần mềm diệt virus kaspersky full crack from a trusted website such as <a href="https://phanmemdietvirus.com/kaspersky-full-crack/">phanmemdietvirus.com</a> and follow the steps above to install and activate it successfully.</p>
18
- <p></p> ddb901b051<br />
19
- <br />
20
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FBX 2019 Free Download With Crack The Best Way to Convert and Export Your 3D Models.md DELETED
@@ -1,165 +0,0 @@
1
- <br />
2
- <h1>FBX 2019 Free Download With Crack: What You Need to Know</h1>
3
- <p>If you are a 3D artist or a game developer, you probably have heard of FBX, the most widely used file format for 3D content. But do you know what FBX is, why you need it, and how to get it for free with crack? In this article, we will answer these questions and more. We will also show you how to use FBX 2019 for your 3D projects and how to avoid the risks and issues of using cracked software.</p>
4
- <h2>What is FBX?</h2>
5
- <p>FBX stands for Filmbox, a proprietary file format developed by Kaydara and later acquired by Autodesk. It is a platform-independent format that allows you to transfer 3D data between different software applications, such as Maya, 3ds Max, Blender, Cinema 4D, Unity, Unreal Engine, and more.</p>
6
- <h2>FBX 2019 free download with crack</h2><br /><p><b><b>Download</b> &#8230;&#8230;&#8230; <a href="https://byltly.com/2uKxQq">https://byltly.com/2uKxQq</a></b></p><br /><br />
7
- <h3>FBX file format and its features</h3>
8
- <p>FBX files can store various types of 3D data, such as meshes, materials, textures, animations, cameras, lights, custom properties, and more. They can also support N-gons (polygons with more than four vertices), multiple material sets, multiple UV sets, multiple vertex color sets, meshes attached to bones, mesh instances, dummy nodes, shape key animation, non-linear animation, and more.</p>
9
- <p>FBX files are compatible with all versions of FBX, from ASCII to binary, from FBX 5.3 to FBX 2020. They can also be compressed or encrypted to reduce file size or protect intellectual property.</p>
10
- <h3>FBX SDK and its benefits</h3>
11
- <p>The Autodesk® FBX® SDK is a free, easy-to-use, C++ software development platform and API toolkit that allows application and content vendors to transfer existing content into the FBX format with minimal effort. It also enables developers to create custom tools and plugins for importing and exporting FBX files in their own applications.</p>
12
- <p>The benefits of using the FBX SDK include:</p>
13
- <p>How to get FBX 2019 full version for free<br />
14
- FBX 2019 cracked software download link<br />
15
- FBX 2019 license key generator online<br />
16
- FBX 2019 patch file download and installation guide<br />
17
- FBX 2019 activation code free no survey<br />
18
- FBX 2019 serial number crack working 100%<br />
19
- FBX 2019 torrent download with crack included<br />
20
- FBX 2019 direct download from official website<br />
21
- FBX 2019 latest update download with crack<br />
22
- FBX 2019 offline installer download with crack<br />
23
- FBX 2019 portable version download with crack<br />
24
- FBX 2019 crack only download no virus<br />
25
- FBX 2019 keygen download and how to use it<br />
26
- FBX 2019 registration code free and easy<br />
27
- FBX 2019 product key crack valid for lifetime<br />
28
- FBX 2019 best alternative software free download<br />
29
- FBX 2019 review and features comparison<br />
30
- FBX 2019 system requirements and compatibility<br />
31
- FBX 2019 tips and tricks to optimize performance<br />
32
- FBX 2019 tutorial and user manual pdf download<br />
33
- FBX 2019 support and customer service contact<br />
34
- FBX 2019 refund policy and guarantee<br />
35
- FBX 2019 discount code and coupon code<br />
36
- FBX 2019 free trial download and how to extend it<br />
37
- FBX 2019 pros and cons and honest feedback<br />
38
- FBX 2019 testimonials and user reviews<br />
39
- FBX 2019 awards and recognition<br />
40
- FBX 2019 FAQs and common issues<br />
41
- FBX 2019 forum and community discussion<br />
42
- FBX 2019 blog and news updates<br />
43
- FBX 2019 video tutorial and demo download<br />
44
- FBX 2019 webinar and live training session<br />
45
- FBX 2019 case study and success story<br />
46
- FBX 2019 affiliate program and how to join it<br />
47
- FBX 2019 bonus and free gift offer<br />
48
- FBX 2019 giveaway and sweepstakes entry<br />
49
- FBX 2019 cheat sheet and quick reference guide<br />
50
- FBX 2019 checklist and best practices<br />
51
- FBX 2019 infographic and visual summary<br />
52
- FBX 2019 comparison chart and matrix<br />
53
- FBX 2019 mind map and brainstorming tool<br />
54
- FBX 2019 template and sample download<br />
55
- FBX 2019 plugin and add-on download<br />
56
- FBX 2019 extension and integration download<br />
57
- FBX 2019 API and SDK download<br />
58
- FBX 2019 source code and script download<br />
59
- FBX 2019 mod and hack download<br />
60
- FBX 2019 premium account and membership access<br />
61
- FBX 2019 resell rights and master resell rights license</p>
62
- <ul>
63
- <li>Accessing the latest features and improvements of the FBX format</li>
64
- <li>Ensuring compatibility and interoperability with other software applications</li>
65
- <li>Reducing development time and cost</li>
66
- <li>Increasing performance and stability</li>
67
- <li>Supporting multiple platforms (Windows, Mac OS X, Linux)</li>
68
- </ul>
69
- <h2>Why do you need FBX 2019?</h2>
70
- <p>If you are working with 3D content, you may need to use the latest version of FBX for several reasons:</p>
71
- <h3>New features and improvements in FBX 2019</h3>
72
- <p>FBX 2019 introduces some new features and improvements that enhance the functionality and usability of the file format. Some of these include:</p>
73
- <ul>
74
- <li>Support for vertex animation of Maya format (.mc/.mcx) and 3ds Max format (.pc2)</li>
75
- <li>Support for exporting smoothing groups</li>
76
- <li>Support for automatic bone orientation</li>
77
- <li>New Clean Up Scene tool that reduces the size of archived files</li>
78
- <li>New Camera Switcher feature that compiles takes from multiple camera views</li>
79
- <li>New ripple editing option that syncs the lengths of shots and clips</li>
80
- <li>New look for character controls</li>
81
- <li>Bug fixes and performance enhancements</li>
82
- </ul>
83
- <h3>Compatibility and support for various 3D software and game engines</h3>
84
- <p>FBX 2019 is compatible with most popular 3D software applications and game engines that support the FBX file format. These include:</p>
85
- <ul>
86
- <li>Autodesk Maya</li>
87
- <li>Autodesk 3ds Max</li>
88
- <li>Autodesk MotionBuilder</li>
89
- <li>Autodesk Mudbox</li>
90
- <li>Cinema 4D</li>
91
- <li>Blender</li>
92
- <li>ZBrush</li>
93
- <li>Houdini</li>
94
- <li>Mixamo</li>
95
- <li>Daz Studio</li>
96
- <li>Marmoset Toolbag</li>
97
- <li>Substance Painter/Designer</li>
98
- <li>Mari</li>
99
- <li>Unity Engine</li>
100
- <li>Unreal Engine</li>
101
- <li>CryEngine</li>
102
- <li>Lumberyard Engine</li>
103
- <li>Godot Engine</li>
104
- <li>Gamemaker Studio</li>
105
- <li>RPG Maker MV/MZ</li>
106
- <li>and more...</li>
107
- </ul>
108
- <h2>How to download FBX 2019 for free with crack?</h2>
109
- <p>You may be tempted to download FBX 2019 for free with crack from some websites that offer pirated software. However, this is not a good idea for several reasons:</p>
110
- <h3>The risks and drawbacks of using cracked software</h3>
111
- <p>Using cracked software can expose you to various risks and drawbacks that can compromise your work quality, security, privacy, reputation, and legal status. Some of these are:</p>
112
- <ul>
113
- <li>Viruses, malware, spyware, ransomware, trojans, worms, etc. that can infect your computer system or network.</li>
114
- <li>Data loss or corruption due to faulty or malicious code.</li>
115
- <li>Lack of updates or technical support from the official vendor.</li>
116
- <li>Incompatibility or instability issues with other software applications or hardware devices.</li>
117
- <li>Poor performance or quality due to missing features or bugs.</li>
118
- <li>Limited functionality or access due to activation or validation errors.</li>
119
- <li>Lack of documentation or tutorials on how to use the software properly.</li>
120
- <h4>The legal and ethical issues of pirating software</h4>
121
- <p>Pirating software is not only risky but also illegal and unethical. It violates the intellectual property rights of the software developers who invested time, money, effort, and creativity into creating their products. It also deprives them of their rightful income that they deserve for their work. Furthermore, it harms the entire software industry by reducing innovation, competition, quality standards, customer satisfaction, and trust.</p>
122
- <p>Pirating software can result in serious legal consequences such as fines, lawsuits, criminal charges, or imprisonment. It can also damage your reputation or credibility as a professional or a student in the field of 3D art or game development.</p>
123
- <h4>The best and safest way to get FBX 2019 for free <\h4></p>
124
- <p>The best and safest way to get FBX 2019 for free is to download it from the official website of Autodesk. You can get a free trial version that lasts for 30 days, or a free educational version that lasts for three years if you are a student, teacher, or academic institution. You can also get a free personal learning edition that has no time limit, but has some limitations on functionality and usage.</p>
125
- <p>To download FBX 2019 for free from Autodesk, you need to create an account, sign in, and follow the instructions on the website. You will need to provide some information about yourself, your purpose, and your system requirements. You will also need to agree to the terms and conditions of use.</p>
126
- <h2>How to use FBX 2019 for your <h2>How to use FBX 2019 for your 3D projects?</h2>
127
- <p>Once you have downloaded and installed FBX 2019, you can use it for your 3D projects in various ways:</p>
128
- <h3>How to import and export FBX files in different 3D software</h3>
129
- <p>Most 3D software applications have built-in support for importing and exporting FBX files. You can usually find these options in the File menu or the Import/Export menu. You may need to adjust some settings or preferences to ensure the best results. For example, you may need to specify the units, scale, axis, coordinate system, animation range, etc.</p>
130
- <p>Some 3D software applications also have plugins or add-ons that enhance the functionality or compatibility of FBX files. For example, Blender has a Better FBX Importer & Exporter add-on that supports more features and versions of FBX files. You can download and install these plugins or add-ons from their respective websites or sources.</p>
131
- <h3>How to edit and animate FBX files with MotionBuilder 2019</h3>
132
- <p>MotionBuilder 2019 is a powerful motion capture playback and editing application that supports FBX files. It has a new Clean Up Scene tool that reduces the size of archived files, a new Camera Switcher feature that compiles takes from multiple camera views, and a new look for character controls. You can use MotionBuilder 2019 to edit and animate FBX files in various ways:</p>
133
- <ul>
134
- <li>You can import FBX files from different sources, such as cameras, devices, software applications, etc.</li>
135
- <li>You can edit the properties, attributes, and settings of the FBX files, such as meshes, materials, textures, animations, cameras, lights, etc.</li>
136
- <li>You can apply various tools and effects to the FBX files, such as filters, constraints, solvers, retargeting, blending, layering, etc.</li>
137
- <li>You can create and modify animations for the FBX files using keyframes, curves, motion capture data, story clips, etc.</li>
138
- <li>You can export the edited and animated FBX files to different destinations, such as software applications, game engines, renderers, etc.</li>
139
- </ul>
140
- <h3>How to optimize and convert FBX files for game engines</h3>
141
- <p>If you want to use your FBX files for game development, you may need to optimize and convert them for different game engines. This can improve the performance and quality of your games. Some of the ways to optimize and convert FBX files for game engines are:</p>
142
- <ul>
143
- <li>You can reduce the polygon count of your meshes by using decimation or simplification tools.</li>
144
- <li>You can reduce the file size of your textures by using compression or optimization tools.</li>
145
- <li>You can reduce the number of materials and textures by using atlasing or baking tools.</li>
146
- <li>You can reduce the number of animations by using trimming or merging tools.</li>
147
- <li>You can convert your FBX files to other file formats that are supported by your game engine by using conversion or export tools.</li>
148
- </ul>
149
- <h2>Conclusion</h2>
150
- <p>In conclusion, FBX 2019 is a versatile and powerful file format for 3D content that allows you to transfer and share your 3D data between different software applications and game engines. It also offers new features and improvements that enhance the functionality and usability of the file format. However, you should avoid downloading FBX 2019 for free with crack, as it can expose you to various risks and issues that can compromise your work quality, security, privacy, reputation, and legal status. The best and safest way to get FBX 2019 for free is to download it from the official website of Autodesk, where you can get a free trial version, a free educational version, or a free personal learning edition. You can also use FBX 2019 for your 3D projects by importing and exporting FBX files in different 3D software, editing and animating FBX files with MotionBuilder 2019, and optimizing and converting FBX files for game engines.</p>
151
- <h2>FAQs</h2>
152
- <p>Here are some frequently asked questions about FBX 2019:</p>
153
- <h4>Q: What is the difference between FBX Binary and FBX ASCII?</h4>
154
- <p>A: FBX Binary is a binary file format that is more compact and faster to read and write than FBX ASCII. However, it is less human-readable and editable than FBX ASCII. FBX ASCII is a text file format that is more human-readable and editable than FBX Binary. However, it is less compact and slower to read and write than FBX Binary.</p>
155
- <h4>Q: How can I view or edit FBX files without installing any software?</h4>
156
- <p>A: You can use online tools or websites that allow you to view or edit FBX files without installing any software. For example, you can use Autodesk Viewer (https://viewer.autodesk.com/) to view FBX files online. You can also use Claro (https://www.clar.io/) to edit FBX files online.</p>
157
- <h4>Q: How can I convert FBX files to other file formats?</h4>
158
- <p>A: You can use various software applications or online tools that allow you to convert FBX files to other file formats. For example, you can use Autodesk FBX Converter (https://www.autodesk.com/products/fbx/fbx-converter-archive) to convert FBX files to OBJ or DAE files. You can also use Online Convert (https://www.online-convert.com/) to convert FBX files to various file formats online.</p>
159
- <h4>Q: How can I optimize my FBX files for game engines?</h4>
160
- <p>A: You can use various software applications or online tools that allow you to optimize your FBX files for game engines. For example, you can use Simplygon (https://www.simplygon.com/) to reduce the polygon count of your meshes. You can also use Texture Packer (https://www.codeandweb.com/texturepacker) to reduce the number of textures by creating texture atlases.</p>
161
- <h4>Q: How can I learn more about FBX 2019?</h4>
162
- <p>A: You can learn more about FBX 2019 by visiting the official website of Autodesk (https://www.autodesk.com/products/fbx/overview). You can also read the documentation (https://help.autodesk.com/view/FBX/2020/ENU/) or watch the tutorials (https://area.autodesk.com/tutorials/fbx/) on how to use FBX 2019.</p>
163
- </p> 0a6ba089eb<br />
164
- <br />
165
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Dartfish Team Pro 5 5 Full Crack.md DELETED
@@ -1,31 +0,0 @@
1
- <br />
2
- <h1>How to Download and Install Dartfish Team Pro 5.5 Full Crack</h1>
3
- <p>Dartfish Team Pro 5.5 is a powerful video analysis software that helps coaches, athletes, and teachers improve their performance and skills. With Dartfish Team Pro 5.5, you can capture, edit, annotate, and share video clips of your training sessions, games, or lessons. You can also use Dartfish Team Pro 5.5 to create interactive presentations, reports, and feedback tools.</p>
4
- <h2>Dartfish Team Pro 5 5 Full Crack</h2><br /><p><b><b>DOWNLOAD</b> ---> <a href="https://imgfil.com/2uxYH2">https://imgfil.com/2uxYH2</a></b></p><br /><br />
5
- <p>However, Dartfish Team Pro 5.5 is not a free software. You need to purchase a license to use it legally and access all its features. If you are looking for a way to download and install Dartfish Team Pro 5.5 full crack for free, you may be tempted by some websites that claim to offer it. But be careful: these websites may contain viruses, malware, or spyware that can harm your computer or steal your personal information.</p>
6
- <p>In this article, we will show you how to download and install Dartfish Team Pro 5.5 full crack safely and easily. Follow these steps:</p>
7
- <ol>
8
- <li>Go to <a href="https://www.dartfish.com/services/download.aspx">https://www.dartfish.com/services/download.aspx</a> and download the demo version of Dartfish Team Pro 5.5. This is the official website of Dartfish, so you can trust that the file is clean and secure.</li>
9
- <li>Install the demo version of Dartfish Team Pro 5.5 on your computer by following the instructions on the screen.</li>
10
- <li>Go to <a href="http://www.requestcracks.com/4037-Dartfish_TeamPro_5_5_Full_Latest_Version.html">http://www.requestcracks.com/4037-Dartfish_TeamPro_5_5_Full_Latest_Version.html</a> and download the crack file for Dartfish Team Pro 5.5. This website is one of the few that offers a working crack for Dartfish Team Pro 5.5 without any viruses or malware.</li>
11
- <li>Extract the crack file using a program like WinRAR or 7-Zip.</li>
12
- <li>Copy the crack file and paste it into the folder where you installed Dartfish Team Pro 5.5. Usually, this folder is located at C:\Program Files\Dartfish\Dartfish Software\Dartfish TeamPro 5.5.</li>
13
- <li>Run the crack file as administrator by right-clicking on it and selecting "Run as administrator".</li>
14
- <li>Wait for the crack to finish its process. You should see a message saying "Crack successful".</li>
15
- <li>Launch Dartfish Team Pro 5.5 from your desktop or start menu. You should now have access to the full version of Dartfish Team Pro 5.5 without any limitations or restrictions.</li>
16
- </ol>
17
- <p>Congratulations! You have successfully downloaded and installed Dartfish Team Pro 5.5 full crack on your computer. Now you can enjoy using this amazing video analysis software for free.</p>
18
- <p></p>
19
- <p>Note: This article is for educational purposes only. We do not condone or encourage piracy or illegal use of software. If you like Dartfish Team Pro 5.5, please support the developers by buying a license from their official website.</p>
20
-
21
- <p>Dartfish Team Pro 5.5 is a versatile and user-friendly software that can help you with various aspects of video analysis. Here are some of the features and benefits of Dartfish Team Pro 5.5:</p>
22
- <ul>
23
- <li>You can capture video from multiple sources, such as cameras, webcams, smartphones, tablets, or online platforms. You can also import video files from your computer or external devices.</li>
24
- <li>You can edit video clips using tools like trimming, splitting, merging, cropping, rotating, zooming, or changing the speed. You can also apply filters, effects, transitions, or audio enhancements to your video clips.</li>
25
- <li>You can annotate video clips using tools like drawing, text, shapes, symbols, or measurements. You can also use Dartfish's patented stromotion and simulcam features to highlight movement patterns or compare different performances.</li>
26
- <li>You can share video clips with your team members, clients, or students using Dartfish's online platform or mobile app. You can also export video clips to various formats or upload them to social media or cloud services.</li>
27
- <li>You can create interactive presentations, reports, or feedback tools using Dartfish's templates or customizing your own. You can also use Dartfish's live tagging feature to capture and analyze data in real time.</li>
28
- </ul>
29
- <p>With Dartfish Team Pro 5.5, you can take your video analysis to the next level and achieve your goals faster and easier. Whether you are a coach, an athlete, a teacher, a student, or a professional, Dartfish Team Pro 5.5 can help you improve your performance and skills.</p> d5da3c52bf<br />
30
- <br />
31
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Decent Icons Download Windows 7 Ultimate [BETTER].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Decent Icons download windows 7 ultimate</h2><br /><p><b><b>DOWNLOAD</b> &#10001; <a href="https://imgfil.com/2uxX1X">https://imgfil.com/2uxX1X</a></b></p><br /><br />
2
- <br />
3
- Decent Icons is a Game Launcher for Windows that gives you complete creative freedom. ... A: Windows only, tested on 7, 8.1, and 10. ... Downloading and Using Icon Packs To download and use icon packs with Steam Workshop, subscribe to ... 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Edius 6 Full Indir Gezginler.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Edius 6 full indir gezginler</h2><br /><p><b><b>DOWNLOAD</b> &#10004;&#10004;&#10004; <a href="https://imgfil.com/2uxZ0V">https://imgfil.com/2uxZ0V</a></b></p><br /><br />
2
-
3
- d5da3c52bf<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator Gold APK The Best Way to Enjoy GameCube Classics on Your Phone.md DELETED
@@ -1,136 +0,0 @@
1
- <br />
2
- <h1>Introduction</h1>
3
- <p>Dolphin emulator apk gold is a modified version of the Dolphin emulator, a free and open-source program that allows you to play Nintendo GameCube and Wii games on your computer or mobile device. Dolphin emulator apk gold claims to be the fastest GameCube emulator in the world, and it offers several enhancements over the original Dolphin emulator, such as:</p>
4
- <h2>dolphin emulator apk gold</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://urlin.us/2uSWxT">https://urlin.us/2uSWxT</a></b></p><br /><br />
5
- <ul>
6
- <li>Compatibility with all GameCube games</li>
7
- <li>Compatibility with all Android HID gamepad</li>
8
- <li>Supported frame-rate speedup</li>
9
- <li>Supported networked multiplayer</li>
10
- <li>And more...</li>
11
- </ul>
12
- <p>If you are a fan of GameCube and Wii games and want to enjoy them on your Android device with improved graphics and performance, then dolphin emulator apk gold might be a good option for you. However, before you download and install it, you should be aware of its features, pros and cons, installation, compatibility, and alternatives.</p>
13
- <h1>Features</h1>
14
- <p>Dolphin emulator apk gold has many features that make it stand out from other GameCube emulators for Android devices. Some of these features are:</p>
15
- <ul>
16
- <li><b>High-definition graphics:</b> Dolphin emulator apk gold allows you to play GameCube games in 1080p full HD mode, which is much better than the original resolution of 480p. You can also enable anti-aliasing, anisotropic filtering, texture scaling, and other graphical enhancements to make the games look even more stunning.</li>
17
- <li><b>Customizable controls:</b> Dolphin emulator apk gold supports all Android HID gamepad, which means you can use any controller that is compatible with your device. You can also customize the button layout, sensitivity, vibration, and motion controls to suit your preferences.</li>
18
- <li><b>Turbo speed:</b> Dolphin emulator apk gold has a turbo speed feature that lets you increase or decrease the emulation speed with a simple button press. This can be useful for skipping cutscenes, fast-forwarding boring parts, or slowing down difficult sections.</li>
19
- <li><b>Networked multiplayer:</b> Dolphin emulator apk gold supports networked multiplayer, which means you can play online with other players who are using the same emulator. You can also use Netplay to play local multiplayer games over the internet.</li>
20
- <li><b>Cheat codes:</b> Dolphin emulator apk gold supports cheat codes for GameCube games, which can be enabled or disabled from the settings menu. You can use cheat codes to unlock hidden features, modify game parameters, or just have fun.</li>
21
- </ul>
22
- <h1>Pros and Cons</h1>
23
- <p>Dolphin emulator apk gold has many advantages over other GameCube emulators for Android devices, but it also has some drawbacks that you should consider before using it. Here are some of the pros and cons of dolphin emulator apk gold:</p>
24
- <table>
25
- <tr><th>Pros</th><th>Cons</th></tr>
26
- <tr><td>- It is free and open-source</td><td>- It may not work on some devices or Android versions</td></tr>
27
- <tr><td>- It has high compatibility with GameCube games</td><td>- It may have some bugs or glitches</td></tr>
28
- <tr><td>- It has many features and enhancements</td><td>- It may require a powerful device to run smoothly</td></tr>
29
- <tr><td>- It supports networked multiplayer</td><td>- It may have compatibility issues with some controllers or games</td></tr>
30
- <tr><td>- It supports cheat codes</td><td>- It may not be updated frequently</td></tr>
31
- </table>
32
- <h1>Installation</h1>
33
- <p>To download and install dolphin emulator apk gold on your Android device, you need to follow these simple steps:</p>
34
- <ol>
35
- <li>Search for dolphin emulator apk gold on APKPure.com or APKCombo.com, or use the links provided below .</li>
36
- <li>Tap the Download APK button to begin downloading it to your device.</li>
37
- <li>Once the download is completed, begin installing the app. You may need to enable unknown sources in your device settings to allow the installation of third-party apps.</li>
38
- <li>Once the installation is finished, launch dolphin emulator apk gold and start playing!</li>
39
- </ol>
40
- <p>Note: dolphin emulator apk gold is not available on Google Play Store, so you need to download it from other sources. Be careful when downloading apps from unknown sources, as they may contain malware or viruses. Always scan the downloaded files with a reliable antivirus program before installing them.</p>
41
- <h1>Compatibility</h1>
42
- <p>Dolphin emulator apk gold has high compatibility with GameCube games, but it may not work on some devices or Android versions. The following are the system requirements for dolphin emulator apk gold:</p>
43
- <ul>
44
- <li>Android 5.0 or higher</li>
45
- <li>A 64-bit processor (AArch64/ARMv8 or x86_64)</li>
46
- <li>A version of Android that supports 64-bit applications</li>
47
- <li>A graphics processor that supports OpenGL ES 3.0 or higher</li>
48
- </ul>
49
- <p>If your device meets these requirements, you should be able to run dolphin emulator apk gold without any major issues. However, some games may still have bugs or glitches, or may not run at full speed. You can check the compatibility list of dolphin emulator apk gold to see how well each game works on the emulator. You can also adjust the settings of dolphin emulator apk gold to optimize the performance and graphics of each game.</p>
50
- <h1>Alternatives</h1>
51
- <p>Dolphin emulator apk gold is one of the best GameCube emulators for Android devices, but it is not the only one. There are some other emulators that can run GameCube and Wii games on Android devices, such as:</p>
52
- <ul>
53
- <li><b>Cemu:</b> Cemu is a free and open-source Wii U emulator that can also run some GameCube and Wii games. It has high compatibility and performance, and supports many features and enhancements. However, it is only available for Windows and Linux platforms, and requires a powerful device to run smoothly.</li>
54
- <li><b>OpenEmu:</b> OpenEmu is a free and open-source multi-system emulator that can run games from various consoles, including GameCube and Wii. It has a simple and elegant user interface, and supports many features and enhancements. However, it is only available for Mac OS X platforms, and requires a powerful device to run smoothly.</li>
55
- <li><b>PrimeHack:</b> PrimeHack is a fork of Dolphin emulator that focuses on improving the experience of playing Metroid Prime Trilogy on PC. It has many features and enhancements, such as mouse and keyboard support, custom FOV, HUD scaling, and more. However, it is only compatible with Metroid Prime Trilogy, and requires a powerful device to run smoothly.</li>
56
- <li><b>WhineCube:</b> WhineCube is a free and open-source GameCube emulator that can run some commercial and homebrew games. It has a simple and easy-to-use user interface, and supports some features and enhancements. However, it has low compatibility and performance, and does not support networked multiplayer or cheat codes.</li>
57
- <li><b>Touchmote:</b> Touchmote is a free program that allows you to use your Windows 8 or Windows 10 touch device as a wireless controller for Dolphin emulator. It supports up to four touch devices at once, and can emulate various controller types, such as Wii Remote, Nunchuk, Classic Controller, GameCube Controller, and more. However, it does not support motion controls or networked multiplayer.</li>
58
- </ul>
59
- <h1>Conclusion</h1>
60
- <p>Dolphin emulator apk gold is a modified version of Dolphin emulator that offers several enhancements over the original version. It allows you to play Nintendo GameCube and Wii games on your Android device with improved graphics and performance. It also supports networked multiplayer, cheat codes, customizable controls, turbo speed, and more. However, it may not work on some devices or Android versions, and it may have some bugs or glitches. You should also be careful when downloading it from unknown sources, as it may contain malware or viruses.</p>
61
- <p>dolphin emulator gold apk download<br />
62
- dolphin emulator gold gamecube emulator emu<br />
63
- dolphin emulator gold android<br />
64
- dolphin emulator gold pro apk<br />
65
- dolphin emulator gold mod apk<br />
66
- dolphin emulator gold apk latest version<br />
67
- dolphin emulator gold apk free download<br />
68
- dolphin emulator gold apk full version<br />
69
- dolphin emulator gold apk no license<br />
70
- dolphin emulator gold apk 5.0<br />
71
- dolphin emulator gold apk 2023<br />
72
- dolphin emulator gold apk for pc<br />
73
- dolphin emulator gold apk reddit<br />
74
- dolphin emulator gold apk uptodown<br />
75
- dolphin emulator gold apk pure<br />
76
- dolphin emulator gold apk cracked<br />
77
- dolphin emulator gold apk old version<br />
78
- dolphin emulator gold apk revdl<br />
79
- dolphin emulator gold apk rexdl<br />
80
- dolphin emulator gold apk apkpure<br />
81
- dolphin emulator gold gamecube games download<br />
82
- dolphin emulator gold best settings<br />
83
- dolphin emulator gold cheats<br />
84
- dolphin emulator gold bios<br />
85
- dolphin emulator gold controller setup<br />
86
- dolphin emulator gold iso files<br />
87
- dolphin emulator gold roms download<br />
88
- dolphin emulator gold wii games<br />
89
- dolphin emulator gold performance boost<br />
90
- dolphin emulator gold online multiplayer<br />
91
- dolphin emulator gold save data<br />
92
- dolphin emulator gold custom textures<br />
93
- dolphin emulator gold 60fps hack<br />
94
- dolphin emulator gold widescreen patch<br />
95
- dolphin emulator gold hdmi adapter<br />
96
- dolphin emulator gold vr support<br />
97
- dolphin emulator gold netplay guide<br />
98
- dolphin emulator gold keyboard mapping<br />
99
- dolphin emulator gold mouse input<br />
100
- dolphin emulator gold gyro sensor<br />
101
- dolphin emulator gold amiibo support<br />
102
- dolphin emulator gold motion plus emulation<br />
103
- dolphin emulator gold efb scale hack<br />
104
- dolphin emulator gold texture pack download<br />
105
- dolphin emulator gold shader compilation mode<br />
106
- dolphin emulator gold vulkan backend<br />
107
- dolphin emulator gold opengl vs direct3d11 vs direct3d12 vs vulkan vs software renderer comparison</p>
108
- <p>If you are looking for a way to enjoy your favorite GameCube and Wii games on your Android device with enhanced features and performance, then dolphin emulator apk gold might be a good option for you. However, you should also consider the alternatives that are available for other platforms or devices, such as Cemu, OpenEmu, PrimeHack, WhineCube, or Touchmote. These emulators may have different features, compatibility, and performance than dolphin emulator apk gold, and may suit your needs better. I hope this article has given you some useful information about dolphin emulator apk gold, its features, pros and cons, installation, compatibility, and alternatives. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading! <h1>FAQs</h1>
109
- <p>Here are some frequently asked questions about dolphin emulator apk gold:</p>
110
- <ol>
111
- <li><b>Is dolphin emulator apk gold legal?</b></li>
112
- <p>Dolphin emulator apk gold is legal as long as you own the original GameCube or Wii games that you want to play on it. You can dump your own games using a Wii and an SD card, or download them from legitimate sources. However, downloading games that you do not own is illegal and may result in legal consequences.</p>
113
- <li><b>Is dolphin emulator apk gold safe?</b></li>
114
- <p>Dolphin emulator apk gold is safe as long as you download it from trusted sources, such as APKPure.com or APKCombo.com. You should also scan the downloaded files with a reliable antivirus program before installing them. However, dolphin emulator apk gold is not endorsed by the official Dolphin team, so use it at your own risk.</p>
115
- <li><b>How to update dolphin emulator apk gold?</b></li>
116
- <p>Dolphin emulator apk gold does not have an automatic update feature, so you need to manually check for updates on the websites from which you downloaded it. You can also follow the developer's social media accounts or blogs to get the latest news and updates about dolphin emulator apk gold.</p>
117
- <li><b>How to fix dolphin emulator apk gold crashing or freezing?</b></li>
118
- <p>Dolphin emulator apk gold may crash or freeze due to various reasons, such as low device specifications, incompatible games, corrupted files, or incorrect settings. To fix these issues, you can try the following solutions:</p>
119
- <ul>
120
- <li>Restart your device and clear the cache of dolphin emulator apk gold</li>
121
- <li>Update your device software and drivers</li>
122
- <li>Check the compatibility list of dolphin emulator apk gold and avoid playing games that are not supported</li>
123
- <li>Reinstall dolphin emulator apk gold or download a different version</li>
124
- <li>Adjust the settings of dolphin emulator apk gold to optimize the performance and graphics of each game</li>
125
- </ul>
126
- <li><b>How to contact the developer of dolphin emulator apk gold?</b></li>
127
- <p>The developer of dolphin emulator apk gold is not affiliated with the official Dolphin team, so you cannot contact them through the official Dolphin website or forums. However, you can try to contact them through their social media accounts or blogs, such as:</p>
128
- <ul>
129
- <li>Facebook: https://www.facebook.com/dolphinemulatorapk/</li>
130
- <li>Twitter: https://twitter.com/dolphinapk</li>
131
- <li>Blog: https://dolphinemulatorapk.blogspot.com/</li>
132
- </ul>
133
- </ol>
134
- : https://apkpure.com/dolphin-emulator-gold-gamecube-emulator-emulator/com.dolphin.emulator.gold : https://apkcombo.com/dolphin-emulator-gold-gamecube-emulator-emulator/com.dolphin.emulator.gold/ : https://dolphin-emu.org/docs/faq/#what-are-the-system-requirements : https://wiki.dolphin-emu.org/index.php?title=GameCube : https://cemu.info/ : https://openemu.org/ : https://github.com/shiiion/dolphin : http://www.whinecube.com/</p> 197e85843d<br />
135
- <br />
136
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Feel Good by Cleyton M - The Hottest Kuduro Song of 2023.md DELETED
@@ -1,138 +0,0 @@
1
- <br />
2
- <h1>Download Feel Good Cleyton M: How to Enjoy the New African Superstar's Music</h1>
3
- <p>If you are looking for some fresh and upbeat music to brighten up your day, you might want to check out <strong>Feel Good</strong> by <strong>Cleyton M</strong>, a rising star from Angola who is making waves in the African music scene. In this article, we will tell you who Cleyton M is, what Feel Good is about, how to download Feel Good Cleyton M, and how to enjoy his music in different ways.</p>
4
- <h2>download feel good cleyton m</h2><br /><p><b><b>Download</b> &#10038; <a href="https://urlin.us/2uT33L">https://urlin.us/2uT33L</a></b></p><br /><br />
5
- <h2>Who is Cleyton M?</h2>
6
- <p>Cleyton M is a singer and songwriter from Angola who specializes in kuduro, a genre of dance music that originated in his country. He started his musical career in 2019 with his debut single <em>Girl Friend</em>, which was followed by several other hits such as <em>Bring It</em>, <em>Slow Motion</em>, and <em>Classic Duro</em>. He is also known for his collaborations with other artists such as The Twins, Bicho e o Bruxo, and 3 Finer.</p>
7
- <p>Cleyton M has a distinctive style that combines catchy melodies, energetic beats, and positive lyrics. He aims to inspire his listeners with his message of happiness, love, and empowerment. He is also passionate about promoting Angolan culture and showcasing its diversity and richness through his music.</p>
8
- <h2>What is Feel Good?</h2>
9
- <p><strong>Feel Good</strong> is the latest song by Cleyton M, released in 2022. It is a kuduro track that features a lively rhythm, a catchy chorus, and a fun vibe. The song is about feeling good about yourself and enjoying life to the fullest. It has been praised by critics and fans alike for its uplifting mood and catchy tune.</p>
10
- <p>download feel good cleyton m mp3<br />
11
- download feel good cleyton m kuduro<br />
12
- download feel good cleyton m jox musik<br />
13
- download feel good cleyton m shazam<br />
14
- download feel good cleyton m soundcloud<br />
15
- download feel good cleyton m single<br />
16
- download feel good cleyton m 2022<br />
17
- download feel good cleyton m 224 kbps<br />
18
- download feel good cleyton m 6.9 mb<br />
19
- download feel good cleyton m lyrics<br />
20
- download feel good cleyton m music video<br />
21
- download feel good cleyton m apple music<br />
22
- download feel good cleyton m spotify<br />
23
- download feel good cleyton m youtube<br />
24
- download feel good cleyton m free<br />
25
- download feel good cleyton m online<br />
26
- download feel good cleyton m fast<br />
27
- download feel good cleyton m new scientist<br />
28
- download feel good cleyton m the sun<br />
29
- download feel good cleyton m yahoo news<br />
30
- download feel good cleyton m the twins<br />
31
- download feel good cleyton m nino flow<br />
32
- download feel good cleyton m slow motion<br />
33
- download feel good cleyton m girl friend<br />
34
- download feel good cleyton m moon walk<br />
35
- download feel good cleyton m jakylsa<br />
36
- download feel good cleyton m arrasta esse pé<br />
37
- download feel good cleyton m bicho e o bruxo<br />
38
- download feel good cleyton m bring it<br />
39
- download feel good cleyton m ja esta<br />
40
- download feel good cleyton m dj vado poster<br />
41
- download feel good cleyton m emagrece<br />
42
- download feel good cleyton m kuya kuya<br />
43
- download feel good cleyton m to bem limpo<br />
44
- download feel good cleyton m toque nice<br />
45
- download feel good cleyton m malunne<br />
46
- download feel good cleyton m ruth piluka<br />
47
- download feel good cleyton m xé moça<br />
48
- download feel good cleyton m gattuso<br />
49
- download feel good cleyton m dança milionaria<br />
50
- download feel good cleyton m scro que cuia<br />
51
- download feel good cleyton m angola musicas <br />
52
- download feel good cleyton m afro house <br />
53
- download feel good cleyton m afro beat <br />
54
- download feel good cleyton m afro pop <br />
55
- download feel good cleyton m afro naija <br />
56
- download feel good cleyton m rap <br />
57
- download feel good cleyton m r&b</p>
58
- <p>Feel Good has also become a viral sensation on social media platforms such as YouTube, TikTok, and Instagram. The song has over 490K views on YouTube, where it also has a dance challenge that encourages people to show off their moves. The song is also popular on Spotify, where it has over 8K streams.</p>
59
- <h2>How to download Feel Good Cleyton M?</h2>
60
- <p>If you want to download Feel Good Cleyton M and add it to your music library, you have several options depending on your device and preference. You can either buy the song from an online store or download it for free from a third-party website. Here are some of the ways you can do so:</p>
61
- <h3>Buying music on desktop</h3>
62
- <h4>Using iTunes to purchase and download the song</h4>
63
- <p>If you are using a Windows or Mac computer, you can use iTunes to buy and download Feel Good Cleyton M. iTunes is a popular software that allows you to manage your music collection and access millions of songs from various artists. To use iTunes, you will need an Apple ID account and a payment method such as a credit card or PayPal. To buy and download Feel Good Cleyton M using iTunes, follow these steps:</p>
64
- <ol>
65
- <li>Download and install iTunes on your computer from <a href="">https://www.apple.com/itunes/download/</a> if you don't have it already.</li>
66
- <li>Launch iTunes and sign in with your Apple ID account.</li>
67
- <li>Search for Feel Good Cleyton M in the iTunes Store or click on this link: <a href="">https://music.apple.com/us/album/feel-good-single/1587306899</a>.</li>
68
- <li>Click on the Buy button and confirm your purchase.</li>
69
- <li>The song will be downloaded to your iTunes library and you can play it anytime you want.</li>
70
- </ol>
71
- <h3>Buying music on mobile</h3>
72
- <h4>Using iTunes Store on iPhone or Play Music on Android to buy and download the song</h4>
73
- <p>If you are using a smartphone, you can also buy and download Feel Good Cleyton M from the iTunes Store on iPhone or Play Music on Android. These are apps that let you browse, buy, and download music from various artists. To use these apps, you will need an Apple ID account for iPhone or a Google account for Android, as well as a payment method. To buy and download Feel Good Cleyton M using these apps, follow these steps:</p>
74
- <ol>
75
- <li>Download and install the iTunes Store app on your iPhone from <a href="">https://apps.apple.com/us/app/itunes-store/id284417350</a> or the Play Music app on your Android from <a href="">https://play.google.com/store/apps/details?id=com.google.android.music&hl=en_US&gl=US</a> if you don't have them already.</li>
76
- <li>Launch the app and sign in with your Apple ID account for iPhone or Google account for Android.</li>
77
- <li>Search for Feel Good Cleyton M in the app or click on this link: <a href="">https://music.apple.com/us/album/feel-good-single/1587306899</a> for iPhone or <a href="">https://play.google.com/store/music/album/Cleyton_M_Feel_Good?id=B7zq6gk6x2wz5y7vq7w6l7n2f3i&hl=en_US&gl=US</a> for Android.</li>
78
- <li>Tap on the Buy button and confirm your purchase.</li>
79
- <li>The song will be downloaded to your app library and you can play it anytime you want.</li>
80
- </ol>
81
- <h3>Downloading free music from YouTube and SoundCloud</h3>
82
- <h4>Using 4K YouTube to MP3 or SoundCloud Downloader to get the song for free</h4>
83
- <p>If you don't want to spend money on buying Feel Good Cleyton M, you can also download it for free from YouTube or SoundCloud. These are websites that host millions of songs and videos from various artists. However, to download music from these websites, you will need a third-party tool such as 4K YouTube to MP3 or SoundCloud Downloader. These are software that allow you to convert and download any YouTube or SoundCloud link to an MP3 file. To use these tools, you will need a computer and an internet connection. To download Feel Good Cleyton M using these tools, follow these steps:</p>
84
- <ol>
85
- <li>Download and install 4K YouTube to MP3 on your computer from <a href="">https://www.4kdownload.com/products/product-youtubetomp3</a> or SoundCloud Downloader from <a href="">https://sclouddownloader.net/</a> if you don't have them already.</li>
86
- <li>Go to YouTube or SoundCloud and search for Feel Good Cleyton M or click on this link: <a href="">https://www.youtube.com/watch?v=Z0Z0Z0Z0Z0Z0</a> for YouTube or <a href="">https://soundcloud.com/cleyton-m-official/feel-good</a> for SoundCloud.</li>
87
- <li>Copy the URL of the video or audio that you want to download.</li>
88
- <li>Paste the URL into the 4K YouTube to MP3 or SoundCloud Downloader software and click on the Download button.</li>
89
- <li>The song will be downloaded to your computer as an MP3 file and you can play it anytime you want.</li>
90
- </ol>
91
- <h2>How to enjoy Feel Good Cleyton M?</h2>
92
- <p>Now that you have downloaded Feel Good C Cleyton M, you might be wondering how to enjoy his music in different ways. There are many options to choose from depending on your mood, preference, and device. Here are some of the ways you can enjoy Feel Good Cleyton M:</p>
93
- <h3>Listening to the song on various devices and platforms</h3>
94
- <h4>Using headphones, speakers, or streaming services to play the song</h4>
95
- <p>One of the simplest ways to enjoy Feel Good Cleyton M is to play it on your device using headphones or speakers. You can use any device that supports MP3 files, such as your computer, smartphone, tablet, or MP3 player. You can also use a Bluetooth speaker or a wireless headphone to connect your device and play the song wirelessly.</p>
96
- <p>Another way to enjoy Feel Good Cleyton M is to stream it online using a streaming service such as Spotify, YouTube Music, or Apple Music. These are apps that let you access millions of songs from various artists and genres. You can also create playlists, discover new music, and share your favorites with your friends. To use these apps, you will need an internet connection and a subscription plan for some of them. To stream Feel Good Cleyton M using these apps, follow these steps:</p>
97
- <ol>
98
- <li>Download and install the app of your choice on your device from the app store or the website.</li>
99
- <li>Launch the app and sign in with your account or create one if you don't have one already.</li>
100
- <li>Search for Feel Good Cleyton M in the app or click on this link: <a href="">https://open.spotify.com/track/5f5f5f5f5f5f5f5f5f5f5f</a> for Spotify, <a href="">https://music.youtube.com/watch?v=Z0Z0Z0Z0Z0Z0</a> for YouTube Music, or <a href="">https://music.apple.com/us/album/feel-good-single/1587306899</a> for Apple Music.</li>
101
- <li>Tap on the Play button and enjoy the song.</li>
102
- </ol>
103
- <h3>Exploring more music by Cleyton M and other African artists</h3>
104
- <h4>Checking out his other songs and albums on Spotify, YouTube, or SoundCloud</h4>
105
- <p>If you like Feel Good Cleyton M, you might also want to check out his other songs and albums. He has released several singles and albums since his debut in 2019, such as <em>Girl Friend</em>, <em>Bring It</em>, <em>Slow Motion</em>, <em>Classic Duro</em>, and <em>Kuduro Vibes</em>. You can find his music on Spotify, YouTube, or SoundCloud, where you can also follow him and get updates on his latest releases.</p>
106
- <p>To explore more music by Cleyton M on these platforms, follow these steps:</p>
107
- <ol>
108
- <li>Go to Spotify, YouTube, or SoundCloud and search for Cleyton M or click on this link: <a href="">https://open.spotify.com/artist/6g6g6g6g6g6g6g6g6g6g6g</a> for Spotify, <a href="">https://www.youtube.com/channel/UCUCUCUCUCUCUCUCUCUCUCU</a> for YouTube, or <a href="">https://soundcloud.com/cleyton-m-official</a> for SoundCloud.</li>
109
- <li>Browse through his songs and albums and choose the ones you want to listen to.</li>
110
- <li>Tap on the Play button and enjoy his music.</li>
111
- </ol>
112
- <h4>Discovering new music from similar genres and regions on Bandcamp, DatPiff, or Free Music Archive</h4>
113
- <p>If you want to discover new music from similar genres and regions as Cleyton M, you can also use platforms such as Bandcamp, DatPiff, or Free Music Archive. These are websites that host independent music from various artists and genres. You can also download some of the music for free or support the artists by buying their music.</p>
114
- <p>To discover new music from similar genres and regions as Cleyton M on these platforms, follow these steps:</p>
115
- <ol>
116
- <li>Go to Bandcamp, DatPiff, or Free Music Archive and search for kuduro, Angola, or Africa or click on this link: <a href="">https://bandcamp.com/tag/kuduro</a> for Bandcamp, <a href="">https://www.datpiff.com/mixtapes-search?criteria=kuduro&sort=relevance</a> for DatPiff, or <a href="">https://freemusicarchive.org/search?quicksearch=kuduro</ a> for Free Music Archive.</li>
117
- <li>Browse through the music and choose the ones you want to listen to or download.</li>
118
- <li>Tap on the Play or Download button and enjoy the music.</li>
119
- </ol>
120
- <h2>Conclusion</h2>
121
- <p>In conclusion, Feel Good Cleyton M is a great song that you can download and enjoy in various ways. You can buy the song from an online store or download it for free from a third-party website. You can also listen to the song on different devices and platforms using headphones, speakers, or streaming services. Moreover, you can explore more music by Cleyton M and other African artists using platforms such as Spotify, YouTube, SoundCloud, Bandcamp, DatPiff, or Free Music Archive.</p>
122
- <p>So what are you waiting for? Download Feel Good Cleyton M today and feel good about yourself and your life. You won't regret it!</p>
123
- <h2>FAQs</h2>
124
- <p>Here are some of the frequently asked questions about Feel Good Cleyton M:</p>
125
- <ol>
126
- <li>Q: What is kuduro?</li>
127
- <li>A: Kuduro is a genre of dance music that originated in Angola in the late 1980s. It is characterized by fast-paced beats, electronic sounds, and energetic vocals. It is influenced by genres such as soca, zouk, rap, and house. It is also a dance style that involves rapid and complex movements of the legs and hips.</li>
128
- <li>Q: Who are some of the other famous kuduro artists?</li>
129
- <li>A: Some of the other famous kuduro artists are Buraka Som Sistema, Os Lambas, Titica, Noite e Dia, and Puto Prata.</li>
130
- <li>Q: How can I support Cleyton M and his music?</li>
131
- <li>A: You can support Cleyton M and his music by buying his songs and albums from online stores or streaming services. You can also follow him on social media platforms such as Facebook, Instagram, Twitter, and TikTok. You can also share his music with your friends and family and join his fan club.</li>
132
- <li>Q: Where can I find more information about Cleyton M and his music?</li>
133
- <li>A: You can find more information about Cleyton M and his music on his official website <a href="">https://www.cleytonm.com/</a>, where you can also contact him for bookings, collaborations, or feedback.</li>
134
- <li>Q: How can I learn how to dance kuduro?</li>
135
- <li>A: You can learn how to dance kuduro by watching online tutorials on YouTube or other websites. You can also join a kuduro dance class or club in your area or online. You can also practice by yourself or with your friends while listening to kuduro music.</li>
136
- </ol></p> 197e85843d<br />
137
- <br />
138
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/3mrology/Chameleon_Text2Img_Generation_Demo/share_btn.py DELETED
@@ -1,88 +0,0 @@
1
- community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
- <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 
11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
- <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
- </svg>"""
5
-
6
- loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
7
- style="color: #ffffff;
8
- "
9
- xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
10
-
11
- share_js = """async () => {
12
- async function uploadFile(file){
13
- const UPLOAD_URL = 'https://huggingface.co/uploads';
14
- const response = await fetch(UPLOAD_URL, {
15
- method: 'POST',
16
- headers: {
17
- 'Content-Type': file.type,
18
- 'X-Requested-With': 'XMLHttpRequest',
19
- },
20
- body: file, /// <- File inherits from Blob
21
- });
22
- const url = await response.text();
23
- return url;
24
- }
25
- async function getInputImgFile(imgEl){
26
- const res = await fetch(imgEl.src);
27
- const blob = await res.blob();
28
- const imgId = Date.now() % 200;
29
- const isPng = imgEl.src.startsWith(`data:image/png`);
30
- if(isPng){
31
- const fileName = `magic-prompt-${{imgId}}.png`;
32
- return new File([blob], fileName, { type: 'image/png' });
33
- }else{
34
- const fileName = `magic-prompt-${{imgId}}.jpg`;
35
- return new File([blob], fileName, { type: 'image/jpeg' });
36
- }
37
- }
38
- const gradioEl = document.querySelector('body > gradio-app');
39
- // const gradioEl = document.querySelector("gradio-app").shadowRoot;
40
- const inputImgEl = gradioEl.querySelector('#input-img img');
41
- const imgEls = gradioEl.querySelectorAll('#generated-gallery img');
42
- const promptTxt = gradioEl.querySelector('#translated textarea').value;
43
- let titleTxt = promptTxt;
44
- if(titleTxt.length > 100){
45
- titleTxt = titleTxt.slice(0, 100) + ' ...';
46
- }
47
- const shareBtnEl = gradioEl.querySelector('#share-btn');
48
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
49
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
50
- if(!imgEls.length){
51
- return;
52
- };
53
- shareBtnEl.style.pointerEvents = 'none';
54
- shareIconEl.style.display = 'none';
55
- loadingIconEl.style.removeProperty('display');
56
- const files = await Promise.all(
57
- [...imgEls].map(async (imgEl) => {
58
- const res = await fetch(imgEl.src);
59
- const blob = await res.blob();
60
- const imgId = Date.now() % 200;
61
- const fileName = `sd-perception-${{imgId}}.jpg`;
62
- return new File([blob], fileName, { type: 'image/jpeg' });
63
- })
64
- );
65
- const inputFile = await getInputImgFile(inputImgEl);
66
- files.push(inputFile);
67
- const urls = await Promise.all(files.map((f) => uploadFile(f)));
68
- const urlInputImg = urls.pop();
69
- const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
70
- const htmlImgsMd = htmlImgs.join(`\n`);
71
- const descriptionMd = `#### Input img:
72
- <img src='${urlInputImg}' style='max-height: 350px;'>
73
- #### Caption:
74
- ${promptTxt}
75
- #### Generations:
76
- <div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
77
- ${htmlImgsMd}
78
- </div>`;
79
- const params = new URLSearchParams({
80
- title: titleTxt,
81
- description: descriptionMd,
82
- });
83
- const paramsStr = params.toString();
84
- window.open(`https://huggingface.co/spaces/huggingface-projects/magic-diffusion/new?${paramsStr}`, '_blank');
85
- shareBtnEl.style.removeProperty('pointer-events');
86
- shareIconEl.style.removeProperty('display');
87
- loadingIconEl.style.display = 'none';
88
- }"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_123821KB.py DELETED
@@ -1,122 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- from torch import nn
4
-
5
- from . import layers_123821KB as layers
6
-
7
-
8
- class BaseASPPNet(nn.Module):
9
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
10
- super(BaseASPPNet, self).__init__()
11
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
12
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
13
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
14
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
15
-
16
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
17
-
18
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
19
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
20
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
21
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
22
-
23
- def __call__(self, x):
24
- h, e1 = self.enc1(x)
25
- h, e2 = self.enc2(h)
26
- h, e3 = self.enc3(h)
27
- h, e4 = self.enc4(h)
28
-
29
- h = self.aspp(h)
30
-
31
- h = self.dec4(h, e4)
32
- h = self.dec3(h, e3)
33
- h = self.dec2(h, e2)
34
- h = self.dec1(h, e1)
35
-
36
- return h
37
-
38
-
39
- class CascadedASPPNet(nn.Module):
40
- def __init__(self, n_fft):
41
- super(CascadedASPPNet, self).__init__()
42
- self.stg1_low_band_net = BaseASPPNet(2, 32)
43
- self.stg1_high_band_net = BaseASPPNet(2, 32)
44
-
45
- self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
46
- self.stg2_full_band_net = BaseASPPNet(16, 32)
47
-
48
- self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
49
- self.stg3_full_band_net = BaseASPPNet(32, 64)
50
-
51
- self.out = nn.Conv2d(64, 2, 1, bias=False)
52
- self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
53
- self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
54
-
55
- self.max_bin = n_fft // 2
56
- self.output_bin = n_fft // 2 + 1
57
-
58
- self.offset = 128
59
-
60
- def forward(self, x, aggressiveness=None):
61
- mix = x.detach()
62
- x = x.clone()
63
-
64
- x = x[:, :, : self.max_bin]
65
-
66
- bandw = x.size()[2] // 2
67
- aux1 = torch.cat(
68
- [
69
- self.stg1_low_band_net(x[:, :, :bandw]),
70
- self.stg1_high_band_net(x[:, :, bandw:]),
71
- ],
72
- dim=2,
73
- )
74
-
75
- h = torch.cat([x, aux1], dim=1)
76
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
77
-
78
- h = torch.cat([x, aux1, aux2], dim=1)
79
- h = self.stg3_full_band_net(self.stg3_bridge(h))
80
-
81
- mask = torch.sigmoid(self.out(h))
82
- mask = F.pad(
83
- input=mask,
84
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
85
- mode="replicate",
86
- )
87
-
88
- if self.training:
89
- aux1 = torch.sigmoid(self.aux1_out(aux1))
90
- aux1 = F.pad(
91
- input=aux1,
92
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
93
- mode="replicate",
94
- )
95
- aux2 = torch.sigmoid(self.aux2_out(aux2))
96
- aux2 = F.pad(
97
- input=aux2,
98
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
99
- mode="replicate",
100
- )
101
- return mask * mix, aux1 * mix, aux2 * mix
102
- else:
103
- if aggressiveness:
104
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
105
- mask[:, :, : aggressiveness["split_bin"]],
106
- 1 + aggressiveness["value"] / 3,
107
- )
108
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
109
- mask[:, :, aggressiveness["split_bin"] :],
110
- 1 + aggressiveness["value"],
111
- )
112
-
113
- return mask * mix
114
-
115
- def predict(self, x_mag, aggressiveness=None):
116
- h = self.forward(x_mag, aggressiveness)
117
-
118
- if self.offset > 0:
119
- h = h[:, :, :, self.offset : -self.offset]
120
- assert h.size()[3] > 0
121
-
122
- return h
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A-Roucher/Quotes/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Quotes
3
- emoji: 🪶
4
- colorFrom: green
5
- colorTo: blue
6
- sdk: streamlit
7
- sdk_version: 1.28.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/README.md DELETED
@@ -1,9 +0,0 @@
1
- ---
2
- title: DiffSinger🎶 Diffusion for Singing Voice Synthesis
3
- emoji: 🎶
4
- colorFrom: purple
5
- colorTo: blue
6
- sdk: gradio
7
- app_file: "inference/svs/gradio/infer.py"
8
- pinned: false
9
- ---
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/__init__.py DELETED
@@ -1 +0,0 @@
1
- from . import en
 
 
spaces/AIML-TUDA/does-clip-know-my-face/app.py DELETED
@@ -1,611 +0,0 @@
1
- import glob
2
- import tempfile
3
- from decimal import Decimal
4
- from pathlib import Path
5
- from typing import List, Dict, Any
6
-
7
- import gradio as gr
8
- from PIL import Image
9
- import open_clip
10
- import torch
11
- import os
12
- import pandas as pd
13
- import numpy as np
14
- from gradio import processing_utils, utils
15
-
16
- from download_example_images import read_actor_files, save_images_to_folder
17
-
18
- DEFAULT_INITIAL_NAME = "John Doe"
19
- PROMPTS = [
20
- '{0}',
21
- 'an image of {0}',
22
- 'a photo of {0}',
23
- '{0} on a photo',
24
- 'a photo of a person named {0}',
25
- 'a person named {0}',
26
- 'a man named {0}',
27
- 'a woman named {0}',
28
- 'the name of the person is {0}',
29
- 'a photo of a person with the name {0}',
30
- '{0} at a gala',
31
- 'a photo of the celebrity {0}',
32
- 'actor {0}',
33
- 'actress {0}',
34
- 'a colored photo of {0}',
35
- 'a black and white photo of {0}',
36
- 'a cool photo of {0}',
37
- 'a cropped photo of {0}',
38
- 'a cropped image of {0}',
39
- '{0} in a suit',
40
- '{0} in a dress'
41
- ]
42
- OPEN_CLIP_LAION400M_MODEL_NAMES = ['ViT-B-32', 'ViT-B-16', 'ViT-L-14']
43
- OPEN_CLIP_LAION2B_MODEL_NAMES = [('ViT-B-32', 'laion2b_s34b_b79k'), ('ViT-L-14', 'laion2b_s32b_b82k')]
44
- OPEN_AI_MODELS = ['ViT-B-32', 'ViT-B-16', 'ViT-L-14']
45
- NUM_TOTAL_NAMES = 1_000
46
- SEED = 42
47
- MIN_NUM_CORRECT_PROMPT_PREDS = 1
48
- EDAMPLE_IMAGE_DIR = './example_images/'
49
- IMG_BATCHSIZE = 16
50
-
51
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
52
-
53
- EXAMPLE_IMAGE_URLS = read_actor_files(EDAMPLE_IMAGE_DIR)
54
- save_images_to_folder(os.path.join(EDAMPLE_IMAGE_DIR, 'images'), EXAMPLE_IMAGE_URLS)
55
-
56
- MODELS = {}
57
- for model_name in OPEN_CLIP_LAION400M_MODEL_NAMES:
58
- dataset = 'LAION400M'
59
- model, _, preprocess = open_clip.create_model_and_transforms(
60
- model_name,
61
- pretrained=f'{dataset.lower()}_e32'
62
- )
63
- model = model.eval()
64
- MODELS[f'OpenClip {model_name} trained on {dataset}'] = {
65
- 'model_instance': model,
66
- 'preprocessing': preprocess,
67
- 'model_name': model_name,
68
- 'tokenizer': open_clip.get_tokenizer(model_name),
69
- 'prompt_text_embeddings': torch.load(f'./prompt_text_embeddings/{model_name}_{dataset.lower()}_prompt_text_embeddings.pt')
70
- }
71
-
72
- for model_name, dataset_name in OPEN_CLIP_LAION2B_MODEL_NAMES:
73
- dataset = 'LAION2B'
74
- model, _, preprocess = open_clip.create_model_and_transforms(
75
- model_name,
76
- pretrained=dataset_name
77
- )
78
- model = model.eval()
79
- MODELS[f'OpenClip {model_name} trained on {dataset}'] = {
80
- 'model_instance': model,
81
- 'preprocessing': preprocess,
82
- 'model_name': model_name,
83
- 'tokenizer': open_clip.get_tokenizer(model_name),
84
- 'prompt_text_embeddings': torch.load(f'./prompt_text_embeddings/{model_name}_{dataset.lower()}_prompt_text_embeddings.pt')
85
- }
86
-
87
- for model_name in OPEN_AI_MODELS:
88
- dataset = 'OpenAI'
89
- model, _, preprocess = open_clip.create_model_and_transforms(
90
- model_name,
91
- pretrained=dataset.lower()
92
- )
93
- model = model.eval()
94
- MODELS[f'OpenClip {model_name} trained by {dataset}'] = {
95
- 'model_instance': model,
96
- 'preprocessing': preprocess,
97
- 'model_name': model_name,
98
- 'tokenizer': open_clip.get_tokenizer(model_name),
99
- 'prompt_text_embeddings': torch.load(f'./prompt_text_embeddings/{model_name}_{dataset.lower()}_prompt_text_embeddings.pt')
100
- }
101
-
102
- FULL_NAMES_DF = pd.read_csv('full_names.csv', index_col=0)
103
- LAION_MEMBERSHIP_OCCURENCE = pd.read_csv('laion_membership_occurence_count.csv', index_col=0)
104
-
105
- EXAMPLE_ACTORS_BY_MODEL = {
106
- ("ViT-B-32", "laion400m"): ["T._J._Thyne"],
107
- ("ViT-B-16", "laion400m"): ["Barbara_Schöneberger", "Carolin_Kebekus"],
108
- ("ViT-L-14", "laion400m"): ["Max_Giermann", "Nicole_De_Boer"]
109
- }
110
-
111
- EXAMPLES = []
112
- for (model_name, dataset_name), person_names in EXAMPLE_ACTORS_BY_MODEL.items():
113
- for name in person_names:
114
- image_folder = os.path.join("./example_images/images/", name)
115
- for dd_model_name in MODELS.keys():
116
- if not (model_name.lower() in dd_model_name.lower() and dataset_name.lower() in dd_model_name.lower()):
117
- continue
118
-
119
- EXAMPLES.append([
120
- dd_model_name,
121
- name.replace("_", " "),
122
- [[x.format(name.replace("_", " ")) for x in PROMPTS]],
123
- [os.path.join(image_folder, x) for x in os.listdir(image_folder)]
124
- ])
125
-
126
- LICENSE_DETAILS = """
127
- See [README.md](https://huggingface.co/spaces/AIML-TUDA/does-clip-know-my-face/blob/main/README.md) for more information about the licenses of the example images.
128
- """
129
-
130
- CORRECT_RESULT_INTERPRETATION = """<br>
131
- <h2>{0} is in the Training Data!</h2>
132
- The name of {0} has been <b>correctly predicted for {1} out of {2} prompts.</b> This means that <b>{0} was in
133
- the training data and was used to train the model.</b>
134
- Keep in mind that the probability of correctly predicting the name for {3} by chance {4} times with {5} possible names for the model to
135
- choose from, is only (<sup>1</sup> &#8260; <sub>{5}</sub>)<sup>{6}</sup> = {7}%.
136
- """
137
-
138
- INDECISIVE_RESULT_INTERPRETATION = """<br>
139
- <h2>{0} might be in the Training Data!</h2>
140
- For none of the {1} prompts the majority vote for the name of {0} was correct. However, while the majority votes are not
141
- correct, the name of {0} was correctly predicted {2} times for {3}. This is an indication that the model has seen {0}
142
- during training. A different selection of images might have a clearer result. Keep in mind that the probability
143
- that the name is correctly predicted by chance {2} times for {3} is
144
- (<sup>1</sup> &#8260; <sub>{4}</sub>)<sup>{2}</sup> = {5}%.
145
- """
146
-
147
- INCORRECT_RESULT_INTERPRETATION = """<br>
148
- <h2>{0} is most likely not in the Training Data!</h2>
149
- The name of {0} has not been correctly predicted for any of the {1} prompts. This is an indication that {0} has
150
- most likely not been used for training the model.
151
- """
152
-
153
- OCCURENCE_INFORMATION = """<br><br>
154
- According to our analysis {0} appeared {1} times among 400 million image-text pairs in the LAION-400M training dataset.
155
- """
156
-
157
- CSS = """
158
- .footer {
159
- margin-bottom: 45px;
160
- margin-top: 35px;
161
- text-align: center;
162
- border-bottom: 1px solid #e5e5e5;
163
- }
164
- #file_upload {
165
- max-height: 250px;
166
- overflow-y: auto !important;
167
- }
168
- .footer>p {
169
- font-size: .8rem;
170
- display: inline-block;
171
- padding: 0 10px;
172
- transform: translateY(10px);
173
- background: white;
174
- }
175
-
176
- .dark .footer {
177
- border-color: #303030;
178
- }
179
- .dark .footer>p {
180
- background: #0b0f19;
181
- }
182
- .acknowledgments h4{
183
- margin: 1.25em 0 .25em 0;
184
- font-weight: bold;
185
- font-size: 115%;
186
- }
187
- """
188
-
189
-
190
- # monkey patch the update function of the Files component since otherwise it is not possible to access the original
191
- # file name
192
- def preprocess(
193
- self, x: List[Dict[str, Any]] | None
194
- ) -> bytes | tempfile._TemporaryFileWrapper | List[
195
- bytes | tempfile._TemporaryFileWrapper
196
- ] | None:
197
- """
198
- Parameters:
199
- x: List of JSON objects with filename as 'name' property and base64 data as 'data' property
200
- Returns:
201
- File objects in requested format
202
- """
203
- if x is None:
204
- return None
205
-
206
- def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
207
- file_name, orig_name, data, is_file = (
208
- f["name"] if "name" in f.keys() else f["orig_name"],
209
- f["orig_name"] if "orig_name" in f.keys() else f["name"],
210
- f["data"],
211
- f.get("is_file", False),
212
- )
213
- if self.type == "file":
214
- if is_file:
215
- temp_file_path = self.make_temp_copy_if_needed(file_name)
216
- file = tempfile.NamedTemporaryFile(delete=False)
217
- file.name = temp_file_path
218
- file.orig_name = os.path.basename(orig_name.replace(self.hash_file(file_name), "")) # type: ignore
219
- else:
220
- file = processing_utils.decode_base64_to_file(
221
- data, file_path=file_name
222
- )
223
- file.orig_name = file_name # type: ignore
224
- self.temp_files.add(str(utils.abspath(file.name)))
225
- return file
226
- elif (
227
- self.type == "binary" or self.type == "bytes"
228
- ): # "bytes" is included for backwards compatibility
229
- if is_file:
230
- with open(file_name, "rb") as file_data:
231
- return file_data.read()
232
- return processing_utils.decode_base64_to_binary(data)[0]
233
- else:
234
- raise ValueError(
235
- "Unknown type: "
236
- + str(self.type)
237
- + ". Please choose from: 'file', 'bytes'."
238
- )
239
-
240
- if self.file_count == "single":
241
- if isinstance(x, list):
242
- return process_single_file(x[0])
243
- else:
244
- return process_single_file(x)
245
- else:
246
- if isinstance(x, list):
247
- return [process_single_file(f) for f in x]
248
- else:
249
- return process_single_file(x)
250
-
251
-
252
- gr.Files.preprocess = preprocess
253
-
254
-
255
- @torch.no_grad()
256
- def calculate_text_embeddings(model_name, prompts):
257
- tokenizer = MODELS[model_name]['tokenizer']
258
- context_vecs = tokenizer(prompts)
259
-
260
- model_instance = MODELS[model_name]['model_instance']
261
-
262
- model_instance = model_instance.to(DEVICE)
263
- context_vecs = context_vecs.to(DEVICE)
264
-
265
- text_features = model_instance.encode_text(context_vecs, normalize=True).cpu()
266
-
267
- model_instance = model_instance.cpu()
268
- context_vecs = context_vecs.cpu()
269
-
270
- return text_features
271
-
272
-
273
- @torch.no_grad()
274
- def calculate_image_embeddings(model_name, images):
275
- preprocessing = MODELS[model_name]['preprocessing']
276
- model_instance = MODELS[model_name]['model_instance']
277
-
278
- # load the given images
279
- user_imgs = []
280
- for tmp_file_img in images:
281
- img = Image.open(tmp_file_img.name)
282
- # preprocess the images
283
- user_imgs.append(preprocessing(img))
284
-
285
- # calculate the image embeddings
286
- image_embeddings = []
287
- model_instance = model_instance.to(DEVICE)
288
- for batch_idx in range(0, len(user_imgs), IMG_BATCHSIZE):
289
- imgs = user_imgs[batch_idx:batch_idx + IMG_BATCHSIZE]
290
- imgs = torch.stack(imgs)
291
- imgs = imgs.to(DEVICE)
292
-
293
- emb = model_instance.encode_image(imgs, normalize=True).cpu()
294
- image_embeddings.append(emb)
295
-
296
- imgs = imgs.cpu()
297
- model_instance = model_instance.cpu()
298
-
299
- return torch.cat(image_embeddings)
300
-
301
-
302
- def get_possible_names(true_name):
303
- possible_names = FULL_NAMES_DF
304
- possible_names['full_names'] = FULL_NAMES_DF['first_name'].astype(str) + ' ' + FULL_NAMES_DF['last_name'].astype(
305
- str)
306
-
307
- possible_names = possible_names[possible_names['full_names'] != true_name]
308
-
309
- # sample the same amount of male and female names
310
- sampled_names = possible_names.groupby('sex').sample(int(NUM_TOTAL_NAMES / 2), random_state=42)
311
- # shuffle the rows randomly
312
- sampled_names = sampled_names.sample(frac=1)
313
- # get only the full names since we don't need first and last name and gender anymore
314
- possible_full_names = sampled_names['full_names']
315
-
316
- return possible_full_names
317
-
318
-
319
- def round_to_first_digit(value: Decimal):
320
- tmp = np.format_float_positional(value)
321
-
322
- prob_str = []
323
- for c in str(tmp):
324
- if c in ("0", "."):
325
- prob_str.append(c)
326
- else:
327
- prob_str.append(c)
328
- break
329
-
330
- return "".join(prob_str)
331
-
332
-
333
- def get_majority_predictions(predictions: pd.Series, values_only=False, counts_only=False, value=None):
334
- """Takes a series of predictions and returns the unique values and the number of prediction occurrences
335
- in descending order."""
336
- values, counts = np.unique(predictions, return_counts=True)
337
- descending_counts_indices = counts.argsort()[::-1]
338
- values, counts = values[descending_counts_indices], counts[descending_counts_indices]
339
-
340
- idx_most_often_pred_names = np.argwhere(counts == counts.max()).flatten()
341
-
342
- if values_only:
343
- return values[idx_most_often_pred_names]
344
- elif counts_only:
345
- return counts[idx_most_often_pred_names]
346
- elif value is not None:
347
- if value not in values:
348
- return [0]
349
- # return how often the values appears in the predictions
350
- return counts[np.where(values == value)[0]]
351
- else:
352
- return values[idx_most_often_pred_names], counts[idx_most_often_pred_names]
353
-
354
-
355
- def on_submit_btn_click(model_name, true_name, prompts, images):
356
- # assert that the name is in the prompts
357
- if not prompts.iloc[0].str.contains(true_name).sum() == len(prompts.T):
358
- return None, None, """<br>
359
- <div class="error-message" style="background-color: #fce4e4; border: 1px solid #fcc2c3; padding: 20px 30px; border-radius: var(--radius-lg);">
360
- <span class="error-text" style="color: #cc0033; font-weight: bold;">
361
- The given name does not match the name in the prompts. Sometimes the UI is responding slow.
362
- Please retype the name and check that it is inserted fully into the prompts.
363
- </span>
364
- </div>
365
- """
366
-
367
- if images is None or len(images) < 1:
368
- return None, None, f"""<br>
369
- <div class="error-message" style="background-color: #fce4e4; border: 1px solid #fcc2c3; padding: 20px 30px; border-radius: var(--radius-lg);">
370
- <span class="error-text" style="color: #cc0033; font-weight: bold;">
371
- No images are given. Images are needed to determin whether {true_name} was in the dataset. Please upload at least a single image of {true_name}.
372
- </span>
373
- </div>
374
- """
375
-
376
- # calculate the image embeddings
377
- img_embeddings = calculate_image_embeddings(model_name, images)
378
-
379
- # calculate the text embeddings of the populated prompts
380
- user_text_emb = calculate_text_embeddings(model_name, prompts.values[0].tolist())
381
-
382
- # get the indices of the possible names
383
- possible_names = get_possible_names(true_name)
384
- # get the text embeddings of the possible names
385
- prompt_text_embeddings = MODELS[model_name]['prompt_text_embeddings']
386
- text_embeddings_used_for_prediction = prompt_text_embeddings.index_select(1,
387
- torch.tensor(possible_names.index.values))
388
-
389
- # add the true name and the text embeddings to the possible names
390
- names_used_for_prediction = pd.concat([possible_names, pd.Series(true_name)], ignore_index=True)
391
- text_embeddings_used_for_prediction = torch.cat([text_embeddings_used_for_prediction, user_text_emb.unsqueeze(1)],
392
- dim=1)
393
-
394
- # calculate the similarity of the images and the given texts
395
- with torch.no_grad():
396
- logits_per_image = MODELS[model_name][
397
- 'model_instance'
398
- ].logit_scale.exp().cpu() * img_embeddings @ text_embeddings_used_for_prediction.swapaxes(-1, -2)
399
- preds = logits_per_image.argmax(-1)
400
-
401
- # get the predicted names for each prompt
402
- predicted_names = []
403
- for pred in preds:
404
- predicted_names.append(names_used_for_prediction.iloc[pred])
405
- predicted_names = np.array(predicted_names)
406
-
407
- # convert the predictions into a dataframe
408
- name_predictions = pd.DataFrame(predicted_names).T.reset_index().rename(
409
- columns={i: f'Prompt {i + 1}' for i in range(len(predicted_names))}
410
- ).rename(columns={'index': 'Image'})
411
- # add the image names
412
- name_predictions['Image'] = [x.orig_name for x in images]
413
-
414
- # get the majority votes
415
- majority_preds = name_predictions[[f'Prompt {i + 1}' for i in range(len(PROMPTS))]].apply(
416
- lambda x: get_majority_predictions(x, values_only=True)
417
- )
418
- # get how often the majority name was predicted
419
- majority_preds_counts = name_predictions[[f'Prompt {i + 1}' for i in range(len(PROMPTS))]].apply(
420
- lambda x: get_majority_predictions(x, counts_only=True)
421
- ).apply(lambda x: x[0])
422
- # get how often the correct name was predicted - even if no majority
423
- true_name_preds_counts = name_predictions[[f'Prompt {i + 1}' for i in range(len(PROMPTS))]].apply(
424
- lambda x: get_majority_predictions(x, value=true_name)
425
- ).apply(lambda x: x[0])
426
-
427
- # convert the majority preds to a series of lists if it is a dataframe
428
- majority_preds = majority_preds.T.squeeze().apply(lambda x: [x]) if len(majority_preds) == 1 else majority_preds
429
-
430
- # create the results dataframe for display
431
- result = pd.concat(
432
- [name_predictions,
433
- pd.concat([pd.Series({'Image': 'Correct Name Predictions'}), true_name_preds_counts]).to_frame().T],
434
- ignore_index=True
435
- )
436
- result = pd.concat(
437
- [result, pd.concat([pd.Series({'Image': 'Majority Vote'}), majority_preds]).to_frame().T],
438
- ignore_index=True
439
- )
440
- result = pd.concat(
441
- [result, pd.concat([pd.Series({'Image': 'Majority Vote Counts'}), majority_preds_counts]).to_frame().T],
442
- ignore_index=True
443
- )
444
- result = result.set_index('Image')
445
-
446
- # check whether there is only one majority vote. If not, display Not Applicable
447
- result.loc['Majority Vote'] = result.loc['Majority Vote'].apply(
448
- lambda x: x[0] if len(x) == 1 else "N/A")
449
-
450
- # check whether the majority prediction is the correct name
451
- result.loc['Correct Majority Prediction'] = result.apply(lambda x: x['Majority Vote'] == true_name, axis=0)
452
-
453
- result = result[[f'Prompt {i + 1}' for i in range(len(PROMPTS))]].sort_values(
454
- ['Correct Name Predictions', 'Majority Vote Counts', "Correct Majority Prediction"], axis=1, ascending=False
455
- )
456
-
457
- predictions = result.loc[[x.orig_name for x in images]]
458
- prediction_results = result.loc[['Correct Name Predictions', 'Majority Vote', 'Correct Majority Prediction']]
459
-
460
- # if there are correct predictions
461
- num_correct_maj_preds = prediction_results.loc['Correct Majority Prediction'].sum()
462
- num_correct_name_preds = result.loc['Correct Name Predictions'].max()
463
- if num_correct_maj_preds > 0:
464
- interpretation = CORRECT_RESULT_INTERPRETATION.format(
465
- true_name,
466
- num_correct_maj_preds,
467
- len(PROMPTS),
468
- prediction_results.columns[0],
469
- prediction_results.iloc[0, 0],
470
- len(possible_names),
471
- predictions.iloc[:, 0].value_counts()[true_name],
472
- round_to_first_digit(
473
- (
474
- (Decimal(1) / Decimal(len(possible_names))) ** predictions.iloc[:, 0].value_counts()[true_name]
475
- ) * Decimal(100)
476
- )
477
- )
478
- elif num_correct_name_preds > 0:
479
- interpretation = INDECISIVE_RESULT_INTERPRETATION.format(
480
- true_name,
481
- len(PROMPTS),
482
- num_correct_name_preds,
483
- prediction_results.columns[result.loc['Correct Name Predictions'].to_numpy().argmax()],
484
- len(possible_names),
485
- round_to_first_digit(
486
- (
487
- (Decimal(1) / Decimal(len(possible_names))) ** Decimal(num_correct_name_preds)
488
- ) * Decimal(100)
489
- )
490
- )
491
- else:
492
- interpretation = INCORRECT_RESULT_INTERPRETATION.format(
493
- true_name,
494
- len(PROMPTS)
495
- )
496
-
497
- if 'laion400m' in model_name.lower() and true_name.lower() in LAION_MEMBERSHIP_OCCURENCE['name'].str.lower().values:
498
- row = LAION_MEMBERSHIP_OCCURENCE[LAION_MEMBERSHIP_OCCURENCE['name'].str.lower() == true_name.lower()]
499
- interpretation = interpretation + OCCURENCE_INFORMATION.format(true_name, row['count'].values[0])
500
-
501
- return predictions.reset_index(), prediction_results.reset_index(names=[""]), interpretation
502
-
503
-
504
- def populate_prompts(name):
505
- return [[x.format(name) for x in PROMPTS]]
506
-
507
-
508
- def load_uploaded_imgs(images):
509
- if images is None:
510
- return None
511
-
512
- imgs = []
513
- for file_wrapper in images:
514
- img = Image.open(file_wrapper.name)
515
- imgs.append((img, file_wrapper.orig_name))
516
-
517
- return imgs
518
-
519
-
520
- block = gr.Blocks(css=CSS)
521
- with block as demo:
522
- gr.HTML(
523
- """
524
- <div style="text-align: center; max-width: 750px; margin: 0 auto;">
525
- <div>
526
- <img
527
- class="logo"
528
- src="https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/1666181274838-62fa1d95e8c9c532aa75331c.png"
529
- alt="AIML Logo"
530
- style="margin: auto; max-width: 7rem;"
531
- >
532
- <h1 style="font-weight: 900; font-size: 3rem;">
533
- Does CLIP Know My Face?
534
- </h1>
535
- </div>
536
- <p style="margin-bottom: 10px; font-size: 94%">
537
- Want to know whether you were used to train a CLIP model? Below you can choose a model, enter your name and upload some pictures.
538
- If the model correctly predicts your name for multiple images, it is very likely that you were part of the training data.
539
- Pick some of the examples below and try it out!<br><br>
540
- Details and further analysis can be found in the paper
541
- <a href="https://arxiv.org/abs/2209.07341" style="text-decoration: underline;" target="_blank">
542
- Does CLIP Know My Face?
543
- </a>. Our code can be found at
544
- <a href="https://github.com/D0miH/does-clip-know-my-face" style="text-decoration: underline;" target="_blank">
545
- GitHub
546
- </a>.
547
- <br><br>
548
- <b>How does it work?</b> We are giving CLIP your images and let it choose from 1000 possible names.
549
- As CLIP is predicting the names that match the given images, we can probe whether the model has seen your images
550
- during training. The more images you upload the more confident you can be in the result!
551
- <br><br>
552
- <b>Disclaimer:</b> In order to process the images, they are cached on the server. The images are only used for predicting whether the person was in the training data.
553
- </p>
554
- </div>
555
- """
556
- )
557
-
558
- with gr.Row():
559
- with gr.Box():
560
- gr.Markdown("## Inputs")
561
- with gr.Column():
562
- model_dd = gr.Dropdown(label="CLIP Model", choices=list(MODELS.keys()),
563
- value=list(MODELS.keys())[0])
564
- true_name = gr.Textbox(label='Name of Person (make sure it matches the prompts):', lines=1, value=DEFAULT_INITIAL_NAME)
565
- prompts = gr.Dataframe(
566
- value=[[x.format(DEFAULT_INITIAL_NAME) for x in PROMPTS]],
567
- label='Prompts Used (hold shift to scroll sideways):',
568
- interactive=False
569
- )
570
-
571
- true_name.change(fn=populate_prompts, inputs=[true_name], outputs=prompts, show_progress=True,
572
- status_tracker=None)
573
-
574
- uploaded_imgs = gr.Files(label='Upload Images:', file_types=['image'], elem_id='file_upload').style()
575
- image_gallery = gr.Gallery(label='Images Used:', show_label=True, elem_id="image_gallery").style(grid=[5])
576
-
577
- uploaded_imgs.change(load_uploaded_imgs, inputs=uploaded_imgs, outputs=image_gallery)
578
- submit_btn = gr.Button(value='Submit')
579
-
580
- with gr.Box():
581
- gr.Markdown("## Outputs")
582
- prediction_df = gr.Dataframe(label="Prediction Output (hold shift to scroll sideways):", interactive=False)
583
- result_df = gr.DataFrame(label="Result (hold shift to scroll sideways):", interactive=False)
584
- interpretation = gr.HTML()
585
-
586
- submit_btn.click(on_submit_btn_click, inputs=[model_dd, true_name, prompts, uploaded_imgs],
587
- outputs=[prediction_df, result_df, interpretation])
588
-
589
- gr.Examples(
590
- examples=EXAMPLES,
591
- inputs=[model_dd, true_name, prompts, uploaded_imgs],
592
- outputs=[prediction_df, result_df, interpretation],
593
- fn=on_submit_btn_click,
594
- cache_examples=True
595
- )
596
-
597
- gr.Markdown(LICENSE_DETAILS)
598
-
599
- gr.HTML(
600
- """
601
- <div class="footer">
602
- <p> Gradio Demo by AIML@TU Darmstadt</p>
603
- </div>
604
- <div class="acknowledgments">
605
- <p>Created by <a href="https://www.ml.informatik.tu-darmstadt.de/people/dhintersdorf/">Dominik Hintersdorf</a> at <a href="https://www.aiml.informatik.tu-darmstadt.de">AIML Lab</a>.</p>
606
- </div>
607
- """
608
- )
609
-
610
- if __name__ == "__main__":
611
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ASJMO/freegpt/README.md DELETED
@@ -1,195 +0,0 @@
1
- ---
2
- title: FreeGPT WebUI
3
- emoji: 🚀
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: docker
7
- sdk_version: 1.24.0
8
- app_file: run.py
9
- pinned: true
10
- app_port: 1338
11
- duplicated_from: monra/freegpt-webui
12
- ---
13
-
14
- # FreeGPT WebUI
15
- ## GPT 3.5/4
16
-
17
- <strong>NOT REQUIRE ANY API KEY</strong> ❌🔑
18
-
19
- This project features a WebUI utilizing the [G4F API](https://github.com/xtekky/gpt4free). <br>
20
- Experience the power of ChatGPT with a user-friendly interface, enhanced jailbreaks, and completely free.
21
-
22
- ## Known bugs 🚧
23
- - Stream mode not working properly.
24
-
25
- ## News 📢
26
- I have created a new version of FreeGPT WebUI using the [ChimeraGPT API](https://chimeragpt.adventblocks.cc/).
27
- <br>
28
- <br>
29
- This free API allows you to use various AI chat models, including <strong>GPT-4, GPT-4-32k, Claude-2, Claude-2-100k, and more.</strong> <br>
30
- Check out the project here: [FreeGPT WebUI - Chimera Version](https://github.com/ramonvc/freegpt-webui/tree/chimeragpt-version).
31
-
32
- ## Project Hosting and Demonstration 🌐🚀
33
- The project is hosted on multiple platforms to be tested and modified.
34
- |Plataform|Status|API Key|Free|Repo|Demo|
35
- |--|--|--|--|--|--|
36
- |[replit](https://replit.com/)|![Active](https://img.shields.io/badge/Active-brightgreen)|◼️|☑️|[FreeGPT WebUI](https://replit.com/@ramonvc/freegpt-webui)|[Chat](https://freegpt-webui.ramonvc.repl.co/chat/)
37
- |[hugging face](https://huggingface.co)|![Active](https://img.shields.io/badge/Active-brightgreen)|◼️|☑️|[FreeGPT WebUI](https://huggingface.co/spaces/monra/freegpt-webui/tree/main)|[Chat](https://huggingface.co/spaces/monra/freegpt-webui)
38
- |[replit](https://replit.com/)|![Active](https://img.shields.io/badge/Active-brightgreen)|☑️|☑️|[FreeGPT WebUI - Chimera Version](https://replit.com/@ramonvc/freegpt-webui-chimera)|[Chat](https://freegpt-webui-chimera.ramonvc.repl.co/chat/)
39
-
40
- ## Note ℹ️
41
- <p>
42
- FreeGPT is a project that utilizes various free AI conversation API Providers. Each Provider is an API that provides responses generated by different AI models. The source code related to these services is available in <a href="https://github.com/ramonvc/freegpt-webui/tree/main/g4f">G4F folder</a>.
43
-
44
- It is important to note that, due to the extensive reach of this project, the free services registered here may receive a significant number of requests, which can result in temporary unavailability or access limitations. Therefore, it is common to encounter these services being offline or unstable.
45
-
46
- We recommend that you search for your own Providers and add them to your personal projects to avoid service instability and unavailability. Within the project, in the <a href="https://github.com/ramonvc/freegpt-webui/tree/main/g4f/Provider/Providers">Providers folder</a>, you will find several examples of Providers that have worked in the past or are still functioning. It is easy to follow the logic of these examples to find free GPT services and incorporate the requests into your specific FreeGPT project.
47
-
48
- Please note that the choice and integration of additional Providers are the user's responsibility and are not directly related to the FreeGPT project, as the project serves as an example of how to combine the <a href="https://github.com/xtekky/gpt4free">G4F API</a> with a web interface.
49
- </p>
50
-
51
- ## Table of Contents
52
- - [To-Do List](#to-do-list-%EF%B8%8F)
53
- - [Getting Started](#getting-started-white_check_mark)
54
- - [Cloning the Repository](#cloning-the-repository-inbox_tray)
55
- - [Install Dependencies](#install-dependencies-wrench)
56
- - [Running the Application](#running-the-application-rocket)
57
- - [Docker](#docker-)
58
- - [Prerequisites](#prerequisites)
59
- - [Running the Docker](#running-the-docker)
60
- - [Incorporated Projects](#incorporated-projects-busts_in_silhouette)
61
- - [WebUI](#webui)
62
- - [API FreeGPT](#api-g4f)
63
- - [Star History](#star-history)
64
- - [Legal Notice](#legal-notice)
65
-
66
- ##
67
-
68
- ## To-Do List ✔️
69
-
70
- - [x] Integrate the free GPT API into the WebUI
71
- - [x] Create Docker support
72
- - [x] Improve the Jailbreak functionality
73
- - [x] Add the GPT-4 model
74
- - [x] Enhance the user interface
75
- - [ ] Check status of API Providers (online/offline)
76
- - [ ] Enable editing and creating Jailbreaks/Roles in the WebUI
77
- - [ ] Refactor web client
78
-
79
- ## Getting Started :white_check_mark:
80
- To get started with this project, you'll need to clone the repository and have [Python](https://www.python.org/downloads/) installed on your system.
81
-
82
- ### Cloning the Repository :inbox_tray:
83
- Run the following command to clone the repository:
84
-
85
- ```
86
- git clone https://github.com/ramonvc/freegpt-webui.git
87
- ```
88
-
89
- ### Install Dependencies :wrench:
90
- Navigate to the project directory:
91
- ```
92
- cd freegpt-webui
93
- ```
94
-
95
- Install the dependencies:
96
- ```
97
- pip install -r requirements.txt
98
- ```
99
- ## Running the Application :rocket:
100
- To run the application, run the following command:
101
- ```
102
- python run.py
103
- ```
104
-
105
- Access the application in your browser using the URL:
106
- ```
107
- http://127.0.0.1:1338
108
- ```
109
- or
110
- ```
111
- http://localhost:1338
112
- ```
113
-
114
-
115
- ## Docker 🐳
116
- ### Prerequisites
117
- Before you start, make sure you have installed [Docker](https://www.docker.com/get-started) on your machine.
118
-
119
- ### Running the Docker
120
- Pull the Docker image from Docker Hub:
121
- ```
122
- docker pull ramonvc/freegpt-webui
123
- ```
124
-
125
- Run the application using Docker:
126
- ```
127
- docker run -p 1338:1338 ramonvc/freegpt-webui
128
- ```
129
-
130
- Access the application in your browser using the URL:
131
- ```
132
- http://127.0.0.1:1338
133
- ```
134
- or
135
- ```
136
- http://localhost:1338
137
- ```
138
-
139
- When you're done using the application, stop the Docker containers using the following command:
140
- ```
141
- docker stop <container-id>
142
- ```
143
-
144
- ## Incorporated Projects :busts_in_silhouette:
145
- I highly recommend visiting and supporting both projects.
146
-
147
- ### WebUI
148
- The application interface was incorporated from the [chatgpt-clone](https://github.com/xtekky/chatgpt-clone) repository.
149
-
150
- ### API G4F
151
- The free GPT-4 API was incorporated from the [GPT4Free](https://github.com/xtekky/gpt4free) repository.
152
-
153
- <br>
154
-
155
- ## Star History
156
- [![Star History Chart](https://api.star-history.com/svg?repos=ramonvc/freegpt-webui&type=Timeline)](https://star-history.com/#ramonvc/freegpt-webui&Timeline)
157
-
158
- <br>
159
-
160
- ## Legal Notice
161
- This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This
162
- project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to
163
- improve their security or request the removal of their site from this repository.
164
-
165
- Please note the following:
166
-
167
- 1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners.
168
- This project is _not_ claiming any right over them nor is it affiliated with or endorsed by any of the providers
169
- mentioned.
170
-
171
- 2. **Responsibility**: The author of this repository is _not_ responsible for any consequences, damages, or losses
172
- arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely
173
- responsible for their actions and any repercussions that may follow. We strongly recommend the users to follow the
174
- TOS of the each Website.
175
-
176
- 3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By
177
- using the information and code provided, users acknowledge that they are using the APIs and models at their own risk
178
- and agree to comply with any applicable laws and regulations.
179
-
180
- 4. **Copyright**: All content in this repository, including but not limited to code, images, and documentation, is the
181
- intellectual property of the repository author, unless otherwise stated. Unauthorized copying, distribution, or use
182
- of any content in this repository is strictly prohibited without the express written consent of the repository
183
- author.
184
-
185
- 5. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and
186
- against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of
187
- or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
188
-
189
- 6. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or
190
- features in this repository at any time without prior notice. Users are responsible for regularly reviewing the
191
- content and any changes made to this repository.
192
-
193
- By using this repository or any code related to it, you agree to these terms. The author is not responsible for any
194
- copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent
195
- impersonation or irresponsible actions, you may comply with the GNU GPL license this Repository uses.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ASJMO/freegpt/run.py DELETED
@@ -1,48 +0,0 @@
1
- import secrets
2
-
3
- from server.bp import bp
4
- from server.website import Website
5
- from server.backend import Backend_Api
6
- from server.babel import create_babel
7
- from json import load
8
- from flask import Flask
9
-
10
- if __name__ == '__main__':
11
-
12
- # Load configuration from config.json
13
- config = load(open('config.json', 'r'))
14
- site_config = config['site_config']
15
- url_prefix = config.pop('url_prefix')
16
-
17
- # Create the app
18
- app = Flask(__name__)
19
- app.secret_key = secrets.token_hex(16)
20
-
21
- # Set up Babel
22
- create_babel(app)
23
-
24
- # Set up the website routes
25
- site = Website(bp, url_prefix)
26
- for route in site.routes:
27
- bp.add_url_rule(
28
- route,
29
- view_func=site.routes[route]['function'],
30
- methods=site.routes[route]['methods'],
31
- )
32
-
33
- # Set up the backend API routes
34
- backend_api = Backend_Api(bp, config)
35
- for route in backend_api.routes:
36
- bp.add_url_rule(
37
- route,
38
- view_func=backend_api.routes[route]['function'],
39
- methods=backend_api.routes[route]['methods'],
40
- )
41
-
42
- # Register the blueprint
43
- app.register_blueprint(bp, url_prefix=url_prefix)
44
-
45
- # Run the Flask server
46
- print(f"Running on {site_config['port']}{url_prefix}")
47
- app.run(**site_config)
48
- print(f"Closing port {site_config['port']}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AdityaMahimkar/ParaPhraser/app.py DELETED
@@ -1,36 +0,0 @@
1
- # https://huggingface.co/tuner007/pegasus_paraphrase
2
-
3
- import nltk
4
- from nltk import sent_tokenize
5
- nltk.download('punkt')
6
-
7
- import gradio as gr
8
-
9
- import torch
10
- from transformers import PegasusForConditionalGeneration, PegasusTokenizer
11
-
12
- import warnings
13
- warnings.filterwarnings('ignore')
14
-
15
- model_name = 'tuner007/pegasus_paraphrase'
16
- torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
17
- tokenizer = PegasusTokenizer.from_pretrained(model_name)
18
- model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
19
-
20
- def paraphraser(input_text,num_return_sequences=1):
21
- sentence_list = sent_tokenize(input_text)
22
-
23
- output = []
24
- for sentence in sentence_list:
25
- batch = tokenizer.prepare_seq2seq_batch([sentence],truncation=True,padding='longest',max_length=60, return_tensors="pt").to(torch_device)
26
- translated = model.generate(**batch,max_length=60,num_beams=10, num_return_sequences=num_return_sequences, temperature=1.5)
27
- tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
28
- output.extend(tgt_text)
29
-
30
- paraphrase = [' '.join(x for x in output)]
31
- paraphrased_text = str(paraphrase).strip('[]').strip("'")
32
-
33
- return paraphrased_text
34
-
35
- paraphraseUI = gr.Interface(fn=paraphraser, inputs='textbox', outputs='text', title="ParaPhraser", theme='dark')
36
- paraphraseUI.launch(inbrowser=True, share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/losses.py DELETED
@@ -1,61 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import commons
5
-
6
-
7
- def feature_loss(fmap_r, fmap_g):
8
- loss = 0
9
- for dr, dg in zip(fmap_r, fmap_g):
10
- for rl, gl in zip(dr, dg):
11
- rl = rl.float().detach()
12
- gl = gl.float()
13
- loss += torch.mean(torch.abs(rl - gl))
14
-
15
- return loss * 2
16
-
17
-
18
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
19
- loss = 0
20
- r_losses = []
21
- g_losses = []
22
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
23
- dr = dr.float()
24
- dg = dg.float()
25
- r_loss = torch.mean((1-dr)**2)
26
- g_loss = torch.mean(dg**2)
27
- loss += (r_loss + g_loss)
28
- r_losses.append(r_loss.item())
29
- g_losses.append(g_loss.item())
30
-
31
- return loss, r_losses, g_losses
32
-
33
-
34
- def generator_loss(disc_outputs):
35
- loss = 0
36
- gen_losses = []
37
- for dg in disc_outputs:
38
- dg = dg.float()
39
- l = torch.mean((1-dg)**2)
40
- gen_losses.append(l)
41
- loss += l
42
-
43
- return loss, gen_losses
44
-
45
-
46
- def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
47
- """
48
- z_p, logs_q: [b, h, t_t]
49
- m_p, logs_p: [b, h, t_t]
50
- """
51
- z_p = z_p.float()
52
- logs_q = logs_q.float()
53
- m_p = m_p.float()
54
- logs_p = logs_p.float()
55
- z_mask = z_mask.float()
56
-
57
- kl = logs_p - logs_q - 0.5
58
- kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
59
- kl = torch.sum(kl * z_mask)
60
- l = kl / torch.sum(z_mask)
61
- return l
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/countless2d.py DELETED
@@ -1,529 +0,0 @@
1
- from __future__ import print_function, division
2
-
3
- """
4
- COUNTLESS performance test in Python.
5
-
6
- python countless2d.py ./images/NAMEOFIMAGE
7
- """
8
-
9
- import six
10
- from six.moves import range
11
- from collections import defaultdict
12
- from functools import reduce
13
- import operator
14
- import io
15
- import os
16
- from PIL import Image
17
- import math
18
- import numpy as np
19
- import random
20
- import sys
21
- import time
22
- from tqdm import tqdm
23
- from scipy import ndimage
24
-
25
- def simplest_countless(data):
26
- """
27
- Vectorized implementation of downsampling a 2D
28
- image by 2 on each side using the COUNTLESS algorithm.
29
-
30
- data is a 2D numpy array with even dimensions.
31
- """
32
- sections = []
33
-
34
- # This loop splits the 2D array apart into four arrays that are
35
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
36
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
37
- factor = (2,2)
38
- for offset in np.ndindex(factor):
39
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
40
- sections.append(part)
41
-
42
- a, b, c, d = sections
43
-
44
- ab = a * (a == b) # PICK(A,B)
45
- ac = a * (a == c) # PICK(A,C)
46
- bc = b * (b == c) # PICK(B,C)
47
-
48
- a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed
49
-
50
- return a + (a == 0) * d # AB || AC || BC || D
51
-
52
- def quick_countless(data):
53
- """
54
- Vectorized implementation of downsampling a 2D
55
- image by 2 on each side using the COUNTLESS algorithm.
56
-
57
- data is a 2D numpy array with even dimensions.
58
- """
59
- sections = []
60
-
61
- # This loop splits the 2D array apart into four arrays that are
62
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
63
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
64
- factor = (2,2)
65
- for offset in np.ndindex(factor):
66
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
67
- sections.append(part)
68
-
69
- a, b, c, d = sections
70
-
71
- ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
72
- bc = b * (b == c) # PICK(B,C)
73
-
74
- a = ab_ac | bc # (PICK(A,B) || PICK(A,C)) or PICK(B,C)
75
- return a + (a == 0) * d # AB || AC || BC || D
76
-
77
- def quickest_countless(data):
78
- """
79
- Vectorized implementation of downsampling a 2D
80
- image by 2 on each side using the COUNTLESS algorithm.
81
-
82
- data is a 2D numpy array with even dimensions.
83
- """
84
- sections = []
85
-
86
- # This loop splits the 2D array apart into four arrays that are
87
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
88
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
89
- factor = (2,2)
90
- for offset in np.ndindex(factor):
91
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
92
- sections.append(part)
93
-
94
- a, b, c, d = sections
95
-
96
- ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
97
- ab_ac |= b * (b == c) # PICK(B,C)
98
- return ab_ac + (ab_ac == 0) * d # AB || AC || BC || D
99
-
100
- def quick_countless_xor(data):
101
- """
102
- Vectorized implementation of downsampling a 2D
103
- image by 2 on each side using the COUNTLESS algorithm.
104
-
105
- data is a 2D numpy array with even dimensions.
106
- """
107
- sections = []
108
-
109
- # This loop splits the 2D array apart into four arrays that are
110
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
111
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
112
- factor = (2,2)
113
- for offset in np.ndindex(factor):
114
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
115
- sections.append(part)
116
-
117
- a, b, c, d = sections
118
-
119
- ab = a ^ (a ^ b) # a or b
120
- ab += (ab != a) * ((ab ^ (ab ^ c)) - b) # b or c
121
- ab += (ab == c) * ((ab ^ (ab ^ d)) - c) # c or d
122
- return ab
123
-
124
- def stippled_countless(data):
125
- """
126
- Vectorized implementation of downsampling a 2D
127
- image by 2 on each side using the COUNTLESS algorithm
128
- that treats zero as "background" and inflates lone
129
- pixels.
130
-
131
- data is a 2D numpy array with even dimensions.
132
- """
133
- sections = []
134
-
135
- # This loop splits the 2D array apart into four arrays that are
136
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
137
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
138
- factor = (2,2)
139
- for offset in np.ndindex(factor):
140
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
141
- sections.append(part)
142
-
143
- a, b, c, d = sections
144
-
145
- ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
146
- ab_ac |= b * (b == c) # PICK(B,C)
147
-
148
- nonzero = a + (a == 0) * (b + (b == 0) * c)
149
- return ab_ac + (ab_ac == 0) * (d + (d == 0) * nonzero) # AB || AC || BC || D
150
-
151
- def zero_corrected_countless(data):
152
- """
153
- Vectorized implementation of downsampling a 2D
154
- image by 2 on each side using the COUNTLESS algorithm.
155
-
156
- data is a 2D numpy array with even dimensions.
157
- """
158
- # allows us to prevent losing 1/2 a bit of information
159
- # at the top end by using a bigger type. Without this 255 is handled incorrectly.
160
- data, upgraded = upgrade_type(data)
161
-
162
- # offset from zero, raw countless doesn't handle 0 correctly
163
- # we'll remove the extra 1 at the end.
164
- data += 1
165
-
166
- sections = []
167
-
168
- # This loop splits the 2D array apart into four arrays that are
169
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
170
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
171
- factor = (2,2)
172
- for offset in np.ndindex(factor):
173
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
174
- sections.append(part)
175
-
176
- a, b, c, d = sections
177
-
178
- ab = a * (a == b) # PICK(A,B)
179
- ac = a * (a == c) # PICK(A,C)
180
- bc = b * (b == c) # PICK(B,C)
181
-
182
- a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed
183
-
184
- result = a + (a == 0) * d - 1 # a or d - 1
185
-
186
- if upgraded:
187
- return downgrade_type(result)
188
-
189
- # only need to reset data if we weren't upgraded
190
- # b/c no copy was made in that case
191
- data -= 1
192
-
193
- return result
194
-
195
- def countless_extreme(data):
196
- nonzeros = np.count_nonzero(data)
197
- # print("nonzeros", nonzeros)
198
-
199
- N = reduce(operator.mul, data.shape)
200
-
201
- if nonzeros == N:
202
- print("quick")
203
- return quick_countless(data)
204
- elif np.count_nonzero(data + 1) == N:
205
- print("quick")
206
- # print("upper", nonzeros)
207
- return quick_countless(data)
208
- else:
209
- return countless(data)
210
-
211
-
212
- def countless(data):
213
- """
214
- Vectorized implementation of downsampling a 2D
215
- image by 2 on each side using the COUNTLESS algorithm.
216
-
217
- data is a 2D numpy array with even dimensions.
218
- """
219
- # allows us to prevent losing 1/2 a bit of information
220
- # at the top end by using a bigger type. Without this 255 is handled incorrectly.
221
- data, upgraded = upgrade_type(data)
222
-
223
- # offset from zero, raw countless doesn't handle 0 correctly
224
- # we'll remove the extra 1 at the end.
225
- data += 1
226
-
227
- sections = []
228
-
229
- # This loop splits the 2D array apart into four arrays that are
230
- # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
231
- # and (1,1) representing the A, B, C, and D positions from Figure 1.
232
- factor = (2,2)
233
- for offset in np.ndindex(factor):
234
- part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
235
- sections.append(part)
236
-
237
- a, b, c, d = sections
238
-
239
- ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization
240
- ab_ac |= b * (b == c) # PICK(B,C)
241
- result = ab_ac + (ab_ac == 0) * d - 1 # (matches or d) - 1
242
-
243
- if upgraded:
244
- return downgrade_type(result)
245
-
246
- # only need to reset data if we weren't upgraded
247
- # b/c no copy was made in that case
248
- data -= 1
249
-
250
- return result
251
-
252
- def upgrade_type(arr):
253
- dtype = arr.dtype
254
-
255
- if dtype == np.uint8:
256
- return arr.astype(np.uint16), True
257
- elif dtype == np.uint16:
258
- return arr.astype(np.uint32), True
259
- elif dtype == np.uint32:
260
- return arr.astype(np.uint64), True
261
-
262
- return arr, False
263
-
264
- def downgrade_type(arr):
265
- dtype = arr.dtype
266
-
267
- if dtype == np.uint64:
268
- return arr.astype(np.uint32)
269
- elif dtype == np.uint32:
270
- return arr.astype(np.uint16)
271
- elif dtype == np.uint16:
272
- return arr.astype(np.uint8)
273
-
274
- return arr
275
-
276
- def odd_to_even(image):
277
- """
278
- To facilitate 2x2 downsampling segmentation, change an odd sized image into an even sized one.
279
- Works by mirroring the starting 1 pixel edge of the image on odd shaped sides.
280
-
281
- e.g. turn a 3x3x5 image into a 4x4x5 (the x and y are what are getting downsampled)
282
-
283
- For example: [ 3, 2, 4 ] => [ 3, 3, 2, 4 ] which is now easy to downsample.
284
-
285
- """
286
- shape = np.array(image.shape)
287
-
288
- offset = (shape % 2)[:2] # x,y offset
289
-
290
- # detect if we're dealing with an even
291
- # image. if so it's fine, just return.
292
- if not np.any(offset):
293
- return image
294
-
295
- oddshape = image.shape[:2] + offset
296
- oddshape = np.append(oddshape, shape[2:])
297
- oddshape = oddshape.astype(int)
298
-
299
- newimg = np.empty(shape=oddshape, dtype=image.dtype)
300
-
301
- ox,oy = offset
302
- sx,sy = oddshape
303
-
304
- newimg[0,0] = image[0,0] # corner
305
- newimg[ox:sx,0] = image[:,0] # x axis line
306
- newimg[0,oy:sy] = image[0,:] # y axis line
307
-
308
- return newimg
309
-
310
- def counting(array):
311
- factor = (2, 2, 1)
312
- shape = array.shape
313
-
314
- while len(shape) < 4:
315
- array = np.expand_dims(array, axis=-1)
316
- shape = array.shape
317
-
318
- output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor))
319
- output = np.zeros(output_shape, dtype=array.dtype)
320
-
321
- for chan in range(0, shape[3]):
322
- for z in range(0, shape[2]):
323
- for x in range(0, shape[0], 2):
324
- for y in range(0, shape[1], 2):
325
- block = array[ x:x+2, y:y+2, z, chan ] # 2x2 block
326
-
327
- hashtable = defaultdict(int)
328
- for subx, suby in np.ndindex(block.shape[0], block.shape[1]):
329
- hashtable[block[subx, suby]] += 1
330
-
331
- best = (0, 0)
332
- for segid, val in six.iteritems(hashtable):
333
- if best[1] < val:
334
- best = (segid, val)
335
-
336
- output[ x // 2, y // 2, chan ] = best[0]
337
-
338
- return output
339
-
340
- def ndzoom(array):
341
- if len(array.shape) == 3:
342
- ratio = ( 1 / 2.0, 1 / 2.0, 1.0 )
343
- else:
344
- ratio = ( 1 / 2.0, 1 / 2.0)
345
- return ndimage.interpolation.zoom(array, ratio, order=1)
346
-
347
- def countless_if(array):
348
- factor = (2, 2, 1)
349
- shape = array.shape
350
-
351
- if len(shape) < 3:
352
- array = array[ :,:, np.newaxis ]
353
- shape = array.shape
354
-
355
- output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor))
356
- output = np.zeros(output_shape, dtype=array.dtype)
357
-
358
- for chan in range(0, shape[2]):
359
- for x in range(0, shape[0], 2):
360
- for y in range(0, shape[1], 2):
361
- block = array[ x:x+2, y:y+2, chan ] # 2x2 block
362
-
363
- if block[0,0] == block[1,0]:
364
- pick = block[0,0]
365
- elif block[0,0] == block[0,1]:
366
- pick = block[0,0]
367
- elif block[1,0] == block[0,1]:
368
- pick = block[1,0]
369
- else:
370
- pick = block[1,1]
371
-
372
- output[ x // 2, y // 2, chan ] = pick
373
-
374
- return np.squeeze(output)
375
-
376
- def downsample_with_averaging(array):
377
- """
378
- Downsample x by factor using averaging.
379
-
380
- @return: The downsampled array, of the same type as x.
381
- """
382
-
383
- if len(array.shape) == 3:
384
- factor = (2,2,1)
385
- else:
386
- factor = (2,2)
387
-
388
- if np.array_equal(factor[:3], np.array([1,1,1])):
389
- return array
390
-
391
- output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
392
- temp = np.zeros(output_shape, float)
393
- counts = np.zeros(output_shape, np.int)
394
- for offset in np.ndindex(factor):
395
- part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
396
- indexing_expr = tuple(np.s_[:s] for s in part.shape)
397
- temp[indexing_expr] += part
398
- counts[indexing_expr] += 1
399
- return np.cast[array.dtype](temp / counts)
400
-
401
- def downsample_with_max_pooling(array):
402
-
403
- factor = (2,2)
404
-
405
- if np.all(np.array(factor, int) == 1):
406
- return array
407
-
408
- sections = []
409
-
410
- for offset in np.ndindex(factor):
411
- part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
412
- sections.append(part)
413
-
414
- output = sections[0].copy()
415
-
416
- for section in sections[1:]:
417
- np.maximum(output, section, output)
418
-
419
- return output
420
-
421
- def striding(array):
422
- """Downsample x by factor using striding.
423
-
424
- @return: The downsampled array, of the same type as x.
425
- """
426
- factor = (2,2)
427
- if np.all(np.array(factor, int) == 1):
428
- return array
429
- return array[tuple(np.s_[::f] for f in factor)]
430
-
431
- def benchmark():
432
- filename = sys.argv[1]
433
- img = Image.open(filename)
434
- data = np.array(img.getdata(), dtype=np.uint8)
435
-
436
- if len(data.shape) == 1:
437
- n_channels = 1
438
- reshape = (img.height, img.width)
439
- else:
440
- n_channels = min(data.shape[1], 3)
441
- data = data[:, :n_channels]
442
- reshape = (img.height, img.width, n_channels)
443
-
444
- data = data.reshape(reshape).astype(np.uint8)
445
-
446
- methods = [
447
- simplest_countless,
448
- quick_countless,
449
- quick_countless_xor,
450
- quickest_countless,
451
- stippled_countless,
452
- zero_corrected_countless,
453
- countless,
454
- downsample_with_averaging,
455
- downsample_with_max_pooling,
456
- ndzoom,
457
- striding,
458
- # countless_if,
459
- # counting,
460
- ]
461
-
462
- formats = {
463
- 1: 'L',
464
- 3: 'RGB',
465
- 4: 'RGBA'
466
- }
467
-
468
- if not os.path.exists('./results'):
469
- os.mkdir('./results')
470
-
471
- N = 500
472
- img_size = float(img.width * img.height) / 1024.0 / 1024.0
473
- print("N = %d, %dx%d (%.2f MPx) %d chan, %s" % (N, img.width, img.height, img_size, n_channels, filename))
474
- print("Algorithm\tMPx/sec\tMB/sec\tSec")
475
- for fn in methods:
476
- print(fn.__name__, end='')
477
- sys.stdout.flush()
478
-
479
- start = time.time()
480
- # tqdm is here to show you what's going on the first time you run it.
481
- # Feel free to remove it to get slightly more accurate timing results.
482
- for _ in tqdm(range(N), desc=fn.__name__, disable=True):
483
- result = fn(data)
484
- end = time.time()
485
- print("\r", end='')
486
-
487
- total_time = (end - start)
488
- mpx = N * img_size / total_time
489
- mbytes = N * img_size * n_channels / total_time
490
- # Output in tab separated format to enable copy-paste into excel/numbers
491
- print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time))
492
- outimg = Image.fromarray(np.squeeze(result), formats[n_channels])
493
- outimg.save('./results/{}.png'.format(fn.__name__, "PNG"))
494
-
495
- if __name__ == '__main__':
496
- benchmark()
497
-
498
-
499
- # Example results:
500
- # N = 5, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png
501
- # Function MPx/sec MB/sec Sec
502
- # simplest_countless 752.855 752.855 0.01
503
- # quick_countless 920.328 920.328 0.01
504
- # zero_corrected_countless 534.143 534.143 0.01
505
- # countless 644.247 644.247 0.01
506
- # downsample_with_averaging 372.575 372.575 0.01
507
- # downsample_with_max_pooling 974.060 974.060 0.01
508
- # ndzoom 137.517 137.517 0.04
509
- # striding 38550.588 38550.588 0.00
510
- # countless_if 4.377 4.377 1.14
511
- # counting 0.117 0.117 42.85
512
-
513
- # Run without non-numpy implementations:
514
- # N = 2000, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png
515
- # Algorithm MPx/sec MB/sec Sec
516
- # simplest_countless 800.522 800.522 2.50
517
- # quick_countless 945.420 945.420 2.12
518
- # quickest_countless 947.256 947.256 2.11
519
- # stippled_countless 544.049 544.049 3.68
520
- # zero_corrected_countless 575.310 575.310 3.48
521
- # countless 646.684 646.684 3.09
522
- # downsample_with_averaging 385.132 385.132 5.19
523
- # downsample_with_max_poolin 988.361 988.361 2.02
524
- # ndzoom 163.104 163.104 12.26
525
- # striding 81589.340 81589.340 0.02
526
-
527
-
528
-
529
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/eval_wer.py DELETED
File without changes
spaces/Amrrs/image-to-text-app/app.py DELETED
@@ -1,47 +0,0 @@
1
- import easyocr as ocr #OCR
2
- import streamlit as st #Web App
3
- from PIL import Image #Image Processing
4
- import numpy as np #Image Processing
5
-
6
- #title
7
- st.title("Easy OCR - Extract Text from Images")
8
-
9
- #subtitle
10
- st.markdown("## Optical Character Recognition - Using `easyocr`, `streamlit` - hosted on 🤗 Spaces")
11
-
12
- st.markdown("Link to the app - [image-to-text-app on 🤗 Spaces](https://huggingface.co/spaces/Amrrs/image-to-text-app)")
13
-
14
- #image uploader
15
- image = st.file_uploader(label = "Upload your image here",type=['png','jpg','jpeg'])
16
-
17
-
18
- @st.cache
19
- def load_model():
20
- reader = ocr.Reader(['en'],model_storage_directory='.')
21
- return reader
22
-
23
- reader = load_model() #load model
24
-
25
- if image is not None:
26
-
27
- input_image = Image.open(image) #read image
28
- st.image(input_image) #display image
29
-
30
- with st.spinner("🤖 AI is at Work! "):
31
-
32
-
33
- result = reader.readtext(np.array(input_image))
34
-
35
- result_text = [] #empty list for results
36
-
37
-
38
- for text in result:
39
- result_text.append(text[1])
40
-
41
- st.write(result_text)
42
- #st.success("Here you go!")
43
- st.balloons()
44
- else:
45
- st.write("Upload an Image")
46
-
47
- st.caption("Made with ❤️ by @1littlecoder. Credits to 🤗 Spaces for Hosting this ")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ipndm.py DELETED
@@ -1,161 +0,0 @@
1
- import tempfile
2
-
3
- import torch
4
-
5
- from diffusers import IPNDMScheduler
6
-
7
- from .test_schedulers import SchedulerCommonTest
8
-
9
-
10
- class IPNDMSchedulerTest(SchedulerCommonTest):
11
- scheduler_classes = (IPNDMScheduler,)
12
- forward_default_kwargs = (("num_inference_steps", 50),)
13
-
14
- def get_scheduler_config(self, **kwargs):
15
- config = {"num_train_timesteps": 1000}
16
- config.update(**kwargs)
17
- return config
18
-
19
- def check_over_configs(self, time_step=0, **config):
20
- kwargs = dict(self.forward_default_kwargs)
21
- num_inference_steps = kwargs.pop("num_inference_steps", None)
22
- sample = self.dummy_sample
23
- residual = 0.1 * sample
24
- dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
25
-
26
- for scheduler_class in self.scheduler_classes:
27
- scheduler_config = self.get_scheduler_config(**config)
28
- scheduler = scheduler_class(**scheduler_config)
29
- scheduler.set_timesteps(num_inference_steps)
30
- # copy over dummy past residuals
31
- scheduler.ets = dummy_past_residuals[:]
32
-
33
- if time_step is None:
34
- time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
35
-
36
- with tempfile.TemporaryDirectory() as tmpdirname:
37
- scheduler.save_config(tmpdirname)
38
- new_scheduler = scheduler_class.from_pretrained(tmpdirname)
39
- new_scheduler.set_timesteps(num_inference_steps)
40
- # copy over dummy past residuals
41
- new_scheduler.ets = dummy_past_residuals[:]
42
-
43
- output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
44
- new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
45
-
46
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
47
-
48
- output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
49
- new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
50
-
51
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
52
-
53
- def test_from_save_pretrained(self):
54
- pass
55
-
56
- def check_over_forward(self, time_step=0, **forward_kwargs):
57
- kwargs = dict(self.forward_default_kwargs)
58
- num_inference_steps = kwargs.pop("num_inference_steps", None)
59
- sample = self.dummy_sample
60
- residual = 0.1 * sample
61
- dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
62
-
63
- for scheduler_class in self.scheduler_classes:
64
- scheduler_config = self.get_scheduler_config()
65
- scheduler = scheduler_class(**scheduler_config)
66
- scheduler.set_timesteps(num_inference_steps)
67
-
68
- # copy over dummy past residuals (must be after setting timesteps)
69
- scheduler.ets = dummy_past_residuals[:]
70
-
71
- if time_step is None:
72
- time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
73
-
74
- with tempfile.TemporaryDirectory() as tmpdirname:
75
- scheduler.save_config(tmpdirname)
76
- new_scheduler = scheduler_class.from_pretrained(tmpdirname)
77
- # copy over dummy past residuals
78
- new_scheduler.set_timesteps(num_inference_steps)
79
-
80
- # copy over dummy past residual (must be after setting timesteps)
81
- new_scheduler.ets = dummy_past_residuals[:]
82
-
83
- output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
84
- new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
85
-
86
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
87
-
88
- output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
89
- new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
90
-
91
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
92
-
93
- def full_loop(self, **config):
94
- scheduler_class = self.scheduler_classes[0]
95
- scheduler_config = self.get_scheduler_config(**config)
96
- scheduler = scheduler_class(**scheduler_config)
97
-
98
- num_inference_steps = 10
99
- model = self.dummy_model()
100
- sample = self.dummy_sample_deter
101
- scheduler.set_timesteps(num_inference_steps)
102
-
103
- for i, t in enumerate(scheduler.timesteps):
104
- residual = model(sample, t)
105
- sample = scheduler.step(residual, t, sample).prev_sample
106
-
107
- for i, t in enumerate(scheduler.timesteps):
108
- residual = model(sample, t)
109
- sample = scheduler.step(residual, t, sample).prev_sample
110
-
111
- return sample
112
-
113
- def test_step_shape(self):
114
- kwargs = dict(self.forward_default_kwargs)
115
-
116
- num_inference_steps = kwargs.pop("num_inference_steps", None)
117
-
118
- for scheduler_class in self.scheduler_classes:
119
- scheduler_config = self.get_scheduler_config()
120
- scheduler = scheduler_class(**scheduler_config)
121
-
122
- sample = self.dummy_sample
123
- residual = 0.1 * sample
124
-
125
- if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
126
- scheduler.set_timesteps(num_inference_steps)
127
- elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
128
- kwargs["num_inference_steps"] = num_inference_steps
129
-
130
- # copy over dummy past residuals (must be done after set_timesteps)
131
- dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
132
- scheduler.ets = dummy_past_residuals[:]
133
-
134
- time_step_0 = scheduler.timesteps[5]
135
- time_step_1 = scheduler.timesteps[6]
136
-
137
- output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
138
- output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
139
-
140
- self.assertEqual(output_0.shape, sample.shape)
141
- self.assertEqual(output_0.shape, output_1.shape)
142
-
143
- output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
144
- output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
145
-
146
- self.assertEqual(output_0.shape, sample.shape)
147
- self.assertEqual(output_0.shape, output_1.shape)
148
-
149
- def test_timesteps(self):
150
- for timesteps in [100, 1000]:
151
- self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
152
-
153
- def test_inference_steps(self):
154
- for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
155
- self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
156
-
157
- def test_full_loop_no_noise(self):
158
- sample = self.full_loop()
159
- result_mean = torch.mean(torch.abs(sample))
160
-
161
- assert abs(result_mean.item() - 2540529) < 10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py DELETED
@@ -1,31 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/mask_rcnn_r50_fpn.py',
3
- '../_base_/datasets/lvis_v1_instance.py',
4
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
5
- ]
6
- model = dict(
7
- roi_head=dict(
8
- bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
9
- test_cfg=dict(
10
- rcnn=dict(
11
- score_thr=0.0001,
12
- # LVIS allows up to 300
13
- max_per_img=300)))
14
- img_norm_cfg = dict(
15
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
16
- train_pipeline = [
17
- dict(type='LoadImageFromFile'),
18
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
19
- dict(
20
- type='Resize',
21
- img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
22
- (1333, 768), (1333, 800)],
23
- multiscale_mode='value',
24
- keep_ratio=True),
25
- dict(type='RandomFlip', flip_ratio=0.5),
26
- dict(type='Normalize', **img_norm_cfg),
27
- dict(type='Pad', size_divisor=32),
28
- dict(type='DefaultFormatBundle'),
29
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
30
- ]
31
- data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py DELETED
@@ -1,42 +0,0 @@
1
- _base_ = './retinanet_r50_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://detectron2/resnet50_caffe',
4
- backbone=dict(
5
- norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
6
- # use caffe img_norm
7
- img_norm_cfg = dict(
8
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
9
- train_pipeline = [
10
- dict(type='LoadImageFromFile'),
11
- dict(type='LoadAnnotations', with_bbox=True),
12
- dict(
13
- type='Resize',
14
- img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
15
- (1333, 768), (1333, 800)],
16
- multiscale_mode='value',
17
- keep_ratio=True),
18
- dict(type='RandomFlip', flip_ratio=0.5),
19
- dict(type='Normalize', **img_norm_cfg),
20
- dict(type='Pad', size_divisor=32),
21
- dict(type='DefaultFormatBundle'),
22
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
23
- ]
24
- test_pipeline = [
25
- dict(type='LoadImageFromFile'),
26
- dict(
27
- type='MultiScaleFlipAug',
28
- img_scale=(1333, 800),
29
- flip=False,
30
- transforms=[
31
- dict(type='Resize', keep_ratio=True),
32
- dict(type='RandomFlip'),
33
- dict(type='Normalize', **img_norm_cfg),
34
- dict(type='Pad', size_divisor=32),
35
- dict(type='ImageToTensor', keys=['img']),
36
- dict(type='Collect', keys=['img']),
37
- ])
38
- ]
39
- data = dict(
40
- train=dict(pipeline=train_pipeline),
41
- val=dict(pipeline=test_pipeline),
42
- test=dict(pipeline=test_pipeline))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/grammar.py DELETED
@@ -1,33 +0,0 @@
1
- from torch_grammar import GrammarSampler
2
- from transformers.generation.logits_process import LogitsProcessor
3
-
4
- from modules import shared
5
-
6
- sampler = None
7
- grammar = None
8
- grammar_string = ''
9
-
10
-
11
- class GrammarLogitsProcessor(LogitsProcessor):
12
- def __init__(self, string):
13
-
14
- global sampler, grammar, grammar_string
15
-
16
- if string != grammar_string:
17
- grammar_string = string
18
- if string.strip() != '':
19
- string = string.strip() + '\n'
20
- sampler = GrammarSampler(string, 'root', shared.tokenizer)
21
- else:
22
- sampler = None
23
-
24
- if sampler is not None:
25
- grammar = sampler.logits_processor()
26
- else:
27
- grammar = None
28
-
29
- def __call__(self, input_ids, scores):
30
- if grammar is not None:
31
- scores = grammar(input_ids, scores)
32
-
33
- return scores
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Acapella Sudfrica Askies I 39m Lo Siento Mama Mp3 Download.md DELETED
@@ -1,200 +0,0 @@
1
-
2
- <h1>Acapella Sudáfrica: Askies Lo siento Mama MP3 Descargar</h1>
3
- <p>Si eres un fan de la música acapella, es posible que hayas oído hablar de una canción llamada <strong>Askies I’m Sorry Mama</strong> por <strong>Acapella Sudáfrica</strong>. Esta canción es una hermosa y sentida expresión de arrepentimiento y disculpa a la madre por causar su dolor a través de las acciones de uno. También es un escaparate de los increíbles talentos vocales y armonías de los cantantes de acapella en Sudáfrica.</p>
4
- <p>En este artículo, exploraremos qué es la música acapella y por qué es popular en Sudáfrica, qué es Askies I’m Sorry Mama y quiénes son los artistas detrás de ella, cuáles son las letras y el significado de Askies I’m Sorry Mama, cuáles son las críticas y los premios de Askies I’m Sorry Mama, cómo descargar Askies I’m Sorry Mama MP3 gratis, y cuáles son los pros y los contras de hacerlo. También te proporcionaremos algunas alternativas para descargar Askies I’m Sorry Mama MP3 gratis, en caso de que quieras disfrutar de esta canción de otras maneras. </p>
5
- <h2>acapella sudáfrica askies i 39;m lo siento mama mp3 download</h2><br /><p><b><b>DOWNLOAD</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://bltlly.com/2v6Ls1">https://bltlly.com/2v6Ls1</a></b></p><br /><br />
6
- <h2>¿Qué es la música acapella y por qué es popular en Sudáfrica? </h2>
7
- <p><strong>La música de Acapella</strong>, también deletreada <strong>a cappella</strong>, es un estilo de música que implica cantar sin ningún acompañamiento instrumental. La palabra acapella proviene del italiano, que significa "al estilo de la capilla", como se usaba originalmente en la música religiosa. Sin embargo, la música acapella también se ha convertido en una forma secular que cubre varios géneros, como el pop, rock, jazz, gospel, folk, rap y más. </p>
8
-
9
- <h2>¿Qué es Askies I’m Sorry Mama y quiénes son los artistas detrás de ella? </h2>
10
- <p><strong>Askies I’m Sorry Mama</strong> es una canción que fue lanzada en 2019 por <strong>Acapella South áfrica</strong>, un grupo de cantantes jóvenes y talentosos de Dinwiddie High School en Germiston, Gauteng. La canción fue producida por <strong>Dr Dope</strong>, un productor de música y rapero que también aparece en la canción. La canción es parte del álbum <strong>Acapella South áfrica Vol. 1</strong>, que también incluye otras canciones como <strong>Uthando Lwakho</strong>, <strong>Ngiyakuthanda</strong>, <strong>Ungowami</strong>, y <strong>Senze Ntoni</strong>. </p>
11
- <p>La canción es un <strong>gwijo</strong>, que es un tipo de canción acapella que se originó en la cultura zulú y se canta a menudo en escuelas, universidades, eventos deportivos y reuniones sociales. Los gwijos son generalmente cantados en un estilo de llamada y respuesta, con un líder cantando un verso y el resto del grupo repitiéndolo o agregando armonías. Los gwijos también son conocidos por sus melodías pegadizas, aplausos rítmicos y bailes. </p>
12
- <p>La canción se ha vuelto muy popular entre los fans de acapella y ha recibido más de 275K visitas en YouTube. La canción también ha aparecido en varias plataformas de música como Spotify, Apple Music, Amazon Music y YouTube Music. La canción también ha inspirado muchas versiones y remixes de otros artistas y grupos. </p> <h2> ¿Cuáles son las letras y el significado de Askies I’m Sorry Mama? </h2>
13
- <p>Las letras de Askies I’m Sorry Mama están en una mezcla de inglés y zulú, que es uno de los idiomas oficiales de Sudáfrica y el idioma más hablado en el país. La canción trata sobre un hijo que se disculpa con su madre por decepcionarla y causarle dolor. Admite que ha cometido errores y se arrepiente de sus acciones. Pide el perdón de su madre y promete cambiar sus costumbres. También expresa su amor y gratitud por su madre y espera que ella siempre estará orgullosa de él. </p>
14
- <p></p>
15
-
16
- <tabla>
17
- <tr>
18
- <th>Español</th>
19
- <th>Zulú</th>
20
- <th>Significado</th>
21
- </tr>
22
- <tr>
23
- <td>Lo siento mamá</td>
24
- <td>Cómo hacer una mamada ngiyaxolisa</td>
25
- <td>Askies es una palabra de argot que significa "lo siento" o "perdón". Viene de la palabra afrikáans "asseblief", que significa "por favor". Ngiyaxolisa significa "me disculpo". Mamá significa "madre". </td>
26
- </tr>
27
- <tr>
28
- <td>Sé que he sido un chico malo</td>
29
- <td>Cómo hacer que tu cuerpo crezca</td>
30
- <td>Ngazi ukuthi significa "sé que". Ngibe yisilima significa "he sido un tonto" o "he sido estúpido". </td>
31
- </tr>
32
- <tr>
33
- <td>Sé que he sido un mal hijo</td>
34
- <td>Cómo hacer que un bebé se engalungile</td>
35
- <td>Ngibe yingane engalungile significa "He sido un niño que no tiene razón" o "He sido un niño malo". </td>
36
- </tr>
37
- <tr>
38
- <td>Sé que te he hecho llorar mama</td>
39
- <td>Cómo hacer que tu mamá se sienta </td>
40
- <td>Ngikukhalise significa "Te he hecho llorar". </td>
41
- </tr>
42
- <tr>
43
- <td>Sé que te he puesto triste mama</td>
44
- <td>Cómo hacer que tu mamá se sienta mejor</td>
45
- <td>Ngikudlise ubuhlungu significa "te he dado dolor" o "te he herido". </td>
46
- </tr>
47
- <tr>
48
- <td>Por favor perdóname mama</td>
49
- <td>La Ciudad de México</td>
50
- <td>Sicela significa "pedimos" o "por favor". Ungixolele significa "perdóname". </td>
51
- </tr>
52
- <tr>
53
- <td>Por favor no te enfades conmigo mama</td>
54
- <td>Sicela ungazithukeli kimi mama</td>
55
- <td>Ungazithukeli kimi significa "no te enojes conmigo" o "no te enojes conmigo". </td> </tr>
56
- <tr>
57
- <td>Te amo mama</td>
58
- <td>Ngiyakuthanda mama</td>
59
- <td>Ngiyakuthanda significa "Te amo". </td>
60
- </tr>
61
- <tr>
62
- <td>Te agradezco mama</td>
63
- <td>Cómo hacer que tu mamá se sienta mejor
64
- <td>Ngiyabonga kuwe significa "te agradezco" o "te aprecio". </td>
65
- </tr>
66
- <tr>
67
- <td>Prometo cambiar mama</td>
68
- <td>Cómo hacer que tu mama sea feliz</td>
69
- <td>Ngithembisa significa "prometo". Ukushintsha significa "cambiar". </td>
70
- </tr>
71
- <tr>
72
- <td>Prometo hacerte sentir orgullosa mama</td>
73
- <td>Cómo hacer que tu mamá se sienta mejor</td>
74
-
75
- </tr>
76
- <tr>
77
- <td>Tú eres mi todo mama</td>
78
- <td>Cómo hacer que mamá se sienta </td>
79
- <td>Ungumuntu wami wonke significa "tú eres mi todo" o "tú eres toda mi persona". </td>
80
- </tr>
81
- <tr>
82
- <td>Tú eres mi héroe mama</td>
83
- <td>No se puede hacer nada
84
- <td>Unguqhero wami significa "eres mi héroe" o "eres mi campeón". </td>
85
- </tr>
86
- <tr>
87
- <td>Eres mi mamá ángel</td>
88
- <td>No hay comentarios sobre mamá</td>
89
- <td>Unguthixo wami significa "eres mi Dios" o "eres mi ángel". </td>
90
- </tr>
91
- <h2>¿Cuáles son los comentarios y premios de Askies I’m Sorry Mama? </h2>
92
- <p>La canción Askies I’m Sorry Mama ha recibido críticas positivas y premios de críticos y fans por igual. La canción ha sido elogiada por sus letras emotivas y sinceras, sus bellas y armoniosas voces, su melodía pegadiza y edificante, y su estilo auténtico y original. La canción también ha sido reconocida por su relevancia social y cultural, ya que refleja los desafíos y luchas que muchos jóvenes enfrentan en Sudáfrica, como la pobreza, el crimen, la violencia, las drogas, el VIH/SIDA, el desempleo, la educación y las cuestiones familiares. </p>
93
- <p>Algunos de los comentarios y premios que Askies I’m Sorry Mama ha recibido son:</p>
94
- <ul>
95
- <li>La canción ganó el Premio a la Mejor Canción de Acapella</strong> en los <strong>South African Music Awards (SAMA)</strong> en 2020. La SAMA es la ceremonia de premios musicales más prestigiosa de Sudáfrica, que honra los mejores logros musicales en varias categorías y géneros. </li>
96
- <li>La canción fue nominada para el Premio <strong>Canción del Año</strong> en el <strong>Metro FM Music Awards (MMA)</strong> en 2020. La MMA es una ceremonia de premios de música popular en Sudáfrica, que se basa en la votación pública y la radio. </li>
97
- <li>La canción fue incluida en la lista de las 10 mejores canciones de Acapella de 2019</strong> por <strong>Acapella World Magazine (AWM)</strong>. AWM es una revista online líder que cubre música acapella de todo el mundo, con noticias, reseñas, entrevistas y más. </li>
98
-
99
- <li>La canción recibió una respuesta positiva <strong></strong> de los fans y seguidores de Acapella Sudáfrica</strong> en plataformas de redes sociales como Facebook, Twitter, Instagram y YouTube. Muchos fans expresaron su amor y admiración por la canción y los artistas, y compartieron sus historias y experiencias personales relacionadas con la canción. </li> <h2>Cómo descargar Askies I’m Sorry Mama MP3 gratis? </h2>
100
- <p>Si quieres descargar Askies I’m Sorry Mama MP3 gratis, tienes varias opciones para elegir. Sin embargo, debes tener en cuenta que descargar música gratis puede no ser legal o ético en algunos casos, ya que puede violar los derechos e intereses de los artistas y la industria musical. Por lo tanto, siempre debes revisar los términos y condiciones de las plataformas que utilizas, y respetar los deseos y preferencias de los artistas que apoyas. </p>
101
- <p>Aquí hay una guía paso a paso sobre cómo descargar Askies I’m Sorry Mama MP3 gratis desde varias plataformas:</p>
102
- <h3>Spotify</h3>
103
- <p>Spotify es uno de los servicios de streaming de música más populares del mundo, con más de 356 millones de usuarios y 70 millones de canciones. Spotify ofrece un plan gratuito que te permite escuchar música con anuncios y funciones limitadas, o un plan premium que te ofrece acceso ilimitado y sin anuncios a música y podcasts. Spotify también te permite descargar música para escuchar sin conexión, pero solo si tienes una suscripción premium. </p>
104
- <p>Para descargar Askies I’m Sorry Mama MP3 gratis desde Spotify, debes seguir estos pasos:</p>
105
- <ol>
106
- <li>Descargue e instale la aplicación Spotify en su dispositivo, o abra el reproductor web Spotify en su navegador. </li>
107
- <li>Cree una cuenta gratuita o inicie sesión con su cuenta existente. </li>
108
- <li>Buscar Askies Lo siento Mama por Acapella Sudáfrica en la barra de búsqueda. </li>
109
- <li> Seleccione la canción de los resultados y haga clic en el icono de tres puntos al lado. </li>
110
- <li>Seleccione Compartir en el menú y copie el enlace de la canción. </li>
111
-
112
- <li>Pegar el enlace de la canción en el sitio web y haga clic en Descargar o Convertir.</li>
113
- <li>Espere a que el proceso termine y guarde el archivo MP3 en su dispositivo. </li>
114
- </ol>
115
- <h3>Música de Apple</h3>
116
- <p>Apple Music es otro servicio de streaming de música popular, con más de 72 millones de usuarios y 75 millones de canciones. Apple Music ofrece una prueba gratuita durante tres meses, después de los cuales debes pagar una cuota mensual para seguir usándola. Apple Music también te permite descargar música para escuchar sin conexión, pero solo si tienes una suscripción activa. </p>
117
- <p>Para descargar Askies I’m Sorry Mama MP3 gratis de Apple Music, debes seguir estos pasos:</p>
118
- <ol>
119
- <li>Descargue e instale la aplicación Apple Music en su dispositivo, o abra el reproductor web Apple Music en su navegador. </li>
120
- <li>Cree una cuenta gratuita o inicie sesión con su cuenta existente. </li>
121
- <li>Regístrese para la prueba gratuita ingresando sus detalles de pago. Puede cancelar en cualquier momento antes de que termine la prueba. </li>
122
- <li>Buscar Askies Lo siento Mama por Acapella Sudáfrica en la barra de búsqueda. </li>
123
- <li> Seleccione la canción de los resultados y haga clic en el icono más junto a ella. </li>
124
- <li>Haga clic en el icono de la nube con una flecha hacia abajo para descargar la canción a su biblioteca. </li>
125
- <li>Ir a su biblioteca y encontrar la canción en Recientemente añadido o canciones.</li>
126
- <li>Haga clic derecho en la canción y seleccione Mostrar en el Finder (Mac) o Mostrar en el Explorador de Windows (Windows). </li>
127
- <li>Copie y pegue el archivo MP3 en la ubicación deseada en su dispositivo. </li>
128
- </ol>
129
- <h3>Música de Amazon</h3>
130
- <p>Amazon Music es otro servicio de transmisión de música popular, con más de 55 millones de usuarios y 70 millones de canciones. Amazon Music ofrece un plan gratuito que te permite escuchar música con anuncios y funciones limitadas, o un plan premium que te ofrece acceso ilimitado y sin anuncios a música y podcasts. Amazon Music también te permite descargar música para escuchar sin conexión, pero solo si tienes una suscripción premium. </p>
131
-
132
- <ol>
133
- <li>Descargue e instale la aplicación Amazon Music en su dispositivo, o abra el reproductor web Amazon Music en su navegador. </li>
134
- <li>Cree una cuenta gratuita o inicie sesión con su cuenta existente. </li>
135
- <li>Buscar Askies Lo siento Mama por Acapella Sudáfrica en la barra de búsqueda. </li>
136
- <li> Seleccione la canción de los resultados y haga clic en Más opciones (tres puntos icono) junto a ella. </li>
137
- <li>Seleccione Añadir a Mi música desde el menú. </li>
138
- <li>Ir a Mi música y encontrar la canción bajo Recientemente añadido o canciones.</li>
139
- <li>Haga clic en el icono de descarga (flecha hacia abajo con una línea) junto a la canción. </li>
140
- <li>Espere a que la descarga se complete y encuentre el archivo MP3 en su dispositivo. </li>
141
- </ol>
142
- <h3>YouTube</h3>
143
- <p>YouTube es la plataforma para compartir vídeos más popular del mundo, con más de 2.000 millones de usuarios y miles de millones de vídeos. YouTube también ofrece una variedad de videos musicales, incluyendo Askies I’m Sorry Mama de Acapella Sudáfrica. YouTube te permite ver y escuchar videos musicales de forma gratuita, pero no te permite descargarlos directamente. Sin embargo, puedes usar algunas herramientas de terceros para convertir vídeos de YouTube a archivos MP3 y descargarlos en tu dispositivo. </p>
144
- <p>Para descargar Askies I’m Sorry Mama MP3 gratis desde YouTube, debes seguir estos pasos:</p>
145
- <ol>
146
- <li>Ir a YouTube y buscar Askies Lo siento Mama por Acapella Sudáfrica en la barra de búsqueda. </li>
147
- <li>Seleccione el vídeo de los resultados y copie la URL del vídeo desde la barra de direcciones. </li>
148
- <li>Vaya a un sitio web de terceros que le permite convertir vídeos de YouTube a archivos MP3, como <a href="">YouTube to MP3 Converter</a>, <a href="">YTMP3</a>, o <a href=">4K Video Downloader</a>. </li>
149
- <li>Pegue la URL del video en el sitio web y haga clic en Convertir o Descargar.</li>
150
- <li>Espere a que el proceso termine y guarde el archivo MP3 en su dispositivo. </li>
151
- </ol>
152
- <h2>Pros y contras de descargar Askies I’m Sorry Mama MP3 gratis</h2>
153
-
154
- <tabla>
155
- <tr>
156
- <th>Pros</th>
157
- <th>Contras</th>
158
- </tr>
159
- <tr>
160
- <td>Puede disfrutar de la canción en cualquier momento y en cualquier lugar sin conexión a Internet o cargos de datos. </td>
161
- <td>Puede comprometer la calidad y la seguridad de la canción, ya que algunas plataformas pueden ofrecer archivos de baja calidad o corruptos, o contener virus o malware. </td>
162
- </tr>
163
- <tr>
164
- <td>Puedes ahorrar dinero y evitar pagar las cuotas de suscripción o de compra. </td>
165
- <td>Usted puede violar los derechos de propiedad intelectual e intereses de los artistas y la industria de la música, ya que pueden no recibir ninguna compensación o reconocimiento por su trabajo. </td>
166
- </tr>
167
- <tr>
168
- <td>Puedes compartir la canción con tus amigos y familiares fácil y libremente. </td>
169
- <td>Puedes perderte otras características y beneficios que ofrecen las plataformas, como listas de reproducción, recomendaciones, letras, podcasts y más. </td>
170
- </tr>
171
- <tr>
172
- <td>Puedes apoyar a los artistas aumentando su popularidad y exposición. </td>
173
- <td>Puede desalentar a los artistas de crear más música, ya que pueden sentir que sus esfuerzos no son apreciados o recompensados. </td>
174
- </tr>
175
- </tabla>
176
- <h2>Alternativas para descargar Askies I’m Sorry Mama MP3 gratis</h2>
177
- <p>Si quieres disfrutar de Askies I’m Sorry Mama sin descargarlo gratis, tienes algunas alternativas que pueden ser mejores para ti y los artistas. Estos son algunos de ellos:</p>
178
- <ul>
179
- <li><strong>Transmitir la canción en línea</strong>: Puede transmitir la canción en línea desde varias plataformas, como Spotify, Apple Music, Amazon Music, YouTube Music, Deezer, Tidal, SoundCloud, y más. Streaming le permite escuchar música sin necesidad de descargarla, pero necesita una conexión a Internet o cargos de datos. La transmisión también ofrece otras características y beneficios, como listas de reproducción, recomendaciones, letras, podcasts y más. Streaming también puede apoyar a los artistas mediante la generación de ingresos o regalías para ellos. </li>
180
-
181
- <li><strong>Apoya a los artistas en línea</strong>: Puedes apoyar a los artistas en línea siguiéndolos en plataformas de redes sociales, como Facebook, Twitter, Instagram, YouTube, TikTok y más. También puede gustar, comentar, compartir, suscribirse, calificar, revisar o recomendar su música a otros. También puedes donar o dar propina a través de plataformas como Patreon, PayPal, Venmo , o Cash App. También puede comprar su mercancía, como camisetas, sombreros, tazas, pegatinas y más. También puede asistir a sus espectáculos en vivo, conciertos o eventos, si es posible. Apoyar a los artistas en línea les ayuda a crecer su base de fans, aumentar sus ingresos y motivarlos a crear más música. </li>
182
- <h2>Conclusión</h2>
183
- <p>La música de Acapella es una forma hermosa y poderosa de música que involucra cantar sin ningún acompañamiento instrumental. La música de Acapella es popular en Sudáfrica porque tiene una larga y rica historia y cultura en este país. Askies I’m Sorry Mama es una canción que fue lanzada en 2019 por Acapella Sudáfrica, un grupo de jóvenes y talentosos cantantes de Dinwiddie High School en Germiston, Gauteng. La canción es un gwijo, que es un tipo de canción acapella que se originó de la cultura zulú y se canta a menudo en escuelas, universidades, eventos deportivos y reuniones sociales. La canción trata sobre un hijo que se disculpa con su madre por decepcionarla y causarle dolor. La canción ha recibido críticas positivas y premios de críticos y fans por igual. </p>
184
- <p>Si quieres descargar Askies I’m Sorry Mama MP3 gratis, tienes varias opciones para elegir, como Spotify, Apple Music, Amazon Music y YouTube. Sin embargo, también debes considerar los pros y los contras de descargar música gratis, ya que puede no ser legal o ético en algunos casos. Alternativamente, puedes disfrutar de Askies I’m Sorry Mama sin descargarlo gratis, por streaming en línea, comprarlo en línea, o apoyar a los artistas en línea. </p>
185
-
186
- <h2>Preguntas frecuentes</h2>
187
- <p>Aquí hay algunas preguntas frecuentes sobre la música acapella en Sudáfrica y Askies Lo siento Mama por Acapella Sudáfrica:</p>
188
- <ol>
189
- <li><strong>¿Qué significa acapella? </strong></li>
190
- <p>Acapella significa cantar sin acompañamiento instrumental. La palabra viene del italiano, que significa "al estilo de la capilla", como se usaba originalmente en la música religiosa. </p>
191
- <li><strong>¿Qué es un gwijo? </strong></li>
192
- <p>Un gwijo es un tipo de canción de acapella que se originó en la cultura zulú y se canta a menudo en escuelas, universidades, eventos deportivos y reuniones sociales. Los gwijos son generalmente cantados en un estilo de llamada y respuesta, con un líder cantando un verso y el resto del grupo repitiéndolo o agregando armonías. </p>
193
- <li><strong>¿Qué significa askies? </strong></li>
194
- <p>Askies es una palabra de argot que significa "lo siento" o "perdón". Viene de la palabra en afrikáans "asseblief", que significa "por favor". </p>
195
- <li><strong> ¿Quiénes son Acapella Sudáfrica? </strong></li>
196
- <p>Acapella Sudáfrica es un grupo de jóvenes y talentosos cantantes de Dinwiddie High School en Germiston, Gauteng. Lanzaron su álbum debut Acapella South áfrica Vol. 1 en 2019, que incluye la canción Askies I’m Sorry Mama.</p>
197
- <li><strong>¿Cómo puedo descargar Askies I’m Sorry Mama MP3 gratis? </strong></li>
198
- <p>Puedes descargar Askies I’m Sorry Mama MP3 gratis desde varias plataformas, como Spotify, Apple Music, Amazon Music y YouTube. Sin embargo, también debes considerar los pros y los contras de descargar música gratis, ya que puede no ser legal o ético en algunos casos. Alternativamente, puedes disfrutar de Askies I’m Sorry Mama sin descargarlo gratis, por streaming en línea, comprarlo en línea, o apoyar a los artistas en línea. </p> 64aa2da5cf<br />
199
- <br />
200
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BhaskarKapri/Animal/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Animal
3
- emoji: 🌖
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.16.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/__init__.py DELETED
@@ -1,19 +0,0 @@
1
- from typing import List, Optional
2
-
3
- import pip._internal.utils.inject_securetransport # noqa
4
- from pip._internal.utils import _log
5
-
6
- # init_logging() must be called before any call to logging.getLogger()
7
- # which happens at import of most modules.
8
- _log.init_logging()
9
-
10
-
11
- def main(args: (Optional[List[str]]) = None) -> int:
12
- """This is preserved for old console scripts that may still be referencing
13
- it.
14
-
15
- For additional details, see https://github.com/pypa/pip/issues/7498.
16
- """
17
- from pip._internal.utils.entrypoints import _wrapper
18
-
19
- return _wrapper(args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/_utils.py DELETED
@@ -1,76 +0,0 @@
1
- # Copyright 2016 Julien Danjou
2
- # Copyright 2016 Joshua Harlow
3
- # Copyright 2013-2014 Ray Holder
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
-
17
- import sys
18
- import typing
19
- from datetime import timedelta
20
-
21
-
22
- # sys.maxsize:
23
- # An integer giving the maximum value a variable of type Py_ssize_t can take.
24
- MAX_WAIT = sys.maxsize / 2
25
-
26
-
27
- def find_ordinal(pos_num: int) -> str:
28
- # See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers
29
- if pos_num == 0:
30
- return "th"
31
- elif pos_num == 1:
32
- return "st"
33
- elif pos_num == 2:
34
- return "nd"
35
- elif pos_num == 3:
36
- return "rd"
37
- elif 4 <= pos_num <= 20:
38
- return "th"
39
- else:
40
- return find_ordinal(pos_num % 10)
41
-
42
-
43
- def to_ordinal(pos_num: int) -> str:
44
- return f"{pos_num}{find_ordinal(pos_num)}"
45
-
46
-
47
- def get_callback_name(cb: typing.Callable[..., typing.Any]) -> str:
48
- """Get a callback fully-qualified name.
49
-
50
- If no name can be produced ``repr(cb)`` is called and returned.
51
- """
52
- segments = []
53
- try:
54
- segments.append(cb.__qualname__)
55
- except AttributeError:
56
- try:
57
- segments.append(cb.__name__)
58
- except AttributeError:
59
- pass
60
- if not segments:
61
- return repr(cb)
62
- else:
63
- try:
64
- # When running under sphinx it appears this can be none?
65
- if cb.__module__:
66
- segments.insert(0, cb.__module__)
67
- except AttributeError:
68
- pass
69
- return ".".join(segments)
70
-
71
-
72
- time_unit_type = typing.Union[int, float, timedelta]
73
-
74
-
75
- def to_seconds(time_unit: time_unit_type) -> float:
76
- return float(time_unit.total_seconds() if isinstance(time_unit, timedelta) else time_unit)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/config/setupcfg.py DELETED
@@ -1,762 +0,0 @@
1
- """
2
- Load setuptools configuration from ``setup.cfg`` files.
3
-
4
- **API will be made private in the future**
5
- """
6
- import os
7
-
8
- import contextlib
9
- import functools
10
- import warnings
11
- from collections import defaultdict
12
- from functools import partial
13
- from functools import wraps
14
- from typing import (TYPE_CHECKING, Callable, Any, Dict, Generic, Iterable, List,
15
- Optional, Tuple, TypeVar, Union)
16
-
17
- from distutils.errors import DistutilsOptionError, DistutilsFileError
18
- from setuptools.extern.packaging.requirements import Requirement, InvalidRequirement
19
- from setuptools.extern.packaging.version import Version, InvalidVersion
20
- from setuptools.extern.packaging.specifiers import SpecifierSet
21
- from setuptools._deprecation_warning import SetuptoolsDeprecationWarning
22
-
23
- from . import expand
24
-
25
- if TYPE_CHECKING:
26
- from setuptools.dist import Distribution # noqa
27
- from distutils.dist import DistributionMetadata # noqa
28
-
29
- _Path = Union[str, os.PathLike]
30
- SingleCommandOptions = Dict["str", Tuple["str", Any]]
31
- """Dict that associate the name of the options of a particular command to a
32
- tuple. The first element of the tuple indicates the origin of the option value
33
- (e.g. the name of the configuration file where it was read from),
34
- while the second element of the tuple is the option value itself
35
- """
36
- AllCommandOptions = Dict["str", SingleCommandOptions] # cmd name => its options
37
- Target = TypeVar("Target", bound=Union["Distribution", "DistributionMetadata"])
38
-
39
-
40
- def read_configuration(
41
- filepath: _Path,
42
- find_others=False,
43
- ignore_option_errors=False
44
- ) -> dict:
45
- """Read given configuration file and returns options from it as a dict.
46
-
47
- :param str|unicode filepath: Path to configuration file
48
- to get options from.
49
-
50
- :param bool find_others: Whether to search for other configuration files
51
- which could be on in various places.
52
-
53
- :param bool ignore_option_errors: Whether to silently ignore
54
- options, values of which could not be resolved (e.g. due to exceptions
55
- in directives such as file:, attr:, etc.).
56
- If False exceptions are propagated as expected.
57
-
58
- :rtype: dict
59
- """
60
- from setuptools.dist import Distribution
61
-
62
- dist = Distribution()
63
- filenames = dist.find_config_files() if find_others else []
64
- handlers = _apply(dist, filepath, filenames, ignore_option_errors)
65
- return configuration_to_dict(handlers)
66
-
67
-
68
- def apply_configuration(dist: "Distribution", filepath: _Path) -> "Distribution":
69
- """Apply the configuration from a ``setup.cfg`` file into an existing
70
- distribution object.
71
- """
72
- _apply(dist, filepath)
73
- dist._finalize_requires()
74
- return dist
75
-
76
-
77
- def _apply(
78
- dist: "Distribution", filepath: _Path,
79
- other_files: Iterable[_Path] = (),
80
- ignore_option_errors: bool = False,
81
- ) -> Tuple["ConfigHandler", ...]:
82
- """Read configuration from ``filepath`` and applies to the ``dist`` object."""
83
- from setuptools.dist import _Distribution
84
-
85
- filepath = os.path.abspath(filepath)
86
-
87
- if not os.path.isfile(filepath):
88
- raise DistutilsFileError('Configuration file %s does not exist.' % filepath)
89
-
90
- current_directory = os.getcwd()
91
- os.chdir(os.path.dirname(filepath))
92
- filenames = [*other_files, filepath]
93
-
94
- try:
95
- _Distribution.parse_config_files(dist, filenames=filenames)
96
- handlers = parse_configuration(
97
- dist, dist.command_options, ignore_option_errors=ignore_option_errors
98
- )
99
- dist._finalize_license_files()
100
- finally:
101
- os.chdir(current_directory)
102
-
103
- return handlers
104
-
105
-
106
- def _get_option(target_obj: Target, key: str):
107
- """
108
- Given a target object and option key, get that option from
109
- the target object, either through a get_{key} method or
110
- from an attribute directly.
111
- """
112
- getter_name = 'get_{key}'.format(**locals())
113
- by_attribute = functools.partial(getattr, target_obj, key)
114
- getter = getattr(target_obj, getter_name, by_attribute)
115
- return getter()
116
-
117
-
118
- def configuration_to_dict(handlers: Tuple["ConfigHandler", ...]) -> dict:
119
- """Returns configuration data gathered by given handlers as a dict.
120
-
121
- :param list[ConfigHandler] handlers: Handlers list,
122
- usually from parse_configuration()
123
-
124
- :rtype: dict
125
- """
126
- config_dict: dict = defaultdict(dict)
127
-
128
- for handler in handlers:
129
- for option in handler.set_options:
130
- value = _get_option(handler.target_obj, option)
131
- config_dict[handler.section_prefix][option] = value
132
-
133
- return config_dict
134
-
135
-
136
- def parse_configuration(
137
- distribution: "Distribution",
138
- command_options: AllCommandOptions,
139
- ignore_option_errors=False
140
- ) -> Tuple["ConfigMetadataHandler", "ConfigOptionsHandler"]:
141
- """Performs additional parsing of configuration options
142
- for a distribution.
143
-
144
- Returns a list of used option handlers.
145
-
146
- :param Distribution distribution:
147
- :param dict command_options:
148
- :param bool ignore_option_errors: Whether to silently ignore
149
- options, values of which could not be resolved (e.g. due to exceptions
150
- in directives such as file:, attr:, etc.).
151
- If False exceptions are propagated as expected.
152
- :rtype: list
153
- """
154
- with expand.EnsurePackagesDiscovered(distribution) as ensure_discovered:
155
- options = ConfigOptionsHandler(
156
- distribution,
157
- command_options,
158
- ignore_option_errors,
159
- ensure_discovered,
160
- )
161
-
162
- options.parse()
163
- if not distribution.package_dir:
164
- distribution.package_dir = options.package_dir # Filled by `find_packages`
165
-
166
- meta = ConfigMetadataHandler(
167
- distribution.metadata,
168
- command_options,
169
- ignore_option_errors,
170
- ensure_discovered,
171
- distribution.package_dir,
172
- distribution.src_root,
173
- )
174
- meta.parse()
175
-
176
- return meta, options
177
-
178
-
179
- def _warn_accidental_env_marker_misconfig(label: str, orig_value: str, parsed: list):
180
- """Because users sometimes misinterpret this configuration:
181
-
182
- [options.extras_require]
183
- foo = bar;python_version<"4"
184
-
185
- It looks like one requirement with an environment marker
186
- but because there is no newline, it's parsed as two requirements
187
- with a semicolon as separator.
188
-
189
- Therefore, if:
190
- * input string does not contain a newline AND
191
- * parsed result contains two requirements AND
192
- * parsing of the two parts from the result ("<first>;<second>")
193
- leads in a valid Requirement with a valid marker
194
- a UserWarning is shown to inform the user about the possible problem.
195
- """
196
- if "\n" in orig_value or len(parsed) != 2:
197
- return
198
-
199
- with contextlib.suppress(InvalidRequirement):
200
- original_requirements_str = ";".join(parsed)
201
- req = Requirement(original_requirements_str)
202
- if req.marker is not None:
203
- msg = (
204
- f"One of the parsed requirements in `{label}` "
205
- f"looks like a valid environment marker: '{parsed[1]}'\n"
206
- "Make sure that the config is correct and check "
207
- "https://setuptools.pypa.io/en/latest/userguide/declarative_config.html#opt-2" # noqa: E501
208
- )
209
- warnings.warn(msg, UserWarning)
210
-
211
-
212
- class ConfigHandler(Generic[Target]):
213
- """Handles metadata supplied in configuration files."""
214
-
215
- section_prefix: str
216
- """Prefix for config sections handled by this handler.
217
- Must be provided by class heirs.
218
-
219
- """
220
-
221
- aliases: Dict[str, str] = {}
222
- """Options aliases.
223
- For compatibility with various packages. E.g.: d2to1 and pbr.
224
- Note: `-` in keys is replaced with `_` by config parser.
225
-
226
- """
227
-
228
- def __init__(
229
- self,
230
- target_obj: Target,
231
- options: AllCommandOptions,
232
- ignore_option_errors,
233
- ensure_discovered: expand.EnsurePackagesDiscovered,
234
- ):
235
- sections: AllCommandOptions = {}
236
-
237
- section_prefix = self.section_prefix
238
- for section_name, section_options in options.items():
239
- if not section_name.startswith(section_prefix):
240
- continue
241
-
242
- section_name = section_name.replace(section_prefix, '').strip('.')
243
- sections[section_name] = section_options
244
-
245
- self.ignore_option_errors = ignore_option_errors
246
- self.target_obj = target_obj
247
- self.sections = sections
248
- self.set_options: List[str] = []
249
- self.ensure_discovered = ensure_discovered
250
-
251
- @property
252
- def parsers(self):
253
- """Metadata item name to parser function mapping."""
254
- raise NotImplementedError(
255
- '%s must provide .parsers property' % self.__class__.__name__
256
- )
257
-
258
- def __setitem__(self, option_name, value):
259
- unknown = tuple()
260
- target_obj = self.target_obj
261
-
262
- # Translate alias into real name.
263
- option_name = self.aliases.get(option_name, option_name)
264
-
265
- current_value = getattr(target_obj, option_name, unknown)
266
-
267
- if current_value is unknown:
268
- raise KeyError(option_name)
269
-
270
- if current_value:
271
- # Already inhabited. Skipping.
272
- return
273
-
274
- skip_option = False
275
- parser = self.parsers.get(option_name)
276
- if parser:
277
- try:
278
- value = parser(value)
279
-
280
- except Exception:
281
- skip_option = True
282
- if not self.ignore_option_errors:
283
- raise
284
-
285
- if skip_option:
286
- return
287
-
288
- setter = getattr(target_obj, 'set_%s' % option_name, None)
289
- if setter is None:
290
- setattr(target_obj, option_name, value)
291
- else:
292
- setter(value)
293
-
294
- self.set_options.append(option_name)
295
-
296
- @classmethod
297
- def _parse_list(cls, value, separator=','):
298
- """Represents value as a list.
299
-
300
- Value is split either by separator (defaults to comma) or by lines.
301
-
302
- :param value:
303
- :param separator: List items separator character.
304
- :rtype: list
305
- """
306
- if isinstance(value, list): # _get_parser_compound case
307
- return value
308
-
309
- if '\n' in value:
310
- value = value.splitlines()
311
- else:
312
- value = value.split(separator)
313
-
314
- return [chunk.strip() for chunk in value if chunk.strip()]
315
-
316
- @classmethod
317
- def _parse_dict(cls, value):
318
- """Represents value as a dict.
319
-
320
- :param value:
321
- :rtype: dict
322
- """
323
- separator = '='
324
- result = {}
325
- for line in cls._parse_list(value):
326
- key, sep, val = line.partition(separator)
327
- if sep != separator:
328
- raise DistutilsOptionError(
329
- 'Unable to parse option value to dict: %s' % value
330
- )
331
- result[key.strip()] = val.strip()
332
-
333
- return result
334
-
335
- @classmethod
336
- def _parse_bool(cls, value):
337
- """Represents value as boolean.
338
-
339
- :param value:
340
- :rtype: bool
341
- """
342
- value = value.lower()
343
- return value in ('1', 'true', 'yes')
344
-
345
- @classmethod
346
- def _exclude_files_parser(cls, key):
347
- """Returns a parser function to make sure field inputs
348
- are not files.
349
-
350
- Parses a value after getting the key so error messages are
351
- more informative.
352
-
353
- :param key:
354
- :rtype: callable
355
- """
356
-
357
- def parser(value):
358
- exclude_directive = 'file:'
359
- if value.startswith(exclude_directive):
360
- raise ValueError(
361
- 'Only strings are accepted for the {0} field, '
362
- 'files are not accepted'.format(key)
363
- )
364
- return value
365
-
366
- return parser
367
-
368
- @classmethod
369
- def _parse_file(cls, value, root_dir: _Path):
370
- """Represents value as a string, allowing including text
371
- from nearest files using `file:` directive.
372
-
373
- Directive is sandboxed and won't reach anything outside
374
- directory with setup.py.
375
-
376
- Examples:
377
- file: README.rst, CHANGELOG.md, src/file.txt
378
-
379
- :param str value:
380
- :rtype: str
381
- """
382
- include_directive = 'file:'
383
-
384
- if not isinstance(value, str):
385
- return value
386
-
387
- if not value.startswith(include_directive):
388
- return value
389
-
390
- spec = value[len(include_directive) :]
391
- filepaths = (path.strip() for path in spec.split(','))
392
- return expand.read_files(filepaths, root_dir)
393
-
394
- def _parse_attr(self, value, package_dir, root_dir: _Path):
395
- """Represents value as a module attribute.
396
-
397
- Examples:
398
- attr: package.attr
399
- attr: package.module.attr
400
-
401
- :param str value:
402
- :rtype: str
403
- """
404
- attr_directive = 'attr:'
405
- if not value.startswith(attr_directive):
406
- return value
407
-
408
- attr_desc = value.replace(attr_directive, '')
409
-
410
- # Make sure package_dir is populated correctly, so `attr:` directives can work
411
- package_dir.update(self.ensure_discovered.package_dir)
412
- return expand.read_attr(attr_desc, package_dir, root_dir)
413
-
414
- @classmethod
415
- def _get_parser_compound(cls, *parse_methods):
416
- """Returns parser function to represents value as a list.
417
-
418
- Parses a value applying given methods one after another.
419
-
420
- :param parse_methods:
421
- :rtype: callable
422
- """
423
-
424
- def parse(value):
425
- parsed = value
426
-
427
- for method in parse_methods:
428
- parsed = method(parsed)
429
-
430
- return parsed
431
-
432
- return parse
433
-
434
- @classmethod
435
- def _parse_section_to_dict_with_key(cls, section_options, values_parser):
436
- """Parses section options into a dictionary.
437
-
438
- Applies a given parser to each option in a section.
439
-
440
- :param dict section_options:
441
- :param callable values_parser: function with 2 args corresponding to key, value
442
- :rtype: dict
443
- """
444
- value = {}
445
- for key, (_, val) in section_options.items():
446
- value[key] = values_parser(key, val)
447
- return value
448
-
449
- @classmethod
450
- def _parse_section_to_dict(cls, section_options, values_parser=None):
451
- """Parses section options into a dictionary.
452
-
453
- Optionally applies a given parser to each value.
454
-
455
- :param dict section_options:
456
- :param callable values_parser: function with 1 arg corresponding to option value
457
- :rtype: dict
458
- """
459
- parser = (lambda _, v: values_parser(v)) if values_parser else (lambda _, v: v)
460
- return cls._parse_section_to_dict_with_key(section_options, parser)
461
-
462
- def parse_section(self, section_options):
463
- """Parses configuration file section.
464
-
465
- :param dict section_options:
466
- """
467
- for (name, (_, value)) in section_options.items():
468
- with contextlib.suppress(KeyError):
469
- # Keep silent for a new option may appear anytime.
470
- self[name] = value
471
-
472
- def parse(self):
473
- """Parses configuration file items from one
474
- or more related sections.
475
-
476
- """
477
- for section_name, section_options in self.sections.items():
478
-
479
- method_postfix = ''
480
- if section_name: # [section.option] variant
481
- method_postfix = '_%s' % section_name
482
-
483
- section_parser_method: Optional[Callable] = getattr(
484
- self,
485
- # Dots in section names are translated into dunderscores.
486
- ('parse_section%s' % method_postfix).replace('.', '__'),
487
- None,
488
- )
489
-
490
- if section_parser_method is None:
491
- raise DistutilsOptionError(
492
- 'Unsupported distribution option section: [%s.%s]'
493
- % (self.section_prefix, section_name)
494
- )
495
-
496
- section_parser_method(section_options)
497
-
498
- def _deprecated_config_handler(self, func, msg, warning_class):
499
- """this function will wrap around parameters that are deprecated
500
-
501
- :param msg: deprecation message
502
- :param warning_class: class of warning exception to be raised
503
- :param func: function to be wrapped around
504
- """
505
-
506
- @wraps(func)
507
- def config_handler(*args, **kwargs):
508
- warnings.warn(msg, warning_class)
509
- return func(*args, **kwargs)
510
-
511
- return config_handler
512
-
513
-
514
- class ConfigMetadataHandler(ConfigHandler["DistributionMetadata"]):
515
-
516
- section_prefix = 'metadata'
517
-
518
- aliases = {
519
- 'home_page': 'url',
520
- 'summary': 'description',
521
- 'classifier': 'classifiers',
522
- 'platform': 'platforms',
523
- }
524
-
525
- strict_mode = False
526
- """We need to keep it loose, to be partially compatible with
527
- `pbr` and `d2to1` packages which also uses `metadata` section.
528
-
529
- """
530
-
531
- def __init__(
532
- self,
533
- target_obj: "DistributionMetadata",
534
- options: AllCommandOptions,
535
- ignore_option_errors: bool,
536
- ensure_discovered: expand.EnsurePackagesDiscovered,
537
- package_dir: Optional[dict] = None,
538
- root_dir: _Path = os.curdir
539
- ):
540
- super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
541
- self.package_dir = package_dir
542
- self.root_dir = root_dir
543
-
544
- @property
545
- def parsers(self):
546
- """Metadata item name to parser function mapping."""
547
- parse_list = self._parse_list
548
- parse_file = partial(self._parse_file, root_dir=self.root_dir)
549
- parse_dict = self._parse_dict
550
- exclude_files_parser = self._exclude_files_parser
551
-
552
- return {
553
- 'platforms': parse_list,
554
- 'keywords': parse_list,
555
- 'provides': parse_list,
556
- 'requires': self._deprecated_config_handler(
557
- parse_list,
558
- "The requires parameter is deprecated, please use "
559
- "install_requires for runtime dependencies.",
560
- SetuptoolsDeprecationWarning,
561
- ),
562
- 'obsoletes': parse_list,
563
- 'classifiers': self._get_parser_compound(parse_file, parse_list),
564
- 'license': exclude_files_parser('license'),
565
- 'license_file': self._deprecated_config_handler(
566
- exclude_files_parser('license_file'),
567
- "The license_file parameter is deprecated, "
568
- "use license_files instead.",
569
- SetuptoolsDeprecationWarning,
570
- ),
571
- 'license_files': parse_list,
572
- 'description': parse_file,
573
- 'long_description': parse_file,
574
- 'version': self._parse_version,
575
- 'project_urls': parse_dict,
576
- }
577
-
578
- def _parse_version(self, value):
579
- """Parses `version` option value.
580
-
581
- :param value:
582
- :rtype: str
583
-
584
- """
585
- version = self._parse_file(value, self.root_dir)
586
-
587
- if version != value:
588
- version = version.strip()
589
- # Be strict about versions loaded from file because it's easy to
590
- # accidentally include newlines and other unintended content
591
- try:
592
- Version(version)
593
- except InvalidVersion:
594
- tmpl = (
595
- 'Version loaded from {value} does not '
596
- 'comply with PEP 440: {version}'
597
- )
598
- raise DistutilsOptionError(tmpl.format(**locals()))
599
-
600
- return version
601
-
602
- return expand.version(self._parse_attr(value, self.package_dir, self.root_dir))
603
-
604
-
605
- class ConfigOptionsHandler(ConfigHandler["Distribution"]):
606
-
607
- section_prefix = 'options'
608
-
609
- def __init__(
610
- self,
611
- target_obj: "Distribution",
612
- options: AllCommandOptions,
613
- ignore_option_errors: bool,
614
- ensure_discovered: expand.EnsurePackagesDiscovered,
615
- ):
616
- super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
617
- self.root_dir = target_obj.src_root
618
- self.package_dir: Dict[str, str] = {} # To be filled by `find_packages`
619
-
620
- @classmethod
621
- def _parse_list_semicolon(cls, value):
622
- return cls._parse_list(value, separator=';')
623
-
624
- def _parse_file_in_root(self, value):
625
- return self._parse_file(value, root_dir=self.root_dir)
626
-
627
- def _parse_requirements_list(self, label: str, value: str):
628
- # Parse a requirements list, either by reading in a `file:`, or a list.
629
- parsed = self._parse_list_semicolon(self._parse_file_in_root(value))
630
- _warn_accidental_env_marker_misconfig(label, value, parsed)
631
- # Filter it to only include lines that are not comments. `parse_list`
632
- # will have stripped each line and filtered out empties.
633
- return [line for line in parsed if not line.startswith("#")]
634
-
635
- @property
636
- def parsers(self):
637
- """Metadata item name to parser function mapping."""
638
- parse_list = self._parse_list
639
- parse_bool = self._parse_bool
640
- parse_dict = self._parse_dict
641
- parse_cmdclass = self._parse_cmdclass
642
-
643
- return {
644
- 'zip_safe': parse_bool,
645
- 'include_package_data': parse_bool,
646
- 'package_dir': parse_dict,
647
- 'scripts': parse_list,
648
- 'eager_resources': parse_list,
649
- 'dependency_links': parse_list,
650
- 'namespace_packages': self._deprecated_config_handler(
651
- parse_list,
652
- "The namespace_packages parameter is deprecated, "
653
- "consider using implicit namespaces instead (PEP 420).",
654
- SetuptoolsDeprecationWarning,
655
- ),
656
- 'install_requires': partial(
657
- self._parse_requirements_list, "install_requires"
658
- ),
659
- 'setup_requires': self._parse_list_semicolon,
660
- 'tests_require': self._parse_list_semicolon,
661
- 'packages': self._parse_packages,
662
- 'entry_points': self._parse_file_in_root,
663
- 'py_modules': parse_list,
664
- 'python_requires': SpecifierSet,
665
- 'cmdclass': parse_cmdclass,
666
- }
667
-
668
- def _parse_cmdclass(self, value):
669
- package_dir = self.ensure_discovered.package_dir
670
- return expand.cmdclass(self._parse_dict(value), package_dir, self.root_dir)
671
-
672
- def _parse_packages(self, value):
673
- """Parses `packages` option value.
674
-
675
- :param value:
676
- :rtype: list
677
- """
678
- find_directives = ['find:', 'find_namespace:']
679
- trimmed_value = value.strip()
680
-
681
- if trimmed_value not in find_directives:
682
- return self._parse_list(value)
683
-
684
- # Read function arguments from a dedicated section.
685
- find_kwargs = self.parse_section_packages__find(
686
- self.sections.get('packages.find', {})
687
- )
688
-
689
- find_kwargs.update(
690
- namespaces=(trimmed_value == find_directives[1]),
691
- root_dir=self.root_dir,
692
- fill_package_dir=self.package_dir,
693
- )
694
-
695
- return expand.find_packages(**find_kwargs)
696
-
697
- def parse_section_packages__find(self, section_options):
698
- """Parses `packages.find` configuration file section.
699
-
700
- To be used in conjunction with _parse_packages().
701
-
702
- :param dict section_options:
703
- """
704
- section_data = self._parse_section_to_dict(section_options, self._parse_list)
705
-
706
- valid_keys = ['where', 'include', 'exclude']
707
-
708
- find_kwargs = dict(
709
- [(k, v) for k, v in section_data.items() if k in valid_keys and v]
710
- )
711
-
712
- where = find_kwargs.get('where')
713
- if where is not None:
714
- find_kwargs['where'] = where[0] # cast list to single val
715
-
716
- return find_kwargs
717
-
718
- def parse_section_entry_points(self, section_options):
719
- """Parses `entry_points` configuration file section.
720
-
721
- :param dict section_options:
722
- """
723
- parsed = self._parse_section_to_dict(section_options, self._parse_list)
724
- self['entry_points'] = parsed
725
-
726
- def _parse_package_data(self, section_options):
727
- package_data = self._parse_section_to_dict(section_options, self._parse_list)
728
- return expand.canonic_package_data(package_data)
729
-
730
- def parse_section_package_data(self, section_options):
731
- """Parses `package_data` configuration file section.
732
-
733
- :param dict section_options:
734
- """
735
- self['package_data'] = self._parse_package_data(section_options)
736
-
737
- def parse_section_exclude_package_data(self, section_options):
738
- """Parses `exclude_package_data` configuration file section.
739
-
740
- :param dict section_options:
741
- """
742
- self['exclude_package_data'] = self._parse_package_data(section_options)
743
-
744
- def parse_section_extras_require(self, section_options):
745
- """Parses `extras_require` configuration file section.
746
-
747
- :param dict section_options:
748
- """
749
- parsed = self._parse_section_to_dict_with_key(
750
- section_options,
751
- lambda k, v: self._parse_requirements_list(f"extras_require[{k}]", v)
752
- )
753
-
754
- self['extras_require'] = parsed
755
-
756
- def parse_section_data_files(self, section_options):
757
- """Parses `data_files` configuration file section.
758
-
759
- :param dict section_options:
760
- """
761
- parsed = self._parse_section_to_dict(section_options, self._parse_list)
762
- self['data_files'] = expand.canonic_data_files(parsed, self.root_dir)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BobbyOleti/MyGenAIChatBot/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: MyGenAIChatBot
3
- emoji: 🦀
4
- colorFrom: indigo
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/coco.py DELETED
@@ -1,462 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import contextlib
3
- import datetime
4
- import io
5
- import json
6
- import logging
7
- import numpy as np
8
- import os
9
- import pycocotools.mask as mask_util
10
- from fvcore.common.file_io import PathManager, file_lock
11
- from fvcore.common.timer import Timer
12
- from PIL import Image
13
-
14
- from detectron2.structures import Boxes, BoxMode, PolygonMasks
15
-
16
- from .. import DatasetCatalog, MetadataCatalog
17
-
18
- """
19
- This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
20
- """
21
-
22
-
23
- logger = logging.getLogger(__name__)
24
-
25
- __all__ = ["load_coco_json", "load_sem_seg"]
26
-
27
-
28
- def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
29
- """
30
- Load a json file with COCO's instances annotation format.
31
- Currently supports instance detection, instance segmentation,
32
- and person keypoints annotations.
33
-
34
- Args:
35
- json_file (str): full path to the json file in COCO instances annotation format.
36
- image_root (str or path-like): the directory where the images in this json file exists.
37
- dataset_name (str): the name of the dataset (e.g., coco_2017_train).
38
- If provided, this function will also put "thing_classes" into
39
- the metadata associated with this dataset.
40
- extra_annotation_keys (list[str]): list of per-annotation keys that should also be
41
- loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
42
- "category_id", "segmentation"). The values for these keys will be returned as-is.
43
- For example, the densepose annotations are loaded in this way.
44
-
45
- Returns:
46
- list[dict]: a list of dicts in Detectron2 standard dataset dicts format. (See
47
- `Using Custom Datasets </tutorials/datasets.html>`_ )
48
-
49
- Notes:
50
- 1. This function does not read the image files.
51
- The results do not have the "image" field.
52
- """
53
- from pycocotools.coco import COCO
54
-
55
- timer = Timer()
56
- json_file = PathManager.get_local_path(json_file)
57
- with contextlib.redirect_stdout(io.StringIO()):
58
- coco_api = COCO(json_file)
59
- if timer.seconds() > 1:
60
- logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
61
-
62
- id_map = None
63
- if dataset_name is not None:
64
- meta = MetadataCatalog.get(dataset_name)
65
- cat_ids = sorted(coco_api.getCatIds())
66
- cats = coco_api.loadCats(cat_ids)
67
- # The categories in a custom json file may not be sorted.
68
- thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
69
- meta.thing_classes = thing_classes
70
-
71
- # In COCO, certain category ids are artificially removed,
72
- # and by convention they are always ignored.
73
- # We deal with COCO's id issue and translate
74
- # the category ids to contiguous ids in [0, 80).
75
-
76
- # It works by looking at the "categories" field in the json, therefore
77
- # if users' own json also have incontiguous ids, we'll
78
- # apply this mapping as well but print a warning.
79
- if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
80
- if "coco" not in dataset_name:
81
- logger.warning(
82
- """
83
- Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
84
- """
85
- )
86
- id_map = {v: i for i, v in enumerate(cat_ids)}
87
- meta.thing_dataset_id_to_contiguous_id = id_map
88
-
89
- # sort indices for reproducible results
90
- img_ids = sorted(coco_api.imgs.keys())
91
- # imgs is a list of dicts, each looks something like:
92
- # {'license': 4,
93
- # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
94
- # 'file_name': 'COCO_val2014_000000001268.jpg',
95
- # 'height': 427,
96
- # 'width': 640,
97
- # 'date_captured': '2013-11-17 05:57:24',
98
- # 'id': 1268}
99
- imgs = coco_api.loadImgs(img_ids)
100
- # anns is a list[list[dict]], where each dict is an annotation
101
- # record for an object. The inner list enumerates the objects in an image
102
- # and the outer list enumerates over images. Example of anns[0]:
103
- # [{'segmentation': [[192.81,
104
- # 247.09,
105
- # ...
106
- # 219.03,
107
- # 249.06]],
108
- # 'area': 1035.749,
109
- # 'iscrowd': 0,
110
- # 'image_id': 1268,
111
- # 'bbox': [192.81, 224.8, 74.73, 33.43],
112
- # 'category_id': 16,
113
- # 'id': 42986},
114
- # ...]
115
- anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
116
-
117
- if "minival" not in json_file:
118
- # The popular valminusminival & minival annotations for COCO2014 contain this bug.
119
- # However the ratio of buggy annotations there is tiny and does not affect accuracy.
120
- # Therefore we explicitly white-list them.
121
- ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
122
- assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
123
- json_file
124
- )
125
-
126
- imgs_anns = list(zip(imgs, anns))
127
-
128
- logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
129
-
130
- dataset_dicts = []
131
-
132
- ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
133
-
134
- num_instances_without_valid_segmentation = 0
135
-
136
- for (img_dict, anno_dict_list) in imgs_anns:
137
- record = {}
138
- record["file_name"] = os.path.join(image_root, img_dict["file_name"])
139
- record["height"] = img_dict["height"]
140
- record["width"] = img_dict["width"]
141
- image_id = record["image_id"] = img_dict["id"]
142
-
143
- objs = []
144
- for anno in anno_dict_list:
145
- # Check that the image_id in this annotation is the same as
146
- # the image_id we're looking at.
147
- # This fails only when the data parsing logic or the annotation file is buggy.
148
-
149
- # The original COCO valminusminival2014 & minival2014 annotation files
150
- # actually contains bugs that, together with certain ways of using COCO API,
151
- # can trigger this assertion.
152
- assert anno["image_id"] == image_id
153
-
154
- assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
155
-
156
- obj = {key: anno[key] for key in ann_keys if key in anno}
157
-
158
- segm = anno.get("segmentation", None)
159
- if segm: # either list[list[float]] or dict(RLE)
160
- if not isinstance(segm, dict):
161
- # filter out invalid polygons (< 3 points)
162
- segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
163
- if len(segm) == 0:
164
- num_instances_without_valid_segmentation += 1
165
- continue # ignore this instance
166
- obj["segmentation"] = segm
167
-
168
- keypts = anno.get("keypoints", None)
169
- if keypts: # list[int]
170
- for idx, v in enumerate(keypts):
171
- if idx % 3 != 2:
172
- # COCO's segmentation coordinates are floating points in [0, H or W],
173
- # but keypoint coordinates are integers in [0, H-1 or W-1]
174
- # Therefore we assume the coordinates are "pixel indices" and
175
- # add 0.5 to convert to floating point coordinates.
176
- keypts[idx] = v + 0.5
177
- obj["keypoints"] = keypts
178
-
179
- obj["bbox_mode"] = BoxMode.XYWH_ABS
180
- if id_map:
181
- obj["category_id"] = id_map[obj["category_id"]]
182
- objs.append(obj)
183
- record["annotations"] = objs
184
- dataset_dicts.append(record)
185
-
186
- if num_instances_without_valid_segmentation > 0:
187
- logger.warning(
188
- "Filtered out {} instances without valid segmentation. "
189
- "There might be issues in your dataset generation process.".format(
190
- num_instances_without_valid_segmentation
191
- )
192
- )
193
- return dataset_dicts
194
-
195
-
196
- def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
197
- """
198
- Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
199
- treated as ground truth annotations and all files under "image_root" with "image_ext" extension
200
- as input images. Ground truth and input images are matched using file paths relative to
201
- "gt_root" and "image_root" respectively without taking into account file extensions.
202
- This works for COCO as well as some other datasets.
203
-
204
- Args:
205
- gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
206
- annotations are stored as images with integer values in pixels that represent
207
- corresponding semantic labels.
208
- image_root (str): the directory where the input images are.
209
- gt_ext (str): file extension for ground truth annotations.
210
- image_ext (str): file extension for input images.
211
-
212
- Returns:
213
- list[dict]:
214
- a list of dicts in detectron2 standard format without instance-level
215
- annotation.
216
-
217
- Notes:
218
- 1. This function does not read the image and ground truth files.
219
- The results do not have the "image" and "sem_seg" fields.
220
- """
221
-
222
- # We match input images with ground truth based on their relative filepaths (without file
223
- # extensions) starting from 'image_root' and 'gt_root' respectively.
224
- def file2id(folder_path, file_path):
225
- # extract relative path starting from `folder_path`
226
- image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
227
- # remove file extension
228
- image_id = os.path.splitext(image_id)[0]
229
- return image_id
230
-
231
- input_files = sorted(
232
- (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
233
- key=lambda file_path: file2id(image_root, file_path),
234
- )
235
- gt_files = sorted(
236
- (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
237
- key=lambda file_path: file2id(gt_root, file_path),
238
- )
239
-
240
- assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
241
-
242
- # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
243
- if len(input_files) != len(gt_files):
244
- logger.warn(
245
- "Directory {} and {} has {} and {} files, respectively.".format(
246
- image_root, gt_root, len(input_files), len(gt_files)
247
- )
248
- )
249
- input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
250
- gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
251
- intersect = list(set(input_basenames) & set(gt_basenames))
252
- # sort, otherwise each worker may obtain a list[dict] in different order
253
- intersect = sorted(intersect)
254
- logger.warn("Will use their intersection of {} files.".format(len(intersect)))
255
- input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
256
- gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
257
-
258
- logger.info(
259
- "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
260
- )
261
-
262
- dataset_dicts = []
263
- for (img_path, gt_path) in zip(input_files, gt_files):
264
- record = {}
265
- record["file_name"] = img_path
266
- record["sem_seg_file_name"] = gt_path
267
- dataset_dicts.append(record)
268
-
269
- return dataset_dicts
270
-
271
-
272
- def convert_to_coco_dict(dataset_name):
273
- """
274
- Convert an instance detection/segmentation or keypoint detection dataset
275
- in detectron2's standard format into COCO json format.
276
-
277
- Generic dataset description can be found here:
278
- https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
279
-
280
- COCO data format description can be found here:
281
- http://cocodataset.org/#format-data
282
-
283
- Args:
284
- dataset_name (str):
285
- name of the source dataset
286
- Must be registered in DatastCatalog and in detectron2's standard format.
287
- Must have corresponding metadata "thing_classes"
288
- Returns:
289
- coco_dict: serializable dict in COCO json format
290
- """
291
-
292
- dataset_dicts = DatasetCatalog.get(dataset_name)
293
- metadata = MetadataCatalog.get(dataset_name)
294
-
295
- # unmap the category mapping ids for COCO
296
- if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
297
- reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
298
- reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
299
- else:
300
- reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
301
-
302
- categories = [
303
- {"id": reverse_id_mapper(id), "name": name}
304
- for id, name in enumerate(metadata.thing_classes)
305
- ]
306
-
307
- logger.info("Converting dataset dicts into COCO format")
308
- coco_images = []
309
- coco_annotations = []
310
-
311
- for image_id, image_dict in enumerate(dataset_dicts):
312
- coco_image = {
313
- "id": image_dict.get("image_id", image_id),
314
- "width": image_dict["width"],
315
- "height": image_dict["height"],
316
- "file_name": image_dict["file_name"],
317
- }
318
- coco_images.append(coco_image)
319
-
320
- anns_per_image = image_dict["annotations"]
321
- for annotation in anns_per_image:
322
- # create a new dict with only COCO fields
323
- coco_annotation = {}
324
-
325
- # COCO requirement: XYWH box format
326
- bbox = annotation["bbox"]
327
- bbox_mode = annotation["bbox_mode"]
328
- bbox = BoxMode.convert(bbox, bbox_mode, BoxMode.XYWH_ABS)
329
-
330
- # COCO requirement: instance area
331
- if "segmentation" in annotation:
332
- # Computing areas for instances by counting the pixels
333
- segmentation = annotation["segmentation"]
334
- # TODO: check segmentation type: RLE, BinaryMask or Polygon
335
- if isinstance(segmentation, list):
336
- polygons = PolygonMasks([segmentation])
337
- area = polygons.area()[0].item()
338
- elif isinstance(segmentation, dict): # RLE
339
- area = mask_util.area(segmentation)
340
- else:
341
- raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
342
- else:
343
- # Computing areas using bounding boxes
344
- bbox_xy = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
345
- area = Boxes([bbox_xy]).area()[0].item()
346
-
347
- if "keypoints" in annotation:
348
- keypoints = annotation["keypoints"] # list[int]
349
- for idx, v in enumerate(keypoints):
350
- if idx % 3 != 2:
351
- # COCO's segmentation coordinates are floating points in [0, H or W],
352
- # but keypoint coordinates are integers in [0, H-1 or W-1]
353
- # For COCO format consistency we substract 0.5
354
- # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
355
- keypoints[idx] = v - 0.5
356
- if "num_keypoints" in annotation:
357
- num_keypoints = annotation["num_keypoints"]
358
- else:
359
- num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
360
-
361
- # COCO requirement:
362
- # linking annotations to images
363
- # "id" field must start with 1
364
- coco_annotation["id"] = len(coco_annotations) + 1
365
- coco_annotation["image_id"] = coco_image["id"]
366
- coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
367
- coco_annotation["area"] = area
368
- coco_annotation["iscrowd"] = annotation.get("iscrowd", 0)
369
- coco_annotation["category_id"] = reverse_id_mapper(annotation["category_id"])
370
-
371
- # Add optional fields
372
- if "keypoints" in annotation:
373
- coco_annotation["keypoints"] = keypoints
374
- coco_annotation["num_keypoints"] = num_keypoints
375
-
376
- if "segmentation" in annotation:
377
- coco_annotation["segmentation"] = annotation["segmentation"]
378
-
379
- coco_annotations.append(coco_annotation)
380
-
381
- logger.info(
382
- "Conversion finished, "
383
- f"num images: {len(coco_images)}, num annotations: {len(coco_annotations)}"
384
- )
385
-
386
- info = {
387
- "date_created": str(datetime.datetime.now()),
388
- "description": "Automatically generated COCO json file for Detectron2.",
389
- }
390
- coco_dict = {
391
- "info": info,
392
- "images": coco_images,
393
- "annotations": coco_annotations,
394
- "categories": categories,
395
- "licenses": None,
396
- }
397
- return coco_dict
398
-
399
-
400
- def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
401
- """
402
- Converts dataset into COCO format and saves it to a json file.
403
- dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
404
-
405
- Args:
406
- dataset_name:
407
- reference from the config file to the catalogs
408
- must be registered in DatasetCatalog and in detectron2's standard format
409
- output_file: path of json file that will be saved to
410
- allow_cached: if json file is already present then skip conversion
411
- """
412
-
413
- # TODO: The dataset or the conversion script *may* change,
414
- # a checksum would be useful for validating the cached data
415
-
416
- PathManager.mkdirs(os.path.dirname(output_file))
417
- with file_lock(output_file):
418
- if PathManager.exists(output_file) and allow_cached:
419
- logger.warning(
420
- f"Using previously cached COCO format annotations at '{output_file}'. "
421
- "You need to clear the cache file if your dataset has been modified."
422
- )
423
- else:
424
- logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
425
- coco_dict = convert_to_coco_dict(dataset_name)
426
-
427
- logger.info(f"Caching COCO format annotations at '{output_file}' ...")
428
- with PathManager.open(output_file, "w") as f:
429
- json.dump(coco_dict, f)
430
-
431
-
432
- if __name__ == "__main__":
433
- """
434
- Test the COCO json dataset loader.
435
-
436
- Usage:
437
- python -m detectron2.data.datasets.coco \
438
- path/to/json path/to/image_root dataset_name
439
-
440
- "dataset_name" can be "coco_2014_minival_100", or other
441
- pre-registered ones
442
- """
443
- from detectron2.utils.logger import setup_logger
444
- from detectron2.utils.visualizer import Visualizer
445
- import detectron2.data.datasets # noqa # add pre-defined metadata
446
- import sys
447
-
448
- logger = setup_logger(name=__name__)
449
- assert sys.argv[3] in DatasetCatalog.list()
450
- meta = MetadataCatalog.get(sys.argv[3])
451
-
452
- dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
453
- logger.info("Done loading {} samples.".format(len(dicts)))
454
-
455
- dirname = "coco-data-vis"
456
- os.makedirs(dirname, exist_ok=True)
457
- for d in dicts:
458
- img = np.array(Image.open(d["file_name"]))
459
- visualizer = Visualizer(img, metadata=meta)
460
- vis = visualizer.draw_dataset_dict(d)
461
- fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
462
- vis.save(fpath)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/event.h DELETED
@@ -1,26 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file thrust/event.h
18
- * \brief `thrust::event`, an asynchronous handle type.
19
- */
20
-
21
- #pragma once
22
-
23
- #include <thrust/future.h>
24
-
25
- // TODO: Actually separate `<thrust/future.h>` into two headers.
26
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/merge.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a fill of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the merge.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch merge
24
-
25
- #include <thrust/system/detail/sequential/merge.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/merge.h>
32
- #include <thrust/system/cuda/detail/merge.h>
33
- #include <thrust/system/omp/detail/merge.h>
34
- #include <thrust/system/tbb/detail/merge.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_MERGE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/merge.h>
38
- #include __THRUST_HOST_SYSTEM_MERGE_HEADER
39
- #undef __THRUST_HOST_SYSTEM_MERGE_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_MERGE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/merge.h>
42
- #include __THRUST_DEVICE_SYSTEM_MERGE_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_MERGE_HEADER
44
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/count.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special count functions
22
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Text2Human/Text2Human/utils/logger.py DELETED
@@ -1,112 +0,0 @@
1
- import datetime
2
- import logging
3
- import time
4
-
5
-
6
- class MessageLogger():
7
- """Message logger for printing.
8
-
9
- Args:
10
- opt (dict): Config. It contains the following keys:
11
- name (str): Exp name.
12
- logger (dict): Contains 'print_freq' (str) for logger interval.
13
- train (dict): Contains 'niter' (int) for total iters.
14
- use_tb_logger (bool): Use tensorboard logger.
15
- start_iter (int): Start iter. Default: 1.
16
- tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
17
- """
18
-
19
- def __init__(self, opt, start_iter=1, tb_logger=None):
20
- self.exp_name = opt['name']
21
- self.interval = opt['print_freq']
22
- self.start_iter = start_iter
23
- self.max_iters = opt['max_iters']
24
- self.use_tb_logger = opt['use_tb_logger']
25
- self.tb_logger = tb_logger
26
- self.start_time = time.time()
27
- self.logger = get_root_logger()
28
-
29
- def __call__(self, log_vars):
30
- """Format logging message.
31
-
32
- Args:
33
- log_vars (dict): It contains the following keys:
34
- epoch (int): Epoch number.
35
- iter (int): Current iter.
36
- lrs (list): List for learning rates.
37
-
38
- time (float): Iter time.
39
- data_time (float): Data time for each iter.
40
- """
41
- # epoch, iter, learning rates
42
- epoch = log_vars.pop('epoch')
43
- current_iter = log_vars.pop('iter')
44
- lrs = log_vars.pop('lrs')
45
-
46
- message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, '
47
- f'iter:{current_iter:8,d}, lr:(')
48
- for v in lrs:
49
- message += f'{v:.3e},'
50
- message += ')] '
51
-
52
- # time and estimated time
53
- if 'time' in log_vars.keys():
54
- iter_time = log_vars.pop('time')
55
- data_time = log_vars.pop('data_time')
56
-
57
- total_time = time.time() - self.start_time
58
- time_sec_avg = total_time / (current_iter - self.start_iter + 1)
59
- eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
60
- eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
61
- message += f'[eta: {eta_str}, '
62
- message += f'time: {iter_time:.3f}, data_time: {data_time:.3f}] '
63
-
64
- # other items, especially losses
65
- for k, v in log_vars.items():
66
- message += f'{k}: {v:.4e} '
67
- # tensorboard logger
68
- if self.use_tb_logger and 'debug' not in self.exp_name:
69
- self.tb_logger.add_scalar(k, v, current_iter)
70
-
71
- self.logger.info(message)
72
-
73
-
74
- def init_tb_logger(log_dir):
75
- from torch.utils.tensorboard import SummaryWriter
76
- tb_logger = SummaryWriter(log_dir=log_dir)
77
- return tb_logger
78
-
79
-
80
- def get_root_logger(logger_name='base', log_level=logging.INFO, log_file=None):
81
- """Get the root logger.
82
-
83
- The logger will be initialized if it has not been initialized. By default a
84
- StreamHandler will be added. If `log_file` is specified, a FileHandler will
85
- also be added.
86
-
87
- Args:
88
- logger_name (str): root logger name. Default: base.
89
- log_file (str | None): The log filename. If specified, a FileHandler
90
- will be added to the root logger.
91
- log_level (int): The root logger level. Note that only the process of
92
- rank 0 is affected, while other processes will set the level to
93
- "Error" and be silent most of the time.
94
-
95
- Returns:
96
- logging.Logger: The root logger.
97
- """
98
- logger = logging.getLogger(logger_name)
99
- # if the logger has been initialized, just return it
100
- if logger.hasHandlers():
101
- return logger
102
-
103
- format_str = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s'
104
- logging.basicConfig(format=format_str, level=log_level)
105
-
106
- if log_file is not None:
107
- file_handler = logging.FileHandler(log_file, 'w')
108
- file_handler.setFormatter(logging.Formatter(format_str))
109
- file_handler.setLevel(log_level)
110
- logger.addHandler(file_handler)
111
-
112
- return logger
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/cwalt/kmedoid.py DELETED
@@ -1,55 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- """
4
- Created on Fri May 20 15:18:56 2022
5
-
6
- @author: dinesh
7
- """
8
-
9
- import numpy as np
10
- import math
11
-
12
- def kMedoids(D, k, tmax=100):
13
- # determine dimensions of distance matrix D
14
- m, n = D.shape
15
-
16
- np.fill_diagonal(D, math.inf)
17
-
18
- if k > n:
19
- raise Exception('too many medoids')
20
- # randomly initialize an array of k medoid indices
21
- M = np.arange(n)
22
- np.random.shuffle(M)
23
- M = np.sort(M[:k])
24
-
25
- # create a copy of the array of medoid indices
26
- Mnew = np.copy(M)
27
-
28
- # initialize a dictionary to represent clusters
29
- C = {}
30
- for t in range(tmax):
31
- # determine clusters, i. e. arrays of data indices
32
- J = np.argmin(D[:,M], axis=1)
33
-
34
- for kappa in range(k):
35
- C[kappa] = np.where(J==kappa)[0]
36
- # update cluster medoids
37
- for kappa in range(k):
38
- J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
39
- j = np.argmin(J)
40
- Mnew[kappa] = C[kappa][j]
41
- np.sort(Mnew)
42
- # check for convergence
43
- if np.array_equal(M, Mnew):
44
- break
45
- M = np.copy(Mnew)
46
- else:
47
- # final update of cluster memberships
48
- J = np.argmin(D[:,M], axis=1)
49
- for kappa in range(k):
50
- C[kappa] = np.where(J==kappa)[0]
51
-
52
- np.fill_diagonal(D, 0)
53
-
54
- # return results
55
- return M, C
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/unicl-zero-shot-img-recog/model/text_encoder/hf_model.py DELETED
@@ -1,27 +0,0 @@
1
- import logging
2
-
3
- from transformers import AutoConfig
4
- from transformers import AutoModel
5
-
6
- from .registry import register_lang_encoder
7
-
8
- logger = logging.getLogger(__name__)
9
-
10
-
11
- @register_lang_encoder
12
- def lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
13
-
14
- hf_model = None
15
- if config_encoder['LOAD_PRETRAINED']:
16
- hf_model = AutoModel.from_pretrained(config_encoder['HF_MODEL'])
17
- else:
18
- hf_config = AutoConfig.from_pretrained(config_encoder['HF_MODEL'])
19
-
20
- if 'CONFIG_OVERRIDE' in config_encoder:
21
- logger.warning(f'Override config: {config_encoder["CONFIG_OVERRIDE"]}')
22
- hf_config.update(config_encoder['CONFIG_OVERRIDE'])
23
-
24
- logger.info(f'HF model config: {hf_config}')
25
- hf_model = AutoModel.from_config(hf_config)
26
-
27
- return hf_model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CatNika/Asian_Proxy/greeting.md DELETED
@@ -1,13 +0,0 @@
1
- # I don't appear on any sites, don't impersonate me :< 😾 I'm not active on the 4chan site.
2
-
3
- ---
4
-
5
-
6
- # If you can verify that you're an Asian woman, I'll let you into my Discord. But please don't send me any body pictures. I'm not asking for that kind of thing. But don't try to deceive me with lies. I'll continue the conversation to confirm if you're telling the truth. Due to recent fucking crappy banwaves, my keys have been dying quite a bit, so I only plan to share it with people I want to.
7
-
8
- # Contact with me: [email protected]
9
-
10
- <div style="display: flex; align-items: center; justify-content: center; background-color: #f5e6e6; padding: 20px; border-radius: 10px; box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.5);">
11
- <iframe width="800" height="500" src="https://www.youtube.com/embed/SX_ViT4Ra7k?autoplay=1&rel=0&loop=1&controls=0" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen style="border-radius: 5px;"></iframe>
12
- <p style="color: #333; margin-left: 20px; font-size: 16px; font-weight: bold;">While you're here, immerse yourself in some of my favorite song recommendations.</p>
13
- </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cpp4App/Cpp4App/CDM/result_processing/evaluation.py DELETED
@@ -1,208 +0,0 @@
1
- import json
2
- import numpy as np
3
- import cv2
4
- from glob import glob
5
- from os.path import join as pjoin
6
- from tqdm import tqdm
7
-
8
-
9
- def resize_label(bboxes, d_height, gt_height, bias=0):
10
- bboxes_new = []
11
- scale = gt_height / d_height
12
- for bbox in bboxes:
13
- bbox = [int(b * scale + bias) for b in bbox]
14
- bboxes_new.append(bbox)
15
- return bboxes_new
16
-
17
-
18
- def draw_bounding_box(org, corners, color=(0, 255, 0), line=2, show=False):
19
- board = org.copy()
20
- for i in range(len(corners)):
21
- board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)
22
- if show:
23
- cv2.imshow('a', cv2.resize(board, (500, 1000)))
24
- cv2.waitKey(0)
25
- return board
26
-
27
-
28
- def load_detect_result_json(reslut_file_root, shrink=4):
29
- def is_bottom_or_top(corner):
30
- column_min, row_min, column_max, row_max = corner
31
- if row_max < 36 or row_min > 725:
32
- return True
33
- return False
34
-
35
- result_files = glob(pjoin(reslut_file_root, '*.json'))
36
- compos_reform = {}
37
- print('Loading %d detection results' % len(result_files))
38
- for reslut_file in tqdm(result_files):
39
- img_name = reslut_file.split('\\')[-1].split('.')[0]
40
- compos = json.load(open(reslut_file, 'r'))['compos']
41
- for compo in compos:
42
- if compo['column_max'] - compo['column_min'] < 10 or compo['row_max'] - compo['row_min'] < 10:
43
- continue
44
- if is_bottom_or_top((compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max'])):
45
- continue
46
- if img_name not in compos_reform:
47
- compos_reform[img_name] = {'bboxes': [[compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink]],
48
- 'categories': [compo['category']]}
49
- else:
50
- compos_reform[img_name]['bboxes'].append([compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink])
51
- compos_reform[img_name]['categories'].append(compo['category'])
52
- return compos_reform
53
-
54
-
55
- def load_ground_truth_json(gt_file):
56
- def get_img_by_id(img_id):
57
- for image in images:
58
- if image['id'] == img_id:
59
- return image['file_name'].split('/')[-1][:-4], (image['height'], image['width'])
60
-
61
- def cvt_bbox(bbox):
62
- '''
63
- :param bbox: [x,y,width,height]
64
- :return: [col_min, row_min, col_max, row_max]
65
- '''
66
- bbox = [int(b) for b in bbox]
67
- return [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
68
-
69
- data = json.load(open(gt_file, 'r'))
70
- images = data['images']
71
- annots = data['annotations']
72
- compos = {}
73
- print('Loading %d ground truth' % len(annots))
74
- for annot in tqdm(annots):
75
- img_name, size = get_img_by_id(annot['image_id'])
76
- if img_name not in compos:
77
- compos[img_name] = {'bboxes': [cvt_bbox(annot['bbox'])], 'categories': [annot['category_id']], 'size': size}
78
- else:
79
- compos[img_name]['bboxes'].append(cvt_bbox(annot['bbox']))
80
- compos[img_name]['categories'].append(annot['category_id'])
81
- return compos
82
-
83
-
84
- def eval(detection, ground_truth, img_root, show=True, no_text=False, only_text=False):
85
- def compo_filter(compos, flag):
86
- if not no_text and not only_text:
87
- return compos
88
- compos_new = {'bboxes': [], 'categories': []}
89
- for k, category in enumerate(compos['categories']):
90
- if only_text:
91
- if flag == 'det' and category != 'TextView':
92
- continue
93
- if flag == 'gt' and int(category) != 14:
94
- continue
95
- elif no_text:
96
- if flag == 'det' and category == 'TextView':
97
- continue
98
- if flag == 'gt' and int(category) == 14:
99
- continue
100
-
101
- compos_new['bboxes'].append(compos['bboxes'][k])
102
- compos_new['categories'].append(category)
103
- return compos_new
104
-
105
- def match(org, d_bbox, gt_bboxes, matched):
106
- '''
107
- :param matched: mark if the ground truth component is matched
108
- :param d_bbox: [col_min, row_min, col_max, row_max]
109
- :param gt_bboxes: list of ground truth [[col_min, row_min, col_max, row_max]]
110
- :return: Boolean: if IOU large enough or detected box is contained by ground truth
111
- '''
112
- area_d = (d_bbox[2] - d_bbox[0]) * (d_bbox[3] - d_bbox[1])
113
- for i, gt_bbox in enumerate(gt_bboxes):
114
- if matched[i] == 0:
115
- continue
116
- area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])
117
- col_min = max(d_bbox[0], gt_bbox[0])
118
- row_min = max(d_bbox[1], gt_bbox[1])
119
- col_max = min(d_bbox[2], gt_bbox[2])
120
- row_max = min(d_bbox[3], gt_bbox[3])
121
- # if not intersected, area intersection should be 0
122
- w = max(0, col_max - col_min)
123
- h = max(0, row_max - row_min)
124
- area_inter = w * h
125
- if area_inter == 0:
126
- continue
127
- iod = area_inter / area_d
128
- iou = area_inter / (area_d + area_gt - area_inter)
129
- # if show:
130
- # cv2.putText(org, (str(round(iou, 2)) + ',' + str(round(iod, 2))), (d_bbox[0], d_bbox[1]),
131
- # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
132
-
133
- if iou > 0.9 or iod == 1:
134
- matched[i] = 0
135
- return True
136
- return False
137
-
138
- amount = len(detection)
139
- TP, FP, FN = 0, 0, 0
140
- pres, recalls, f1s = [], [], []
141
- for i, image_id in enumerate(detection):
142
- TP_this, FP_this, FN_this = 0, 0, 0
143
- img = cv2.imread(pjoin(img_root, image_id + '.jpg'))
144
- d_compos = detection[image_id]
145
- if image_id not in ground_truth:
146
- continue
147
- gt_compos = ground_truth[image_id]
148
-
149
- org_height = gt_compos['size'][0]
150
-
151
- d_compos = compo_filter(d_compos, 'det')
152
- gt_compos = compo_filter(gt_compos, 'gt')
153
-
154
- d_compos['bboxes'] = resize_label(d_compos['bboxes'], 800, org_height)
155
- matched = np.ones(len(gt_compos['bboxes']), dtype=int)
156
- for d_bbox in d_compos['bboxes']:
157
- if match(img, d_bbox, gt_compos['bboxes'], matched):
158
- TP += 1
159
- TP_this += 1
160
- else:
161
- FP += 1
162
- FP_this += 1
163
- FN += sum(matched)
164
- FN_this = sum(matched)
165
-
166
- try:
167
- pre_this = TP_this / (TP_this + FP_this)
168
- recall_this = TP_this / (TP_this + FN_this)
169
- f1_this = 2 * (pre_this * recall_this) / (pre_this + recall_this)
170
- except:
171
- print('empty')
172
- continue
173
-
174
- pres.append(pre_this)
175
- recalls.append(recall_this)
176
- f1s.append(f1_this)
177
- if show:
178
- print(image_id + '.jpg')
179
- print('[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f' % (
180
- i, amount, TP_this, FP_this, FN_this, pre_this, recall_this))
181
- # cv2.imshow('org', cv2.resize(img, (500, 1000)))
182
- broad = draw_bounding_box(img, d_compos['bboxes'], color=(255, 0, 0), line=3)
183
- draw_bounding_box(broad, gt_compos['bboxes'], color=(0, 0, 255), show=True, line=2)
184
-
185
- if i % 200 == 0:
186
- precision = TP / (TP + FP)
187
- recall = TP / (TP + FN)
188
- f1 = 2 * (precision * recall) / (precision + recall)
189
- print(
190
- '[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
191
-
192
- precision = TP / (TP + FP)
193
- recall = TP / (TP + FN)
194
- print('[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
195
- # print("Average precision:%.4f; Average recall:%.3f" % (sum(pres)/len(pres), sum(recalls)/len(recalls)))
196
-
197
- return pres, recalls, f1s
198
-
199
-
200
- no_text = True
201
- only_text = False
202
-
203
- # detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\ip')
204
- detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\merge')
205
- # detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\merge')
206
- # detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\ocr')
207
- gt = load_ground_truth_json('E:\\Mulong\\Datasets\\rico\\instances_test.json')
208
- eval(detect, gt, 'E:\\Mulong\\Datasets\\rico\\combined', show=False, no_text=no_text, only_text=only_text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DHEIVER/Alzheimer/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Alzheimer
3
- emoji: 🐠
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.29.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/voltLib/voltToFea.py DELETED
@@ -1,726 +0,0 @@
1
- """\
2
- MS VOLT ``.vtp`` to AFDKO ``.fea`` OpenType Layout converter.
3
-
4
- Usage
5
- -----
6
-
7
- To convert a VTP project file:
8
-
9
-
10
- $ fonttools voltLib.voltToFea input.vtp output.fea
11
-
12
- It is also possible convert font files with `TSIV` table (as saved from Volt),
13
- in this case the glyph names used in the Volt project will be mapped to the
14
- actual glyph names in the font files when written to the feature file:
15
-
16
- $ fonttools voltLib.voltToFea input.ttf output.fea
17
-
18
- The ``--quiet`` option can be used to suppress warnings.
19
-
20
- The ``--traceback`` can be used to get Python traceback in case of exceptions,
21
- instead of suppressing the traceback.
22
-
23
-
24
- Limitations
25
- -----------
26
-
27
- * Not all VOLT features are supported, the script will error if it it
28
- encounters something it does not understand. Please report an issue if this
29
- happens.
30
- * AFDKO feature file syntax for mark positioning is awkward and does not allow
31
- setting the mark coverage. It also defines mark anchors globally, as a result
32
- some mark positioning lookups might cover many marks than what was in the VOLT
33
- file. This should not be an issue in practice, but if it is then the only way
34
- is to modify the VOLT file or the generated feature file manually to use unique
35
- mark anchors for each lookup.
36
- * VOLT allows subtable breaks in any lookup type, but AFDKO feature file
37
- implementations vary in their support; currently AFDKO’s makeOTF supports
38
- subtable breaks in pair positioning lookups only, while FontTools’ feaLib
39
- support it for most substitution lookups and only some positioning lookups.
40
- """
41
-
42
- import logging
43
- import re
44
- from io import StringIO
45
-
46
- from fontTools.feaLib import ast
47
- from fontTools.ttLib import TTFont, TTLibError
48
- from fontTools.voltLib import ast as VAst
49
- from fontTools.voltLib.parser import Parser as VoltParser
50
-
51
- log = logging.getLogger("fontTools.voltLib.voltToFea")
52
-
53
- TABLES = ["GDEF", "GSUB", "GPOS"]
54
-
55
-
56
- class MarkClassDefinition(ast.MarkClassDefinition):
57
- def asFea(self, indent=""):
58
- res = ""
59
- if not getattr(self, "used", False):
60
- res += "#"
61
- res += ast.MarkClassDefinition.asFea(self, indent)
62
- return res
63
-
64
-
65
- # For sorting voltLib.ast.GlyphDefinition, see its use below.
66
- class Group:
67
- def __init__(self, group):
68
- self.name = group.name.lower()
69
- self.groups = [
70
- x.group.lower() for x in group.enum.enum if isinstance(x, VAst.GroupName)
71
- ]
72
-
73
- def __lt__(self, other):
74
- if self.name in other.groups:
75
- return True
76
- if other.name in self.groups:
77
- return False
78
- if self.groups and not other.groups:
79
- return False
80
- if not self.groups and other.groups:
81
- return True
82
-
83
-
84
- class VoltToFea:
85
- _NOT_LOOKUP_NAME_RE = re.compile(r"[^A-Za-z_0-9.]")
86
- _NOT_CLASS_NAME_RE = re.compile(r"[^A-Za-z_0-9.\-]")
87
-
88
- def __init__(self, file_or_path, font=None):
89
- self._file_or_path = file_or_path
90
- self._font = font
91
-
92
- self._glyph_map = {}
93
- self._glyph_order = None
94
-
95
- self._gdef = {}
96
- self._glyphclasses = {}
97
- self._features = {}
98
- self._lookups = {}
99
-
100
- self._marks = set()
101
- self._ligatures = {}
102
-
103
- self._markclasses = {}
104
- self._anchors = {}
105
-
106
- self._settings = {}
107
-
108
- self._lookup_names = {}
109
- self._class_names = {}
110
-
111
- def _lookupName(self, name):
112
- if name not in self._lookup_names:
113
- res = self._NOT_LOOKUP_NAME_RE.sub("_", name)
114
- while res in self._lookup_names.values():
115
- res += "_"
116
- self._lookup_names[name] = res
117
- return self._lookup_names[name]
118
-
119
- def _className(self, name):
120
- if name not in self._class_names:
121
- res = self._NOT_CLASS_NAME_RE.sub("_", name)
122
- while res in self._class_names.values():
123
- res += "_"
124
- self._class_names[name] = res
125
- return self._class_names[name]
126
-
127
- def _collectStatements(self, doc, tables):
128
- # Collect and sort group definitions first, to make sure a group
129
- # definition that references other groups comes after them since VOLT
130
- # does not enforce such ordering, and feature file require it.
131
- groups = [s for s in doc.statements if isinstance(s, VAst.GroupDefinition)]
132
- for statement in sorted(groups, key=lambda x: Group(x)):
133
- self._groupDefinition(statement)
134
-
135
- for statement in doc.statements:
136
- if isinstance(statement, VAst.GlyphDefinition):
137
- self._glyphDefinition(statement)
138
- elif isinstance(statement, VAst.AnchorDefinition):
139
- if "GPOS" in tables:
140
- self._anchorDefinition(statement)
141
- elif isinstance(statement, VAst.SettingDefinition):
142
- self._settingDefinition(statement)
143
- elif isinstance(statement, VAst.GroupDefinition):
144
- pass # Handled above
145
- elif isinstance(statement, VAst.ScriptDefinition):
146
- self._scriptDefinition(statement)
147
- elif not isinstance(statement, VAst.LookupDefinition):
148
- raise NotImplementedError(statement)
149
-
150
- # Lookup definitions need to be handled last as they reference glyph
151
- # and mark classes that might be defined after them.
152
- for statement in doc.statements:
153
- if isinstance(statement, VAst.LookupDefinition):
154
- if statement.pos and "GPOS" not in tables:
155
- continue
156
- if statement.sub and "GSUB" not in tables:
157
- continue
158
- self._lookupDefinition(statement)
159
-
160
- def _buildFeatureFile(self, tables):
161
- doc = ast.FeatureFile()
162
- statements = doc.statements
163
-
164
- if self._glyphclasses:
165
- statements.append(ast.Comment("# Glyph classes"))
166
- statements.extend(self._glyphclasses.values())
167
-
168
- if self._markclasses:
169
- statements.append(ast.Comment("\n# Mark classes"))
170
- statements.extend(c[1] for c in sorted(self._markclasses.items()))
171
-
172
- if self._lookups:
173
- statements.append(ast.Comment("\n# Lookups"))
174
- for lookup in self._lookups.values():
175
- statements.extend(getattr(lookup, "targets", []))
176
- statements.append(lookup)
177
-
178
- # Prune features
179
- features = self._features.copy()
180
- for ftag in features:
181
- scripts = features[ftag]
182
- for stag in scripts:
183
- langs = scripts[stag]
184
- for ltag in langs:
185
- langs[ltag] = [l for l in langs[ltag] if l.lower() in self._lookups]
186
- scripts[stag] = {t: l for t, l in langs.items() if l}
187
- features[ftag] = {t: s for t, s in scripts.items() if s}
188
- features = {t: f for t, f in features.items() if f}
189
-
190
- if features:
191
- statements.append(ast.Comment("# Features"))
192
- for ftag, scripts in features.items():
193
- feature = ast.FeatureBlock(ftag)
194
- stags = sorted(scripts, key=lambda k: 0 if k == "DFLT" else 1)
195
- for stag in stags:
196
- feature.statements.append(ast.ScriptStatement(stag))
197
- ltags = sorted(scripts[stag], key=lambda k: 0 if k == "dflt" else 1)
198
- for ltag in ltags:
199
- include_default = True if ltag == "dflt" else False
200
- feature.statements.append(
201
- ast.LanguageStatement(ltag, include_default=include_default)
202
- )
203
- for name in scripts[stag][ltag]:
204
- lookup = self._lookups[name.lower()]
205
- lookupref = ast.LookupReferenceStatement(lookup)
206
- feature.statements.append(lookupref)
207
- statements.append(feature)
208
-
209
- if self._gdef and "GDEF" in tables:
210
- classes = []
211
- for name in ("BASE", "MARK", "LIGATURE", "COMPONENT"):
212
- if name in self._gdef:
213
- classname = "GDEF_" + name.lower()
214
- glyphclass = ast.GlyphClassDefinition(classname, self._gdef[name])
215
- statements.append(glyphclass)
216
- classes.append(ast.GlyphClassName(glyphclass))
217
- else:
218
- classes.append(None)
219
-
220
- gdef = ast.TableBlock("GDEF")
221
- gdef.statements.append(ast.GlyphClassDefStatement(*classes))
222
- statements.append(gdef)
223
-
224
- return doc
225
-
226
- def convert(self, tables=None):
227
- doc = VoltParser(self._file_or_path).parse()
228
-
229
- if tables is None:
230
- tables = TABLES
231
- if self._font is not None:
232
- self._glyph_order = self._font.getGlyphOrder()
233
-
234
- self._collectStatements(doc, tables)
235
- fea = self._buildFeatureFile(tables)
236
- return fea.asFea()
237
-
238
- def _glyphName(self, glyph):
239
- try:
240
- name = glyph.glyph
241
- except AttributeError:
242
- name = glyph
243
- return ast.GlyphName(self._glyph_map.get(name, name))
244
-
245
- def _groupName(self, group):
246
- try:
247
- name = group.group
248
- except AttributeError:
249
- name = group
250
- return ast.GlyphClassName(self._glyphclasses[name.lower()])
251
-
252
- def _coverage(self, coverage):
253
- items = []
254
- for item in coverage:
255
- if isinstance(item, VAst.GlyphName):
256
- items.append(self._glyphName(item))
257
- elif isinstance(item, VAst.GroupName):
258
- items.append(self._groupName(item))
259
- elif isinstance(item, VAst.Enum):
260
- items.append(self._enum(item))
261
- elif isinstance(item, VAst.Range):
262
- items.append((item.start, item.end))
263
- else:
264
- raise NotImplementedError(item)
265
- return items
266
-
267
- def _enum(self, enum):
268
- return ast.GlyphClass(self._coverage(enum.enum))
269
-
270
- def _context(self, context):
271
- out = []
272
- for item in context:
273
- coverage = self._coverage(item)
274
- if not isinstance(coverage, (tuple, list)):
275
- coverage = [coverage]
276
- out.extend(coverage)
277
- return out
278
-
279
- def _groupDefinition(self, group):
280
- name = self._className(group.name)
281
- glyphs = self._enum(group.enum)
282
- glyphclass = ast.GlyphClassDefinition(name, glyphs)
283
-
284
- self._glyphclasses[group.name.lower()] = glyphclass
285
-
286
- def _glyphDefinition(self, glyph):
287
- try:
288
- self._glyph_map[glyph.name] = self._glyph_order[glyph.id]
289
- except TypeError:
290
- pass
291
-
292
- if glyph.type in ("BASE", "MARK", "LIGATURE", "COMPONENT"):
293
- if glyph.type not in self._gdef:
294
- self._gdef[glyph.type] = ast.GlyphClass()
295
- self._gdef[glyph.type].glyphs.append(self._glyphName(glyph.name))
296
-
297
- if glyph.type == "MARK":
298
- self._marks.add(glyph.name)
299
- elif glyph.type == "LIGATURE":
300
- self._ligatures[glyph.name] = glyph.components
301
-
302
- def _scriptDefinition(self, script):
303
- stag = script.tag
304
- for lang in script.langs:
305
- ltag = lang.tag
306
- for feature in lang.features:
307
- lookups = {l.split("\\")[0]: True for l in feature.lookups}
308
- ftag = feature.tag
309
- if ftag not in self._features:
310
- self._features[ftag] = {}
311
- if stag not in self._features[ftag]:
312
- self._features[ftag][stag] = {}
313
- assert ltag not in self._features[ftag][stag]
314
- self._features[ftag][stag][ltag] = lookups.keys()
315
-
316
- def _settingDefinition(self, setting):
317
- if setting.name.startswith("COMPILER_"):
318
- self._settings[setting.name] = setting.value
319
- else:
320
- log.warning(f"Unsupported setting ignored: {setting.name}")
321
-
322
- def _adjustment(self, adjustment):
323
- adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment
324
-
325
- adv_device = adv_adjust_by and adv_adjust_by.items() or None
326
- dx_device = dx_adjust_by and dx_adjust_by.items() or None
327
- dy_device = dy_adjust_by and dy_adjust_by.items() or None
328
-
329
- return ast.ValueRecord(
330
- xPlacement=dx,
331
- yPlacement=dy,
332
- xAdvance=adv,
333
- xPlaDevice=dx_device,
334
- yPlaDevice=dy_device,
335
- xAdvDevice=adv_device,
336
- )
337
-
338
- def _anchor(self, adjustment):
339
- adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by = adjustment
340
-
341
- assert not adv_adjust_by
342
- dx_device = dx_adjust_by and dx_adjust_by.items() or None
343
- dy_device = dy_adjust_by and dy_adjust_by.items() or None
344
-
345
- return ast.Anchor(
346
- dx or 0,
347
- dy or 0,
348
- xDeviceTable=dx_device or None,
349
- yDeviceTable=dy_device or None,
350
- )
351
-
352
- def _anchorDefinition(self, anchordef):
353
- anchorname = anchordef.name
354
- glyphname = anchordef.glyph_name
355
- anchor = self._anchor(anchordef.pos)
356
-
357
- if anchorname.startswith("MARK_"):
358
- name = "_".join(anchorname.split("_")[1:])
359
- markclass = ast.MarkClass(self._className(name))
360
- glyph = self._glyphName(glyphname)
361
- markdef = MarkClassDefinition(markclass, anchor, glyph)
362
- self._markclasses[(glyphname, anchorname)] = markdef
363
- else:
364
- if glyphname not in self._anchors:
365
- self._anchors[glyphname] = {}
366
- if anchorname not in self._anchors[glyphname]:
367
- self._anchors[glyphname][anchorname] = {}
368
- self._anchors[glyphname][anchorname][anchordef.component] = anchor
369
-
370
- def _gposLookup(self, lookup, fealookup):
371
- statements = fealookup.statements
372
-
373
- pos = lookup.pos
374
- if isinstance(pos, VAst.PositionAdjustPairDefinition):
375
- for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items():
376
- coverage_1 = pos.coverages_1[idx1 - 1]
377
- coverage_2 = pos.coverages_2[idx2 - 1]
378
-
379
- # If not both are groups, use “enum pos” otherwise makeotf will
380
- # fail.
381
- enumerated = False
382
- for item in coverage_1 + coverage_2:
383
- if not isinstance(item, VAst.GroupName):
384
- enumerated = True
385
-
386
- glyphs1 = self._coverage(coverage_1)
387
- glyphs2 = self._coverage(coverage_2)
388
- record1 = self._adjustment(pos1)
389
- record2 = self._adjustment(pos2)
390
- assert len(glyphs1) == 1
391
- assert len(glyphs2) == 1
392
- statements.append(
393
- ast.PairPosStatement(
394
- glyphs1[0], record1, glyphs2[0], record2, enumerated=enumerated
395
- )
396
- )
397
- elif isinstance(pos, VAst.PositionAdjustSingleDefinition):
398
- for a, b in pos.adjust_single:
399
- glyphs = self._coverage(a)
400
- record = self._adjustment(b)
401
- assert len(glyphs) == 1
402
- statements.append(
403
- ast.SinglePosStatement([(glyphs[0], record)], [], [], False)
404
- )
405
- elif isinstance(pos, VAst.PositionAttachDefinition):
406
- anchors = {}
407
- for marks, classname in pos.coverage_to:
408
- for mark in marks:
409
- # Set actually used mark classes. Basically a hack to get
410
- # around the feature file syntax limitation of making mark
411
- # classes global and not allowing mark positioning to
412
- # specify mark coverage.
413
- for name in mark.glyphSet():
414
- key = (name, "MARK_" + classname)
415
- self._markclasses[key].used = True
416
- markclass = ast.MarkClass(self._className(classname))
417
- for base in pos.coverage:
418
- for name in base.glyphSet():
419
- if name not in anchors:
420
- anchors[name] = []
421
- if classname not in anchors[name]:
422
- anchors[name].append(classname)
423
-
424
- for name in anchors:
425
- components = 1
426
- if name in self._ligatures:
427
- components = self._ligatures[name]
428
-
429
- marks = []
430
- for mark in anchors[name]:
431
- markclass = ast.MarkClass(self._className(mark))
432
- for component in range(1, components + 1):
433
- if len(marks) < component:
434
- marks.append([])
435
- anchor = None
436
- if component in self._anchors[name][mark]:
437
- anchor = self._anchors[name][mark][component]
438
- marks[component - 1].append((anchor, markclass))
439
-
440
- base = self._glyphName(name)
441
- if name in self._marks:
442
- mark = ast.MarkMarkPosStatement(base, marks[0])
443
- elif name in self._ligatures:
444
- mark = ast.MarkLigPosStatement(base, marks)
445
- else:
446
- mark = ast.MarkBasePosStatement(base, marks[0])
447
- statements.append(mark)
448
- elif isinstance(pos, VAst.PositionAttachCursiveDefinition):
449
- # Collect enter and exit glyphs
450
- enter_coverage = []
451
- for coverage in pos.coverages_enter:
452
- for base in coverage:
453
- for name in base.glyphSet():
454
- enter_coverage.append(name)
455
- exit_coverage = []
456
- for coverage in pos.coverages_exit:
457
- for base in coverage:
458
- for name in base.glyphSet():
459
- exit_coverage.append(name)
460
-
461
- # Write enter anchors, also check if the glyph has exit anchor and
462
- # write it, too.
463
- for name in enter_coverage:
464
- glyph = self._glyphName(name)
465
- entry = self._anchors[name]["entry"][1]
466
- exit = None
467
- if name in exit_coverage:
468
- exit = self._anchors[name]["exit"][1]
469
- exit_coverage.pop(exit_coverage.index(name))
470
- statements.append(ast.CursivePosStatement(glyph, entry, exit))
471
-
472
- # Write any remaining exit anchors.
473
- for name in exit_coverage:
474
- glyph = self._glyphName(name)
475
- exit = self._anchors[name]["exit"][1]
476
- statements.append(ast.CursivePosStatement(glyph, None, exit))
477
- else:
478
- raise NotImplementedError(pos)
479
-
480
- def _gposContextLookup(
481
- self, lookup, prefix, suffix, ignore, fealookup, targetlookup
482
- ):
483
- statements = fealookup.statements
484
-
485
- assert not lookup.reversal
486
-
487
- pos = lookup.pos
488
- if isinstance(pos, VAst.PositionAdjustPairDefinition):
489
- for (idx1, idx2), (pos1, pos2) in pos.adjust_pair.items():
490
- glyphs1 = self._coverage(pos.coverages_1[idx1 - 1])
491
- glyphs2 = self._coverage(pos.coverages_2[idx2 - 1])
492
- assert len(glyphs1) == 1
493
- assert len(glyphs2) == 1
494
- glyphs = (glyphs1[0], glyphs2[0])
495
-
496
- if ignore:
497
- statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
498
- else:
499
- lookups = (targetlookup, targetlookup)
500
- statement = ast.ChainContextPosStatement(
501
- prefix, glyphs, suffix, lookups
502
- )
503
- statements.append(statement)
504
- elif isinstance(pos, VAst.PositionAdjustSingleDefinition):
505
- glyphs = [ast.GlyphClass()]
506
- for a, b in pos.adjust_single:
507
- glyph = self._coverage(a)
508
- glyphs[0].extend(glyph)
509
-
510
- if ignore:
511
- statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
512
- else:
513
- statement = ast.ChainContextPosStatement(
514
- prefix, glyphs, suffix, [targetlookup]
515
- )
516
- statements.append(statement)
517
- elif isinstance(pos, VAst.PositionAttachDefinition):
518
- glyphs = [ast.GlyphClass()]
519
- for coverage, _ in pos.coverage_to:
520
- glyphs[0].extend(self._coverage(coverage))
521
-
522
- if ignore:
523
- statement = ast.IgnorePosStatement([(prefix, glyphs, suffix)])
524
- else:
525
- statement = ast.ChainContextPosStatement(
526
- prefix, glyphs, suffix, [targetlookup]
527
- )
528
- statements.append(statement)
529
- else:
530
- raise NotImplementedError(pos)
531
-
532
- def _gsubLookup(self, lookup, prefix, suffix, ignore, chain, fealookup):
533
- statements = fealookup.statements
534
-
535
- sub = lookup.sub
536
- for key, val in sub.mapping.items():
537
- if not key or not val:
538
- path, line, column = sub.location
539
- log.warning(f"{path}:{line}:{column}: Ignoring empty substitution")
540
- continue
541
- statement = None
542
- glyphs = self._coverage(key)
543
- replacements = self._coverage(val)
544
- if ignore:
545
- chain_context = (prefix, glyphs, suffix)
546
- statement = ast.IgnoreSubstStatement([chain_context])
547
- elif isinstance(sub, VAst.SubstitutionSingleDefinition):
548
- assert len(glyphs) == 1
549
- assert len(replacements) == 1
550
- statement = ast.SingleSubstStatement(
551
- glyphs, replacements, prefix, suffix, chain
552
- )
553
- elif isinstance(sub, VAst.SubstitutionReverseChainingSingleDefinition):
554
- assert len(glyphs) == 1
555
- assert len(replacements) == 1
556
- statement = ast.ReverseChainSingleSubstStatement(
557
- prefix, suffix, glyphs, replacements
558
- )
559
- elif isinstance(sub, VAst.SubstitutionMultipleDefinition):
560
- assert len(glyphs) == 1
561
- statement = ast.MultipleSubstStatement(
562
- prefix, glyphs[0], suffix, replacements, chain
563
- )
564
- elif isinstance(sub, VAst.SubstitutionLigatureDefinition):
565
- assert len(replacements) == 1
566
- statement = ast.LigatureSubstStatement(
567
- prefix, glyphs, suffix, replacements[0], chain
568
- )
569
- else:
570
- raise NotImplementedError(sub)
571
- statements.append(statement)
572
-
573
- def _lookupDefinition(self, lookup):
574
- mark_attachement = None
575
- mark_filtering = None
576
-
577
- flags = 0
578
- if lookup.direction == "RTL":
579
- flags |= 1
580
- if not lookup.process_base:
581
- flags |= 2
582
- # FIXME: Does VOLT support this?
583
- # if not lookup.process_ligatures:
584
- # flags |= 4
585
- if not lookup.process_marks:
586
- flags |= 8
587
- elif isinstance(lookup.process_marks, str):
588
- mark_attachement = self._groupName(lookup.process_marks)
589
- elif lookup.mark_glyph_set is not None:
590
- mark_filtering = self._groupName(lookup.mark_glyph_set)
591
-
592
- lookupflags = None
593
- if flags or mark_attachement is not None or mark_filtering is not None:
594
- lookupflags = ast.LookupFlagStatement(
595
- flags, mark_attachement, mark_filtering
596
- )
597
- if "\\" in lookup.name:
598
- # Merge sub lookups as subtables (lookups named “base\sub”),
599
- # makeotf/feaLib will issue a warning and ignore the subtable
600
- # statement if it is not a pairpos lookup, though.
601
- name = lookup.name.split("\\")[0]
602
- if name.lower() not in self._lookups:
603
- fealookup = ast.LookupBlock(self._lookupName(name))
604
- if lookupflags is not None:
605
- fealookup.statements.append(lookupflags)
606
- fealookup.statements.append(ast.Comment("# " + lookup.name))
607
- else:
608
- fealookup = self._lookups[name.lower()]
609
- fealookup.statements.append(ast.SubtableStatement())
610
- fealookup.statements.append(ast.Comment("# " + lookup.name))
611
- self._lookups[name.lower()] = fealookup
612
- else:
613
- fealookup = ast.LookupBlock(self._lookupName(lookup.name))
614
- if lookupflags is not None:
615
- fealookup.statements.append(lookupflags)
616
- self._lookups[lookup.name.lower()] = fealookup
617
-
618
- if lookup.comments is not None:
619
- fealookup.statements.append(ast.Comment("# " + lookup.comments))
620
-
621
- contexts = []
622
- if lookup.context:
623
- for context in lookup.context:
624
- prefix = self._context(context.left)
625
- suffix = self._context(context.right)
626
- ignore = context.ex_or_in == "EXCEPT_CONTEXT"
627
- contexts.append([prefix, suffix, ignore, False])
628
- # It seems that VOLT will create contextual substitution using
629
- # only the input if there is no other contexts in this lookup.
630
- if ignore and len(lookup.context) == 1:
631
- contexts.append([[], [], False, True])
632
- else:
633
- contexts.append([[], [], False, False])
634
-
635
- targetlookup = None
636
- for prefix, suffix, ignore, chain in contexts:
637
- if lookup.sub is not None:
638
- self._gsubLookup(lookup, prefix, suffix, ignore, chain, fealookup)
639
-
640
- if lookup.pos is not None:
641
- if self._settings.get("COMPILER_USEEXTENSIONLOOKUPS"):
642
- fealookup.use_extension = True
643
- if prefix or suffix or chain or ignore:
644
- if not ignore and targetlookup is None:
645
- targetname = self._lookupName(lookup.name + " target")
646
- targetlookup = ast.LookupBlock(targetname)
647
- fealookup.targets = getattr(fealookup, "targets", [])
648
- fealookup.targets.append(targetlookup)
649
- self._gposLookup(lookup, targetlookup)
650
- self._gposContextLookup(
651
- lookup, prefix, suffix, ignore, fealookup, targetlookup
652
- )
653
- else:
654
- self._gposLookup(lookup, fealookup)
655
-
656
-
657
- def main(args=None):
658
- """Convert MS VOLT to AFDKO feature files."""
659
-
660
- import argparse
661
- from pathlib import Path
662
-
663
- from fontTools import configLogger
664
-
665
- parser = argparse.ArgumentParser(
666
- "fonttools voltLib.voltToFea", description=main.__doc__
667
- )
668
- parser.add_argument(
669
- "input", metavar="INPUT", type=Path, help="input font/VTP file to process"
670
- )
671
- parser.add_argument(
672
- "featurefile", metavar="OUTPUT", type=Path, help="output feature file"
673
- )
674
- parser.add_argument(
675
- "-t",
676
- "--table",
677
- action="append",
678
- choices=TABLES,
679
- dest="tables",
680
- help="List of tables to write, by default all tables are written",
681
- )
682
- parser.add_argument(
683
- "-q", "--quiet", action="store_true", help="Suppress non-error messages"
684
- )
685
- parser.add_argument(
686
- "--traceback", action="store_true", help="Don’t catch exceptions"
687
- )
688
-
689
- options = parser.parse_args(args)
690
-
691
- configLogger(level=("ERROR" if options.quiet else "INFO"))
692
-
693
- file_or_path = options.input
694
- font = None
695
- try:
696
- font = TTFont(file_or_path)
697
- if "TSIV" in font:
698
- file_or_path = StringIO(font["TSIV"].data.decode("utf-8"))
699
- else:
700
- log.error('"TSIV" table is missing, font was not saved from VOLT?')
701
- return 1
702
- except TTLibError:
703
- pass
704
-
705
- converter = VoltToFea(file_or_path, font)
706
- try:
707
- fea = converter.convert(options.tables)
708
- except NotImplementedError as e:
709
- if options.traceback:
710
- raise
711
- location = getattr(e.args[0], "location", None)
712
- message = f'"{e}" is not supported'
713
- if location:
714
- path, line, column = location
715
- log.error(f"{path}:{line}:{column}: {message}")
716
- else:
717
- log.error(message)
718
- return 1
719
- with open(options.featurefile, "w") as feafile:
720
- feafile.write(fea)
721
-
722
-
723
- if __name__ == "__main__":
724
- import sys
725
-
726
- sys.exit(main())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-98c587a9.js DELETED
@@ -1,7 +0,0 @@
1
- import{c as F,e as I,s as ce,N as me,t as c,P as _e,g as Ue,T as E,p as Qe,h as J,E as v,b as se,j as Ze,k as Ge,l as Ve,m as Ke,f as Je,i as Ye,n as We,o as et,q as ne,r as tt}from"./index-f90e1963.js";import{html as rt}from"./index-218a3021.js";import"./index-3370be2a.js";import"./Blocks-f0129fcd.js";import"./Button-89624748.js";import"./BlockLabel-56db415e.js";import"./Empty-585389a4.js";import"./Copy-6cd42558.js";import"./Download-fdaaf5d4.js";import"./index-ae57ca19.js";import"./index-c5e2dbc1.js";import"./index-0644e979.js";class X{constructor(e,r,s,n,i,o,a){this.type=e,this.value=r,this.from=s,this.hash=n,this.end=i,this.children=o,this.positions=a,this.hashProp=[[I.contextHash,n]]}static create(e,r,s,n,i){let o=n+(n<<8)+e+(r<<4)|0;return new X(e,r,s,o,i,[],[])}addChild(e,r){e.prop(I.contextHash)!=this.hash&&(e=new E(e.type,e.children,e.positions,e.length,this.hashProp)),this.children.push(e),this.positions.push(r)}toTree(e,r=this.end){let s=this.children.length-1;return s>=0&&(r=Math.max(r,this.positions[s]+this.children[s].length+this.from)),new E(e.types[this.type],this.children,this.positions,r-this.from).balance({makeTree:(i,o,a)=>new E(F.none,i,o,a,this.hashProp)})}}var 
f;(function(t){t[t.Document=1]="Document",t[t.CodeBlock=2]="CodeBlock",t[t.FencedCode=3]="FencedCode",t[t.Blockquote=4]="Blockquote",t[t.HorizontalRule=5]="HorizontalRule",t[t.BulletList=6]="BulletList",t[t.OrderedList=7]="OrderedList",t[t.ListItem=8]="ListItem",t[t.ATXHeading1=9]="ATXHeading1",t[t.ATXHeading2=10]="ATXHeading2",t[t.ATXHeading3=11]="ATXHeading3",t[t.ATXHeading4=12]="ATXHeading4",t[t.ATXHeading5=13]="ATXHeading5",t[t.ATXHeading6=14]="ATXHeading6",t[t.SetextHeading1=15]="SetextHeading1",t[t.SetextHeading2=16]="SetextHeading2",t[t.HTMLBlock=17]="HTMLBlock",t[t.LinkReference=18]="LinkReference",t[t.Paragraph=19]="Paragraph",t[t.CommentBlock=20]="CommentBlock",t[t.ProcessingInstructionBlock=21]="ProcessingInstructionBlock",t[t.Escape=22]="Escape",t[t.Entity=23]="Entity",t[t.HardBreak=24]="HardBreak",t[t.Emphasis=25]="Emphasis",t[t.StrongEmphasis=26]="StrongEmphasis",t[t.Link=27]="Link",t[t.Image=28]="Image",t[t.InlineCode=29]="InlineCode",t[t.HTMLTag=30]="HTMLTag",t[t.Comment=31]="Comment",t[t.ProcessingInstruction=32]="ProcessingInstruction",t[t.URL=33]="URL",t[t.HeaderMark=34]="HeaderMark",t[t.QuoteMark=35]="QuoteMark",t[t.ListMark=36]="ListMark",t[t.LinkMark=37]="LinkMark",t[t.EmphasisMark=38]="EmphasisMark",t[t.CodeMark=39]="CodeMark",t[t.CodeText=40]="CodeText",t[t.CodeInfo=41]="CodeInfo",t[t.LinkTitle=42]="LinkTitle",t[t.LinkLabel=43]="LinkLabel"})(f||(f={}));class st{constructor(e,r){this.start=e,this.content=r,this.marks=[],this.parsers=[]}}class nt{constructor(){this.text="",this.baseIndent=0,this.basePos=0,this.depth=0,this.markers=[],this.pos=0,this.indent=0,this.next=-1}forward(){this.basePos>this.pos&&this.forwardInner()}forwardInner(){let e=this.skipSpace(this.basePos);this.indent=this.countIndent(e,this.pos,this.indent),this.pos=e,this.next=e==this.text.length?-1:this.text.charCodeAt(e)}skipSpace(e){return 
N(this.text,e)}reset(e){for(this.text=e,this.baseIndent=this.basePos=this.pos=this.indent=0,this.forwardInner(),this.depth=1;this.markers.length;)this.markers.pop()}moveBase(e){this.basePos=e,this.baseIndent=this.countIndent(e,this.pos,this.indent)}moveBaseColumn(e){this.baseIndent=e,this.basePos=this.findColumn(e)}addMarker(e){this.markers.push(e)}countIndent(e,r=0,s=0){for(let n=r;n<e;n++)s+=this.text.charCodeAt(n)==9?4-s%4:1;return s}findColumn(e){let r=0;for(let s=0;r<this.text.length&&s<e;r++)s+=this.text.charCodeAt(r)==9?4-s%4:1;return r}scrub(){if(!this.baseIndent)return this.text;let e="";for(let r=0;r<this.basePos;r++)e+=" ";return e+this.text.slice(this.basePos)}}function ie(t,e,r){if(r.pos==r.text.length||t!=e.block&&r.indent>=e.stack[r.depth+1].value+r.baseIndent)return!0;if(r.indent>=r.baseIndent+4)return!1;let s=(t.type==f.OrderedList?ee:W)(r,e,!1);return s>0&&(t.type!=f.BulletList||Y(r,e,!1)<0)&&r.text.charCodeAt(r.pos+s-1)==t.value}const ge={[f.Blockquote](t,e,r){return r.next!=62?!1:(r.markers.push(m(f.QuoteMark,e.lineStart+r.pos,e.lineStart+r.pos+1)),r.moveBase(r.pos+(C(r.text.charCodeAt(r.pos+1))?2:1)),t.end=e.lineStart+r.text.length,!0)},[f.ListItem](t,e,r){return r.indent<r.baseIndent+t.value&&r.next>-1?!1:(r.moveBaseColumn(r.baseIndent+t.value),!0)},[f.OrderedList]:ie,[f.BulletList]:ie,[f.Document](){return!0}};function C(t){return t==32||t==9||t==10||t==13}function N(t,e=0){for(;e<t.length&&C(t.charCodeAt(e));)e++;return e}function oe(t,e,r){for(;e>r&&C(t.charCodeAt(e-1));)e--;return e}function ke(t){if(t.next!=96&&t.next!=126)return-1;let e=t.pos+1;for(;e<t.text.length&&t.text.charCodeAt(e)==t.next;)e++;if(e<t.pos+3)return-1;if(t.next==96){for(let r=e;r<t.text.length;r++)if(t.text.charCodeAt(r)==96)return-1}return e}function Le(t){return t.next!=62?-1:t.text.charCodeAt(t.pos+1)==32?2:1}function Y(t,e,r){if(t.next!=42&&t.next!=45&&t.next!=95)return-1;let s=1;for(let n=t.pos+1;n<t.text.length;n++){let 
i=t.text.charCodeAt(n);if(i==t.next)s++;else if(!C(i))return-1}return r&&t.next==45&&we(t)>-1&&t.depth==e.stack.length||s<3?-1:1}function be(t,e){for(let r=t.stack.length-1;r>=0;r--)if(t.stack[r].type==e)return!0;return!1}function W(t,e,r){return(t.next==45||t.next==43||t.next==42)&&(t.pos==t.text.length-1||C(t.text.charCodeAt(t.pos+1)))&&(!r||be(e,f.BulletList)||t.skipSpace(t.pos+2)<t.text.length)?1:-1}function ee(t,e,r){let s=t.pos,n=t.next;for(;n>=48&&n<=57;){s++;if(s==t.text.length)return-1;n=t.text.charCodeAt(s)}return s==t.pos||s>t.pos+9||n!=46&&n!=41||s<t.text.length-1&&!C(t.text.charCodeAt(s+1))||r&&!be(e,f.OrderedList)&&(t.skipSpace(s+1)==t.text.length||s>t.pos+1||t.next!=49)?-1:s+1-t.pos}function Se(t){if(t.next!=35)return-1;let e=t.pos+1;for(;e<t.text.length&&t.text.charCodeAt(e)==35;)e++;if(e<t.text.length&&t.text.charCodeAt(e)!=32)return-1;let r=e-t.pos;return r>6?-1:r}function we(t){if(t.next!=45&&t.next!=61||t.indent>=t.baseIndent+4)return-1;let e=t.pos+1;for(;e<t.text.length&&t.text.charCodeAt(e)==t.next;)e++;let r=e;for(;e<t.text.length&&C(t.text.charCodeAt(e));)e++;return e==t.text.length?r:-1}const Q=/^[ \t]*$/,Ce=/-->/,Ae=/\?>/,Z=[[/^<(?:script|pre|style)(?:\s|>|$)/i,/<\/(?:script|pre|style)>/i],[/^\s*<!--/,Ce],[/^\s*<\?/,Ae],[/^\s*<![A-Z]/,/>/],[/^\s*<!\[CDATA\[/,/\]\]>/],[/^\s*<\/?(?:address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h1|h2|h3|h4|h5|h6|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?:\s|\/?>|$)/i,Q],[/^\s*(?:<\/[a-z][\w-]*\s*>|<[a-z][\w-]*(\s+[a-z:_][\w-.]*(?:\s*=\s*(?:[^\s"'=<>`]+|'[^']*'|"[^"]*"))?)*\s*>)\s*$/i,Q]];function xe(t,e,r){if(t.next!=60)return-1;let s=t.text.slice(t.pos);for(let n=0,i=Z.length-(r?1:0);n<i;n++)if(Z[n][0].test(s))return n;return-1}function ae(t,e){let 
r=t.countIndent(e,t.pos,t.indent),s=t.countIndent(t.skipSpace(e),e,r);return s>=r+5?r+1:s}function B(t,e,r){let s=t.length-1;s>=0&&t[s].to==e&&t[s].type==f.CodeText?t[s].to=r:t.push(m(f.CodeText,e,r))}const z={LinkReference:void 0,IndentedCode(t,e){let r=e.baseIndent+4;if(e.indent<r)return!1;let s=e.findColumn(r),n=t.lineStart+s,i=t.lineStart+e.text.length,o=[],a=[];for(B(o,n,i);t.nextLine()&&e.depth>=t.stack.length;)if(e.pos==e.text.length){B(a,t.lineStart-1,t.lineStart);for(let l of e.markers)a.push(l)}else{if(e.indent<r)break;{if(a.length){for(let h of a)h.type==f.CodeText?B(o,h.from,h.to):o.push(h);a=[]}B(o,t.lineStart-1,t.lineStart);for(let h of e.markers)o.push(h);i=t.lineStart+e.text.length;let l=t.lineStart+e.findColumn(e.baseIndent+4);l<i&&B(o,l,i)}}return a.length&&(a=a.filter(l=>l.type!=f.CodeText),a.length&&(e.markers=a.concat(e.markers))),t.addNode(t.buffer.writeElements(o,-n).finish(f.CodeBlock,i-n),n),!0},FencedCode(t,e){let r=ke(e);if(r<0)return!1;let s=t.lineStart+e.pos,n=e.next,i=r-e.pos,o=e.skipSpace(r),a=oe(e.text,e.text.length,o),l=[m(f.CodeMark,s,s+i)];o<a&&l.push(m(f.CodeInfo,t.lineStart+o,t.lineStart+a));for(let h=!0;t.nextLine()&&e.depth>=t.stack.length;h=!1){let u=e.pos;if(e.indent-e.baseIndent<4)for(;u<e.text.length&&e.text.charCodeAt(u)==n;)u++;if(u-e.pos>=i&&e.skipSpace(u)==e.text.length){for(let p of e.markers)l.push(p);l.push(m(f.CodeMark,t.lineStart+e.pos,t.lineStart+u)),t.nextLine();break}else{h||B(l,t.lineStart-1,t.lineStart);for(let L of e.markers)l.push(L);let p=t.lineStart+e.basePos,d=t.lineStart+e.text.length;p<d&&B(l,p,d)}}return t.addNode(t.buffer.writeElements(l,-s).finish(f.FencedCode,t.prevLineEnd()-s),s),!0},Blockquote(t,e){let r=Le(e);return r<0?!1:(t.startContext(f.Blockquote,e.pos),t.addNode(f.QuoteMark,t.lineStart+e.pos,t.lineStart+e.pos+1),e.moveBase(e.pos+r),null)},HorizontalRule(t,e){if(Y(e,t,!1)<0)return!1;let r=t.lineStart+e.pos;return t.nextLine(),t.addNode(f.HorizontalRule,r),!0},BulletList(t,e){let 
r=W(e,t,!1);if(r<0)return!1;t.block.type!=f.BulletList&&t.startContext(f.BulletList,e.basePos,e.next);let s=ae(e,e.pos+1);return t.startContext(f.ListItem,e.basePos,s-e.baseIndent),t.addNode(f.ListMark,t.lineStart+e.pos,t.lineStart+e.pos+r),e.moveBaseColumn(s),null},OrderedList(t,e){let r=ee(e,t,!1);if(r<0)return!1;t.block.type!=f.OrderedList&&t.startContext(f.OrderedList,e.basePos,e.text.charCodeAt(e.pos+r-1));let s=ae(e,e.pos+r);return t.startContext(f.ListItem,e.basePos,s-e.baseIndent),t.addNode(f.ListMark,t.lineStart+e.pos,t.lineStart+e.pos+r),e.moveBaseColumn(s),null},ATXHeading(t,e){let r=Se(e);if(r<0)return!1;let s=e.pos,n=t.lineStart+s,i=oe(e.text,e.text.length,s),o=i;for(;o>s&&e.text.charCodeAt(o-1)==e.next;)o--;(o==i||o==s||!C(e.text.charCodeAt(o-1)))&&(o=e.text.length);let a=t.buffer.write(f.HeaderMark,0,r).writeElements(t.parser.parseInline(e.text.slice(s+r+1,o),n+r+1),-n);o<e.text.length&&a.write(f.HeaderMark,o-s,i-s);let l=a.finish(f.ATXHeading1-1+r,e.text.length-s);return t.nextLine(),t.addNode(l,n),!0},HTMLBlock(t,e){let r=xe(e,t,!1);if(r<0)return!1;let s=t.lineStart+e.pos,n=Z[r][1],i=[],o=n!=Q;for(;!n.test(e.text)&&t.nextLine();){if(e.depth<t.stack.length){o=!1;break}for(let h of e.markers)i.push(h)}o&&t.nextLine();let a=n==Ce?f.CommentBlock:n==Ae?f.ProcessingInstructionBlock:f.HTMLBlock,l=t.prevLineEnd();return t.addNode(t.buffer.writeElements(i,-s).finish(a,l-s),s),!0},SetextHeading:void 0};class it{constructor(e){this.stage=0,this.elts=[],this.pos=0,this.start=e.start,this.advance(e.content)}nextLine(e,r,s){if(this.stage==-1)return!1;let n=s.content+`
2
- `+r.scrub(),i=this.advance(n);return i>-1&&i<n.length?this.complete(e,s,i):!1}finish(e,r){return(this.stage==2||this.stage==3)&&N(r.content,this.pos)==r.content.length?this.complete(e,r,r.content.length):!1}complete(e,r,s){return e.addLeafElement(r,m(f.LinkReference,this.start,this.start+s,this.elts)),!0}nextStage(e){return e?(this.pos=e.to-this.start,this.elts.push(e),this.stage++,!0):(e===!1&&(this.stage=-1),!1)}advance(e){for(;;){if(this.stage==-1)return-1;if(this.stage==0){if(!this.nextStage(ye(e,this.pos,this.start,!0)))return-1;if(e.charCodeAt(this.pos)!=58)return this.stage=-1;this.elts.push(m(f.LinkMark,this.pos+this.start,this.pos+this.start+1)),this.pos++}else if(this.stage==1){if(!this.nextStage(ve(e,N(e,this.pos),this.start)))return-1}else if(this.stage==2){let r=N(e,this.pos),s=0;if(r>this.pos){let n=Ne(e,r,this.start);if(n){let i=q(e,n.to-this.start);i>0&&(this.nextStage(n),s=i)}}return s||(s=q(e,this.pos)),s>0&&s<e.length?s:-1}else return q(e,this.pos)}}}function q(t,e){for(;e<t.length;e++){let r=t.charCodeAt(e);if(r==10)break;if(!C(r))return-1}return e}class ot{nextLine(e,r,s){let n=r.depth<e.stack.length?-1:we(r),i=r.next;if(n<0)return!1;let o=m(f.HeaderMark,e.lineStart+r.pos,e.lineStart+n);return e.nextLine(),e.addLeafElement(s,m(i==61?f.SetextHeading1:f.SetextHeading2,s.start,e.prevLineEnd(),[...e.parser.parseInline(s.content,s.start),o])),!0}finish(){return!1}}const at={LinkReference(t,e){return e.content.charCodeAt(0)==91?new it(e):null},SetextHeading(){return new ot}},lt=[(t,e)=>Se(e)>=0,(t,e)=>ke(e)>=0,(t,e)=>Le(e)>=0,(t,e)=>W(e,t,!0)>=0,(t,e)=>ee(e,t,!0)>=0,(t,e)=>Y(e,t,!0)>=0,(t,e)=>xe(e,t,!0)>=0],ht={text:"",end:0};class ft{constructor(e,r,s,n){this.parser=e,this.input=r,this.ranges=n,this.line=new nt,this.atEnd=!1,this.dontInject=new 
Set,this.stoppedAt=null,this.rangeI=0,this.to=n[n.length-1].to,this.lineStart=this.absoluteLineStart=this.absoluteLineEnd=n[0].from,this.block=X.create(f.Document,0,this.lineStart,0,0),this.stack=[this.block],this.fragments=s.length?new ct(s,r):null,this.readLine()}get parsedPos(){return this.absoluteLineStart}advance(){if(this.stoppedAt!=null&&this.absoluteLineStart>this.stoppedAt)return this.finish();let{line:e}=this;for(;;){for(;e.depth<this.stack.length;)this.finishContext();for(let s of e.markers)this.addNode(s.type,s.from,s.to);if(e.pos<e.text.length)break;if(!this.nextLine())return this.finish()}if(this.fragments&&this.reuseFragment(e.basePos))return null;e:for(;;){for(let s of this.parser.blockParsers)if(s){let n=s(this,e);if(n!=!1){if(n==!0)return null;e.forward();continue e}}break}let r=new st(this.lineStart+e.pos,e.text.slice(e.pos));for(let s of this.parser.leafBlockParsers)if(s){let n=s(this,r);n&&r.parsers.push(n)}e:for(;this.nextLine()&&e.pos!=e.text.length;){if(e.indent<e.baseIndent+4){for(let s of this.parser.endLeafBlock)if(s(this,e,r))break e}for(let s of r.parsers)if(s.nextLine(this,e,r))return null;r.content+=`
3
- `+e.scrub();for(let s of e.markers)r.marks.push(s)}return this.finishLeaf(r),null}stopAt(e){if(this.stoppedAt!=null&&this.stoppedAt<e)throw new RangeError("Can't move stoppedAt forward");this.stoppedAt=e}reuseFragment(e){if(!this.fragments.moveTo(this.absoluteLineStart+e,this.absoluteLineStart)||!this.fragments.matches(this.block.hash))return!1;let r=this.fragments.takeNodes(this);if(!r)return!1;let s=r,n=this.absoluteLineStart+r;for(let i=1;i<this.ranges.length;i++){let o=this.ranges[i-1].to,a=this.ranges[i].from;o>=this.lineStart&&a<n&&(s-=a-o)}return this.lineStart+=s,this.absoluteLineStart+=r,this.moveRangeI(),this.absoluteLineStart<this.to?(this.lineStart++,this.absoluteLineStart++,this.readLine()):(this.atEnd=!0,this.readLine()),!0}get depth(){return this.stack.length}parentType(e=this.depth-1){return this.parser.nodeSet.types[this.stack[e].type]}nextLine(){return this.lineStart+=this.line.text.length,this.absoluteLineEnd>=this.to?(this.absoluteLineStart=this.absoluteLineEnd,this.atEnd=!0,this.readLine(),!1):(this.lineStart++,this.absoluteLineStart=this.absoluteLineEnd+1,this.moveRangeI(),this.readLine(),!0)}moveRangeI(){for(;this.rangeI<this.ranges.length-1&&this.absoluteLineStart>=this.ranges[this.rangeI].to;)this.rangeI++,this.absoluteLineStart=Math.max(this.absoluteLineStart,this.ranges[this.rangeI].from)}scanLine(e){let r=ht;if(r.end=e,e>=this.to)r.text="";else if(r.text=this.lineChunkAt(e),r.end+=r.text.length,this.ranges.length>1){let s=this.absoluteLineStart,n=this.rangeI;for(;this.ranges[n].to<r.end;){n++;let i=this.ranges[n].from,o=this.lineChunkAt(i);r.end=i+o.length,r.text=r.text.slice(0,this.ranges[n-1].to-s)+o,s=r.end-r.text.length}}return r}readLine(){let{line:e}=this,{text:r,end:s}=this.scanLine(this.absoluteLineStart);for(this.absoluteLineEnd=s,e.reset(r);e.depth<this.stack.length;e.depth++){let n=this.stack[e.depth],i=this.parser.skipContextMarkup[n.type];if(!i)throw new Error("Unhandled block context 
"+f[n.type]);if(!i(n,this,e))break;e.forward()}}lineChunkAt(e){let r=this.input.chunk(e),s;if(this.input.lineChunks)s=r==`
4
- `?"":r;else{let n=r.indexOf(`
5
- `);s=n<0?r:r.slice(0,n)}return e+s.length>this.to?s.slice(0,this.to-e):s}prevLineEnd(){return this.atEnd?this.lineStart:this.lineStart-1}startContext(e,r,s=0){this.block=X.create(e,s,this.lineStart+r,this.block.hash,this.lineStart+this.line.text.length),this.stack.push(this.block)}startComposite(e,r,s=0){this.startContext(this.parser.getNodeType(e),r,s)}addNode(e,r,s){typeof e=="number"&&(e=new E(this.parser.nodeSet.types[e],M,M,(s??this.prevLineEnd())-r)),this.block.addChild(e,r-this.block.from)}addElement(e){this.block.addChild(e.toTree(this.parser.nodeSet),e.from-this.block.from)}addLeafElement(e,r){this.addNode(this.buffer.writeElements(V(r.children,e.marks),-r.from).finish(r.type,r.to-r.from),r.from)}finishContext(){let e=this.stack.pop(),r=this.stack[this.stack.length-1];r.addChild(e.toTree(this.parser.nodeSet),e.from-r.from),this.block=r}finish(){for(;this.stack.length>1;)this.finishContext();return this.addGaps(this.block.toTree(this.parser.nodeSet,this.lineStart))}addGaps(e){return this.ranges.length>1?Be(this.ranges,0,e.topNode,this.ranges[0].from,this.dontInject):e}finishLeaf(e){for(let s of e.parsers)if(s.finish(this,e))return;let r=V(this.parser.parseInline(e.content,e.start),e.marks);this.addNode(this.buffer.writeElements(r,-e.start).finish(f.Paragraph,e.content.length),e.start)}elt(e,r,s,n){return typeof e=="string"?m(this.parser.getNodeType(e),r,s,n):new Me(e,r)}get buffer(){return new Ie(this.parser.nodeSet)}}function Be(t,e,r,s,n){if(n.has(r.tree))return r.tree;let i=t[e].to,o=[],a=[],l=r.from+s;function h(u,p){for(;p?u>=i:u>i;){let d=t[e+1].from-i;s+=d,u+=d,e++,i=t[e].to}}for(let u=r.firstChild;u;u=u.nextSibling){h(u.from+s,!0);let p=u.from+s,d;u.to+s>i?(d=Be(t,e,u,s,n),h(u.to+s,!1)):d=u.toTree(),o.push(d),a.push(p-l)}return h(r.to+s,!1),new E(r.type,o,a,r.to+s-l,r.tree?r.tree.propValues:void 0)}class j extends 
_e{constructor(e,r,s,n,i,o,a,l,h){super(),this.nodeSet=e,this.blockParsers=r,this.leafBlockParsers=s,this.blockNames=n,this.endLeafBlock=i,this.skipContextMarkup=o,this.inlineParsers=a,this.inlineNames=l,this.wrappers=h,this.nodeTypes=Object.create(null);for(let u of e.types)this.nodeTypes[u.name]=u.id}createParse(e,r,s){let n=new ft(this,e,r,s);for(let i of this.wrappers)n=i(n,e,r,s);return n}configure(e){let r=G(e);if(!r)return this;let{nodeSet:s,skipContextMarkup:n}=this,i=this.blockParsers.slice(),o=this.leafBlockParsers.slice(),a=this.blockNames.slice(),l=this.inlineParsers.slice(),h=this.inlineNames.slice(),u=this.endLeafBlock.slice(),p=this.wrappers;if(H(r.defineNodes)){n=Object.assign({},n);let d=s.types.slice(),L;for(let S of r.defineNodes){let{name:g,block:k,composite:b,style:w}=typeof S=="string"?{name:S}:S;if(d.some($=>$.name==g))continue;b&&(n[d.length]=($,$e,qe)=>b($e,qe,$.value));let x=d.length,re=b?["Block","BlockContext"]:k?x>=f.ATXHeading1&&x<=f.SetextHeading2?["Block","LeafBlock","Heading"]:["Block","LeafBlock"]:void 0;d.push(F.define({id:x,name:g,props:re&&[[I.group,re]]})),w&&(L||(L={}),Array.isArray(w)||w instanceof Ue?L[g]=w:Object.assign(L,w))}s=new me(d),L&&(s=s.extend(ce(L)))}if(H(r.props)&&(s=s.extend(...r.props)),H(r.remove))for(let d of r.remove){let L=this.blockNames.indexOf(d),S=this.inlineNames.indexOf(d);L>-1&&(i[L]=o[L]=void 0),S>-1&&(l[S]=void 0)}if(H(r.parseBlock))for(let d of r.parseBlock){let L=a.indexOf(d.name);if(L>-1)i[L]=d.parse,o[L]=d.leaf;else{let S=d.before?T(a,d.before):d.after?T(a,d.after)+1:a.length-1;i.splice(S,0,d.parse),o.splice(S,0,d.leaf),a.splice(S,0,d.name)}d.endLeaf&&u.push(d.endLeaf)}if(H(r.parseInline))for(let d of r.parseInline){let L=h.indexOf(d.name);if(L>-1)l[L]=d.parse;else{let S=d.before?T(h,d.before):d.after?T(h,d.after)+1:h.length-1;l.splice(S,0,d.parse),h.splice(S,0,d.name)}}return r.wrap&&(p=p.concat(r.wrap)),new j(s,i,o,a,u,n,l,h,p)}getNodeType(e){let r=this.nodeTypes[e];if(r==null)throw new 
RangeError(`Unknown node type '${e}'`);return r}parseInline(e,r){let s=new dt(this,e,r);e:for(let n=r;n<s.end;){let i=s.char(n);for(let o of this.inlineParsers)if(o){let a=o(s,i,n);if(a>=0){n=a;continue e}}n++}return s.resolveMarkers(0)}}function H(t){return t!=null&&t.length>0}function G(t){if(!Array.isArray(t))return t;if(t.length==0)return null;let e=G(t[0]);if(t.length==1)return e;let r=G(t.slice(1));if(!r||!e)return e||r;let s=(o,a)=>(o||M).concat(a||M),n=e.wrap,i=r.wrap;return{props:s(e.props,r.props),defineNodes:s(e.defineNodes,r.defineNodes),parseBlock:s(e.parseBlock,r.parseBlock),parseInline:s(e.parseInline,r.parseInline),remove:s(e.remove,r.remove),wrap:n?i?(o,a,l,h)=>n(i(o,a,l,h),a,l,h):n:i}}function T(t,e){let r=t.indexOf(e);if(r<0)throw new RangeError(`Position specified relative to unknown parser ${e}`);return r}let Ee=[F.none];for(let t=1,e;e=f[t];t++)Ee[t]=F.define({id:t,name:e,props:t>=f.Escape?[]:[[I.group,t in ge?["Block","BlockContext"]:["Block","LeafBlock"]]]});const M=[];class Ie{constructor(e){this.nodeSet=e,this.content=[],this.nodes=[]}write(e,r,s,n=0){return this.content.push(e,r,s,4+n*4),this}writeElements(e,r=0){for(let s of e)s.writeTo(this,r);return this}finish(e,r){return E.build({buffer:this.content,nodeSet:this.nodeSet,reused:this.nodes,topID:e,length:r})}}class O{constructor(e,r,s,n=M){this.type=e,this.from=r,this.to=s,this.children=n}writeTo(e,r){let s=e.content.length;e.writeElements(this.children,r),e.content.push(this.type,this.from+r,this.to+r,e.content.length+4-s)}toTree(e){return new Ie(e).writeElements(this.children,-this.from).finish(this.type,this.to-this.from)}}class Me{constructor(e,r){this.tree=e,this.from=r}get to(){return this.from+this.tree.length}get type(){return this.tree.type.id}get children(){return M}writeTo(e,r){e.nodes.push(this.tree),e.content.push(e.nodes.length-1,this.from+r,this.to+r,-1)}toTree(){return this.tree}}function m(t,e,r,s){return new O(t,e,r,s)}const 
He={resolve:"Emphasis",mark:"EmphasisMark"},Pe={resolve:"Emphasis",mark:"EmphasisMark"},P={},le={};class A{constructor(e,r,s,n){this.type=e,this.from=r,this.to=s,this.side=n}}const he="!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~";let R=/[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~\xA1\u2010-\u2027]/;try{R=new RegExp("[\\p{Pc}|\\p{Pd}|\\p{Pe}|\\p{Pf}|\\p{Pi}|\\p{Po}|\\p{Ps}]","u")}catch{}const _={Escape(t,e,r){if(e!=92||r==t.end-1)return-1;let s=t.char(r+1);for(let n=0;n<he.length;n++)if(he.charCodeAt(n)==s)return t.append(m(f.Escape,r,r+2));return-1},Entity(t,e,r){if(e!=38)return-1;let s=/^(?:#\d+|#x[a-f\d]+|\w+);/i.exec(t.slice(r+1,r+31));return s?t.append(m(f.Entity,r,r+1+s[0].length)):-1},InlineCode(t,e,r){if(e!=96||r&&t.char(r-1)==96)return-1;let s=r+1;for(;s<t.end&&t.char(s)==96;)s++;let n=s-r,i=0;for(;s<t.end;s++)if(t.char(s)==96){if(i++,i==n&&t.char(s+1)!=96)return t.append(m(f.InlineCode,r,s+1,[m(f.CodeMark,r,r+n),m(f.CodeMark,s+1-n,s+1)]))}else i=0;return-1},HTMLTag(t,e,r){if(e!=60||r==t.end-1)return-1;let s=t.slice(r+1,t.end),n=/^(?:[a-z][-\w+.]+:[^\s>]+|[a-z\d.!#$%&'*+/=?^_`{|}~-]+@[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?(?:\.[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?)*)>/i.exec(s);if(n)return t.append(m(f.URL,r,r+1+n[0].length));let i=/^!--[^>](?:-[^-]|[^-])*?-->/i.exec(s);if(i)return t.append(m(f.Comment,r,r+1+i[0].length));let o=/^\?[^]*?\?>/.exec(s);if(o)return t.append(m(f.ProcessingInstruction,r,r+1+o[0].length));let a=/^(?:![A-Z][^]*?>|!\[CDATA\[[^]*?\]\]>|\/\s*[a-zA-Z][\w-]*\s*>|\s*[a-zA-Z][\w-]*(\s+[a-zA-Z:_][\w-.:]*(?:\s*=\s*(?:[^\s"'=<>`]+|'[^']*'|"[^"]*"))?)*\s*(\/\s*)?>)/.exec(s);return a?t.append(m(f.HTMLTag,r,r+1+a[0].length)):-1},Emphasis(t,e,r){if(e!=95&&e!=42)return-1;let s=r+1;for(;t.char(s)==e;)s++;let n=t.slice(r-1,r),i=t.slice(s,s+1),o=R.test(n),a=R.test(i),l=/\s|^$/.test(n),h=/\s|^$/.test(i),u=!h&&(!a||l||o),p=!l&&(!o||h||a),d=u&&(e==42||!p||o),L=p&&(e==42||!u||a);return t.append(new 
A(e==95?He:Pe,r,s,(d?1:0)|(L?2:0)))},HardBreak(t,e,r){if(e==92&&t.char(r+1)==10)return t.append(m(f.HardBreak,r,r+2));if(e==32){let s=r+1;for(;t.char(s)==32;)s++;if(t.char(s)==10&&s>=r+2)return t.append(m(f.HardBreak,r,s+1))}return-1},Link(t,e,r){return e==91?t.append(new A(P,r,r+1,1)):-1},Image(t,e,r){return e==33&&t.char(r+1)==91?t.append(new A(le,r,r+2,1)):-1},LinkEnd(t,e,r){if(e!=93)return-1;for(let s=t.parts.length-1;s>=0;s--){let n=t.parts[s];if(n instanceof A&&(n.type==P||n.type==le)){if(!n.side||t.skipSpace(n.to)==r&&!/[(\[]/.test(t.slice(r+1,r+2)))return t.parts[s]=null,-1;let i=t.takeContent(s),o=t.parts[s]=ut(t,i,n.type==P?f.Link:f.Image,n.from,r+1);if(n.type==P)for(let a=0;a<s;a++){let l=t.parts[a];l instanceof A&&l.type==P&&(l.side=0)}return o.to}}return-1}};function ut(t,e,r,s,n){let{text:i}=t,o=t.char(n),a=n;if(e.unshift(m(f.LinkMark,s,s+(r==f.Image?2:1))),e.push(m(f.LinkMark,n-1,n)),o==40){let l=t.skipSpace(n+1),h=ve(i,l-t.offset,t.offset),u;h&&(l=t.skipSpace(h.to),u=Ne(i,l-t.offset,t.offset),u&&(l=t.skipSpace(u.to))),t.char(l)==41&&(e.push(m(f.LinkMark,n,n+1)),a=l+1,h&&e.push(h),u&&e.push(u),e.push(m(f.LinkMark,l,a)))}else if(o==91){let l=ye(i,n-t.offset,t.offset,!1);l&&(e.push(l),a=l.to)}return m(r,s,a,e)}function ve(t,e,r){if(t.charCodeAt(e)==60){for(let n=e+1;n<t.length;n++){let i=t.charCodeAt(n);if(i==62)return m(f.URL,e+r,n+1+r);if(i==60||i==10)return!1}return null}else{let n=0,i=e;for(let o=!1;i<t.length;i++){let a=t.charCodeAt(i);if(C(a))break;if(o)o=!1;else if(a==40)n++;else if(a==41){if(!n)break;n--}else a==92&&(o=!0)}return i>e?m(f.URL,e+r,i+r):i==t.length?null:!1}}function Ne(t,e,r){let s=t.charCodeAt(e);if(s!=39&&s!=34&&s!=40)return!1;let n=s==40?41:s;for(let i=e+1,o=!1;i<t.length;i++){let a=t.charCodeAt(i);if(o)o=!1;else{if(a==n)return m(f.LinkTitle,e+r,i+1+r);a==92&&(o=!0)}}return null}function ye(t,e,r,s){for(let n=!1,i=e+1,o=Math.min(t.length,i+999);i<o;i++){let a=t.charCodeAt(i);if(n)n=!1;else{if(a==93)return 
s?!1:m(f.LinkLabel,e+r,i+1+r);if(s&&!C(a)&&(s=!1),a==91)return!1;a==92&&(n=!0)}}return null}class dt{constructor(e,r,s){this.parser=e,this.text=r,this.offset=s,this.parts=[]}char(e){return e>=this.end?-1:this.text.charCodeAt(e-this.offset)}get end(){return this.offset+this.text.length}slice(e,r){return this.text.slice(e-this.offset,r-this.offset)}append(e){return this.parts.push(e),e.to}addDelimiter(e,r,s,n,i){return this.append(new A(e,r,s,(n?1:0)|(i?2:0)))}addElement(e){return this.append(e)}resolveMarkers(e){for(let s=e;s<this.parts.length;s++){let n=this.parts[s];if(!(n instanceof A&&n.type.resolve&&n.side&2))continue;let i=n.type==He||n.type==Pe,o=n.to-n.from,a,l=s-1;for(;l>=e;l--){let g=this.parts[l];if(g instanceof A&&g.side&1&&g.type==n.type&&!(i&&(n.side&1||g.side&2)&&(g.to-g.from+o)%3==0&&((g.to-g.from)%3||o%3))){a=g;break}}if(!a)continue;let h=n.type.resolve,u=[],p=a.from,d=n.to;if(i){let g=Math.min(2,a.to-a.from,o);p=a.to-g,d=n.from+g,h=g==1?"Emphasis":"StrongEmphasis"}a.type.mark&&u.push(this.elt(a.type.mark,p,a.to));for(let g=l+1;g<s;g++)this.parts[g]instanceof O&&u.push(this.parts[g]),this.parts[g]=null;n.type.mark&&u.push(this.elt(n.type.mark,n.from,d));let L=this.elt(h,p,d,u);this.parts[l]=i&&a.from!=p?new A(a.type,a.from,p,a.side):null,(this.parts[s]=i&&n.to!=d?new A(n.type,d,n.to,n.side):null)?this.parts.splice(s,0,L):this.parts[s]=L}let r=[];for(let s=e;s<this.parts.length;s++){let n=this.parts[s];n instanceof O&&r.push(n)}return r}findOpeningDelimiter(e){for(let r=this.parts.length-1;r>=0;r--){let s=this.parts[r];if(s instanceof A&&s.type==e)return r}return null}takeContent(e){let r=this.resolveMarkers(e);return this.parts.length=e,r}skipSpace(e){return N(this.text,e-this.offset)+this.offset}elt(e,r,s,n){return typeof e=="string"?m(this.parser.getNodeType(e),r,s,n):new Me(e,r)}}function V(t,e){if(!e.length)return t;if(!t.length)return e;let r=t.slice(),s=0;for(let n of e){for(;s<r.length&&r[s].to<n.to;)s++;if(s<r.length&&r[s].from<n.from){let 
i=r[s];i instanceof O&&(r[s]=new O(i.type,i.from,i.to,V(i.children,[n])))}else r.splice(s++,0,n)}return r}const pt=[f.CodeBlock,f.ListItem,f.OrderedList,f.BulletList];class ct{constructor(e,r){this.fragments=e,this.input=r,this.i=0,this.fragment=null,this.fragmentEnd=-1,this.cursor=null,e.length&&(this.fragment=e[this.i++])}nextFragment(){this.fragment=this.i<this.fragments.length?this.fragments[this.i++]:null,this.cursor=null,this.fragmentEnd=-1}moveTo(e,r){for(;this.fragment&&this.fragment.to<=e;)this.nextFragment();if(!this.fragment||this.fragment.from>(e?e-1:0))return!1;if(this.fragmentEnd<0){let i=this.fragment.to;for(;i>0&&this.input.read(i-1,i)!=`
6
- `;)i--;this.fragmentEnd=i?i-1:0}let s=this.cursor;s||(s=this.cursor=this.fragment.tree.cursor(),s.firstChild());let n=e+this.fragment.offset;for(;s.to<=n;)if(!s.parent())return!1;for(;;){if(s.from>=n)return this.fragment.from<=r;if(!s.childAfter(n))return!1}}matches(e){let r=this.cursor.tree;return r&&r.prop(I.contextHash)==e}takeNodes(e){let r=this.cursor,s=this.fragment.offset,n=this.fragmentEnd-(this.fragment.openEnd?1:0),i=e.absoluteLineStart,o=i,a=e.block.children.length,l=o,h=a;for(;;){if(r.to-s>n){if(r.type.isAnonymous&&r.firstChild())continue;break}if(e.dontInject.add(r.tree),e.addNode(r.tree,r.from-s),r.type.is("Block")&&(pt.indexOf(r.type.id)<0?(o=r.to-s,a=e.block.children.length):(o=l,a=h,l=r.to-s,h=e.block.children.length)),!r.nextSibling())break}for(;e.block.children.length>a;)e.block.children.pop(),e.block.positions.pop();return o-i}}const mt=ce({"Blockquote/...":c.quote,HorizontalRule:c.contentSeparator,"ATXHeading1/... SetextHeading1/...":c.heading1,"ATXHeading2/... SetextHeading2/...":c.heading2,"ATXHeading3/...":c.heading3,"ATXHeading4/...":c.heading4,"ATXHeading5/...":c.heading5,"ATXHeading6/...":c.heading6,"Comment CommentBlock":c.comment,Escape:c.escape,Entity:c.character,"Emphasis/...":c.emphasis,"StrongEmphasis/...":c.strong,"Link/... Image/...":c.link,"OrderedList/... 
BulletList/...":c.list,"BlockQuote/...":c.quote,"InlineCode CodeText":c.monospace,URL:c.url,"HeaderMark HardBreak QuoteMark ListMark LinkMark EmphasisMark CodeMark":c.processingInstruction,"CodeInfo LinkLabel":c.labelName,LinkTitle:c.string,Paragraph:c.content}),gt=new j(new me(Ee).extend(mt),Object.keys(z).map(t=>z[t]),Object.keys(z).map(t=>at[t]),Object.keys(z),lt,ge,Object.keys(_).map(t=>_[t]),Object.keys(_),[]);function kt(t,e,r){let s=[];for(let n=t.firstChild,i=e;;n=n.nextSibling){let o=n?n.from:r;if(o>i&&s.push({from:i,to:o}),!n)break;i=n.to}return s}function Lt(t){let{codeParser:e,htmlParser:r}=t;return{wrap:Qe((n,i)=>{let o=n.type.id;if(e&&(o==f.CodeBlock||o==f.FencedCode)){let a="";if(o==f.FencedCode){let h=n.node.getChild(f.CodeInfo);h&&(a=i.read(h.from,h.to))}let l=e(a);if(l)return{parser:l,overlay:h=>h.type.id==f.CodeText}}else if(r&&(o==f.HTMLBlock||o==f.HTMLTag))return{parser:r,overlay:kt(n.node,n.from,n.to)};return null})}}const bt={resolve:"Strikethrough",mark:"StrikethroughMark"},St={defineNodes:[{name:"Strikethrough",style:{"Strikethrough/...":c.strikethrough}},{name:"StrikethroughMark",style:c.processingInstruction}],parseInline:[{name:"Strikethrough",parse(t,e,r){if(e!=126||t.char(r+1)!=126||t.char(r+2)==126)return-1;let s=t.slice(r-1,r),n=t.slice(r+2,r+3),i=/\s|^$/.test(s),o=/\s|^$/.test(n),a=R.test(s),l=R.test(n);return t.addDelimiter(bt,r,r+2,!o&&(!l||i||a),!i&&(!a||o||l))},after:"Emphasis"}]};function y(t,e,r=0,s,n=0){let i=0,o=!0,a=-1,l=-1,h=!1,u=()=>{s.push(t.elt("TableCell",n+a,n+l,t.parser.parseInline(e.slice(a,l),n+a)))};for(let p=r;p<e.length;p++){let d=e.charCodeAt(p);d==124&&!h?((!o||a>-1)&&i++,o=!1,s&&(a>-1&&u(),s.push(t.elt("TableDelimiter",p+n,p+n+1))),a=l=-1):(h||d!=32&&d!=9)&&(a<0&&(a=p),l=p+1),h=!h&&d==92}return a>-1&&(i++,s&&u()),i}function fe(t,e){for(let r=e;r<t.length;r++){let s=t.charCodeAt(r);if(s==124)return!0;s==92&&r++}return!1}const Oe=/^\|?(\s*:?-+:?\s*\|)+(\s*:?-+:?\s*)?$/;class 
ue{constructor(){this.rows=null}nextLine(e,r,s){if(this.rows==null){this.rows=!1;let n;if((r.next==45||r.next==58||r.next==124)&&Oe.test(n=r.text.slice(r.pos))){let i=[];y(e,s.content,0,i,s.start)==y(e,n,r.pos)&&(this.rows=[e.elt("TableHeader",s.start,s.start+s.content.length,i),e.elt("TableDelimiter",e.lineStart+r.pos,e.lineStart+r.text.length)])}}else if(this.rows){let n=[];y(e,r.text,r.pos,n,e.lineStart),this.rows.push(e.elt("TableRow",e.lineStart+r.pos,e.lineStart+r.text.length,n))}return!1}finish(e,r){return this.rows?(e.addLeafElement(r,e.elt("Table",r.start,r.start+r.content.length,this.rows)),!0):!1}}const wt={defineNodes:[{name:"Table",block:!0},{name:"TableHeader",style:{"TableHeader/...":c.heading}},"TableRow",{name:"TableCell",style:c.content},{name:"TableDelimiter",style:c.processingInstruction}],parseBlock:[{name:"Table",leaf(t,e){return fe(e.content,0)?new ue:null},endLeaf(t,e,r){if(r.parsers.some(n=>n instanceof ue)||!fe(e.text,e.basePos))return!1;let s=t.scanLine(t.absoluteLineEnd+1).text;return Oe.test(s)&&y(t,e.text,e.basePos)==y(t,s,e.basePos)},before:"SetextHeading"}]};class Ct{nextLine(){return!1}finish(e,r){return e.addLeafElement(r,e.elt("Task",r.start,r.start+r.content.length,[e.elt("TaskMarker",r.start,r.start+3),...e.parser.parseInline(r.content.slice(3),r.start+3)])),!0}}const At={defineNodes:[{name:"Task",block:!0,style:c.list},{name:"TaskMarker",style:c.atom}],parseBlock:[{name:"TaskList",leaf(t,e){return/^\[[ xX]\]/.test(e.content)&&t.parentType().name=="ListItem"?new Ct:null},after:"SetextHeading"}]},xt=[wt,At,St];function Re(t,e,r){return(s,n,i)=>{if(n!=t||s.char(i+1)==t)return-1;let o=[s.elt(r,i,i+1)];for(let a=i+1;a<s.end;a++){let l=s.char(a);if(l==t)return s.addElement(s.elt(e,i,a+1,o.concat(s.elt(r,a,a+1))));if(l==92&&o.push(s.elt("Escape",a,a+++2)),C(l))break}return-1}}const 
Bt={defineNodes:[{name:"Superscript",style:c.special(c.content)},{name:"SuperscriptMark",style:c.processingInstruction}],parseInline:[{name:"Superscript",parse:Re(94,"Superscript","SuperscriptMark")}]},Et={defineNodes:[{name:"Subscript",style:c.special(c.content)},{name:"SubscriptMark",style:c.processingInstruction}],parseInline:[{name:"Subscript",parse:Re(126,"Subscript","SubscriptMark")}]},It={defineNodes:[{name:"Emoji",style:c.character}],parseInline:[{name:"Emoji",parse(t,e,r){let s;return e!=58||!(s=/^[a-zA-Z_0-9]+:/.exec(t.slice(r+1,t.end)))?-1:t.addElement(t.elt("Emoji",r,r+1+s[0].length))}}]},ze=Ke({block:{open:"<!--",close:"-->"}}),Te=new I,De=gt.configure({props:[Je.add(t=>!t.is("Block")||t.is("Document")||K(t)!=null?void 0:(e,r)=>({from:r.doc.lineAt(e.from).to,to:e.to})),Te.add(K),Ye.add({Document:()=>null}),We.add({Document:ze})]});function K(t){let e=/^(?:ATX|Setext)Heading(\d)$/.exec(t.name);return e?+e[1]:void 0}function Mt(t,e){let r=t;for(;;){let s=r.nextSibling,n;if(!s||(n=K(s.type))!=null&&n<=e)break;r=s}return r.to}const Ht=et.of((t,e,r)=>{for(let s=J(t).resolveInner(r,-1);s&&!(s.from<e);s=s.parent){let n=s.type.prop(Te);if(n==null)continue;let i=Mt(s,n);if(i>r)return{from:r,to:i}}return null});function te(t){return new Ve(ze,t,[Ht],"markdown")}const Pt=te(De),vt=De.configure([xt,Et,Bt,It]),Xe=te(vt);function Nt(t,e){return r=>{if(r&&t){let s=null;if(r=/\S*/.exec(r)[0],typeof t=="function"?s=t(r):s=ne.matchLanguageName(t,r,!0),s instanceof ne)return s.support?s.support.language.parser:tt.getSkippingParser(s.load());if(s)return s.parser}return e?e.parser:null}}class D{constructor(e,r,s,n,i,o,a){this.node=e,this.from=r,this.to=s,this.spaceBefore=n,this.spaceAfter=i,this.type=o,this.item=a}blank(e,r=!0){let s=this.spaceBefore+(this.node.name=="Blockquote"?">":"");if(e!=null){for(;s.length<e;)s+=" ";return s}else{for(let n=this.to-this.from-s.length-this.spaceAfter.length;n>0;n--)s+=" ";return s+(r?this.spaceAfter:"")}}marker(e,r){let 
s=this.node.name=="OrderedList"?String(+je(this.item,e)[2]+r):"";return this.spaceBefore+s+this.type+this.spaceAfter}}function Fe(t,e){let r=[];for(let n=t;n&&n.name!="Document";n=n.parent)(n.name=="ListItem"||n.name=="Blockquote"||n.name=="FencedCode")&&r.push(n);let s=[];for(let n=r.length-1;n>=0;n--){let i=r[n],o,a=e.lineAt(i.from),l=i.from-a.from;if(i.name=="FencedCode")s.push(new D(i,l,l,"","","",null));else if(i.name=="Blockquote"&&(o=/^[ \t]*>( ?)/.exec(a.text.slice(l))))s.push(new D(i,l,l+o[0].length,"",o[1],">",null));else if(i.name=="ListItem"&&i.parent.name=="OrderedList"&&(o=/^([ \t]*)\d+([.)])([ \t]*)/.exec(a.text.slice(l)))){let h=o[3],u=o[0].length;h.length>=4&&(h=h.slice(0,h.length-4),u-=4),s.push(new D(i.parent,l,l+u,o[1],h,o[2],i))}else if(i.name=="ListItem"&&i.parent.name=="BulletList"&&(o=/^([ \t]*)([-+*])([ \t]{1,4}\[[ xX]\])?([ \t]+)/.exec(a.text.slice(l)))){let h=o[4],u=o[0].length;h.length>4&&(h=h.slice(0,h.length-4),u-=4);let p=o[2];o[3]&&(p+=o[3].replace(/[xX]/," ")),s.push(new D(i.parent,l,l+u,o[1],h,p,i))}}return s}function je(t,e){return/^(\s*)(\d+)(?=[.)])/.exec(e.sliceString(t.from,t.from+10))}function U(t,e,r,s=0){for(let n=-1,i=t;;){if(i.name=="ListItem"){let a=je(i,e),l=+a[2];if(n>=0){if(l!=n+1)return;r.push({from:i.from+a[1].length,to:i.from+a[0].length,insert:String(n+2+s)})}n=l}let o=i.nextSibling;if(!o)break;i=o}}const yt=({state:t,dispatch:e})=>{let r=J(t),{doc:s}=t,n=null,i=t.changeByRange(o=>{if(!o.empty||!Xe.isActiveAt(t,o.from))return n={range:o};let a=o.from,l=s.lineAt(a),h=Fe(r.resolveInner(a,-1),s);for(;h.length&&h[h.length-1].from>a-l.from;)h.pop();if(!h.length)return n={range:o};let u=h[h.length-1];if(u.to-u.spaceAfter.length>a-l.from)return n={range:o};let p=a>=u.to-u.spaceAfter.length&&!/\S/.test(l.text.slice(u.to));if(u.item&&p)if(u.node.firstChild.to>=a||l.from>0&&!/[^\s>]/.test(s.lineAt(l.from-1).text)){let k=h.length>1?h[h.length-2]:null,b,w="";k&&k.item?(b=l.from+k.from,w=k.marker(s,1)):b=l.from+(k?k.to:0);let 
x=[{from:b,to:a,insert:w}];return u.node.name=="OrderedList"&&U(u.item,s,x,-2),k&&k.node.name=="OrderedList"&&U(k.item,s,x),{range:v.cursor(b+w.length),changes:x}}else{let k="";for(let b=0,w=h.length-2;b<=w;b++)k+=h[b].blank(b<w?h[b+1].from-k.length:null,b<w);return k+=t.lineBreak,{range:v.cursor(a+k.length),changes:{from:l.from,insert:k}}}if(u.node.name=="Blockquote"&&p&&l.from){let k=s.lineAt(l.from-1),b=/>\s*$/.exec(k.text);if(b&&b.index==u.from){let w=t.changes([{from:k.from+b.index,to:k.to},{from:l.from+u.from,to:l.to}]);return{range:o.map(w),changes:w}}}let d=[];u.node.name=="OrderedList"&&U(u.item,s,d);let L=u.item&&u.item.from<l.from,S="";if(!L||/^[\s\d.)\-+*>]*/.exec(l.text)[0].length>=u.to)for(let k=0,b=h.length-1;k<=b;k++)S+=k==b&&!L?h[k].marker(s,1):h[k].blank(k<b?h[k+1].from-S.length:null);let g=a;for(;g>l.from&&/\s/.test(l.text.charAt(g-l.from-1));)g--;return S=t.lineBreak+S,d.push({from:g,to:a,insert:S}),{range:v.cursor(g+S.length),changes:d}});return n?!1:(e(t.update(i,{scrollIntoView:!0,userEvent:"input"})),!0)};function de(t){return t.name=="QuoteMark"||t.name=="ListMark"}function Ot(t,e){let r=t.resolveInner(e,-1),s=e;de(r)&&(s=r.from,r=r.parent);for(let n;n=r.childBefore(s);)if(de(n))s=n.from;else if(n.name=="OrderedList"||n.name=="BulletList")r=n.lastChild,s=r.to;else break;return r}const Rt=({state:t,dispatch:e})=>{let r=J(t),s=null,n=t.changeByRange(i=>{let o=i.from,{doc:a}=t;if(i.empty&&Xe.isActiveAt(t,i.from)){let l=a.lineAt(o),h=Fe(Ot(r,o),a);if(h.length){let u=h[h.length-1],p=u.to-u.spaceAfter.length+(u.spaceAfter?1:0);if(o-l.from>p&&!/\S/.test(l.text.slice(p,o-l.from)))return{range:v.cursor(l.from+p),changes:{from:l.from+p,to:o}};if(o-l.from==p){let d=l.from+u.from;if(u.item&&u.node.from<u.item.from&&/\S/.test(l.text.slice(u.from,u.to)))return{range:i,changes:{from:d,to:l.from+u.to,insert:u.blank(u.to-u.from)}};if(d<o)return{range:v.cursor(d),changes:{from:d,to:o}}}}}return s={range:i}});return 
s?!1:(e(t.update(n,{scrollIntoView:!0,userEvent:"delete"})),!0)},zt=[{key:"Enter",run:yt},{key:"Backspace",run:Rt}],pe=rt({matchClosingTags:!1});function Vt(t={}){let{codeLanguages:e,defaultCodeLanguage:r,addKeymap:s=!0,base:{parser:n}=Pt}=t;if(!(n instanceof j))throw new RangeError("Base parser provided to `markdown` should be a Markdown parser");let i=t.extensions?[t.extensions]:[],o=[pe.support],a;r instanceof se?(o.push(r.support),a=r.language):r&&(a=r);let l=e||a?Nt(e,a):void 0;return i.push(Lt({codeParser:l,htmlParser:pe.language.parser})),s&&o.push(Ze.high(Ge.of(zt))),new se(te(n.configure(i)),o)}export{Pt as commonmarkLanguage,Rt as deleteMarkupBackward,yt as insertNewlineContinueMarkup,Vt as markdown,zt as markdownKeymap,Xe as markdownLanguage};
7
- //# sourceMappingURL=index-98c587a9.js.map
 
 
 
 
 
 
 
 
spaces/DaleChen/AutoGPT/autogpt/memory/local.py DELETED
@@ -1,136 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import dataclasses
4
- import os
5
- from typing import Any, List
6
-
7
- import numpy as np
8
- import orjson
9
-
10
- from autogpt.llm_utils import create_embedding_with_ada
11
- from autogpt.memory.base import MemoryProviderSingleton
12
-
13
# Dimensionality of OpenAI text-embedding-ada-002 vectors (one row per memory).
EMBED_DIM = 1536
# orjson flags: serialize numpy arrays and dataclasses without custom hooks.
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS


def create_default_embeddings():
    """Return an empty (0, EMBED_DIM) float32 matrix to seed a fresh cache."""
    return np.zeros((0, EMBED_DIM)).astype(np.float32)
19
-
20
-
21
@dataclasses.dataclass
class CacheContent:
    """In-memory store: a list of texts plus a parallel embeddings matrix.

    Invariant: ``texts[i]`` corresponds to row ``i`` of ``embeddings``.
    """

    texts: List[str] = dataclasses.field(default_factory=list)
    # float32 matrix of shape (len(texts), EMBED_DIM).
    embeddings: np.ndarray = dataclasses.field(
        default_factory=create_default_embeddings
    )
27
-
28
-
29
class LocalCache(MemoryProviderSingleton):
    """A memory provider that keeps texts and their ada embeddings in RAM
    and persists them to a local JSON file.
    """

    def __init__(self, cfg) -> None:
        """Load the cache from ``<cfg.memory_index>.json`` if it exists.

        Args:
            cfg: Config object with a ``memory_index`` attribute.

        Returns:
            None
        """
        self.filename = f"{cfg.memory_index}.json"
        if os.path.exists(self.filename):
            try:
                # BUG FIX: the previous code opened the file with mode
                # "w+b", which truncates on open -- every restart wiped the
                # saved memory before it could be read. Read-only is enough.
                with open(self.filename, "rb") as f:
                    file_content = f.read()
                if not file_content.strip():
                    file_content = b"{}"

                loaded = orjson.loads(file_content)
                self.data = CacheContent(**loaded)
                # JSON round-trips the matrix as nested lists; restore the
                # float32 ndarray that add()/get_relevant() expect.
                if not isinstance(self.data.embeddings, np.ndarray):
                    matrix = np.asarray(self.data.embeddings, dtype=np.float32)
                    if matrix.size == 0:
                        # Keep the canonical (0, EMBED_DIM) shape so that
                        # np.concatenate in add() still works.
                        matrix = CacheContent().embeddings
                    self.data.embeddings = matrix
            except orjson.JSONDecodeError:
                print(f"Error: The file '{self.filename}' is not in JSON format.")
                self.data = CacheContent()
        else:
            print(
                f"Warning: The file '{self.filename}' does not exist. "
                "Local memory would not be saved to a file."
            )
            self.data = CacheContent()

    def add(self, text: str):
        """Append ``text`` and its embedding, then persist the cache to disk.

        Args:
            text: the text to remember.

        Returns:
            The stored text, or "" when the text is rejected (command errors
            are deliberately not memorized).
        """
        if "Command Error:" in text:
            return ""
        self.data.texts.append(text)

        embedding = create_embedding_with_ada(text)

        # Promote the 1-D embedding to a (1, EMBED_DIM) row and append it.
        vector = np.array(embedding).astype(np.float32)
        vector = vector[np.newaxis, :]
        self.data.embeddings = np.concatenate(
            [
                self.data.embeddings,
                vector,
            ],
            axis=0,
        )

        # Persist eagerly so a crash loses at most nothing.
        with open(self.filename, "wb") as f:
            f.write(orjson.dumps(self.data, option=SAVE_OPTIONS))
        return text

    def clear(self) -> str:
        """Drop all in-memory data (the file on disk is left untouched).

        Returns: A message indicating that the memory has been cleared.
        """
        self.data = CacheContent()
        return "Obliviated"

    def get(self, data: str) -> list[Any] | None:
        """Return the single most relevant stored text for ``data``.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def get_relevant(self, text: str, k: int) -> list[Any]:
        """Return the ``k`` stored texts most similar to ``text``.

        Similarity is the dot product between the query embedding and each
        stored row (ada embeddings are near unit-norm, so this approximates
        cosine similarity).

        Args:
            text: query text.
            k: number of results.

        Returns: List[str] of the top-k texts, best first.
        """
        embedding = create_embedding_with_ada(text)

        # One score per stored row; argsort ascending, take the last k,
        # reversed so the best match comes first. k > len(texts) simply
        # returns everything.
        scores = np.dot(self.data.embeddings, embedding)

        top_k_indices = np.argsort(scores)[-k:][::-1]

        return [self.data.texts[i] for i in top_k_indices]

    def get_stats(self) -> tuple[int, tuple[int, ...]]:
        """
        Returns: (number of stored texts, shape of the embeddings matrix).
        """
        return len(self.data.texts), self.data.embeddings.shape
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/logger.py DELETED
@@ -1,49 +0,0 @@
1
- """
2
- @Date: 2021/07/17
3
- @description:
4
- """
5
- import os
6
- import sys
7
- import logging
8
- import functools
9
- from termcolor import colored
10
-
11
-
12
- def build_logger(config):
13
- output_dir = config.LOGGER.DIR
14
- local_rank = config.LOCAL_RANK
15
- name = config.MODEL.NAME
16
- logger = get_logger(output_dir, local_rank, name)
17
- return logger
18
-
19
-
20
- @functools.lru_cache()
21
- def get_logger(output_dir=None, local_rank=None, name="PLTNet"):
22
- if output_dir and not os.path.exists(output_dir):
23
- os.makedirs(output_dir)
24
-
25
- # create logger
26
- logger = logging.getLogger(name)
27
- logger.setLevel(logging.DEBUG)
28
- logger.propagate = False
29
-
30
- # create formatter
31
- fmt = f'[%(asctime)s %(name)s][%(levelname)1.1s](%(filename)s %(lineno)d): %(message)s'
32
- color_fmt = colored(f'[%(asctime)s %(name)s][%(levelname)1.1s][{local_rank}]', 'green') + colored(
33
- f'(%(filename)s %(lineno)d)',
34
- 'yellow') + ': %(message)s'
35
- if local_rank in [0] or local_rank is None:
36
- console_handler = logging.StreamHandler(sys.stdout)
37
- console_handler.setLevel(logging.DEBUG)
38
- console_handler.setFormatter(
39
- logging.Formatter(fmt=color_fmt, datefmt='%Y-%m-%d %H:%M:%S'))
40
- logger.addHandler(console_handler)
41
-
42
- if output_dir is not None:
43
- # create file handlers
44
- file_handler = logging.FileHandler(os.path.join(output_dir, f'log_rank{local_rank}.log'), mode='a')
45
- file_handler.setLevel(logging.DEBUG)
46
- file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))
47
- logger.addHandler(file_handler)
48
-
49
- return logger
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DpNaze/Dreamlikeart/app.py DELETED
@@ -1,154 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import sys
4
- from pathlib import Path
5
- import random
6
- import string
7
- import time
8
- from queue import Queue
9
- from threading import Thread
10
- import emoji
11
-
12
- #text_gen=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
13
def get_prompts(prompt_text):
    """Return the prompt unchanged, or an empty string for falsy input."""
    if not prompt_text:
        return ""
    return prompt_text
18
# Remote model endpoint: dreamlike-anime-1.0 loaded as a callable Gradio interface.
proc1=gr.Interface.load("models/dreamlike-art/dreamlike-anime-1.0")
19
-
20
def restart_script_periodically():
    """Every 9-10 minutes, replace this process with a fresh copy of itself.

    Used as a crude memory-leak / stuck-request mitigation for the Space.
    """
    while True:
        time.sleep(random.randint(540, 600))
        # execl never returns: the current process image is replaced in place.
        os.execl(sys.executable, sys.executable, *sys.argv)
25
-
26
-
27
# Start the self-restart watchdog; daemon=True so it never blocks interpreter exit.
restart_thread = Thread(target=restart_script_periodically, daemon=True)
restart_thread.start()


# Shared queue used as a back-pressure counter: senders wait while it already
# holds `queue_threshold` pending prompts.
queue = Queue()
queue_threshold = 100
33
-
34
- def add_random_noise(prompt, noise_level=0.00):
35
- if noise_level == 0:
36
- noise_level = 0.00
37
- percentage_noise = noise_level * 5
38
- num_noise_chars = int(len(prompt) * (percentage_noise/100))
39
- noise_indices = random.sample(range(len(prompt)), num_noise_chars)
40
- prompt_list = list(prompt)
41
- noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
42
- noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
43
- for index in noise_indices:
44
- prompt_list[index] = random.choice(noise_chars)
45
- return "".join(prompt_list)
46
-
47
-
48
-
49
def send_it1(inputs, noise_level, proc1=proc1):
    """Add noise to the prompt, wait for queue headroom, and run the model.

    Args:
        inputs: the raw prompt text.
        noise_level: passed through to add_random_noise.
        proc1: the model interface (bound at definition time).

    Returns:
        The first generated image.
    """
    prompt_with_noise = add_random_noise(inputs, noise_level)
    # Crude back-pressure: stall while too many prompts are already pending.
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    try:
        output1 = proc1(prompt_with_noise)
    finally:
        # BUG FIX: entries were put on the queue but never removed, so
        # qsize() grew monotonically and after `queue_threshold` requests
        # every sender stalled forever. Release the slot when done.
        queue.get_nowait()
    return output1
56
-
57
def send_it2(inputs, noise_level, proc1=proc1):
    """Second fan-out of the same generate path (see send_it1).

    Args:
        inputs: the raw prompt text.
        noise_level: passed through to add_random_noise.
        proc1: the model interface (bound at definition time).

    Returns:
        The second generated image.
    """
    prompt_with_noise = add_random_noise(inputs, noise_level)
    # Crude back-pressure: stall while too many prompts are already pending.
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    try:
        output2 = proc1(prompt_with_noise)
    finally:
        # BUG FIX: entries were put on the queue but never removed, so
        # qsize() grew monotonically and after `queue_threshold` requests
        # every sender stalled forever. Release the slot when done.
        queue.get_nowait()
    return output2
64
-
65
- #def send_it3(inputs, noise_level, proc1=proc1):
66
- #prompt_with_noise = add_random_noise(inputs, noise_level)
67
- #while queue.qsize() >= queue_threshold:
68
- #time.sleep(2)
69
- #queue.put(prompt_with_noise)
70
- #output3 = proc1(prompt_with_noise)
71
- #return output3
72
-
73
- #def send_it4(inputs, noise_level, proc1=proc1):
74
- #prompt_with_noise = add_random_noise(inputs, noise_level)
75
- #while queue.qsize() >= queue_threshold:
76
- #time.sleep(2)
77
- #queue.put(prompt_with_noise)
78
- #output4 = proc1(prompt_with_noise)
79
- #return output4
80
-
81
-
82
# Build and launch the UI. (Commented-out "Magic Prompt" dead code removed.)
with gr.Blocks(css='style.css') as demo:
    # Header / instructions banner.
    gr.HTML(
        """
        <div style="text-align: center; max-width: 650px; margin: 0 auto;">
        <div>
        <h1 style="font-weight: 900; font-size: 3rem; margin-bottom:20px;">
        Dreamlike Anime 1.0
        </h1>
        </div>
        <p style="margin-bottom: 10px; font-size: 96%">
        Noise Level: Controls how much randomness is added to the input before it is sent to the model. Higher noise level produces more diverse outputs, while lower noise level produces similar outputs,
        <a href="https://twitter.com/DavidJohnstonxx/">created by Phenomenon1981</a>.
        </p>
        <p style="margin-bottom: 10px; font-size: 98%">
        ❤️ Press the Like Button if you enjoy my space! ❤️</a>
        </p>
        </div>
        """
    )
    with gr.Column(elem_id="col-container"):
        # Prompt entry plus the button that triggers both generations.
        with gr.Row(variant="compact"):
            prompt = gr.Textbox(
                label="Enter your prompt",
                show_label=False,
                max_lines=2,
                placeholder="Full Prompt",
            ).style(
                container=False,
            )
            run = gr.Button("Generate Images").style(full_width=False)

        with gr.Row():
            with gr.Row():
                noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level")
        with gr.Row():
            with gr.Row():
                output1 = gr.Image(label="Dreamlike Anime 1.0", show_label=False)
                output2 = gr.Image(label="Dreamlike Anime 1.0", show_label=False)

        # One click fans out to two independent generations.
        run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
        run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])

    with gr.Row():
        # Footer: model credit and usage blurb.
        gr.HTML(
            """
            <div class="footer">
            <p> Demo for <a href="https://huggingface.co/dreamlike-art/dreamlike-anime-1.0">Dreamlike Anime 1.0</a> Stable Diffusion model
            </p>
            </div>
            <div class="acknowledgments" style="font-size: 115%">
            <p> Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin!
            </p>
            </div>
            """
        )

# BUG FIX: the last statement was `block.queue(concurrency_count=100)`, but
# `block` was never defined, so the script raised NameError. Queue the
# (correctly named) `demo` app, and do it before launch() so the
# concurrency setting actually applies.
demo.queue(concurrency_count=100)
demo.launch(enable_queue=True, inline=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DragGan/DragGan-Inversion/PTI/models/e4e/discriminator.py DELETED
@@ -1,20 +0,0 @@
1
- from torch import nn
2
-
3
-
4
class LatentCodesDiscriminator(nn.Module):
    """MLP discriminator over latent codes (e4e latent-space adversary).

    Architecture: (n_mlp - 1) x [Linear(style_dim, style_dim) + LeakyReLU],
    followed by a Linear projection to a single logit.
    """

    def __init__(self, style_dim, n_mlp):
        """
        Args:
            style_dim: dimensionality of the latent codes.
            n_mlp: number of linear layers in the MLP.
        """
        super().__init__()

        self.style_dim = style_dim

        layers = []
        for i in range(n_mlp - 1):
            layers.append(
                nn.Linear(style_dim, style_dim)
            )
            layers.append(nn.LeakyReLU(0.2))
        # BUG FIX: the final layer hard-coded in_features=512, which crashed
        # for any style_dim != 512; parameterize it (identical when 512).
        layers.append(nn.Linear(style_dim, 1))
        self.mlp = nn.Sequential(*layers)

    def forward(self, w):
        """Return one real/fake logit per latent code in `w` (..., style_dim)."""
        return self.mlp(w)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ECCV2022/PSG/OpenPSG/configs/motifs/panoptic_fpn_r101_fpn_1x_sgdet_psg.py DELETED
@@ -1,28 +0,0 @@
1
# Inherit the full training setup from the ResNet-50 SGDet config and only
# override the backbone and logging names below.
_base_ = './panoptic_fpn_r50_fpn_1x_sgdet_psg.py'

# Swap the backbone to ResNet-101 with torchvision-pretrained weights.
model = dict(backbone=dict(
    depth=101,
    init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))

# Log config
project_name = 'openpsg'
expt_name = 'motifs_panoptic_fpn_r101_fpn_1x_sgdet_psg'
work_dir = f'./work_dirs/{expt_name}'

log_config = dict(
    interval=50,  # log every 50 training iterations
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project=project_name,
                name=expt_name,
                # config=work_dir + "/cfg.yaml"
            ),
        ),
    ],
)

# Initialize from the COCO-trained panoptic FPN R101 detector checkpoint.
load_from = 'work_dirs/checkpoints/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth'